repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
---|---|---|---|---|---|---|---|---|
jacksarick/My-Code
|
Python/python challenges/euler/012_divisable_tri_nums.py
|
Python
|
mit
| 219 | 0.03653 |
## Close
### What is the value of the first triangle number to have over five hundred divisors?


def count_divisors(k):
    """Return the number of positive divisors of k.

    Walks candidates up to sqrt(k); each divisor below the root pairs
    with k // i, and a perfect-square root counts once.
    """
    total = 0
    i = 1
    while i * i <= k:
        if k % i == 0:
            total += 1 if i * i == k else 2
        i += 1
    return total


def first_triangle_over(divisor_target=500):
    """Return the first triangle number with more than divisor_target divisors.

    Generalizes the puzzle's hard-coded 500.  Triangle numbers are built
    incrementally (T(n) = T(n-1) + n) instead of re-summing a range.
    """
    n = 1
    triangle = 1
    while count_divisors(triangle) <= divisor_target:
        n += 1
        triangle += n
    return triangle


if __name__ == "__main__":
    # The original expression printed the *max* divisor count among the
    # first ~1000 triangle numbers, which does not answer the question in
    # the comment above; this prints the actual answer.
    print(first_triangle_over(500))
|
akash1808/nova_test_latest
|
nova/tests/unit/compute/test_compute_cells.py
|
Python
|
apache-2.0
| 18,463 | 0.0013 |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute w/ Cells
"""
import functools
import inspect
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from nova import block_device
from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import quota
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
ORIG_COMPUTE_API = None
cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')
def _run_in_child_cell(context, instance, method, *args, **kwargs):
    """Invoke *method* on the original compute API as a child cell would.

    If an original_instance is supplied, its vm/task state is restored in
    the 'child cell DB' before dispatch.  A NoopQuotaDriver is swapped in
    for the duration of the call, since child cells do not enforce quotas.
    Returns whatever the wrapped API method returns.
    """
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
        quota_driver_class=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS

    try:
        return fn(context, instance, *args, **kwargs)
    finally:
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas


def stub_call_to_cells(context, instance, method, *args, **kwargs):
    """Stub for ComputeCellsAPI._call_to_cells: run locally, return result."""
    return _run_in_child_cell(context, instance, method, *args, **kwargs)


def stub_cast_to_cells(context, instance, method, *args, **kwargs):
    """Stub for ComputeCellsAPI._cast_to_cells: run locally, discard result."""
    _run_in_child_cell(context, instance, method, *args, **kwargs)


def deploy_stubs(stubs, api, original_instance=None):
    """Point the cells API's call/cast hooks at the local stubs.

    When original_instance is given, bind it so the stubs restore its
    state in the child-cell DB before dispatching.
    """
    call = stub_call_to_cells
    cast = stub_cast_to_cells
    if original_instance:
        kwargs = dict(original_instance=original_instance)
        call = functools.partial(stub_call_to_cells, **kwargs)
        cast = functools.partial(stub_cast_to_cells, **kwargs)
    stubs.Set(api, '_call_to_cells', call)
    stubs.Set(api, '_cast_to_cells', cast)
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def setUp(self):
    """Swap the plain compute API for the cells-aware one.

    The original API is stashed in the module-global ORIG_COMPUTE_API so
    the cell call/cast stubs can dispatch to it locally.
    """
    super(CellsComputeAPITestCase, self).setUp()
    global ORIG_COMPUTE_API
    ORIG_COMPUTE_API = self.compute_api
    self.flags(enable=True, group='cells')

    def _fake_cell_read_only(*args, **kwargs):
        # Pretend no cell is read-only so writes always proceed.
        return False

    def _fake_validate_cell(*args, **kwargs):
        # Skip cell validation entirely.
        return

    def _nop_update(context, instance, **kwargs):
        # NOTE(review): appears unused in this setUp — confirm before removal.
        return instance

    self.compute_api = compute_cells_api.ComputeCellsAPI()
    self.stubs.Set(self.compute_api, '_cell_read_only',
                   _fake_cell_read_only)
    self.stubs.Set(self.compute_api, '_validate_cell',
                   _fake_validate_cell)
    deploy_stubs(self.stubs, self.compute_api)
def tearDown(self):
    """Restore the original (non-cells) compute API before base teardown."""
    global ORIG_COMPUTE_API
    self.compute_api = ORIG_COMPUTE_API
    super(CellsComputeAPITestCase, self).tearDown()
def test_instance_metadata(self):
    """Skipped: inherited test does not apply when cells are enabled."""
    self.skipTest("Test is incompatible with cells.")
def test_evacuate(self):
    """Skipped: evacuate is not supported through the cells API."""
    self.skipTest("Test is incompatible with cells.")
def test_error_evacuate(self):
    """Skipped: the error-path evacuate test does not apply with cells."""
    self.skipTest("Test is incompatible with cells.")
def test_delete_instance_no_cell(self):
    """Deleting an instance with no cell broadcasts a hard delete."""
    rpcapi = self.compute_api.cells_rpcapi
    self.mox.StubOutWithMock(rpcapi, 'instance_delete_everywhere')
    instance = self._create_fake_instance_obj()
    # Expectation: the delete is pushed to every cell as a 'hard' delete.
    rpcapi.instance_delete_everywhere(self.context,
                                      instance, 'hard')
    self.mox.ReplayAll()
    self.stubs.Set(self.compute_api.network_api,
                   'deallocate_for_instance',
                   lambda *a, **kw: None)
    self.compute_api.delete(self.context, instance)
def test_delete_instance_no_cell_constraint_failure_does_not_loop(self):
    """A persistent destroy() constraint failure must propagate, not loop."""
    with mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere'):
        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        # destroy() keeps failing with a constraint error...
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()
        # ...so delete() must re-raise after a single attempt.
        self.assertRaises(exception.ObjectActionError,
                          self.compute_api.delete, self.context, inst)
        inst.destroy.assert_called_once_with()
def test_delete_instance_no_cell_constraint_failure_corrects_itself(self):
    """If a cell name appears after the failure, delete retries via the API.

    The instance_delete_everywhere side effect assigns a cell_name,
    simulating the instance landing in a cell mid-delete; delete() should
    then fall through to the plain compute API delete.
    """
    def add_cell_name(context, instance, delete_type):
        instance.cell_name = 'fake_cell_name'

    @mock.patch.object(compute_api.API, 'delete')
    @mock.patch.object(self.compute_api.cells_rpcapi,
                       'instance_delete_everywhere',
                       side_effect=add_cell_name)
    def _test(mock_delete_everywhere, mock_compute_delete):
        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()
        self.compute_api.delete(self.context, inst)
        inst.destroy.assert_called_once_with()
        # The follow-up delete goes through the regular compute API.
        mock_compute_delete.assert_called_once_with(self.context, inst)

    _test()
def test_delete_instance_no_cell_destroy_fails_already_deleted(self):
# If the instance.destroy() is reached during _local_delete,
# it will raise ObjectActionError if the instance has already
# been deleted by a instance_destroy_at_top, and instance.refresh()
# will raise InstanceNotFound
instance = objects.Instance(uuid='fake-uuid', cell_name=None)
actionerror = exception.ObjectActionError(action='destroy', reason='')
notfound = exception.InstanceNotFound(instance_id=instance.uuid)
@mock.patch.object(compute_api.API, 'delete')
@mock.patch.object(self.compute_api.cells_rpcapi,
'instance_delete_everywhere')
@mock.patch.object(compute_api.API, '_local_delete',
side_effect=actionerror)
@mock.patch.object(instance, 'refresh', side_effect=notfound)
def _test(mock_refresh, mock_local_delete, mock_delete_everywhere,
mock_compute_delete):
self.compute_api.delete(self.context, instance)
mock_delete_everywhere.assert_called_once_with(self.context,
instance, 'hard')
mock_local_delete.assert_called_once_with(self.context,
instance, mock.ANY, 'delete', self.compute_api._do_delete)
mock_refresh.assert_called_once_
|
PetePriority/home-assistant
|
homeassistant/components/knx/climate.py
|
Python
|
apache-2.0
| 11,010 | 0 |
"""
Support for KNX/IP climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.knx/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (
PLATFORM_SCHEMA, SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE, STATE_HEAT,
STATE_IDLE, STATE_MANUAL, STATE_DRY,
STATE_FAN_ONLY, STATE_ECO, ClimateDevice)
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This list might not be full.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOI
|
NT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT
|
_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES): vol.All(cv.ensure_list,
[vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up climate(s) for KNX platform."""
    if discovery_info is None:
        # Manual YAML configuration.
        async_add_entities_config(hass, config, async_add_entities)
    else:
        # Devices discovered by the KNX component.
        async_add_entities_discovery(hass, discovery_info, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
    """Set up climates for KNX platform configured within platform."""
    knx_devices = hass.data[DATA_KNX].xknx.devices
    climates = [KNXClimate(knx_devices[name])
                for name in discovery_info[ATTR_DISCOVER_DEVICES]]
    async_add_entities(climates)
@callback
def async_add_entities_config(hass, config, async_add_entities):
    """Set up climate for KNX platform configured within platform.

    Builds two xknx devices: a ClimateMode holding all operation-mode
    group addresses, and the Climate device proper that delegates mode
    handling to it.  Both are registered with the shared xknx instance.
    """
    import xknx
    # Operation-mode handling lives in a separate xknx device.
    climate_mode = xknx.devices.ClimateMode(
        hass.data[DATA_KNX].xknx,
        name=config.get(CONF_NAME) + " Mode",
        group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
        group_address_operation_mode_state=config.get(
            CONF_OPERATION_MODE_STATE_ADDRESS),
        group_address_controller_status=config.get(
            CONF_CONTROLLER_STATUS_ADDRESS),
        group_address_controller_status_state=config.get(
            CONF_CONTROLLER_STATUS_STATE_ADDRESS),
        group_address_controller_mode=config.get(
            CONF_CONTROLLER_MODE_ADDRESS),
        group_address_controller_mode_state=config.get(
            CONF_CONTROLLER_MODE_STATE_ADDRESS),
        group_address_operation_mode_protection=config.get(
            CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
        group_address_operation_mode_night=config.get(
            CONF_OPERATION_MODE_NIGHT_ADDRESS),
        group_address_operation_mode_comfort=config.get(
            CONF_OPERATION_MODE_COMFORT_ADDRESS),
        operation_modes=config.get(
            CONF_OPERATION_MODES))
    hass.data[DATA_KNX].xknx.devices.add(climate_mode)
    # The climate device itself: temperature, setpoint shift and on/off.
    climate = xknx.devices.Climate(
        hass.data[DATA_KNX].xknx,
        name=config.get(CONF_NAME),
        group_address_temperature=config.get(CONF_TEMPERATURE_ADDRESS),
        group_address_target_temperature=config.get(
            CONF_TARGET_TEMPERATURE_ADDRESS),
        group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
        group_address_setpoint_shift_state=config.get(
            CONF_SETPOINT_SHIFT_STATE_ADDRESS),
        setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
        setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
        setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
        group_address_on_off=config.get(
            CONF_ON_OFF_ADDRESS),
        group_address_on_off_state=config.get(
            CONF_ON_OFF_STATE_ADDRESS),
        min_temp=config.get(CONF_MIN_TEMP),
        max_temp=config.get(CONF_MAX_TEMP),
        mode=climate_mode)
    hass.data[DATA_KNX].xknx.devices.add(climate)
    async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
    """Initialize of a KNX climate device."""
    # Wrapped xknx Climate device; all state is delegated to it.
    self.device = device
    self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
    """Return the list of supported features."""
    feature_flags = SUPPORT_TARGET_TEMPERATURE
    if self.device.mode.supports_operation_mode:
        feature_flags |= SUPPORT_OPERATION_MODE
    if self.device.supports_on_off:
        feature_flags |= SUPPORT_ON_OFF
    return feature_flags
async def async_added_to_hass(self):
    """Register callbacks to update hass after device was changed."""
    async def after_update_callback(device):
        """Call after device was updated."""
        # Push the refreshed device state into Home Assistant.
        await self.async_update_ha_state()
    self.device.register_device_updated_cb(after_update_callback)
@property
|
tboyce021/home-assistant
|
homeassistant/components/zwave/node_entity.py
|
Python
|
apache-2.0
| 12,522 | 0.000878 |
"""Entity class that represents Z-Wave node."""
# pylint: disable=import-outside-toplevel
from itertools import count
from homeassistant.const import ATTR_BATTERY_LEVEL, ATTR_ENTITY_ID, ATTR_WAKEUP
from homeassistant.core import callback
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import async_get_registry
from .const import (
ATTR_BASIC_LEVEL,
ATTR_NODE_ID,
ATTR_SCENE_DATA,
ATTR_SCENE_ID,
COMMAND_CLASS_CENTRAL_SCENE,
COMMAND_CLASS_VERSION,
COMMAND_CLASS_WAKE_UP,
DOMAIN,
EVENT_NODE_EVENT,
EVENT_SCENE_ACTIVATED,
)
from .util import is_node_parsed, node_device_id_and_name, node_name
ATTR_QUERY_STAGE = "query_stage"
ATTR_AWAKE = "is_awake"
ATTR_READY = "is_ready"
ATTR_FAILED = "is_failed"
ATTR_PRODUCT_NAME = "product_name"
ATTR_MANUFACTURER_NAME = "manufacturer_name"
ATTR_NODE_NAME = "node_name"
ATTR_APPLICATION_VERSION = "application_version"
STAGE_COMPLETE = "Complete"
_REQUIRED_ATTRIBUTES = [
ATTR_QUERY_STAGE,
ATTR_AWAKE,
ATTR_READY,
ATTR_FAILED,
"is_info_received",
"max_baud_rate",
"is_zwave_plus",
]
_OPTIONAL_ATTRIBUTES = ["capabilities", "neighbors", "location"]
_COMM_ATTRIBUTES = [
"sentCnt",
"sentFailed",
"retries",
"receivedCnt",
"receivedDups",
"receivedUnsolicited",
"sentTS",
"receivedTS",
"lastRequestRTT",
"averageRequestRTT",
"lastResponseRTT",
"averageResponseRTT",
]
ATTRIBUTES = _REQUIRED_ATTRIBUTES + _OPTIONAL_ATTRIBUTES
class ZWaveBaseEntity(Entity):
    """Base class for Z-Wave Node and Value entities."""

    def __init__(self):
        """Initialize the base Z-Wave class."""
        # True while a delayed state update is pending (debounce flag).
        self._update_scheduled = False

    def maybe_schedule_update(self):
        """Maybe schedule state update.

        If value changed after device was created but before setup_platform
        was called - skip updating state.
        """
        if self.hass and not self._update_scheduled:
            self.hass.add_job(self._schedule_update)

    @callback
    def _schedule_update(self):
        """Schedule delayed update."""
        if self._update_scheduled:
            return

        @callback
        def do_update():
            """Really update."""
            self.async_write_ha_state()
            # Allow the next change to schedule another update.
            self._update_scheduled = False

        self._update_scheduled = True
        # Debounce: coalesce bursts of value changes into one update.
        self.hass.loop.call_later(0.1, do_update)

    def try_remove_and_add(self):
        """Remove this entity and add it back."""
        async def _async_remove_and_add():
            await self.async_remove()
            # Reset entity_id so it is regenerated from the new node info.
            self.entity_id = None
            await self.platform.async_add_entities([self])
        if self.hass and self.platform:
            self.hass.add_job(_async_remove_and_add)

    async def node_removed(self):
        """Call when a node is removed from the Z-Wave network."""
        await self.async_remove()
        # Also purge the entity from the entity registry, if present.
        registry = await async_get_registry(self.hass)
        if self.entity_id not in registry.entities:
            return
        registry.async_remove(self.entity_id)
class ZWaveNodeEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node."""
def __init__(self, node, network):
    """Initialize node."""
    # pylint: disable=import-error
    super().__init__()
    from openzwave.network import ZWaveNetwork
    from pydispatch import dispatcher

    self._network = network
    self.node = node
    self.node_id = self.node.node_id
    self._name = node_name(self.node)
    self._product_name = node.product_name
    self._manufacturer_name = node.manufacturer_name
    # May be falsy until node info is parsed; recomputed in node_changed().
    self._unique_id = self._compute_unique_id()
    self._application_version = None
    self._attributes = {}
    self.wakeup_interval = None
    self.location = None
    self.battery_level = None
    # Subscribe to the OZW network signals relevant to this node.
    dispatcher.connect(
        self.network_node_value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED
    )
    dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
    dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NODE)
    dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NOTIFICATION)
    dispatcher.connect(self.network_node_event, ZWaveNetwork.SIGNAL_NODE_EVENT)
    dispatcher.connect(
        self.network_scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT
    )
@property
def unique_id(self):
    """Return unique ID of Z-wave node."""
    # May be falsy until node info has been parsed (see node_changed).
    return self._unique_id
@property
def device_info(self):
    """Return device information."""
    identifier, name = node_device_id_and_name(self.node)
    device = {
        "identifiers": {identifier},
        "manufacturer": self.node.manufacturer_name,
        "model": self.node.product_name,
        "name": name,
    }
    if self.node_id > 1:
        # Non-controller nodes hang off the Z-Wave controller (node 1).
        device["via_device"] = (DOMAIN, 1)
    return device
def maybe_update_application_version(self, value):
    """Update application version if value is a Command Class Version, Application Value."""
    # Guard clauses: bail out unless this is the application-version value.
    if not value:
        return
    if value.command_class != COMMAND_CLASS_VERSION:
        return
    if value.label != "Application Version":
        return
    self._application_version = value.data
def network_node_value_added(self, node=None, value=None, args=None):
    """Handle an added value on a node on the network."""
    # Ignore signals for other nodes (via the node object or the args dict).
    if node and node.node_id != self.node_id:
        return
    if args is not None and "nodeId" in args and args["nodeId"] != self.node_id:
        return
    self.maybe_update_application_version(value)
def network_node_changed(self, node=None, value=None, args=None):
    """Handle a changed node on the network."""
    # Ignore signals for other nodes (via the node object or the args dict).
    if node and node.node_id != self.node_id:
        return
    if args is not None and "nodeId" in args and args["nodeId"] != self.node_id:
        return
    # Process central scene activation
    if value is not None and value.command_class == COMMAND_CLASS_CENTRAL_SCENE:
        self.central_scene_activated(value.index, value.data)
    self.maybe_update_application_version(value)
    self.node_changed()
def get_node_statistics(self):
    """Retrieve statistics from the node."""
    # Communication counters (sentCnt, retries, RTTs, ...) from OZW.
    return self._network.manager.getNodeStatistics(
        self._network.home_id, self.node_id
    )
def node_changed(self):
    """Refresh cached node properties and schedule a state update."""
    stats = self.get_node_statistics()

    # Collect node attributes; optional ones only when truthy.
    attrs = {}
    for attr_name in ATTRIBUTES:
        attr_value = getattr(self.node, attr_name)
        if attr_name in _REQUIRED_ATTRIBUTES or attr_value:
            attrs[attr_name] = attr_value
    for attr_name in _COMM_ATTRIBUTES:
        attrs[attr_name] = stats[attr_name]

    # Wake-up interval is stored at index 0 of the WAKE_UP command class.
    if self.node.can_wake_up():
        for value in self.node.get_values(COMMAND_CLASS_WAKE_UP).values():
            if value.index == 0:
                self.wakeup_interval = value.data
                break
    else:
        self.wakeup_interval = None

    self.battery_level = self.node.get_battery_level()
    self._product_name = self.node.product_name
    self._manufacturer_name = self.node.manufacturer_name
    self._name = node_name(self.node)
    self._attributes = attrs

    if not self._unique_id:
        self._unique_id = self._compute_unique_id()
        if self._unique_id:
            # Node info parsed. Remove and re-add
            self.try_remove_and_add()

    self.maybe_schedule_update()
async def node_renamed(self, update_ids=False):
"""Rename the node and update any IDs."""
identifier, self._name = node_device_id_and_name(self.node)
# Set the name in the devices. If they're customised
# the customisation will not be stored as name and will stick.
dev_reg = await get_dev_reg(self.hass)
device = dev_reg.async_get_device(identif
|
MarcosCommunity/odoo
|
addons/purchase/purchase.py
|
Python
|
agpl-3.0
| 93,906 | 0.0064 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
    """Compute amount_untaxed / amount_tax / amount_total per order.

    Taxes are recomputed line by line via account.tax.compute_all (using
    each line's base price and quantity) and rounded in the order's
    pricelist currency.
    """
    res = {}
    cur_obj = self.pool.get('res.currency')
    line_obj = self.pool['purchase.order.line']
    for order in self.browse(cr, uid, ids, context=context):
        res[order.id] = {
            'amount_untaxed': 0.0,
            'amount_tax': 0.0,
            'amount_total': 0.0,
        }
        val = val1 = 0.0  # val: tax total, val1: untaxed total
        cur = order.pricelist_id.currency_id
        for line in order.order_line:
            val1 += line.price_subtotal
            line_price = line_obj._calc_line_base_price(cr, uid, line,
                                                        context=context)
            line_qty = line_obj._calc_line_quantity(cr, uid, line,
                                                    context=context)
            for c in self.pool['account.tax'].compute_all(
                    cr, uid, line.taxes_id, line_price, line_qty,
                    line.product_id, order.partner_id)['taxes']:
                val += c.get('amount', 0.0)
        res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
        res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
        res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
    return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
    """Function-field setter: push *value* as the planned date onto lines.

    For each order, rewrites date_planned on every line whose planned date
    equals the current minimum or is earlier than the new value.
    Returns True on success, False when no value is given.
    """
    if not value:
        return False
    # Accept a single id as well as a list of ids (idiomatic isinstance
    # instead of type(ids) != type([])).
    if not isinstance(ids, list):
        ids = [ids]
    pol_obj = self.pool.get('purchase.order.line')
    for po in self.browse(cr, uid, ids, context=context):
        if po.order_line:
            pol_ids = pol_obj.search(cr, uid, [
                ('order_id', '=', po.id),
                '|',
                ('date_planned', '=', po.minimum_planned_date),
                ('date_planned', '<', value)
            ], context=context)
            pol_obj.write(cr, uid, pol_ids, {'date_planned': value},
                          context=context)
    self.invalidate_cache(cr, uid, context=context)
    return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
    """Earliest planned date among an order's non-cancelled lines.

    Bug fix: the previous version seeded the minimum with order_line[0]'s
    date even when that line was cancelled, contradicting the loop's own
    cancel-skip.  Cancelled lines are now excluded everywhere; an order
    with no (non-cancelled) lines yields False.
    """
    res = {}
    for purchase in self.browse(cr, uid, ids, context=context):
        dates = [line.date_planned for line in purchase.order_line
                 if line.state != 'cancel']
        res[purchase.id] = min(dates) if dates else False
    return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
    """Percentage of the untaxed amount already invoiced, per order."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        invoiced_total = sum(
            invoice.amount_untaxed
            for invoice in purchase.invoice_ids
            if invoice.state not in ('draft', 'cancel'))
        if purchase.amount_untaxed:
            res[purchase.id] = invoiced_total * 100.0 / purchase.amount_untaxed
        else:
            res[purchase.id] = 0.0
    return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
    """Percentage of ordered quantity already received, per order.

    Aggregates stock move quantities per order/state in SQL; 'done' moves
    count as received, 'cancel' moves are ignored, everything else only
    adds to the total.
    """
    if not ids: return {}
    res = {}
    for id in ids:
        # [received qty, total qty]
        res[id] = [0.0,0.0]
    cr.execute('''SELECT
            p.order_id, sum(m.product_qty), m.state
        FROM
            stock_move m
        LEFT JOIN
            purchase_order_line p on (p.id=m.purchase_line_id)
        WHERE
            p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
    for oid,nbr,state in cr.fetchall():
        if state=='cancel':
            continue
        if state=='done':
            res[oid][0] += nbr or 0.0
            res[oid][1] += nbr or 0.0
        else:
            res[oid][1] += nbr or 0.0
    for r in res:
        if not res[r][1]:
            res[r] = 0.0
        else:
            # Replace the [received, total] pair with the percentage.
            res[r] = 100.0 * res[r][0] / res[r][1]
    return res
def _get_order(self, cr, uid, ids, context=None):
    """Map purchase.order.line ids to their parent order ids (store trigger)."""
    line_pool = self.pool.get('purchase.order.line')
    order_ids = set()
    for line in line_pool.browse(cr, uid, ids, context=context):
        order_ids.add(line.order_id.id)
    return list(order_ids)
def _get_purchase_order(self, cr, uid, ids, context=None):
    """Identity store trigger: return the browsed order ids themselves."""
    return list(set(
        order.id for order in self.browse(cr, uid, ids, context=context)))
def _invoiced(self, cursor, user, ids, name, arg, context=None):
    """True for an order when every non-cancelled line has been invoiced."""
    res = {}
    for purchase in self.browse(cursor, user, ids, context=context):
        pending = [line for line in purchase.order_line
                   if line.state != 'cancel' and not line.invoiced]
        res[purchase.id] = not pending
    return res
def _get_journal(self, cr, uid, context=None):
    """Default journal: first 'purchase' journal of the relevant company."""
    if context is None:
        context = {}
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    # Company may be forced through the context (multi-company setups).
    company_id = context.get('company_id', user.company_id.id)
    journal_obj = self.pool.get('account.journal')
    res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
                                        ('company_id', '=', company_id)],
                                        limit=1)
    return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
    """Default incoming picking type for the user's company.

    Falls back to a warehouse-less incoming type; raises when none exists.
    """
    obj_data = self.pool.get('ir.model.data')  # NOTE(review): appears unused — confirm.
    type_obj = self.pool.get('stock.picking.type')
    user_obj = self.pool.get('res.users')
    company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
    types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
    if not types:
        # Generic incoming type not bound to any warehouse.
        types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
    if not types:
        raise osv.except_osv(_('Error!'), _("Make sure you have at least an incoming picking type defined"))
    return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_l
|
anhlt/twitter_cli
|
twitter_cli/main.py
|
Python
|
mit
| 3,569 | 0.00028 |
from prompt_toolkit import Application
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.shortcuts import create_eventloop
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
from prompt_toolkit.buffer import Buffer, AcceptAction
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from promp
|
t_toolkit.layout.containers import VSplit, Window, HSplit
from prompt_toolkit.layout.controls import BufferControl, FillControl, TokenListControl
from prompt_toolkit.layout.dimension import LayoutDimension as D
from prompt_to
|
olkit.auto_suggest import AutoSuggestFromHistory
from pygments.token import Token
def get_titlebar_tokens(cli):
    """Return the static token row rendered in the title bar."""
    title_parts = (' Hello world ', ' (Press [Ctrl-Q] to quit.)')
    return [(Token.Title, part) for part in title_parts]
def handle_action(cli, buffer):
    """Accept handler for the PROMPT buffer (when enter is pressed)."""
    text = buffer.text  # Remember: leave_command_mode resets the buffer.
    # Clear the prompt before handing focus back to the main buffer.
    buffer.delete_before_cursor(len(text))
    cli.focus(DEFAULT_BUFFER)
    # First leave command mode. We want to make sure that the working
    # pane is focussed again before executing the command handlers.
    # self.leave_command_mode(append_to_history=True)
    # Execute command.
    buffers[DEFAULT_BUFFER].insert_text(text)
buffers = {
DEFAULT_BUFFER: Buffer(is_multiline=True),
'PROMPT': Buffer(
accept_action=AcceptAction(handler=handle_action),
enable_history_search=True,
complete_while_typing=True,
auto_suggest=AutoSuggestFromHistory()),
'RESULT': Buffer(is_multiline=True),
}
def default_buffer_changed(cli):
    """
    When the buffer on the left (DEFAULT_BUFFER) changes, update the buffer on
    the right. We just reverse the text.
    """
    # [::-1] reverses the string.
    buffers['RESULT'].text = buffers[DEFAULT_BUFFER].text[::-1]
buffers[DEFAULT_BUFFER].on_text_changed += default_buffer_changed
def get_bottom_toolbar_tokens(cli):
    """Return the static token row shown in the bottom toolbar."""
    return [(Token.Toolbar, ' This is a toolbar. ')]
layout = VSplit([
Window(content=BufferControl(
buffer_name=DEFAULT_BUFFER, focus_on_click=True)),
Window(width=D.exact(1),
content=FillControl('|', token=Token.Line)),
Window(content=BufferControl(buffer_name='RESULT'))
])
layout = HSplit([
Window(height=D.exact(1),
content=TokenListControl(
get_titlebar_tokens, align_center=True
)),
Window(height=D.exact(1),
content=FillControl('-', token=Token.Line)),
layout,
Window(height=D.exact(1),
content=FillControl('-', token=Token.Line)),
Window(height=D.exact(2),
content=BufferControl(
buffer_name='PROMPT',
focus_on_click=True)),
])
loop = create_eventloop()
manager = KeyBindingManager()
registry = manager.registry
@registry.add_binding(Keys.ControlQ, eager=True)
def exit_(event):
    """Ctrl-Q: terminate the application by returning None from run()."""
    event.cli.set_return_value(None)
application = Application(key_bindings_registry=registry, layout=layout,
buffers=buffers,
mouse_support=True,
use_alternate_screen=True,
editing_mode=EditingMode.VI
)
cli = CommandLineInterface(application=application, eventloop=loop)
cli.run()
print("Exiting")
|
fronzbot/blinkpy
|
blinkpy/sync_module.py
|
Python
|
mit
| 12,603 | 0.000714 |
"""Defines a sync module for Blink."""
import logging
from requests.structures import CaseInsensitiveDict
from blinkpy import api
from blinkpy.camera import BlinkCamera, BlinkCameraMini, BlinkDoorbell
from blinkpy.helpers.util import time_to_seconds
from blinkpy.helpers.constants import ONLINE
_LOGGER = logging.getLogger(__name__)
class BlinkSyncModule:
"""Class to initialize sync module."""
def __init__(self, blink, network_name, network_id, camera_list):
    """
    Initialize Blink sync module.

    :param blink: Blink class instantiation
    :param network_name: name of the network this sync module serves
    :param network_id: id of the network this sync module serves
    :param camera_list: raw camera config dicts for this network
    """
    self.blink = blink
    self.network_id = network_id
    # Region comes from the authenticated session, not from the network.
    self.region_id = blink.auth.region_id
    self.name = network_name
    self.serial = None
    self.status = "offline"  # until start()/refresh() proves otherwise
    self.sync_id = None
    self.host = None
    self.summary = None
    self.network_info = None
    self.events = []
    # Camera lookup is case-insensitive by camera name.
    self.cameras = CaseInsensitiveDict({})
    self.motion_interval = blink.motion_interval
    self.motion = {}       # camera name -> motion seen since last refresh
    self.last_record = {}
    self.camera_list = camera_list
    self.available = False
@property
def attributes(self):
    """Return a plain-dict snapshot of this sync module's identity."""
    attr = {
        "name": self.name,
        "id": self.sync_id,
        "network_id": self.network_id,
        "serial": self.serial,
        "status": self.status,
        "region_id": self.region_id,
    }
    return attr
@property
def urls(self):
    """Return device urls (delegates to the owning Blink instance)."""
    return self.blink.urls
@property
def online(self):
    """
    Return boolean system online status.

    Looks the current ``status`` string up in the ``ONLINE`` mapping; an
    unrecognized status is logged, marks the module unavailable, and is
    reported as offline.
    """
    try:
        return ONLINE[self.status]
    except KeyError:
        _LOGGER.error("Unknown sync module status %s", self.status)
        self.available = False
        return False
@property
def arm(self):
    """Return status of sync module: armed/disarmed (None if unknown)."""
    try:
        return self.network_info["network"]["armed"]
    except (KeyError, TypeError):
        # network_info missing or malformed -> state unknown.
        self.available = False
        return None
@arm.setter
def arm(self, value):
    """Arm (truthy value) or disarm (falsy value) the network."""
    if value:
        return api.request_system_arm(self.blink, self.network_id)
    return api.request_system_disarm(self.blink, self.network_id)
def start(self):
    """Initialize the system.

    Fetches the sync module summary, network info and cameras.  Returns
    True when everything was set up, False otherwise.
    """
    response = self.sync_initialize()
    if not response:
        return False
    try:
        self.sync_id = self.summary["id"]
        self.serial = self.summary["serial"]
        self.status = self.summary["status"]
    except KeyError:
        # Non-fatal: continue with whatever summary fields were present.
        _LOGGER.error("Could not extract some sync module info: %s", response)
    is_ok = self.get_network_info()
    self.check_new_videos()
    if not is_ok or not self.update_cameras():
        return False
    self.available = True
    return True
def sync_initialize(self):
    """Initialize a sync module.

    Stores the server's "syncmodule" summary on success and returns the
    raw response; returns False on a malformed response.
    """
    response = api.request_syncmodule(self.blink, self.network_id)
    try:
        self.summary = response["syncmodule"]
        self.network_id = self.summary["network_id"]
    except (TypeError, KeyError):
        _LOGGER.error(
            "Could not retrieve sync module information with response: %s", response
        )
        return False
    return response
def update_cameras(self, camera_type=BlinkCamera):
    """Update cameras from server.

    Instantiates one camera object per entry of ``self.camera_list``,
    choosing the class from the entry's "type" field, and seeds it with
    fresh camera info.  Returns True on success, False on failure.

    :param camera_type: default camera class used when the config entry
        has no recognized "type".
    """
    try:
        for camera_config in self.camera_list:
            # NOTE(review): `break` abandons ALL remaining entries as
            # soon as one lacks a "name" -- confirm this is intended
            # rather than `continue`.
            if "name" not in camera_config:
                break
            blink_camera_type = camera_config.get("type", "")
            name = camera_config["name"]
            self.motion[name] = False
            owl_info = self.get_owl_info(name)
            lotus_info = self.get_lotus_info(name)
            if blink_camera_type == "mini":
                camera_type = BlinkCameraMini
            if blink_camera_type == "lotus":
                camera_type = BlinkDoorbell
            self.cameras[name] = camera_type(self)
            camera_info = self.get_camera_info(
                camera_config["id"], owl_info=owl_info, lotus_info=lotus_info
            )
            self.cameras[name].update(camera_info, force_cache=True, force=True)
    except KeyError:
        _LOGGER.error("Could not create camera instances for %s", self.name)
        return False
    return True
def get_owl_info(self, name):
    """Return the homescreen "owls" entry matching *name*, or None."""
    try:
        for owl in self.blink.homescreen["owls"]:
            if owl["name"] == name:
                return owl
    except (TypeError, KeyError):
        # homescreen missing or malformed -> treat as "no owl info".
        pass
    return None
def get_lotus_info(self, name):
    """Return the homescreen "doorbells" entry matching *name*, or None."""
    try:
        for doorbell in self.blink.homescreen["doorbells"]:
            if doorbell["name"] == name:
                return doorbell
    except (TypeError, KeyError):
        # homescreen missing or malformed -> treat as "no doorbell info".
        pass
    return None
def get_events(self, **kwargs):
    """Retrieve events from server.

    :param force: optional kwarg, passed through to the API request
        (default False).
    Returns the event list on success, False on a malformed response.
    """
    force = kwargs.pop("force", False)
    response = api.request_sync_events(self.blink, self.network_id, force=force)
    try:
        return response["event"]
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract events: %s", response)
        return False
def get_camera_info(self, camera_id, **kwargs):
    """Retrieve camera information.

    If an ``owl_info`` or ``lotus_info`` kwarg is supplied it takes
    precedence and is returned as-is; otherwise the server is queried
    for *camera_id*.  Returns {} when the response cannot be parsed.
    """
    owl = kwargs.get("owl_info", None)
    if owl is not None:
        return owl
    lotus = kwargs.get("lotus_info", None)
    if lotus is not None:
        return lotus
    response = api.request_camera_info(self.blink, self.network_id, camera_id)
    try:
        return response["camera"][0]
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract camera info: %s", response)
        return {}
def get_network_info(self):
    """Refresh ``self.network_info`` from the server.

    Returns False (and flags the module unavailable) when the response
    is malformed or reports a sync module error; True otherwise.
    """
    self.network_info = api.request_network_update(self.blink, self.network_id)
    try:
        if self.network_info["network"]["sync_module_error"]:
            # Promote an API-reported error onto the failure path below.
            raise KeyError
    except (TypeError, KeyError):
        self.available = False
        return False
    return True
def refresh(self, force_cache=False):
    """Get all blink cameras and pulls their most recent status.

    :param force_cache: passed through to each camera's update().
    """
    if not self.get_network_info():
        return
    self.check_new_videos()
    for camera_name in self.cameras.keys():
        camera_id = self.cameras[camera_name].camera_id
        camera_info = self.get_camera_info(
            camera_id,
            owl_info=self.get_owl_info(camera_name),
            lotus_info=self.get_lotus_info(camera_name),
        )
        self.cameras[camera_name].update(camera_info, force_cache=force_cache)
    self.available = True
def check_new_videos(self):
"""Check if new videos since last refresh."""
try:
interval = self.blink.last_refresh - self.motion_interval * 60
except TypeError:
# This is the first start, so refresh hasn't happened yet.
# No need to check for motion.
return False
resp = api.request_videos(self.blink, time=interval, page=1)
for camera in self.cameras.keys():
self.motion[camera] = False
try:
info = resp["media"]
except (KeyError, TypeError):
_LOGGER.warning("Could not check for motion. Response: %s", resp)
return False
for entry in info:
try:
name = entry["device_name"]
clip = entry["media"]
timestamp = entry["created_at"]
if self.check_new_video_time(timestamp):
self.motion[name] = True and self.arm
|
Elivis/opsa-master
|
opsa/mysql.py
|
Python
|
gpl-2.0
| 1,197 | 0.020084 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Elivis.Zhang <elivis.zhang@aliyun.com>
# QQ Group:99798703
# Created on Aug 8, 2015
# -*- coding: utf-8 -*-
import MySQLdb
import settings
class db_operate:
    """Small helper for running SQL against a MySQL server.

    ``conn`` arguments are dicts with "host", "user", "password",
    "database" and "port" keys.  Results are returned as a flat list of
    column values; on a MySQLdb error the list instead holds the error
    object (historical in-band error convention, preserved here).
    """

    def mysql_command(self, conn, sql_cmd):
        """Execute *sql_cmd* and return all result values, flattened."""
        ret = []
        db = None
        try:
            db = MySQLdb.connect(host=conn["host"], user=conn["user"],
                                 passwd=conn["password"], db=conn["database"],
                                 port=conn["port"], charset="utf8")
            cursor = db.cursor()
            cursor.execute(sql_cmd)
            for row in cursor.fetchall():
                for value in row:
                    ret.append(value)
        except MySQLdb.Error as e:
            # Preserve historical behaviour: return the error in-band.
            ret.append(e)
        finally:
            # Bug fix: the original leaked the connection on every call.
            if db is not None:
                db.close()
        return ret

    def select_table(self, conn, sql_cmd, parmas):
        """Execute parameterized *sql_cmd* with *parmas* and return all
        result values, flattened.  (Parameter name kept for callers.)"""
        ret = []
        db = None
        try:
            db = MySQLdb.connect(host=conn["host"], user=conn["user"],
                                 passwd=conn["password"], db=conn["database"],
                                 port=conn["port"], charset="utf8")
            cursor = db.cursor()
            cursor.execute(sql_cmd, parmas)
            for row in cursor.fetchall():
                for value in row:
                    ret.append(value)
        except MySQLdb.Error as e:
            ret.append(e)
        finally:
            if db is not None:
                db.close()
        return ret
|
piotr1212/gofed
|
ggi.py
|
Python
|
gpl-2.0
| 8,925 | 0.032381 |
# ####################################################################
# gofed - set of tools to automize packaging of golang devel codes
# Copyright (C) 2014 Jan Chaloupka, jchaloup@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import re
import os
import urllib2
import optparse
from subprocess import Popen, PIPE
from modules.Utils import GREEN, RED, ENDC
from modules.Packages import packageInPkgdb
from modules.Utils import FormatedPrint
from modules.ImportPath import ImportPath
from modules.ImportPathsDecomposer import ImportPathsDecomposer
from modules.GoSymbolsExtractor import GoSymbolsExtractor
from modules.Config import Config
if __name__ == "__main__":
parser = optparse.OptionParser("%prog [-a] [-c] [-d [-v]] [directory]")
parser.add_option_group( optparse.OptionGroup(parser, "directory", "Directory to inspect. If empty, current directory is used.") )
parser.add_option(
"", "-a", "--all", dest="all", action = "store_true", default = False,
help = "Display all imports including golang native"
)
parser.add_option(
"", "-c", "--classes", dest="classes", action = "store_true", default = False,
help = "Decompose imports into classes"
)
parser.add_option(
"", "-d", "--pkgdb", dest="pkgdb", action = "store_true", default = False,
help = "Check if a class is in the PkgDB (only with -c option)"
)
parser.add_option(
"", "-v", "--verbose", dest="verbose", action = "store_true", default = False,
help = "Show all packages if -d option is on"
)
parser.add_option(
"", "-s", "--short", dest="short", action = "store_true", default = False,
help = "Display just classes without its imports"
)
parser.add_option(
"", "", "--spec", dest="spec", action = "store_true", default = False,
help = "Display import path for spec file"
)
parser.add_option(
"", "-r", "--requires", dest="requires", action = "store_true", default = False,
help = "Use Requires instead of BuildRequires. Used only with --spec option."
)
parser.add_option(
"", "", "--skip-errors", dest="skiperrors", action = "store_true", default = False,
help = "Skip all errors during Go symbol parsing"
)
parser.add_option(
"", "", "--importpath", dest="importpath", default = "",
help = "Don't display class belonging to IMPORTPATH prefix"
)
parser.add_option(
"", "", "--scan-all-dirs", dest="scanalldirs", action = "store_true", default = False,
help = "Scan all dirs, including Godeps directory"
)
parser.add_option(
"", "", "--skip-dirs", dest="skipdirs", default = "",
help = "Scan all dirs except specified via SKIPDIRS. Directories are comma separated list."
)
parser.add_option(
"", "", "--all-occurrences", dest="alloccurrences", action = "store_true", default = False,
help = "List imported paths in all packages including main. Default is skip main packages."
)
parser.add_option(
"", "", "--show-occurrence", dest="showoccurrence", action = "store_true", default = False,
help = "Show occurence of import paths."
)
options, args = parser.parse_args()
path = "."
if len(args):
path = args[0]
fmt_obj = Formated
|
Print()
if not options.scanalldirs:
noGodeps = Config().getSkippedDirectories()
else:
noGodeps = []
if options.skipdirs:
for dir in options.skipdirs.split(','):
dir = dir.strip()
if dir == "":
continue
noGodeps.append(dir)
gse_obj = GoSymbolsExtractor(path, imports_only=True, skip_errors=options.skiperrors, noGodeps=noGodeps)
if not gse_obj.extract():
fmt_obj.printError(gse_obj.getError
|
())
exit(1)
package_imports_occurence = gse_obj.getPackageImportsOccurences()
ip_used = gse_obj.getImportedPackages()
ipd = ImportPathsDecomposer(ip_used)
if not ipd.decompose():
fmt_obj.printError(ipd.getError())
exit(1)
warn = ipd.getWarning()
if warn != "":
fmt_obj.printWarning("Warning: %s" % warn)
classes = ipd.getClasses()
sorted_classes = sorted(classes.keys())
# get max length of all imports
max_len = 0
for element in sorted_classes:
if element == "Native":
continue
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
for gimport in gimports:
import_len = len(gimport)
if import_len > max_len:
max_len = import_len
if options.spec and options.showoccurrence:
print "# THIS IS NOT A VALID SPEC FORMAT"
print "# COMMENTS HAS TO BE STARTED AT THE BEGGINING OF A LINE"
for element in sorted_classes:
if not options.all and element == "Native":
continue
if not options.alloccurrences:
one_class = []
for gimport in classes[element]:
# does it occur only in main package?
# remove it from classes[element]
skip = True
if gimport in package_imports_occurence:
for occurrence in package_imports_occurence[gimport]:
if not occurrence.endswith(":main"):
skip = False
break
if skip:
continue
one_class.append(gimport)
classes[element] = sorted(one_class)
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
# filter out all members of a class prefixed by prefix
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
if gimports == []:
continue
if options.classes:
# Native class is just printed
if options.all and element == "Native":
# does not make sense to check Native class in PkgDB
if options.pkgdb:
continue
print "Class: %s" % element
if not options.short:
for gimport in gimports:
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Translate non-native class into package name (if -d option)
if options.pkgdb:
ip_obj = ImportPath(element)
if not ip_obj.parse():
fmt_obj.printWarning("Unable to translate %s to package name" % element)
continue
pkg_name = ip_obj.getPackageName()
if pkg_name == "":
fmt_obj.printWarning(ip_obj.getError())
pkg_in_pkgdb = packageInPkgdb(pkg_name)
if pkg_in_pkgdb:
if options.verbose:
print (GREEN + "Class: %s (%s) PkgDB=%s" + ENDC) % (element, pkg_name, pkg_in_pkgdb)
else:
print (RED + "Class: %s (%s) PkgDB=%s" + ENDC ) % (element, pkg_name, pkg_in_pkgdb)
continue
# Print class
print "Class: %s" % element
if not options.short:
for gimport in sorted(gimports):
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Spec file BR
if options.spec:
for gimport in sorted(classes[element]):
if options.requires:
if options.showoccurrence:
import_len = len(gimport)
pri
|
mdurrant-b3/acos-client
|
acos_client/tests/unit/v30/test_slb_virtual_port.py
|
Python
|
apache-2.0
| 22,343 | 0.001074 |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest
from unittest import mock
except ImportError:
import mock
import unittest2 as unittest
from acos_client import client
import acos_client.errors as acos_errors
import json
import responses
# Test fixtures: fake device identity and the AXAPI v3 endpoints the
# virtual-port calls are expected to hit.
HOSTNAME = 'fake_a10'
BASE_URL = 'https://{}:443/axapi/v3'.format(HOSTNAME)
AUTH_URL = '{}/auth'.format(BASE_URL)
VSERVER_NAME = 'test'
CREATE_URL = '{}/slb/virtual-server/{}/port/'.format(BASE_URL, VSERVER_NAME)
OBJECT_URL = '{}/slb/virtual-server/{}/port/80+http'.format(BASE_URL, VSERVER_NAME)
ALL_URL = '{}/slb/virtual-server/{}/port/'.format(BASE_URL, VSERVER_NAME)
class TestVirtualPort(unittest.TestCase):
def setUp(self):
    """Create a fresh v30 client against the fake device for each test."""
    self.client = client.Client(HOSTNAME, '30', 'fake_username', 'fake_password')
    self.maxDiff = None  # show full dict diffs on assertion failures
@responses.activate
def test_virtual_port_create_no_params(self):
    """Create a vport with minimal args; verify the exact JSON posted."""
    responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
    json_response = {'response': {'status': 'OK'}}
    responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
    # Payload the client is expected to POST for a minimal create.
    params = {
        'port':
            {
                'extended-stats': 1,
                'name': 'test1_VPORT',
                'port-number': 80,
                'protocol': 'http',
                'service-group': 'pool1'
            }
    }
    resp = self.client.slb.virtual_server.vport.create(
        VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
        service_group_name='pool1'
    )
    self.assertEqual(resp, json_response)
    # Call 0 is the auth handshake; call 1 is the create itself.
    self.assertEqual(len(responses.calls), 2)
    self.assertEqual(responses.calls[1].request.method, responses.POST)
    self.assertEqual(responses.calls[1].request.url, CREATE_URL)
    self.assertEqual(json.loads(responses.calls[1].request.body), params)
@responses.activate
def test_virtual_port_create_with_params(self):
    """Create a vport with every optional arg; verify the JSON posted."""
    responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
    json_response = {'response': {'status': 'OK'}}
    responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
    # Expected payload: every optional kwarg mapped to its AXAPI field.
    params = {
        'port':
            {
                'auto': 1,
                'extended-stats': 1,
                'ipinip': 1,
                'name': 'test1_VPORT',
                'pool': 'test_nat_pool',
                'port-number': 80,
                'protocol': 'http',
                'service-group': 'pool1',
                'ha-conn-mirror': 1,
                'no-dest-nat': 1,
                'conn-limit': 50000,
                'tcp_template': 'test_tcp_template',
                'template-persist-cookie': 'test_c_pers_template',
                'template-persist-source-ip': 'test_s_pers_template',
                'udp_template': 'test_udp_template'
            }
    }
    resp = self.client.slb.virtual_server.vport.create(
        virtual_server_name=VSERVER_NAME,
        name='test1_VPORT',
        protocol=self.client.slb.virtual_server.vport.HTTP,
        port='80',
        service_group_name='pool1',
        s_pers_name="test_s_pers_template",
        c_pers_name="test_c_pers_template",
        ha_conn_mirror=1,
        no_dest_nat=1,
        conn_limit=50000,
        status=1,
        autosnat=True,
        ipinip=True,
        source_nat_pool="test_nat_pool",
        tcp_template="test_tcp_template",
        udp_template="test_udp_template",
    )
    self.assertEqual(resp, json_response)
    # Call 0 is the auth handshake; call 1 is the create itself.
    self.assertEqual(len(responses.calls), 2)
    self.assertEqual(responses.calls[1].request.method, responses.POST)
    self.assertEqual(responses.calls[1].request.url, CREATE_URL)
    self.assertEqual(json.loads(responses.calls[1].request.body), params)
@responses.activate
def test_virtual_port_create_already_exists(self):
    """A duplicate-create failure from the device raises ACOSException."""
    responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
    # Device-style failure body (code 1406: port already exists).
    json_response = {
        "response": {"status": "fail", "err": {"code": 1406, "msg": "The virtual port already exists."}}
    }
    responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
    with self.assertRaises(acos_errors.ACOSException):
        self.client.slb.virtual_server.vport.create(
            VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
            service_group_name='pool1'
        )
    # The request must still have been attempted after auth.
    self.assertEqual(len(responses.calls), 2)
    self.assertEqual(responses.calls[1].request.method, responses.POST)
    self.assertEqual(responses.calls[1].request.url, CREATE_URL)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_update_no_params(self, mocked_get):
    """Update with minimal args posts to the object URL with the
    expected payload (existing-object lookup is mocked out)."""
    mocked_get.return_value = {"foo": "bar"}
    responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
    json_response = {"foo": "bar"}
    responses.add(responses.POST, OBJECT_URL, json=json_response, status=200)
    # Expected payload for a minimal update (persist templates cleared).
    params = {
        "port":
            {
                "name": "test1_VPORT",
                "service-group": "pool1",
                "protocol": "http",
                "port-number": 80,
                "template-persist-source-ip": None,
                "template-persist-cookie": None,
                "extended-stats": 1,
            }
    }
    resp = self.client.slb.virtual_server.vport.update(
        VSERVER_NAME, 'test1_VPORT', protocol=self.client.slb.virtual_server.vport.HTTP, port='80',
        service_group_name='pool1'
    )
    self.assertEqual(resp, json_response)
    # Call 0 is the auth handshake; call 1 is the update itself.
    self.assertEqual(len(responses.calls), 2)
    self.assertEqual(responses.calls[1].request.method, responses.POST)
    self.assertEqual(responses.calls[1].request.url, OBJECT_URL)
    self.assertEqual(json.loads(responses.calls[1].request.body), params)
@mock.patch('acos_client.v30.slb.virtual_port.VirtualPort.get')
@responses.activate
def test_virtual_port_create_with_templates(self, mocked_get):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
json_response = {'response': {'status': 'OK'}}
responses.add(responses.POST, CREATE_URL, json=json_response, status=200)
protocol = self.client.slb.virtual_server.vport.HTTP
if protocol.lower() == 'http':
params = {
'port':
{
'auto': 1,
'extended-stats': 1,
'ipinip': 1,
'name': 'test1_VPORT',
'pool': 'test_nat_pool',
'port-number': 80,
'protocol': 'http',
'service-group': 'pool1',
'tcp_template': 'test_tcp_template',
'template-persist-cookie': 'test_c_pers_template',
'template-persist-source-ip': 'test_s_pers_template',
'udp_template': 'test_udp_template',
'template-virtual-port': 'template_vp',
'template-http': None,
'template-policy': 'template_pl',
}
}
else:
params = {
'port':
{
'auto': 1,
'exten
|
stuckyb/sqlite_taxonomy
|
utilities/taxolib/taxonomy.py
|
Python
|
gpl-3.0
| 15,593 | 0.005066 |
"""
Provides classes that represent complete taxonomies, built using components from
the taxacomponents module.
"""
from taxacomponents import Citation, RankTable, Taxon
from taxonvisitor import TaxonVisitor
from taxonvisitors_concrete import PrintTaxonVisitor, CSVTaxonVisitor
from nameresolve import CoLNamesResolver
class TaxonomyError(Exception):
    """Error raised while working with taxonomies.

    The supplied message is prefixed with a standard header so taxonomy
    failures are easy to recognize in output and logs.
    """
    def __init__(self, msg):
        Exception.__init__(self, 'Taxonomy error:\n ' + msg)
class TaxonomyBase:
    """
    Base class representing a taxonomy: citation metadata plus a tree of
    taxa rooted at ``roottaxon``.
    """
    # Define the "nil" UUID constant as returned by the uuid-osp Postgres module
    # function uuid_nil().
    #NIL_UUID = '00000000-0000-0000-0000-000000000000'
    NIL_UUID = 0

    def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
        """
        :param taxonomy_id: database id of this taxonomy
        :param name: human-readable taxonomy name
        :param ismaster: True if this is the master taxonomy
        :param citation: Citation object describing the source
        :param roottaxon: root Taxon of the taxa tree
        """
        self.taxonomy_id = taxonomy_id
        self.name = name
        self.ismaster = ismaster
        self.citation = citation
        self.roottaxon = roottaxon

    def loadFromDB(self, pgcur, taxanum=-1, maxdepth=-1):
        """
        Attempts to load the taxonomy from a taxonomy database, including the full
        tree of taxa.  If taxanum > 0, then only taxanum taxa will be loaded.  If
        maxdepth > -1, the taxa tree will only be traversed to a depth of maxdepth.
        """
        query = """SELECT name, citation_id, ismaster, root_tc_id
            FROM taxonomies
            WHERE taxonomy_id=?"""
        pgcur.execute(query, (self.taxonomy_id,))
        res = pgcur.fetchone()
        if res is None:
            raise TaxonomyError('Taxonomy ID ' + str(self.taxonomy_id) + ' was not found in the database.')
        self.name = res[0]
        self.ismaster = res[2]
        roottc_id = res[3]
        # Create the Citation object.
        self.citation = Citation()
        self.citation.loadFromDB(pgcur, res[1])
        # Get the rank ID and taxonomy ID of the root taxon concept.
        query = """SELECT tc.rank_id, tc.taxonomy_id
            FROM taxon_concepts tc, ranks r
            WHERE tc.tc_id=? AND tc.rank_id=r.rank_id"""
        pgcur.execute(query, (roottc_id,))
        res = pgcur.fetchone()
        rankid = res[0]
        root_taxonomy_id = res[1]
        # Initialize the rank lookup table.
        rankt = RankTable()
        rankt.loadFromDB(pgcur)
        # Load the taxa tree.
        # NOTE(review): the keyword name "roottaxo_id" looks suspicious
        # (vs. "root_taxonomy_id") -- confirm against Taxon's signature.
        self.roottaxon = Taxon(self.taxonomy_id, rankid, rankt, roottaxo_id=root_taxonomy_id, isroot=True)
        self.roottaxon.loadFromDB(pgcur, roottc_id, taxanum, maxdepth)

    def persist(self):
        """
        Persist the Taxonomy to the database.  This method should be implemented
        by concrete subclasses.
        """
        pass

    def __str__(self):
        """Return a short multi-line summary: name, ID and master flag."""
        tstr = 'name: ' + self.name + '\nID: ' + str(self.taxonomy_id) + '\nmaster: '
        if self.ismaster:
            tstr += 'yes'
        else:
            tstr += 'no'
        return tstr

    def printTaxonomyInfo(self):
        """
        Prints the metadata that describes this taxonomy.
        """
        print('** Taxonomy information **')
        print(str(self))
        print(str(self.citation))

    def printCSVTaxaTree(self, numtaxa=-1, maxdepth=-1):
        """
        Prints the tree of taxa for this taxonomy in "flat" format as CSV output.
        If numtaxa > 0, only the first numtaxa taxa will be printed.  If
        maxdepth > -1, the taxa tree will only be traversed to a depth of maxdepth.
        """
        if numtaxa > 0:
            print('(Only printing first %s taxa.)' % numtaxa)
        if maxdepth > -1:
            print('(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)')
        csvvisitor = CSVTaxonVisitor(numtaxa, maxdepth)
        csvvisitor.visit(self.roottaxon)

    def printTaxaTree(self, numtaxa=-1, maxdepth=-1):
        """
        Prints the tree of taxa for this taxonomy.  If numtaxa > 0, only the first
        numtaxa taxa will be printed.  If maxdepth > -1, the taxa tree will only be
        traversed to a depth of maxdepth.
        """
        print('** Taxa tree **')
        if numtaxa > 0:
            print('(Only printing first %s taxa.)' % numtaxa)
        if maxdepth > -1:
            print('(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)')
        ptvisitor = PrintTaxonVisitor(numtaxa, maxdepth)
        ptvisitor.visit(self.roottaxon)

    def printAll(self, numtaxa=-1, maxdepth=-1):
        """
        Prints a text representation of this taxonomy, including the tree of taxa.
        If numtaxa > 0, only the first numtaxa taxa will be printed.  If
        maxdepth > -1, the taxa tree will only be traversed to a depth of maxdepth.
        """
        self.printTaxonomyInfo()
        print('')
        self.printTaxaTree(numtaxa, maxdepth)
class Taxonomy(TaxonomyBase):
"""
A class that represents a single taxonomy in the MOL taxonomy database. Provides methods
to load a taxonomy from the database and persist a taxonomy to the database. Can also link
a taxonomy to the backbone taxonomy.
"""
def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
    """Initialize the taxonomy; it starts out unlinked from the backbone."""
    TaxonomyBase.__init__(self, taxonomy_id, name, ismaster, citation, roottaxon)
    # A reference for the backbone taxonomy, which encompasses all other taxonomies.
    # This reference is used if this taxonomy is linked to the backbone taxonomy.
    self.bb_taxonomy = None
def linkToBackbone(self, pgcur, adjustdepth=True):
    """
    Tries to connect this taxonomy to the backbone taxonomy, creating new nodes
    in the backbone taxonomy, if needed, to link the two together.  If adjustdepth
    is True, the depth property of all nodes in the taxonomy are set to match the
    correct depth relative to the root of the backbone taxonomy.  Returns True if
    the linking operation succeeded, False otherwise.
    """
    bb_taxonomy = BackboneTaxonomy(pgcur)
    if bb_taxonomy.linkTaxonomy(self):
        self.bb_taxonomy = bb_taxonomy
        if adjustdepth:
            self.bb_taxonomy.setNodeDepths()
        return True
    else:
        # Leave the taxonomy unlinked on failure.
        self.bb_taxonomy = None
        return False
def getBackboneTaxonomy(self):
    """
    Returns a reference to the backbone taxonomy object that links this taxonomy
    to the MOL backbone taxonomy.  Returns None when linkToBackbone() has not
    been run or did not succeed.
    """
    return self.bb_taxonomy
def persist(self, pgcur, printprogress=False):
"""
Writes the taxonomy information to the database, if it does not already
exist. This includes calling the persist() methods on the Citation and
Taxon tree associated with this Taxonomy object.
"""
# First, check if this taxonomy already exists in the database.
query = """SELECT taxonomy_id
FROM taxonomies
WHERE taxonomy_id=? AND ismaster=?"""
pgcur.execute(query, (self.taxonomy_id, self.ismaster))
res = pgcur.fetchone()
if res == None:
# Write the citation information to the database, if needed.
citation_id = self.citation.persist(pgcur)
# Create the initial database entry for the taxonomy metadata so that the
# foreign key constraint for the child taxon concepts can be satisfied.
query = """INSERT INTO taxonomies
(taxonomy_id, name, citation_id, ismaster, root_tc_id)
VALUES (?, ?, ?, ?, ?)"""
pgcur.execute(query, (self.taxonomy_id, self.name, citation_id, self.ismaster, None))
# Make sure all taxon concepts, including those from the backbone taxonomy,
# are persisted to the database. Use the "nil" UUID as the parent_id for
# the root of the taxonomy if there is not an existing root entry.
if self.bb_taxonomy != None:
self.bb_taxonomy.roottaxon.persist(pgcur, self.NIL_UUID, printprogress,
self.roottaxon.depth)
else:
self.roottaxon.persist(pgcur, self.NIL_UUID, prin
|
gwpy/gwpy.github.io
|
docs/latest/examples/timeseries/inject-5.py
|
Python
|
gpl-3.0
| 136 | 0.007353 |
# Documentation example fragment: `noise`, `signal` and `data` are
# presumably defined by the preceding steps of the example -- this
# snippet only builds and shows the figure.
from gwpy.plot import Plot
plot = Plot(noise, signal, data, separate=True, sharex=True, sharey=True)
plot.gca().set_epoch(0)
plot.show()
|
Nettacker/Nettacker
|
lib/scan/wp_timthumbs/engine.py
|
Python
|
gpl-3.0
| 10,981 | 0.003461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author Pradeep Jairamani; github.com/pradeepjairamani
import socket
import socks
import time
import json
import threading
import string
import requests
import random
import os
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
from core._die import __die_failure
from lib.scan.wp_timthumbs import wp_timthumbs
from lib.payload.wordlists import useragents
from core.compatible import version
def extra_requirements_dict():
    """Return the default extra-requirement options for this scan module."""
    defaults = dict(
        wp_timthumb_scan_http_method=["GET"],
        wp_timthumb_scan_random_agent=["True"],
    )
    return defaults
def check(target, user_agent, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, retries,
          http_method, socks_proxy, scan_id, scan_cmd):
    """Request *target* (optionally through a SOCKS proxy) and log findings.

    A response whose status is in ``status_codes`` is logged as a find;
    a 200 body that matches a directory-listing signature is logged as
    well.  Returns True after a processed response, 1 when the request
    kept failing after *retries* attempts, False on unexpected errors.
    (NOTE: mixed return types 1/True/False are historical behavior.)
    """
    status_codes = [200, 401, 403]
    # Signatures of common web-server directory listing pages.
    directory_listing_msgs = ["<title>Index of /", "<a href=\"\\?C=N;O=D\">Name</a>", "Directory Listing for",
                              "Parent Directory</a>", "Last modified</a>", "<TITLE>Folder Listing.",
                              "- Browsing directory "]
    time.sleep(time_sleep)
    try:
        if socks_proxy is not None:
            # Route all socket traffic through the configured SOCKS proxy;
            # supports optional user:pass@host:port credentials.
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(
                    socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
        n = 0
        # Retry the request up to `retries` times before giving up.
        while 1:
            try:
                if http_method == "GET":
                    r = requests.get(
                        target, timeout=timeout_sec, headers=user_agent)
                elif http_method == "HEAD":
                    r = requests.head(
                        target, timeout=timeout_sec, headers=user_agent)
                content = r.content
                break
            except:  # noqa -- broad catch is deliberate: any network error counts as one failed try
                n += 1
                if n == retries:
                    warn(messages(language, "http_connection_timeout").format(target))
                    return 1
        if version() == 3:
            # On Python 3, requests gives bytes; signatures below are str.
            content = content.decode('utf8')
        if r.status_code in status_codes:
            info(messages(language, "found").format(
                target, r.status_code, r.reason))
            __log_into_file(thread_tmp_filename, 'w', '0', language)
            data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
                               'PORT': "", 'TYPE': 'wp_timthumb_scan',
                               'DESCRIPTION': messages(language, "found").format(target, r.status_code, r.reason),
                               'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
            __log_into_file(log_in_file, 'a', data, language)
        if r.status_code == 200:
            for dlmsg in directory_listing_msgs:
                if dlmsg in content:
                    info(messages(language, "directoy_listing").format(target))
                    data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
                                       'PORT': "", 'TYPE': 'wp_timthumb_scan',
                                       'DESCRIPTION': messages(language, "directoy_listing").format(target), 'TIME': now(),
                                       'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
                    __log_into_file(log_in_file, 'a', data, language)
                    break
        return True
    except:  # noqa -- any other failure is reported as "not found"
        return False
def test(target, retries, timeout_sec, user_agent, http_method, socks_proxy, verbose_level, trying, total_req, total,
         num, language):
    """Reachability probe for *target*.

    Optionally configures a SOCKS proxy, then issues a single GET/HEAD
    request (retried up to *retries* times).  Returns 0 when the target
    responded, 1 when every attempt failed.
    """
    if verbose_level > 3:
        info(messages(language, "trying_message").format(trying, total_req, num, total, target_to_host(target), "default_port",
                                                         'wp_timthumb_scan'))
    if socks_proxy is not None:
        # Same SOCKS setup as in check(): route all sockets through proxy.
        socks_version = socks.SOCKS5 if socks_proxy.startswith(
            'socks5://') else socks.SOCKS4
        socks_proxy = socks_proxy.rsplit('://')[1]
        if '@' in socks_proxy:
            socks_username = socks_proxy.rsplit(':')[0]
            socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
            socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                    int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                    password=socks_password)
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
        else:
            socks.set_default_proxy(socks_version, str(
                socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
    n = 0
    while 1:
        try:
            # Response object is intentionally unused: only reachability matters.
            if http_method == "GET":
                r = requests.get(target, timeout=timeout_sec,
                                 headers=user_agent)
            elif http_method == "HEAD":
                r = requests.head(target, timeout=timeout_sec,
                                  headers=user_agent)
            return 0
        except:  # noqa -- any network error counts as one failed try
            n += 1
            if n == retries:
                return 1
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function
if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(
targ
|
et) != 'HTTP' or target_type(target) != 'SINGLE_IPv6':
# rand useragent
user_agent_list = useragents.useragents()
http_methods = ["GET", "HEAD"]
user_agent = {'User-agent'
|
: random.choice(user_agent_list)}
# requirements check
new_extra_requirements = extra_requirements_dict()
if methods_args is not None:
for extra_requirement in extra_requirements_dict():
if extra_requirement in methods_args:
new_extra_requirements[
extra_requirement] = methods_args[extra_requirement]
extra_requirements = new_extra_requirements
if extra_requirements["wp_timthumb_scan_http_method"][0] not in http_methods:
warn(messages(language, "wp_timthumb_scan_get"))
extra_requirements["wp_timthumb_scan_http_method"] = ["GET"]
random_agent_flag = True
if extra_requirements["wp_timthumb_scan_random_agent"][0] == "False":
random_agent_flag = False
threads = []
total_req = len(wp_timthumbs.timthumb())
thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
__log_into_file(thread_tmp_filename, 'w', '1', language)
trying = 0
if target_type(target) != "HTTP":
target = 'https://' + target
if test(str(target), retries, timeout_sec, user_agent, extra_requirements["wp_timthumb_scan_http_method"][0],
socks_proxy, verbose_level, trying, total_req, total, num, language) == 0:
|
patrabu/carbu
|
import_csv.py
|
Python
|
gpl-3.0
| 1,167 | 0.007712 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
import_csv.py
~~~~~~~~~~~~~~~~~~~~
Import csv file into database.
The CSV file should be in this format:
Datetime;price;quantity;mileage
2013/10/03 07:00:00;34.01;25.90;149340
:copyright: (c) 2014 by Patrick Rabu.
:license: GPL-3, see LICENSE for more details.
"""
import sys
import time
import datetime
import locale
import csv
import sqlite3

# Usage: import_csv.py <csv-file> <sqlite-db> <car-id>
csvfile = sys.argv[1]
db = sqlite3.connect(sys.argv[2])
car_id = sys.argv[3]
cursor = db.cursor()
with open(csvfile, 'rb') as f:
    # French locale so locale.atof/atoi parse ',' decimal separators.
    locale.setlocale(locale.LC_ALL, 'fra_fra')
    reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
    reader.next()  # Skip the header row (Python 2 iterator protocol)
    for row in reader:
        dt = datetime.datetime.strptime(row[0], "%Y/%m/%d %H:%M:%S")
        price = locale.atof(row[1])
        quantity = locale.atof(row[2])
        mileage = locale.atoi(row[3])
        cursor.execute('''insert into refills(datetime, quantity, price, mileage, car_id)
                       values (?, ?, ?, ?, ?)''', (dt, quantity, price, mileage, car_id))
db.commit()
db.close()
|
rlowrance/re-avm
|
AVM.py
|
Python
|
bsd-3-clause
| 3,934 | 0.001525 |
'Automated Valuation Model'
import pdb
import numpy as np
import pandas as pd
from pprint import pprint

import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.preprocessing

from columns_contain import columns_contain
import AVM_elastic_net
import AVM_gradient_boosting_regressor
import AVM_random_forest_regressor
from Features import Features

# Short alias used throughout the project for column-name matching.
cc = columns_contain
def avm_scoring(estimator, df):
    'return error from using fitted estimator with test data in the dataframe'
    # TODO: make a static method of class AVM
    # Scorer for GridSearchCV: returns the negated median absolute error,
    # so a smaller error produces a larger (better) score.
    assert isinstance(estimator, AVM)
    # NOTE(review): X is unpacked but unused; only y is needed here.
    X, y = estimator.extract_and_transform(df)
    assert len(y) > 0
    y_hat = estimator.predict(df)
    errors = y_hat - y
    median_abs_error = np.median(np.abs(errors))
    return -median_abs_error  # because GridSearchCV chooses the model with the highest score
class AVM(sklearn.base.BaseEstimator):
    'one estimator for several underlying models'
    def __init__(self,
                 model_name=None, # parameters for all models
                 forecast_time_period=None,
                 n_months_back=None,
                 random_state=None,
                 verbose=0,
                 features_group=None,
                 implementation_module=None,
                 alpha=None, # for ElasticNet
                 l1_ratio=None,
                 units_X=None,
                 units_y=None,
                 n_estimators=None, # for RandomForestRegressor
                 max_depth=None,
                 max_features=None,
                 learning_rate=None, # for GradientBoostingRegressor
                 loss=None,
                 ):
        # NOTE: just capture the parameters (to conform to the sklearn protocol);
        # sklearn's clone()/GridSearchCV introspect __init__, so no logic belongs here.
        self.model_name = model_name
        self.forecast_time_period = forecast_time_period
        self.n_months_back = n_months_back
        self.random_state = random_state
        self.verbose = verbose
        self.features_group = features_group
        self.implementation_module = implementation_module
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.units_X = units_X
        self.units_y = units_y
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_features = max_features
        self.learning_rate = learning_rate
        self.loss = loss
    def fit(self, samples):
        'convert samples to X,Y and fit them'
        # Dispatch to the implementation module selected by model_name;
        # a KeyError here means an unsupported model_name was configured.
        self.implementation_module = {
            'ElasticNet': AVM_elastic_net,
            'GradientBoostingRegressor': AVM_gradient_boosting_regressor,
            'RandomForestRegressor': AVM_random_forest_regressor,
        }[self.model_name]
        X_train, y_train = self.extract_and_transform(samples)
        fitted = self.implementation_module.fit(self, X_train, y_train)
        return fitted.model # scikit learn's fitted model
    def get_attributes(self):
        'return both sets of attributes, with None if not used by that model'
        # Fix: removed a leftover pdb.set_trace() debugging breakpoint that
        # halted execution on every call.
        attribute_names = (
            'coef_', 'sparse_coef_', 'intercept_', 'n_iter_', # for linear
            'estimators_', 'feature_importances_', 'oob_score_', 'oob_prediction_', # for random forest
        )
        return {name: getattr(self.model, name, None) for name in attribute_names}
    def extract_and_transform(self, samples, transform_y=True):
        'return X and y'
        result = self.implementation_module.extract_and_transform(self, samples, transform_y)
        return result
    def predict(self, samples):
        # transform_y=False: the target is unknown at prediction time.
        X_test, y_test = self.extract_and_transform(samples, transform_y=False)
        assert y_test is None
        return self.implementation_module.predict(self, X_test)
    def setattr(self, parameter, value):
        # Fluent setter: returns self so calls can be chained.
        setattr(self, parameter, value)
        return self
# Dead-code guard: referencing otherwise-unused imports (pandas, pprint,
# Features) so linters don't flag them; the body is never executed.
if False:
    pd()
    pprint()
    Features()
|
dirkjanm/ldapdomaindump
|
ldapdomaindump/__main__.py
|
Python
|
mit
| 66 | 0 |
#!/usr/bin/env python
# Console entry point: delegate straight to the package's main().
import ldapdomaindump

ldapdomaindump.main()
|
mauerflitza/Probieren2
|
Webpage/cgi-bin/upload.py
|
Python
|
mit
| 1,801 | 0.048333 |
#!/usr/bin/python3
import os
import os.path
import cgi, cgitb
import re
import pickle
#own packages
import dbcPattern
def dbc_main(): # NEW except for the call to processInput
    """Read the uploaded DBC file from the CGI form, print the generated
    page, and return the parsed message list (or -1 if no file uploaded)."""
    form = cgi.FieldStorage() # standard cgi script lines to here!
    # use format of next two lines with YOUR names and default data
    filedata = form['upload']
    if filedata.file:
        contents, msg_list = processInput(filedata.file) # process input into a page
        print(contents)
        return msg_list
    return -1
def processInput(file):
    # Parse the uploaded DBC stream and collect every signal name.
    sig_num=0
    sig_list=[]
    '''Process input parameters and return the final page as a string.'''
    # NOTE(review): when `file` is falsy this function implicitly returns
    # None, which the caller then fails to unpack — confirm intended.
    if file: #field really is an upload
        #msg_list=[{mesg1}{mesg2}{mesg3}{...}]
        #Messages has numbered dicts signals in them
        msg_list = dbcPattern.dbcDataReader(file)
        for message in msg_list:
            # Each message dict holds sig_count numbered signal entries.
            for j in range(message['sig_count']):
                sig_num=sig_num+1
                sig_list.append(message[j]['sig_name'])
        return createHTML(sig_num, sig_list),msg_list
def createHTML(sig_num, sig_list):
    """Assemble the signal-selection page: Part1.txt + one
    "{ sig_sel: '<name>'}," entry per signal (sorted case-insensitively,
    trailing comma dropped) + Part2.txt. Also writes the result to
    htmltext.html and returns it.

    NOTE(review): sig_num is unused; kept for interface compatibility.
    """
    signale = ""
    with open("Part1.txt") as f:
        html_string = f.read()
    for sig_name in sorted(sig_list, key=str.lower):
        signale += "{ sig_sel: '%s'}," % (sig_name)
    html_string += signale[:-1]  # drop the trailing comma
    with open("Part2.txt") as f:
        html_string += f.read()
    with open("htmltext.html", 'w') as f:
        f.write(html_string)
    return html_string
# Must be copied into the main program later (original author's note).
try: # NEW
    cgitb.enable()
    print("Content-Type: text/html;charset:UTF-8") # say generating html
    print("\n\n")
    msg_list = dbc_main()
    # Persist the parsed messages for the datalogger to pick up.
    filename = os.path.join('/home/pi/datalogger/loggerconfigs/', 'testdump.txt')
    with open(filename, 'wb') as file:
        pickle.dump(msg_list, file)
except:
    cgi.print_exception() # catch and print errors (deliberate CGI catch-all)
|
dsparrow27/zoocore
|
zoo/libs/pyqt/uiconstants.py
|
Python
|
gpl-3.0
| 1,709 | 0.005266 |
# Colors
DARKBGCOLOR = tuple([93, 93, 93])
MEDBGCOLOR = tuple([73, 73, 73])
MAYABGCOLOR = tuple([68, 68, 68])

# DPI
DEFAULT_DPI = 96

# Pixel Size is not handled by dpi, use utils.dpiScale()
MARGINS = (2, 2, 2, 2)  # default margins left, top, right, bottom
SPACING = 2
SSML = 4  # the regular spacing of each widget, spacing is between each sub widget
SREG = 6  # the regular spacing of each widget, spacing is between each sub widget
SLRG = 10  # larger spacing of each widget, spacing is between each sub widget
SVLRG = 15  # very large spacing of each widget, spacing is between each sub widget
TOPPAD = 10  # padding between the top widget and the top of frame. ie top of a toolset
BOTPAD = 5  # padding between the bottom widget and the bottom of frame. ie bot of a toolset
REGPAD = 10  # padding between widgets
SMLPAD = 5
LRGPAD = 15
WINSIDEPAD = 10  # the overall window each side
WINTOPPAD = 10  # the overall window padding at the top of frame
WINBOTPAD = 10  # the overall window padding at the bottom of frame

# Button Width Sizes
BTN_W_ICN_SML = 10
BTN_W_ICN_REG = 20
BTN_W_ICN_LRG = 40
BTN_W_REG_SML = 90
BTN_W_REG_LRG = 180

# Button Styles
BTN_DEFAULT = 0  # Default zoo extended button with optional text or an icon.
BTN_TRANSPARENT_BG = 1  # Default zoo extended button w transparent bg.
BTN_ICON_SHADOW = 2  # Main zoo IconPushButton button (icon in a colored box) with shadow underline
BTN_DEFAULT_QT = 3  # Default style uses vanilla QPushButton and not zoo's extended button
BTN_ROUNDED = 4  # Rounded button stylesheeted bg color and stylesheeted icon colour

# Colors
COLOR_ERROR = "00ff06"  # fluorescent green
COLOR_ADMIN_GREEN = "17a600"
COLOR_ADMIN_GREEN_RGB = (23, 166, 0)
|
ActiveState/code
|
recipes/Python/578171_Just_Another_Password_Generator/recipe-578171.py
|
Python
|
mit
| 2,824 | 0.004958 |
###############################
# old_password_generator.py #
###############################
import string, random, sys
SELECT = string.ascii_letters + string.punctuation + string.digits
SAMPLE = random.SystemRandom().sample
def main():
    """Interactive loop: prompt for a size, generate and display a password."""
    while True:
        size = get_size()
        password = generate_pw(size)
        print_pause(password)
def get_size():
    """Prompt until the user enters an int in [1, 80]; exit on EOF."""
    while True:
        try:
            size = int(input('Size: '))
        except ValueError:
            print('Please enter a number.')
        except EOFError:
            # Ctrl-D / end of input terminates the program.
            sys.exit()
        else:
            if 1 <= size <= 80:
                return size
            print('Valid number range is 1 - 80.')
def generate_pw(size):
    """Generate a password of *size* characters until approved() accepts it.

    SAMPLE draws without replacement from SELECT, so characters are unique
    within a password (and size must not exceed len(SELECT)).
    """
    password = ''.join(SAMPLE(SELECT, size))
    while not approved(password):
        password = ''.join(SAMPLE(SELECT, size))
    return password
def approved(password):
    """Return True when no two adjacent characters belong to the same
    character group (upper/lower/punctuation/digits), per select()."""
    group = select(password[0])
    for character in password[1:]:
        trial = select(character)
        if trial is group:
            # Two consecutive characters from the same group: reject.
            return False
        group = trial
    return True
def select(character):
    """Return the character-class constant (uppercase, lowercase,
    punctuation, or digits) that contains *character*.

    Raises ValueError for characters outside all four groups (e.g. space).
    """
    groups = (string.ascii_uppercase, string.ascii_lowercase,
              string.punctuation, string.digits)
    found = next((g for g in groups if character in g), None)
    if found is None:
        raise ValueError('Character was not found in any group!')
    return found
def print_pause(*values, sep=' ', end='\n', file=sys.stdout):
    """print(*values) with the same keyword interface, then block until the
    user presses enter; EOF is silently tolerated."""
    print(*values, sep=sep, end=end, file=file)
    try:
        input()
    except EOFError:
        pass
if __name__ == '__main__':
main()
###############################
# new_password_generator.py #
###############################
from random import SystemRandom
from string import ascii_lowercase, ascii_uppercase, digits, punctuation
CHOICE = SystemRandom().choice
GROUPS = ascii_lowercase, ascii_uppercase, digits, punctuation
def main():
    """Interactive loop: prompt for a size and print a generated password."""
    while True:
        print('Code:', make_password(get_size()))
def get_size():
    """Prompt until the user enters an int in [10, 80]; SystemExit on EOF."""
    while True:
        try:
            size = int(input('Size: '))
        except ValueError:
            print('Please enter a number.')
        except EOFError:
            # End of input: leave the program cleanly.
            raise SystemExit()
        else:
            if 10 <= size <= 80:
                return size
            print('Valid number range is 10 - 80.')
def make_password(size):
    """Build a *size*-character password, never drawing the same character
    twice from one group and never drawing from the group just used.

    If a group runs out of characters before the password is complete,
    the whole attempt is discarded and generation restarts.
    """
    while True:  # restart from scratch whenever a group is exhausted
        chars = []
        remaining = tuple(map(set, GROUPS))
        candidates = remaining
        while True:
            picked_group = CHOICE(candidates)
            picked_char = CHOICE(tuple(picked_group))
            chars.append(picked_char)
            if len(chars) == size:
                return ''.join(chars)
            picked_group.remove(picked_char)
            if not picked_group:
                break
            # Next draw must come from a different group than this one.
            candidates = tuple(g for g in remaining if g is not picked_group)
if __name__ == '__main__':
main()
|
wutron/compbio
|
rasmus/testing.py
|
Python
|
mit
| 3,789 | 0.000792 |
import optparse
import os
import shutil
import sys
import unittest
from itertools import izip
from . import util
from . import stats
#=============================================================================
# common utility functions for testing
def clean_dir(path):
    """Recursively delete *path* if it exists; silently no-op otherwise."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def makedirs(path):
    """Create *path* (with parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def make_clean_dir(path):
    """Reset *path* to a fresh, empty directory (deleting any contents)."""
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)
    os.makedirs(path)
def fequal(f1, f2, rel=.0001, eabs=1e-12):
    """Assert that two floats are approximately equal.

    Passes when the values are exactly equal, when their absolute
    difference is below *eabs*, or when the relative error (vs f2,
    falling back to the other operand when one side is zero) is below *rel*.
    """
    if f1 == f2:
        return
    # Relative error; guard the zero cases to avoid dividing by zero.
    if f2 == 0:
        relative_err = f1
    elif f1 == 0:
        relative_err = f2
    else:
        relative_err = abs(f1 - f2) / abs(f2)
    if abs(f1 - f2) < eabs:
        return
    assert relative_err < rel, "%e != %e [rel=%f, abs=%f]" % (
        f1, f2, relative_err, abs(f1 - f2))
def fequals(f1, f2, rel=.0001, eabs=1e-12):
    """Elementwise fequal over two float iterables (Python 2 izip:
    stops at the shorter sequence; extra elements are ignored)."""
    for i, j in izip(f1, f2):
        fequal(i, j, rel=rel, eabs=eabs)
def integrate(func, a, b, step):
    # Left Riemann sum of func over [a, b) using util.frange with the given step.
    return sum(func(i) * step for i in util.frange(a, b, step))
def eq_sample_pdf(samples, pdf,
                  ndivs=20, start=-util.INF, end=util.INF, pval=.05,
                  step=None):
    """Asserts a sample matches a probability density distribution"""
    # Default bin width: sample range divided into ndivs bins.
    if step is None:
        step = (max(samples) - min(samples)) / float(ndivs)
    # Approximate CDF increment over [x, x+step) by numeric integration of the pdf.
    cdf = lambda x, params: integrate(pdf, x, x+step, step/10.0)
    chi2, p = stats.chi_square_fit(cdf, [], samples,
                                   ndivs=ndivs, start=start, end=end)
    # Fail when the chi-square fit rejects the distribution at level pval.
    assert p >= pval, p
def eq_sample_pmf(samples, pmf, pval=.05):
    """Asserts a sample matches a probability mass distribution"""
    import scipy.stats
    hist = util.hist_dict(samples)
    total = sum(hist.itervalues())
    observed = []
    expected = []
    for sample, count in hist.iteritems():
        # Standard chi-square validity rule: only use cells with >= 5 observations.
        if count >= 5:
            observed.append(count)
            expected.append(pmf(sample) * total)
    # NOTE(review): scipy.array is a legacy alias (removed in modern SciPy);
    # kept for fidelity with this Python-2-era codebase.
    chi2, p = scipy.stats.chisquare(
        scipy.array(observed), scipy.array(expected))
    assert p >= pval, p
# Module-level switch toggled via set_pausing(); True enables interactive pauses.
_do_pause = True


def pause(text="press enter to continue: "):
    """Pause until the user presses enter"""
    if _do_pause:
        sys.stderr.write(text)
        raw_input()  # Python 2: read and discard one line from stdin
def set_pausing(enabled=True):
    """Globally enable or disable the interactive pause() prompt."""
    global _do_pause
    _do_pause = enabled
#=============================================================================
# common unittest functions
def list_tests(stack=0):
    """Print "Class.test_method" for every unittest.TestCase in __main__,
    with the first docstring line when present.

    NOTE(review): the *stack* parameter is unused. Python 2 code
    (print statements, dict.iteritems).
    """
    # get environment
    var = __import__("__main__").__dict__
    for name, obj in var.iteritems():
        if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            for attr in dir(obj):
                if attr.startswith("test"):
                    print "%s.%s" % (name, attr),
                    doc = getattr(obj, attr).__doc__
                    if doc:
                        print "--", doc.split("\n")[0]
                    else:
                        print
def test_main():
    """Command-line test driver: parse -v/-q/-l/-p options, then hand the
    remaining arguments to unittest.main."""
    o = optparse.OptionParser()
    o.add_option("-v", "--verbose", action="store_true",
                 help="Verbose output")
    o.add_option("-q", "--quiet", action="store_true",
                 help="Minimal output")
    o.add_option("-l", "--list_tests", action="store_true")
    o.add_option("-p", "--pause", action="store_true")
    conf, args = o.parse_args()
    # -l: only list available tests, do not run anything.
    if conf.list_tests:
        list_tests(1)
        return
    if conf.pause:
        set_pausing(True)
    else:
        set_pausing(False)
    # process unittest arguments
    argv = [sys.argv[0]]
    if conf.verbose:
        argv.append("-v")
    if conf.quiet:
        argv.append("-q")
    argv.extend(args)
    # run unittest
    unittest.main(argv=argv)
|
ecleya/project_cron
|
main.py
|
Python
|
mit
| 2,772 | 0.002165 |
import json
import os
from AppKit import NSApplication, NSStatusBar, NSMenu, NSMenuItem, NSVariableStatusItemLength, NSImage
from PyObjCTools import AppHelper
from project_cron.models import Schedule
from threading import Timer
from project_cron.utils import logutil
class App(NSApplication):
    """macOS menu-bar app: loads Schedule objects from a JSON file and
    runs them on a 60-second background timer; menu items trigger a
    schedule's actions on demand."""

    def finishLaunching(self):
        """Build the status-bar item and menu, load schedules, start the timer."""
        # Make statusbar item
        statusbar = NSStatusBar.systemStatusBar()
        self.statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        self.icon = NSImage.alloc().initByReferencingFile_('icon.png')
        self.icon.setScalesWhenResized_(True)
        self.icon.setSize_((20, 20))
        self.statusitem.setImage_(self.icon)
        self._schedules = []
        self._menu_items = []
        self._initialize_schedules()
        self._initialize_menu()
        self._timer = Timer(60, self.timer_callback)
        self._timer.start()

    def _initialize_schedules(self):
        """Load Schedule objects from ~/Documents/schedules.json."""
        USER_ROOT = os.path.expanduser('~')
        DOCUMENTS = os.path.join(USER_ROOT, 'Documents')
        SCHEDULES = os.path.join(DOCUMENTS, 'schedules.json')
        # Fix: close the config file deterministically via a with-block.
        with open(SCHEDULES, encoding='utf8') as fp:
            schedules = json.load(fp)
        for raw_info in schedules:
            self._schedules.append(Schedule(raw_info))

    def _initialize_menu(self):
        """One menu entry per schedule (wired to execute_), plus Quit."""
        self.menubarMenu = NSMenu.alloc().init()
        for schedule in self._schedules:
            menu_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(schedule.name, 'execute:', '')
            self._menu_items.append(menu_item)
            self.menubarMenu.addItem_(menu_item)
        menu_item = NSMenuItem.separatorItem()
        self.menubarMenu.addItem_(menu_item)
        self.quit = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menubarMenu.addItem_(self.quit)
        self.statusitem.setMenu_(self.menubarMenu)
        self.statusitem.setToolTip_('Crow')

    def timer_callback(self):
        """Run every schedule, log failures, and re-arm a fresh 60 s timer."""
        self._timer = None
        for schedule in self._schedules:
            try:
                schedule.execute()
            except:
                import traceback
                logutil.error(schedule.name, traceback.format_exc())
        interval = 60
        self._timer = Timer(interval, self.timer_callback)
        self._timer.start()

    def execute_(self, notification):
        """Menu action: run the clicked schedule's actions immediately."""
        for schedule in self._schedules:
            if schedule.name == notification.title():
                try:
                    schedule.execute_actions()
                except:
                    import traceback
                    logutil.error(schedule.name, traceback.format_exc())
                schedule._reset()
if __name__ == "__main__":
app = App.sharedApplication()
AppHelper.runEventLoop()
|
MihawkHu/CS433_ParallerProgramming_Project
|
Project4/wordcount/python_wordcount/reducer.py
|
Python
|
mit
| 574 | 0.057491 |
import sys
current_word = None
current_count = 0
word = None
for line in sys.stdin:
line = line.strip()
word,count = line.split('\t',1)
|
try:
count = int(count)
except ValueError:
continue
if current_word == word:
current_count +=count
else:
if current_word:
print '%s\t%s' %(current_word,current_count)
current_count =count
current_word =word
if current_word ==word:
prin
|
t '%s\t%s' % (current_word,current_count)
|
yloiseau/Watson
|
watson/frames.py
|
Python
|
mit
| 4,355 | 0 |
import uuid
import arrow
from collections import namedtuple
HEADERS = ('start', 'stop', 'project', 'id', 'tags', 'updated_at')
class Frame(namedtuple('Frame', HEADERS)):
    # Immutable record of one tracked time frame:
    # (start, stop, project, id, tags, updated_at); start/stop are arrow
    # objects normalized to local time, updated_at stays UTC.
    def __new__(cls, start, stop, project, id, tags=None, updated_at=None,):
        # Coerce start/stop to arrow.Arrow; arrow raises RuntimeError on
        # unparseable input, which is re-raised as WatsonError.
        try:
            if not isinstance(start, arrow.Arrow):
                start = arrow.get(start)
            if not isinstance(stop, arrow.Arrow):
                stop = arrow.get(stop)
        except RuntimeError as e:
            from .watson import WatsonError
            raise WatsonError("Error converting date: {}".format(e))
        start = start.to('local')
        stop = stop.to('local')
        if updated_at is None:
            updated_at = arrow.utcnow()
        elif not isinstance(updated_at, arrow.Arrow):
            updated_at = arrow.get(updated_at)
        if tags is None:
            tags = []
        return super(Frame, cls).__new__(
            cls, start, stop, project, id, tags, updated_at
        )
    def dump(self):
        # Serializable tuple form: timestamps in UTC epoch seconds.
        start = self.start.to('utc').timestamp
        stop = self.stop.to('utc').timestamp
        updated_at = self.updated_at.timestamp
        return (start, stop, self.project, self.id, self.tags, updated_at)
    @property
    def day(self):
        # Midnight of the day the frame started.
        return self.start.floor('day')
    def __lt__(self, other):
        return self.start < other.start
    # NOTE(review): __lte__/__gte__ are NOT real Python dunders (those are
    # __le__/__ge__), so <=/>= fall back to tuple comparison — these two
    # methods are effectively dead code; confirm before renaming.
    def __lte__(self, other):
        return self.start <= other.start
    def __gt__(self, other):
        return self.start > other.start
    def __gte__(self, other):
        return self.start >= other.start
return self.start >= other.start
class Span(object):
    """Inclusive time window whose edges are snapped outward to whole
    *timeframe* boundaries (floor of start, ceil of stop)."""

    def __init__(self, start, stop, timeframe='day'):
        self.timeframe = timeframe
        # Snap the window to whole-timeframe boundaries.
        self.start = start.floor(timeframe)
        self.stop = stop.ceil(timeframe)

    def __contains__(self, frame):
        """True when *frame* lies entirely within this span."""
        begins_inside = frame.start >= self.start
        ends_inside = frame.stop <= self.stop
        return begins_inside and ends_inside
class Frames(object):
    """Mutable, ordered collection of Frame rows.

    Rows are addressable three ways via __getitem__: by column name
    (returns the whole column as a tuple), by integer index, or by a
    frame-id prefix. `changed` tracks whether any mutation occurred.
    """

    def __init__(self, frames=None):
        if not frames:
            frames = []
        rows = [Frame(*frame) for frame in frames]
        self._rows = rows
        self.changed = False

    def __len__(self):
        return len(self._rows)

    def __getitem__(self, key):
        if key in HEADERS:
            return tuple(self._get_col(key))
        elif isinstance(key, int):
            return self._rows[key]
        else:
            return self._rows[self._get_index_by_id(key)]

    def __setitem__(self, key, value):
        """Replace by index or id; unknown ids append a new frame."""
        self.changed = True
        if isinstance(value, Frame):
            frame = value
        else:
            frame = self.new_frame(*value)
        if isinstance(key, int):
            self._rows[key] = frame
        else:
            frame = frame._replace(id=key)
            try:
                self._rows[self._get_index_by_id(key)] = frame
            except KeyError:
                self._rows.append(frame)

    def __delitem__(self, key):
        self.changed = True
        if isinstance(key, int):
            del self._rows[key]
        else:
            del self._rows[self._get_index_by_id(key)]

    def _get_index_by_id(self, id):
        """Index of the first frame whose id starts with *id* (KeyError if none)."""
        try:
            return next(
                i for i, v in enumerate(self['id']) if v.startswith(id)
            )
        except StopIteration:
            raise KeyError("Frame with id {} not found.".format(id))

    def _get_col(self, col):
        """Yield the *col* field of every row, in order."""
        index = HEADERS.index(col)
        for row in self._rows:
            yield row[index]

    def add(self, *args, **kwargs):
        """Create a new frame, append it, and return it."""
        self.changed = True
        frame = self.new_frame(*args, **kwargs)
        self._rows.append(frame)
        return frame

    def new_frame(self, project, start, stop, tags=None, id=None,
                  updated_at=None):
        """Build a Frame, minting a random hex id when none is given."""
        if not id:
            id = uuid.uuid4().hex
        return Frame(start, stop, project, id, tags=tags,
                     updated_at=updated_at)

    def dump(self):
        """Serializable tuple-of-tuples form of all rows."""
        return tuple(frame.dump() for frame in self._rows)

    def filter(self, projects=None, tags=None, span=None):
        """Lazily yield frames matching all of the given criteria
        (each criterion is skipped when None)."""
        return (
            frame for frame in self._rows
            if (projects is None or frame.project in projects) and
            (tags is None or any(tag in frame.tags for tag in tags)) and
            (span is None or frame in span)
        )

    def span(self, start, stop):
        return Span(start, stop)
|
coldeasy/python-driver
|
tests/integration/cqlengine/test_batch_query.py
|
Python
|
apache-2.0
| 8,003 | 0.002249 |
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import sure
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import drop_table, sync_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery
from tests.integration.cqlengine.base import BaseCassEngTestCase
from mock import patch
class TestMultiKeyModel(Model):
    # Fixture model with a composite primary key (partition, cluster);
    # count and text are optional payload columns.
    partition = columns.Integer(primary_key=True)
    cluster = columns.Integer(primary_key=True)
    count = columns.Integer(required=False)
    text = columns.Text(required=False)
class BatchQueryTests(BaseCassEngTestCase):
    """Integration tests: mutations routed through a BatchQuery must not be
    visible until execute() runs (explicitly or on context-manager exit)."""
    @classmethod
    def setUpClass(cls):
        super(BatchQueryTests, cls).setUpClass()
        drop_table(TestMultiKeyModel)
        sync_table(TestMultiKeyModel)
    @classmethod
    def tearDownClass(cls):
        super(BatchQueryTests, cls).tearDownClass()
        drop_table(TestMultiKeyModel)
    def setUp(self):
        super(BatchQueryTests, self).setUp()
        # Clear any rows left in this test's partition by earlier tests.
        self.pkey = 1
        for obj in TestMultiKeyModel.filter(partition=self.pkey):
            obj.delete()
    def test_insert_success_case(self):
        b = BatchQuery()
        TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4')
        # Not visible before execute().
        with self.assertRaises(TestMultiKeyModel.DoesNotExist):
            TestMultiKeyModel.get(partition=self.pkey, cluster=2)
        b.execute()
        TestMultiKeyModel.get(partition=self.pkey, cluster=2)
    def test_update_success_case(self):
        inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
        b = BatchQuery()
        inst.count = 4
        inst.batch(b).save()
        # Old value persists until execute().
        inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
        self.assertEqual(inst2.count, 3)
        b.execute()
        inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
        self.assertEqual(inst3.count, 4)
    def test_delete_success_case(self):
        inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
        b = BatchQuery()
        inst.batch(b).delete()
        # Row still readable before execute().
        TestMultiKeyModel.get(partition=self.pkey, cluster=2)
        b.execute()
        with self.assertRaises(TestMultiKeyModel.DoesNotExist):
            TestMultiKeyModel.get(partition=self.pkey, cluster=2)
    def test_context_manager(self):
        # Context-manager exit triggers execute() implicitly.
        with BatchQuery() as b:
            for i in range(5):
                TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=i, count=3, text='4')
            for i in range(5):
                with self.assertRaises(TestMultiKeyModel.DoesNotExist):
                    TestMultiKeyModel.get(partition=self.pkey, cluster=i)
        for i in range(5):
            TestMultiKeyModel.get(partition=self.pkey, cluster=i)
    def test_bulk_delete_success_case(self):
        for i in range(1):
            for j in range(5):
                TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{0}:{1}'.format(i,j))
        with BatchQuery() as b:
            TestMultiKeyModel.objects.batch(b).filter(partition=0).delete()
            # Bulk delete deferred until the batch executes on exit.
            self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 5)
        self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 0)
        #cleanup
        for m in TestMultiKeyModel.all():
            m.delete()
    def test_empty_batch(self):
        # Executing an empty batch (explicitly or via context) must not raise.
        b = BatchQuery()
        b.execute()
        with BatchQuery() as b:
            pass
class BatchQueryCallbacksTests(BaseCassEngTestCase):
    """Unit tests for BatchQuery callback registration, firing rules, and
    repeated-execution warnings."""
    def test_API_managing_callbacks(self):
        # Callbacks can be added at init and after
        def my_callback(*args, **kwargs):
            pass
        # adding on init:
        batch = BatchQuery()
        batch.add_callback(my_callback)
        batch.add_callback(my_callback, 2, named_arg='value')
        batch.add_callback(my_callback, 1, 3)
        self.assertEqual(batch._callbacks, [
            (my_callback, (), {}),
            (my_callback, (2,), {'named_arg':'value'}),
            (my_callback, (1, 3), {})
        ])
    def test_callbacks_properly_execute_callables_and_tuples(self):
        call_history = []
        def my_callback(*args, **kwargs):
            call_history.append(args)
        # adding on init:
        batch = BatchQuery()
        batch.add_callback(my_callback)
        batch.add_callback(my_callback, 'more', 'args')
        batch.execute()
        self.assertEqual(len(call_history), 2)
        self.assertEqual([(), ('more', 'args')], call_history)
    def test_callbacks_tied_to_execute(self):
        """Batch callbacks should NOT fire if batch is not executed in context manager mode"""
        call_history = []
        def my_callback(*args, **kwargs):
            call_history.append(args)
        with BatchQuery() as batch:
            batch.add_callback(my_callback)
        self.assertEqual(len(call_history), 1)
        class SomeError(Exception):
            pass
        with self.assertRaises(SomeError):
            with BatchQuery() as batch:
                batch.add_callback(my_callback)
                # this error bubbling up through context manager
                # should prevent callback runs (along with b.execute())
                raise SomeError
        # still same call history. Nothing added
        self.assertEqual(len(call_history), 1)
        # but if execute ran, even with an error bubbling through
        # the callbacks also would have fired
        with self.assertRaises(SomeError):
            with BatchQuery(execute_on_exception=True) as batch:
                batch.add_callback(my_callback)
                raise SomeError
        # updated call history
        self.assertEqual(len(call_history), 2)
    def test_callbacks_work_multiple_times(self):
        """
        Tests that multiple executions of execute on a batch statement
        logs a warning, and that we don't encounter an attribute error.
        @since 3.1
        @jira_ticket PYTHON-445
        @expected_result warning message is logged
        @test_category object_mapper
        """
        call_history = []
        def my_callback(*args, **kwargs):
            call_history.append(args)
        with warnings.catch_warnings(record=True) as w:
            with BatchQuery() as batch:
                batch.add_callback(my_callback)
                batch.execute()
            batch.execute()
        self.assertEqual(len(w), 2) # package filter setup to warn always
        self.assertRegexpMatches(str(w[0].message), r"^Batch.*multiple.*")
    def test_disable_multiple_callback_warning(self):
        """
        Tests that multiple executions of a batch statement
        don't log a warning when warn_multiple_exec flag is set, and
        that we don't encounter an attribute error.
        @since 3.1
        @jira_ticket PYTHON-445
        @expected_result warning message is logged
        @test_category object_mapper
        """
        call_history = []
        def my_callback(*args, **kwargs):
            call_history.append(args)
        with patch('cassandra.cqlengine.query.BatchQuery.warn_multiple_exec', False):
            with warnings.catch_warnings(record=True) as w:
                with BatchQuery() as batch:
                    batch.add_callback(my_callback)
                    batch.execute()
                batch.execute()
            self.assertFalse(w)
|
andersonsilvade/python_C
|
Python32/ED/Distâncias em uma Rede.py
|
Python
|
mit
| 444 | 0.027027 |
# Adjacency matrix of a 6-node directed graph: A[i][j] == 1 means edge i -> j.
A = [[0, 1, 0, 0, 0, 0],
     [0, 0, 1, 0, 0, 0],
     [0, 0, 0, 0, 1, 0],
     [0, 0, 1, 0, 1, 0],
     [1, 0, 0, 0, 0, 0],
     [0, 1, 0, 0, 0, 0]]
def Distancias(n, origem, grafo=None):
    """BFS shortest-path distances (in edges) from node *origem*.

    Args:
        n: number of nodes (0 .. n-1).
        origem: source node index.
        grafo: optional n x n adjacency matrix; defaults to the
            module-level matrix A for backward compatibility.

    Returns:
        List d of length n where d[v] is the hop distance from origem
        to v, or -1 when v is unreachable.
    """
    from collections import deque  # O(1) pops from the left of the queue
    if grafo is None:
        grafo = A
    d = [-1] * n  # -1 marks "not yet visited"
    d[origem] = 0
    fila = deque([origem])
    while fila:
        x = fila.popleft()  # fix: replaces O(n) `del f[0]` list pop
        for y in range(n):
            if grafo[x][y] == 1 and d[y] == -1:
                d[y] = d[x] + 1
                # (removed a leftover debug print of each discovered node)
                fila.append(y)
    return d
# Demo: distances from node 3 in the module-level graph A.
print(Distancias(6, 3))
|
mlperf/training_results_v0.5
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/common_layers.py
|
Python
|
apache-2.0
| 132,971 | 0.007994 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import contextlib
import functools
from functools import partial
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
# NOTE(review): function.Defun is a legacy TF-1.x API — confirm the target
# TF version still supports it before upgrading this file.
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
  """Identity operation whose gradient is converted to a `Tensor`.
  Currently, the gradient to `tf.concat` is particularly expensive to
  compute if dy is an `IndexedSlices` (a lack of GPU implementation
  forces the gradient operation onto CPU). This situation occurs when
  the output of the `tf.concat` is eventually passed to `tf.gather`.
  It is sometimes faster to convert the gradient to a `Tensor`, so as
  to get the cheaper gradient for `tf.concat`. To do this, replace
  `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
  Args:
    x: A `Tensor`.
  Returns:
    The input `Tensor`.
  """
  return x
def is_xla_compiled():
  """Whether we are building graph that will be compiled by XLA.
  This checks whether the code is executing within an XLA context.
  If True, model authors should ensure the graph they build is compilable by
  XLA. Specifically, they should ensure that all ops have XLA implementations
  and that all shapes are statically known.
  Returns:
    bool, whether the current graph will be compiled for XLA.
  """
  # NOTE(review): relies on TF-internal APIs (_get_control_flow_context,
  # control_flow_util) that may change between TF releases.
  ctxt = tf.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
  return control_flow_util.GetContainingXLAContext(ctxt) is not None
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
  """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
  Instead of specifying noise_shape, this function takes broadcast_dims -
  a list of dimension numbers in which noise_shape should be 1. The random
  keep/drop tensor has dimensionality 1 along these dimensions.
  Args:
    x: a floating point tensor.
    keep_prob: A scalar Tensor with the same type as x.
      The probability that each element is kept.
    broadcast_dims: an optional list of integers
      the dimensions along which to broadcast the keep/drop flags.
    **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
  Returns:
    Tensor of the same shape as x.
  """
  assert "noise_shape" not in kwargs
  if broadcast_dims:
    shape = tf.shape(x)
    ndims = len(x.get_shape())
    # Allow dimensions like "-1" as well.
    broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
    # noise_shape mixes static 1s (broadcast dims) with dynamic sizes from tf.shape.
    kwargs["noise_shape"] = [
        1 if i in broadcast_dims else shape[i] for i in range(ndims)
    ]
  return tf.nn.dropout(x, keep_prob, **kwargs)
def comma_separated_string_to_integer_list(s):
  """Parse a comma-separated string like "1,2,3" into a list of ints.

  Empty pieces (from leading/trailing/double commas) are skipped.
  """
  return [int(piece) for piece in s.split(",") if piece]
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    scaled = 1.2 * tf.sigmoid(x) - 0.1
    return tf.minimum(1.0, tf.maximum(0.0, scaled))
def hard_sigmoid(x, saturation_limit=0.9):
  """Hard sigmoid: clip(0.5 * x + 0.5, 0, 1) plus a saturation penalty.

  Args:
    x: a Tensor.
    saturation_limit: values with abs(x) beyond this limit contribute to
      the saturation cost.

  Returns:
    A pair (y, saturation_cost): the clipped activation and the mean
    penalty for pre-activations past the saturation limit.
  """
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  # Reconstructed from a corrupted source line: the affine map 0.5 * x + 0.5.
  x_shifted = 0.5 * x + 0.5
  return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
  """Hard tanh: clip x to [-1, 1] plus a mean saturation penalty."""
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  lower_clipped = tf.maximum(x, -1.0)
  return tf.minimum(1.0, lower_clipped), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.

  Args:
    max_step: the step at which the value reaches 1.0.
    min_value: the starting value at step 0.
    step: the current step; defaults to the global step (returns 1.0 when
      no global step exists, e.g. at inference time).

  Returns:
    A scalar Tensor (or 1.0).
  """
  # Reconstructed from a corrupted source line: per-step decay base so that
  # inv_base ** max_step == min_value.
  inv_base = tf.exp(tf.log(min_value) / float(max_step))
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    return 1.0
  step = tf.to_float(step)
  return inv_base**tf.maximum(float(max_step) - step, 0.0)
def inverse_lin_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available (e.g. outside training) - no decay.
    return 1.0
  fraction = tf.minimum(tf.to_float(step) / float(max_step), 1.0)
  return fraction * (1.0 - min_value) + min_value
def shakeshake2_py(x, y, equal=False, individual=False):
  """The shake-shake sum of 2 tensors, python version.

  Args:
    x: a Tensor.
    y: a Tensor broadcast-compatible with x.
    equal: if True, use a fixed 0.5 mixing weight.
    individual: if True, draw one random weight per leading-dim element.

  Returns:
    alpha * x + (1 - alpha) * y for the chosen mixing weight(s) alpha.
  """
  if equal:
    alpha = 0.5
  elif individual:
    # Bug fix: tf.get_shape does not exist (AttributeError); use the dynamic
    # shape's leading dimension instead.
    alpha = tf.random_uniform(tf.shape(x)[:1])
  else:
    alpha = tf.random_uniform([])
  return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Recomputes a shake-shake mix with a freshly drawn random alpha and
  backpropagates dy through it, so the backward mixing weight differs from
  the forward one (the "shake-shake" trick).
  """
  y = shakeshake2_py(x1, x2)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Same as shakeshake2_grad but draws one random backward mixing weight per
  leading-dim element (individual=True).
  """
  y = shakeshake2_py(x1, x2, individual=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Backward pass uses a fixed 50/50 mix (equal=True) rather than a random
  alpha.
  """
  y = shakeshake2_py(x1, x2, equal=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
  """The shake-shake function with a different alpha for forward/backward.

  Forward uses a random alpha; the attached grad_func re-randomizes alpha
  on the backward pass.
  """
  return shakeshake2_py(x1, x2)
@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
  """Shake-shake with one random alpha per leading-dim element."""
  return shakeshake2_py(x1, x2, individual=True)
@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
  """Shake-shake with random forward alpha but an equal (0.5) backward mix.

  The grad_func (shakeshake2_equal_grad) uses equal=True on the backward
  pass.
  """
  return shakeshake2_py(x1, x2)
def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2."""
  if len(xs) == 1:
    return xs[0]
  # Recursively shake-shake each half, then combine the two results.
  mid = (len(xs) + 1) // 2
  left = shakeshake(xs[:mid], equal_grad=equal_grad)
  right = shakeshake(xs[mid:], equal_grad=equal_grad)
  if equal_grad:
    return shakeshake2_eqgrad(left, right)
  return shakeshake2(left, right)
def convert_rgb_to_real(x):
  """Conversion of pixel values to real numbers in [0, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    return tf.to_float(x) / 255.0
def convert_rgb_to_symmetric_real(x):
  """Conversion of pixel values to real numbers in [-1, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    # Map each intensity in [0, 255] linearly onto [-1, 1].
    return tf.to_float(x) / 127.5 - 1
def convert_real_to_rgb(x):
  """Conversion of real numbers in [0, 1] to pixel values."""
  with tf.name_scope("real_to_rgb", values=[x]):
    return x * 255.0
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
  """Make x n-d with squeeze and expand_dims."""
  # Too many axes: repeatedly squeeze out squeeze_dim.
  while len(x.shape) > n:
    x = tf.squeeze(x, [squeeze_dim])
  # Too few axes: repeatedly add a size-1 axis at expand_dim.
  while len(x.shape) < n:
    x = tf.expand_dims(x, expand_dim)
  return x
def standardize_images(x):
"""Image standardization on batches and videos."""
with tf.name_scope("standardize_images", [x]):
x_shape = shape_list(x)
x = tf.to_float(tf.reshape(x, [-
|
chrxr/wagtail
|
wagtail/wagtailcore/migrations/0023_alter_page_revision_on_delete_behaviour.py
|
Python
|
bsd-3-clause
| 622 | 0.001608 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter PageRevision.user to survive user deletion (SET_NULL).

    Class header and body reconstructed from corrupted source lines; the
    operation content is unchanged.
    """

    dependencies = [
        ('wagtailcore', '0022_add_site_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pagerevision',
            name='user',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.SET_NULL,
                verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True
            ),
        ),
    ]
|
aalpern/tessera
|
tessera-server/tessera/views_api.py
|
Python
|
apache-2.0
| 10,412 | 0.004898 |
# -*- mode:python -*-
import flask
import json
import logging
from datetime import datetime
import inflection
from functools import wraps
from flask import request, url_for
from werkzeug.exceptions import HTTPException
from .client.api.model import *
from . import database
from . import helpers
from .application import db
mgr = database.DatabaseManager(db)
log = logging.getLogger(__name__)
api = flask.Blueprint('api', __name__)
# =============================================================================
# API Helpers
# =============================================================================
def route_api(application, *args, **kwargs):
    """Decorator factory: register a view on `application` with JSON output.

    The wrapped view may return a bare value, a (value, status_code) pair,
    or a (value, status_code, headers) triple; the result is serialized
    with helpers.jsonify. HTTPExceptions are converted to the API's JSON
    error format.
    """
    def decorator(fn):
        @application.route(*args, **kwargs)
        @wraps(fn)
        def wrapper(*args, **kwargs):
            headers = None
            status_code = 200
            try:
                value = fn(*args, **kwargs)
            except HTTPException as e:
                # Re-raise with a JSON body attached to the error response.
                raise helpers.set_exception_response(e)
            if isinstance(value, tuple):
                if len(value) > 2:
                    headers = value[2]
                status_code = value[1]
                value = value[0]
            return helpers.jsonify(value, status_code, headers)
        # NOTE(review): deliberately returns the undecorated fn - the route
        # was already registered on `wrapper` by application.route above.
        return fn
    return decorator
def _dashboard_sort_column():
    """Return a SQLAlchemy column descriptor to sort results by, based on
    the 'sort' and 'order' request parameters.
    """
    sortable = {
        'created': database.DashboardRecord.creation_date,
        'modified': database.DashboardRecord.last_modified_date,
        'category': database.DashboardRecord.category,
        'id': database.DashboardRecord.id,
        'title': database.DashboardRecord.title,
    }
    # Unknown sort keys fall back to creation date.
    column = sortable.get(helpers.get_param('sort', 'created'),
                          database.DashboardRecord.creation_date)
    order = helpers.get_param('order')
    if order == 'desc' or order == u'desc':
        return column.desc()
    return column.asc()
def _set_dashboard_hrefs(dash):
    """Add the various ReSTful hrefs to an outgoing dashboard
    representation. dash should be the dictionary form of the dashboard,
    not the model object.
    """
    dash_id = dash['id']
    dash['href'] = url_for('api.dashboard_get', id=dash_id)
    dash['definition_href'] = url_for('api.dashboard_get_definition',
                                      id=dash_id)
    dash['view_href'] = url_for('ui.dashboard_with_slug',
                                id=dash_id,
                                slug=inflection.parameterize(dash['title']))
    if 'definition' in dash:
        # The embedded definition carries its own href as well.
        dash['definition']['href'] = url_for('api.dashboard_get_definition',
                                             id=dash_id)
    return dash
def _dashboards_response(dashboards):
    """Return a Flask response object for a list of dashboards in API
    format. dashboards must be a list of dashboard model objects, which
    will be converted to their JSON representation.

    (Definition reconstructed from corrupted source lines; logic unchanged.)
    """
    if not isinstance(dashboards, list):
        dashboards = [dashboards]
    include_definition = helpers.get_param_boolean('definition', False)
    return [_set_dashboard_hrefs(d.to_json(include_definition=include_definition))
            for d in dashboards]
def _set_tag_hrefs(tag):
    """Add ReSTful href attributes to a tag's dictionary representation."""
    tag['href'] = url_for('api.tag_get', id=tag['id'])
    return tag
def _tags_response(tags):
    """Return a Flask response object for a list of tags in API format.

    tags may be a single tag model object or a list of them; each is
    converted to its JSON representation with hrefs attached.
    """
    if not isinstance(tags, list):
        tags = [tags]
    return [_set_tag_hrefs(t.to_json()) for t in tags]
# =============================================================================
# Dashboards
# =============================================================================
@route_api(api, '/dashboard/')
def dashboard_list():
    """Listing for all dashboards. Returns just the metadata, not the
    definitions.
    """
    query = database.DashboardRecord.query
    imported_from = request.args.get('imported_from')
    if imported_from:
        # Optionally restrict to dashboards imported from a given source.
        query = query.filter_by(imported_from=imported_from)
    query = query.order_by(_dashboard_sort_column())
    return _dashboards_response(list(query.all()))
@route_api(api, '/dashboard/tagged/<tag>')
def dashboard_list_tagged(tag):
    """Listing for a set of dashboards with a tag applied. Returns just
    the metadata, not the definitions.
    """
    record = database.TagRecord.query.filter_by(name=tag).first()
    if not record:
        return _dashboards_response([])
    ordered = record.dashboards.order_by(_dashboard_sort_column())
    return _dashboards_response(list(ordered))
@route_api(api, '/dashboard/category/<category>')
def dashboard_list_dashboards_in_category(category):
    """Listing for a set of dashboards in a specified category. Returns
    just the metadata, not the definitions.
    """
    query = (database.DashboardRecord.query
             .filter_by(category=category)
             .order_by(_dashboard_sort_column()))
    return _dashboards_response(list(query))
@route_api(api, '/dashboard/category/')
def dashboard_list_all_dashboard_categories():
    """List every dashboard category with its dashboard count."""
    rows = db.session.query(
        database.DashboardRecord.category,
        db.func.count(database.DashboardRecord.category)
    ).group_by(database.DashboardRecord.category).all()
    return [{'name': name, 'count': count} for (name, count) in rows]
@route_api(api, '/dashboard/<id>')
def dashboard_get(id):
    """Get the metadata for a single dashboard."""
    dashboard = database.DashboardRecord.query.get_or_404(id)
    rendering = helpers.get_param('rendering', False)
    include_definition = helpers.get_param_boolean('definition', False)
    dash = _set_dashboard_hrefs(
        dashboard.to_json(rendering or include_definition))
    if rendering:
        # Rendering clients also need the current user preferences.
        dash['preferences'] = helpers.get_preferences()
    return dash
@route_api(api, '/dashboard/<id>/for-rendering')
def dashboard_get_for_rendering(id):
    """Get a dashboard with its definition, and current settings necessary
    for rendering.
    """
    record = database.DashboardRecord.query.get_or_404(id)
    return {
        'dashboard': _set_dashboard_hrefs(record.to_json(True)),
        'preferences': helpers.get_preferences(),
    }
@route_api(api, '/dashboard/', methods=['POST'])
def dashboard_create():
    """Create a new dashboard with an empty definition.

    Returns 400 if the required 'title' field is missing; otherwise 201
    with a Location header plus API and UI hrefs for the new dashboard.
    """
    dashboard = database.DashboardRecord.from_json(request.json)
    if not dashboard.title:
        return {
            'error_message': "Missing required field 'title'"
        }, 400
    if 'definition' in request.json:
        dashboard.definition = database.DefinitionRecord(dumps(request.json['definition']))
    else:
        # No definition supplied - store an empty one.
        dashboard.definition = database.DefinitionRecord(dumps(DashboardDefinition()))
    mgr.store_dashboard(dashboard)
    href = url_for('api.dashboard_get', id=dashboard.id)
    return {
        'dashboard_href' : href,
        'view_href' : url_for('ui.dashboard_with_slug',
                              id=dashboard.id,
                              slug=inflection.parameterize(dashboard.title))
    }, 201, { 'Location' : href }
@route_api(api, '/dashboard/<id>', methods=['PUT'])
def dashboard_update(id):
    """Update the metadata for an existing dashboard."""
    dashboard = database.DashboardRecord.query.get_or_404(id)
    dashboard.merge_from_json(request.json)
    mgr.store_dashboard(dashboard)
    # TODO - return similar to create, above
    return {}
@route_api(api, '/dashboard/<id>', methods=['DELETE'])
def dashboard_delete(id):
"""Delete a dashboard. Use with caution.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
db.sessi
|
bugsduggan/locust
|
docs/conf.py
|
Python
|
mit
| 2,962 | 0.005739 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import os
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
from locust import __version__
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# autoclass options
#autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
#templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Locust'
#copyright = ''
# Intersphinx config
intersphinx_mapping = {
'requests': ('http://requests.readthedocs.org/en/latest/', None),
}
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# (Assignment reconstructed from a corrupted source line.)
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# Sphinx will recurse into subversion configuration folders and try to read
# any document file within. These should be ignored.
# Note: exclude_dirnames is new in Sphinx 0.5
exclude_dirnames = []
# Options for HTML output
# -----------------------
html_show_sourcelink = False
html_file_suffix = ".html"
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# HTML theme
#html_theme = "haiku"
#html_theme = "default"
#html_theme_options = {
# "rightsidebar": "true",
# "codebgcolor": "#fafcfa",
# "bodyfont": "Arial",
#}
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
|
physicalattraction/PATinderBot
|
src/common.py
|
Python
|
gpl-3.0
| 821 | 0 |
import os
from typing import Dict, List, Union
OptionalJSON = Union[List, Dict, float, int, str, bool, None]
def ensure_dir_exists(directory):
    """Create *directory* if it does not exist yet.

    Uses os.makedirs(exist_ok=True) instead of an exists()/mkdir() pair:
    this avoids the TOCTOU race where the directory appears between the
    check and the mkdir call, and also creates any missing parents.
    """
    os.makedirs(directory, exist_ok=True)
def get_dir(directory: str) -> str:
    """
    Return the complete path to *directory* under the project root,
    creating the directory if it does not exist yet.

    Current directory structure::

        PATinderBot
            src
            img
                like
                match
                nope
            json
            data

    :param directory: name of the directory to resolve, relative to the
        project root (the parent of this file's directory)
    :return: path string to the requested directory
    """
    current_dir = os.path.dirname(__file__)
    project_dir = os.path.join(current_dir, '..')
    result = os.path.join(project_dir, directory)
    ensure_dir_exists(result)
    return result
|
taijiji/sample_jsnapy
|
run_test_bgp_advertised_route.py
|
Python
|
mit
| 2,089 | 0.011967 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
# arranged print
from pprint import pprint, pformat
# Jinja2 Template Engine
from jinja2 import Template, Environment
# JSNAPy
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.jsnapy import SnapAdmin
|
template_dir_name = './test_templates/'
template_base_name = 'test_bgp_advertised_route.jinja2'
# Template parameters for the advertised-route test (literal reconstructed
# from a corrupted source line): the BGP neighbor and the expected prefix.
param_advertised_route = {
    "neighbor_address_ipv4": "192.168.35.2",
    "advertised_route_address_ipv4": "10.10.10.0",
    "advertised_route_subnet_ipv4": "24",
}
print 'Load test_template : '
template_filename = template_dir_name + template_base_name
with open(template_filename, 'r') as conf:
template_txt = conf.read()
test_yml = Environment().from_string(template_txt).render(param_advertised_route)
test_base_name = template_base_name.rstrip('.jinja2') +\
'_' + param_advertised_route["neighbor_address_ipv4"] + '.yml'
test_base_name = test_base_name.rstrip('.yml').replace('.','-') + '.yml'
print 'Test file : ' + test_base_name
print 'Test_yml: ' + test_yml
print 'Save test on ./tests : '
test_dir_name = './tests/'
test_filename = test_dir_name + test_base_name
with open(test_filename, 'w') as f:
f.write(test_yml)
print test_filename
jsnapy_config =\
'''
tests:
- %s
''' % (test_filename)
dev1 = Device(
host = '192.168.34.16',
user = 'user1',
password = 'password1')
dev1.open()
jsnapy = SnapAdmin()
snapcheck_dict = jsnapy.snapcheck(
data = jsnapy_config,
dev = dev1,
file_name = "snap01")
print '##### JSNAPy Test : Start #####'
for snapcheck in snapcheck_dict:
print "Devece : ", snapcheck.device
print "Final result : ", snapcheck.result
print "Total passed : ", snapcheck.no_passed
print "Total failed : ", snapcheck.no_failed
print 'snapcheck test_details : '
print '-'*30
pprint(dict(snapcheck.test_details))
print '-'*30
print '##### JSNAPy Test : End #####'
dev1.close()
|
Smerity/glove-guante
|
cosine_similarity.py
|
Python
|
mit
| 1,457 | 0.008922 |
import sys
import numpy as np
if __name__ == '__main__':
print 'Loading word vectors...'
wordvecs = None
wordlist = []
for i, line in enumerate(sys.
|
stdin):
word, vec = line.strip().split(' ', 1)
vec = map(float, vec.split())
if wordvecs is None:
wordvecs = np.ones((400000, len(vec)), dtype=np.float)
wordvecs[i] = vec
wordlist.append(word)
words = dict((k, wordvecs[v]) for v, k in enumerate(wordlist))
tests = [('he', words['he']), ('she', words['she'
|
])]
tests = [
('athens-greece+berlin', words['athens'] - words['greece'] + words['berlin']),
('sydney-australia+berlin', words['sydney'] - words['australia'] + words['berlin']),
('australia-sydney+germany', words['australia'] - words['sydney'] + words['berlin']),
('king-male+female', words['king'] - words['male'] + words['female']),
('king-man+woman', words['king'] - words['man'] + words['woman']),
('queen-female+male', words['queen'] - words['female'] + words['male']),
('queen-woman+man', words['queen'] - words['woman'] + words['man']),
('plane-air+rail', words['train'] - words['air'] + words['rail']),
]
for test, tvec in tests:
results = []
print '=-=-' * 10
print 'Testing {}'.format(test)
res = np.dot(wordvecs, tvec) / np.linalg.norm(tvec) / np.linalg.norm(wordvecs, axis=1)
results = zip(res, wordlist)
print '\n'.join([w for _, w in sorted(results, reverse=True)[:20]])
|
dichen001/Go4Jobs
|
JackChen/string/385. Mini Parser.py
|
Python
|
gpl-3.0
| 2,721 | 0.00294 |
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# ""
|
"
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a si
|
ngle integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
    def deserialize(self, s):
        """Parse the serialized nested-list string s into a NestedInteger.

        Recursive-descent parser over an integer cursor. Replaces the
        original list-based version whose repeated s.pop(0) calls made
        parsing O(n^2); indexing keeps it O(n) with identical results.

        :type s: str
        :rtype: NestedInteger
        """
        def parse(i):
            # Parse one element starting at index i.
            # Returns (NestedInteger, index just past the element).
            if s[i] == '[':
                result = NestedInteger()
                i += 1
                while s[i] != ']':
                    elem, i = parse(i)
                    result.add(elem)
                    if s[i] == ',':
                        i += 1
                return result, i + 1
            # Bare integer (possibly negative).
            j = i
            while j < len(s) and s[j] in '-0123456789':
                j += 1
            return NestedInteger(int(s[i:j])), j

        return parse(0)[0]
|
Matrixeigs/Optimization
|
Two_stage_stochastic_optimization/optimal_power_flows/opf_branch_power.py
|
Python
|
mit
| 4,744 | 0.005691 |
"""
Optimal power flow based on branch power flow modelling
Additional case33 is added to the test cases
Note: The proposed method has been verified
@author: Tianyang Zhao
@email: zhaoty@ntu.edu.sg
"""
from Two_stage_stochastic_optimization.power_flow_modelling import case33
from pypower import runopf
from gurobipy import *
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, diag, concatenate
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, diags
def run(mpc):
    """
    Gurobi based optimal power flow modelling and solution
    :param mpc: The input case of optimal power flow
    :return: (xx, obj, primal_residual) - solution vector, objective value
        and the per-branch second-order-cone primal residuals.
    """
    # Data format
    from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
    from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
    from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
    from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
    from pypower.ext2int import ext2int

    mpc = ext2int(mpc)
    baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"]

    nb = shape(mpc['bus'])[0]  ## number of buses
    nl = shape(mpc['branch'])[0]  ## number of branches
    ng = shape(mpc['gen'])[0]  ## number of dispatchable injections

    f = branch[:, F_BUS]  ## list of "from" buses
    t = branch[:, T_BUS]  ## list of "to" buses
    i = range(nl)  ## double set of row indices
    # Connection matrices (branch-from, branch-to, generator-to-bus).
    Cf = sparse((ones(nl), (i, f)), (nl, nb))
    Ct = sparse((ones(nl), (i, t)), (nl, nb))
    # Reconstructed from a corrupted source line: range(ng) column indices.
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
    Branch_R = branch[:, BR_R]
    Branch_X = branch[:, BR_X]
    Cf = Cf.T
    Ct = Ct.T
    # Obtain the boundary information (lower/upper variable bounds).
    Slmax = branch[:, RATE_A] / baseMVA
    Pij_l = -Slmax
    Qij_l = -Slmax
    Iij_l = zeros(nl)
    Vm_l = turn_to_power(bus[:, VMIN], 2)
    Pg_l = gen[:, PMIN] / baseMVA
    Qg_l = gen[:, QMIN] / baseMVA

    Pij_u = Slmax
    Qij_u = Slmax
    Iij_u = Slmax
    Vm_u = turn_to_power(bus[:, VMAX], 2)
    Pg_u = 2 * gen[:, PMAX] / baseMVA
    Qg_u = 2 * gen[:, QMAX] / baseMVA
    lx = concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l])
    ux = concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u])

    model = Model("OPF")
    # Define the decision variables: x = [Pij, Qij, Iij, Vm^2, Pg, Qg].
    x = {}
    nx = 3 * nl + nb + 2 * ng
    for i in range(nx):
        x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)

    # Add system level constraints (active power balance).
    Aeq_p = hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng))])
    beq_p = bus[:, PD] / baseMVA
    # Reactive power balance.
    Aeq_q = hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg])
    beq_q = bus[:, QD] / baseMVA
    # Branch voltage-drop (KVL) equations.
    Aeq_KVL = hstack([-2 * diags(Branch_R), -2 * diags(Branch_X),
                      diags(turn_to_power(Branch_R, 2)) + diags(turn_to_power(Branch_X, 2)), Cf.T - Ct.T,
                      zeros((nl, 2 * ng))])
    beq_KVL = zeros(nl)

    Aeq = vstack([Aeq_p, Aeq_q, Aeq_KVL])
    Aeq = Aeq.todense()
    beq = concatenate([beq_p, beq_q, beq_KVL])
    neq = len(beq)
    for i in range(neq):
        expr = 0
        for j in range(nx):
            expr += x[j] * Aeq[i, j]
        model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])

    # Second-order-cone relaxation: Pij^2 + Qij^2 <= Iij * V_from^2.
    for i in range(nl):
        model.addConstr(x[i] * x[i] + x[i + nl] * x[i + nl] <= x[i + 2 * nl] * x[f[i] + 3 * nl],
                        name='"rc{0}"'.format(i))

    # Quadratic generation-cost objective.
    obj = 0
    for i in range(ng):
        obj += gencost[i, 4] * x[i + 3 * nl + nb] * x[i + 3 * nl + nb] * baseMVA * baseMVA + gencost[i, 5] * x[
            i + 3 * nl + nb] * baseMVA + gencost[i, 6]

    model.setObjective(obj)
    model.Params.OutputFlag = 0
    model.Params.LogToConsole = 0
    model.Params.DisplayInterval = 1
    model.optimize()

    xx = []
    for v in model.getVars():
        xx.append(v.x)
    obj = obj.getValue()

    Pij = xx[0:nl]
    Qij = xx[nl + 0:2 * nl]
    Iij = xx[2 * nl:3 * nl]
    Vi = xx[3 * nl:3 * nl + nb]
    Pg = xx[3 * nl + nb:3 * nl + nb + ng]
    Qg = xx[3 * nl + nb + ng:3 * nl + nb + 2 * ng]

    # Residual of the relaxed cone constraint; ~0 means the relaxation is tight.
    primal_residual = []
    for i in range(nl):
        primal_residual.append(Pij[i] * Pij[i] + Qij[i] * Qij[i] - Iij[i] * Vi[int(f[i])])

    return xx, obj, primal_residual
def turn_to_power(values, power=1):
    """Element-wise exponentiation: [v ** power for v in values].

    The parameter was renamed from `list`, which shadowed the builtin; all
    in-file callers pass it positionally, so the rename is call-compatible.
    """
    return [number ** power for number in values]
if __name__ == "__main__":
from pypower import runopf
mpc = case33.case33() # Default test case
(xx, obj,residual) = run(mpc)
result = runopf.runopf(case33.case33())
gap = 100*(result["f"]-obj)/obj
print(gap)
print(max(residual))
|
winguru/graphite-api
|
tests/test_render.py
|
Python
|
apache-2.0
| 24,960 | 0.00004 |
# coding: utf-8
import json
import os
import time
from graphite_api._vendor import whisper
from . import TestCase, WHISPER_DIR
try:
from flask.ext.cache import Cache
except ImportError:
Cache = None
class RenderTest(TestCase):
db = os.path.join(WHISPER_DIR, 'test.wsp')
url = '/render'
    def create_db(self):
        """Create a 1-second-resolution whisper DB and store three points.

        Writes 0.5, 0.4 and 0.6 at ts-2, ts-1 and ts (now); earlier slots
        in the 60-point archive remain empty (None).
        """
        whisper.create(self.db, [(1, 60)])
        self.ts = int(time.time())
        whisper.update(self.db, 0.5, self.ts - 2)
        whisper.update(self.db, 0.4, self.ts - 1)
        whisper.update(self.db, 0.6, self.ts)
def test_render_view(self):
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'json',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), [])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'raw',
'noCache': 'true'})
self.assertEqual(response.data.decode('utf-8'), "")
self.assertEqual(response.headers['Content-Type'], 'text/plain')
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'pdf'})
self.assertEqual(response.headers['Content-Type'], 'application/x-pdf')
response = self.app.get(self.url, query_string={'target': 'test'})
self.assertEqual(response.headers['Content-Type'], 'image/png')
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'dygraph',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), {})
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'rickshaw',
'noCache': 'true'})
self.assertEqual(json.loads(response.data.decode('utf-8')), [])
self.create_db()
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
end = data[0]['datapoints'][-4:]
try:
self.assertEqual(
end, [[None, self.ts - 3], [0.5, self.ts - 2],
[0.4, self.ts - 1], [0.6, self.ts]])
except AssertionError:
self.assertEqual(
end, [[0.5, self.ts - 2], [0.4, self.ts - 1],
[0.6, self.ts], [None, self.ts + 1]])
response = self.app.get(self.url, query_string={'target': 'test',
'maxDataPoints': 2,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
# 1 is a time race cond
self.assertTrue(len(data[0]['datapoints']) in [1, 2])
response = self.app.get(self.url, query_string={'target': 'test',
'maxDataPoints': 200,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
# 59 is a time race cond
self.assertTrue(len(data[0]['datapoints']) in [59, 60])
response = self.app.get(self.url, query_string={'target': 'test',
'noNullPoints': 1,
'format': 'json'})
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data[0]['datapoints'],
[[0.5, self.ts - 2],
[0.4, self.ts - 1],
[0.6, self.ts]])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'raw'})
try:
self.assertEqual(
response.data.decode('utf-8'),
'test,%d,%d,1|%s' % (self.ts - 59, self.ts + 1,
'None,' * 57 + '0.5,0.4,0.6\n'))
except AssertionError:
self.assertEqual(
response.data.decode('utf-8'),
'test,%d,%d,1|%s' % (self.ts - 58, self.ts + 2,
'None,' * 56 + '0.5,0.4,0.6,None\n'))
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'dygraph'})
data = json.loads(response.data.decode('utf-8'))
end = data['data'][-4:]
try:
self.assertEqual(
end, [[(self.ts - 3) * 1000, None],
[(self.ts - 2) * 1000, 0.5],
[(self.ts - 1) * 1000, 0.4],
[self.ts * 1000, 0.6]])
except AssertionError:
self.assertEqual(
end, [[(self.ts - 2) * 1000, 0.5],
[(self.ts - 1) * 1000, 0.4],
[self.ts * 1000, 0.6],
[(self.ts + 1) * 1000, None]])
response = self.app.get(self.url, query_string={'target': 'test',
'format': 'rickshaw'})
data = json.loads(response.data.decode('utf-8'))
end = data[0]['datapoints'][-4:]
try:
self.assertEqual(
end, [{'x': self.ts - 3, 'y': None},
{'x': self.ts - 2, 'y': 0.5},
{'x': self.ts - 1, 'y': 0.4},
{'x': self.ts, 'y': 0.6}])
except AssertionError:
self.assertEqual(
end, [{'x': self.ts - 2, 'y': 0.5},
{'x': self.ts - 1, 'y': 0.4},
{'x': self.ts, 'y': 0.6},
{'x': self.ts + 1, 'y': None}])
    def test_render_constant_line(self):
        """constantLine(12) renders as PNG and yields 3 datapoints of 12,
        regardless of maxDataPoints."""
        response = self.app.get(self.url, query_string={
            'target': 'constantLine(12)'})
        self.assertEqual(response.headers['Content-Type'], 'image/png')
        response = self.app.get(self.url, query_string={
            'target': 'constantLine(12)', 'format': 'json'})
        data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
        self.assertEqual(len(data), 3)
        for point, ts in data:
            self.assertEqual(point, 12)
        response = self.app.get(self.url, query_string={
            'target': 'constantLine(12)', 'format': 'json',
            'maxDataPoints': 12})
        data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
        self.assertEqual(len(data), 3)
        for point, ts in data:
            self.assertEqual(point, 12)
    def test_float_maxdatapoints(self):
        """A float maxDataPoints value (5.5) is treated as the int 5."""
        response = self.app.get(self.url, query_string={
            'target': 'sin("foo")', 'format': 'json',
            'maxDataPoints': 5.5})  # rounded to int
        data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
        self.assertEqual(len(data), 5)
    def test_constantline_pathexpr(self):
        """constantLine targets combine under sumSeries (12 + 5 = 17)."""
        response = self.app.get(self.url, query_string={
            'target': 'sumSeries(constantLine(12), constantLine(5))',
            'format': 'json',
        })
        data = json.loads(response.data.decode('utf-8'))[0]['datapoints']
        self.assertEqual([d[0] for d in data], [17, 17, 17])
def test_area_between(self):
response = self.app.get(self.url, query_string={
'target': ['areaBetween(sin("foo"), sin("bar", 2))'],
'format': 'json',
})
|
data = js
|
on.loads(response.data.decode('utf-8'))
self.assertEqual(len(data), 2)
def test_sumseries(self):
response = self.app.get(self.url, query_string={
'target': ['sumSeries(sin("foo"), sin("bar", 2))',
|
anderscui/spellchecker
|
simple_checker/checker_tests_google_dict.py
|
Python
|
mit
| 17,844 | 0.00863 |
from common.persistence import from_pickle
NWORDS = from_pickle('../data/en_dict.pkl')
print(len(NWORDS))
print(NWORDS['word'])
print(NWORDS['spell'])
# Candidate-generation alphabet: lowercase ASCII letters.
alphabet = 'abcdefghijklmnopqrstuvwxyz'

def edits1(word):
    """All strings exactly one edit away from word: deletions, adjacent
    transpositions, single-letter replacements and insertions."""
    pairs = [(word[:k], word[k:]) for k in range(len(word) + 1)]
    candidates = set()
    for head, tail in pairs:
        if tail:
            candidates.add(head + tail[1:])                       # deletion
            for letter in alphabet:
                candidates.add(head + letter + tail[1:])          # replacement
        if len(tail) > 1:
            candidates.add(head + tail[1] + tail[0] + tail[2:])   # transposition
        for letter in alphabet:
            candidates.add(head + letter + tail)                  # insertion
    return candidates
def known_edits2(word):
    """Return dictionary words exactly two edits away from *word*."""
    candidates = set()
    for e1 in edits1(word):
        candidates.update(e2 for e2 in edits1(e1) if e2 in NWORDS)
    return candidates
def known(words):
    """Filter *words* down to those present in the dictionary."""
    return {w for w in words if w in NWORDS}
def correct(word):
    """Return the most probable spelling correction for *word*.

    Prefers the word itself, then known one-edit words, then known
    two-edit words; falls back to the input unchanged.
    """
    candidates = known([word])
    if not candidates:
        candidates = known(edits1(word))
    if not candidates:
        candidates = known_edits2(word)
    if not candidates:
        candidates = [word]
    return max(candidates, key=NWORDS.get)
################ Testing code from here on ################
def spelltest(tests, bias=None, verbose=False):
    """Run the corrector over a {correct: "misspellings"} development set.

    Args:
        tests: dict mapping the right spelling to a space-separated string
            of misspellings to try.
        bias: if given, added to the count of every target word first,
            simulating a prior in favour of the expected answers.
        verbose: print each miss as it happens.

    Returns:
        dict with bad (misses), n (cases), bias, pct (accuracy percent),
        unknown (missed targets absent from NWORDS), secs (elapsed).
    """
    import time
    n, bad, unknown, start = 0, 0, 0, time.time()
    if bias:
        for target in tests:
            NWORDS[target] += bias
    for target, wrongs in tests.items():
        for wrong in wrongs.split():
            n += 1
            w = correct(wrong)
            if w != target:
                bad += 1
                unknown += (target not in NWORDS)
                if verbose:
                    # print() call form works under both Python 2 and 3
                    # (was a Python-2-only print statement); time.clock()
                    # was replaced by time.time() since clock() was removed
                    # in Python 3.8.
                    print('correct(%r) => %r (%d); expected %r (%d)' % (
                        wrong, w, NWORDS[w], target, NWORDS[target]))
    return dict(bad=bad, n=n, bias=bias, pct=int(100. - 100. * bad / n),
                unknown=unknown, secs=int(time.time() - start))
# Development set: {correct spelling: "space-separated misspellings"}.
# NOTE(review): two entries ('scissors', 'transferred') were corrupted in the
# source by line-splitting and have been reconstructed.
tests1 = {'access': 'acess', 'accessing': 'accesing', 'accommodation':
'accomodation acommodation acomodation', 'account': 'acount', 'address':
'adress adres', 'addressable': 'addresable', 'arranged': 'aranged arrainged',
'arrangeing': 'aranging', 'arrangement': 'arragment', 'articles': 'articals',
'aunt': 'annt anut arnt', 'auxiliary': 'auxillary', 'available': 'avaible',
'awful': 'awfall afful', 'basically': 'basicaly', 'beginning': 'begining',
'benefit': 'benifit', 'benefits': 'benifits', 'between': 'beetween', 'bicycle':
'bicycal bycicle bycycle', 'biscuits':
'biscits biscutes biscuts bisquits buiscits buiscuts', 'built': 'biult',
'cake': 'cak', 'career': 'carrer',
'cemetery': 'cemetary semetary', 'centrally': 'centraly', 'certain': 'cirtain',
'challenges': 'chalenges chalenges', 'chapter': 'chaper chaphter chaptur',
'choice': 'choise', 'choosing': 'chosing', 'clerical': 'clearical',
'committee': 'comittee', 'compare': 'compair', 'completely': 'completly',
'consider': 'concider', 'considerable': 'conciderable', 'contented':
'contenpted contende contended contentid', 'curtains':
'cartains certans courtens cuaritains curtans curtians curtions', 'decide': 'descide', 'decided':
'descided', 'definitely': 'definately difinately', 'definition': 'defenition',
'definitions': 'defenitions', 'description': 'discription', 'desiccate':
'desicate dessicate dessiccate', 'diagrammatically': 'diagrammaticaally',
'different': 'diffrent', 'driven': 'dirven', 'ecstasy': 'exstacy ecstacy',
'embarrass': 'embaras embarass', 'establishing': 'astablishing establising',
'experience': 'experance experiance', 'experiences': 'experances', 'extended':
'extented', 'extremely': 'extreamly', 'fails': 'failes', 'families': 'familes',
'february': 'febuary', 'further': 'futher', 'gallery': 'galery gallary gallerry gallrey',
'hierarchal': 'hierachial', 'hierarchy': 'hierchy', 'inconvenient':
'inconvienient inconvient inconvinient', 'independent': 'independant independant',
'initial': 'intial', 'initials': 'inetials inistals initails initals intials',
'juice': 'guic juce jucie juise juse', 'latest': 'lates latets latiest latist',
'laugh': 'lagh lauf laught lugh', 'level': 'leval',
'levels': 'levals', 'liaison': 'liaision liason', 'lieu': 'liew', 'literature':
'litriture', 'loans': 'lones', 'locally': 'localy', 'magnificent':
'magnificnet magificent magnifcent magnifecent magnifiscant magnifisent magnificant',
'management': 'managment', 'meant': 'ment', 'minuscule': 'miniscule',
'minutes': 'muinets', 'monitoring': 'monitering', 'necessary':
'neccesary necesary neccesary necassary necassery neccasary', 'occurrence':
'occurence occurence', 'often': 'ofen offen offten ofton', 'opposite':
'opisite oppasite oppesite oppisit oppisite opposit oppossite oppossitte', 'parallel':
'paralel paralell parrallel parralell parrallell', 'particular': 'particulaur',
'perhaps': 'perhapse', 'personnel': 'personnell', 'planned': 'planed', 'poem':
'poame', 'poems': 'poims pomes', 'poetry': 'poartry poertry poetre poety powetry',
'position': 'possition', 'possible': 'possable', 'pretend':
'pertend protend prtend pritend', 'problem': 'problam proble promblem proplen',
'pronunciation': 'pronounciation', 'purple': 'perple perpul poarple',
'questionnaire': 'questionaire', 'really': 'realy relley relly', 'receipt':
'receit receite reciet recipt', 'receive': 'recieve', 'refreshment':
'reafreshment refreshmant refresment refressmunt', 'remember': 'rember remeber rememmer rermember',
'remind': 'remine remined', 'scarcely': 'scarcly scarecly scarely scarsely',
'scissors': 'scisors sissors', 'separate': 'seperate',
'singular': 'singulaur', 'someone': 'somone', 'sources': 'sorces', 'southern':
'southen', 'special': 'speaical specail specal speical', 'splendid':
'spledid splended splened splended', 'standardizing': 'stanerdizing', 'stomach':
'stomac stomache stomec stumache', 'supersede': 'supercede superceed', 'there': 'ther',
'totally': 'totaly', 'transferred': 'transfred', 'transportability':
'transportibility', 'triangular': 'triangulaur', 'understand': 'undersand undistand',
'unexpected': 'unexpcted unexpeted unexspected', 'unfortunately':
'unfortunatly', 'unique': 'uneque', 'useful': 'usefull', 'valuable': 'valubale valuble',
'variable': 'varable', 'variant': 'vairiant', 'various': 'vairious',
'visited': 'fisited viseted vistid vistied', 'visitors': 'vistors',
'voluntary': 'volantry', 'voting': 'voteing', 'wanted': 'wantid wonted',
'whether': 'wether', 'wrote': 'rote wote'}
tests2 = {'forbidden': 'forbiden', 'decisions': 'deciscions descisions',
'supposedly': 'supposidly', 'embellishing': 'embelishing', 'technique':
'tecnique', 'permanently': 'perminantly', 'confirmation': 'confermation',
'appointment': 'appoitment', 'progression': 'progresion', 'accompanying':
'acompaning', 'applicable': 'aplicable', 'regained': 'regined', 'guidelines':
'guidlines', 'surrounding': 'serounding', 'titles': 'tittles', 'unavailable':
'unavailble', 'advantageous': 'advantageos', 'brief': 'brif', 'appeal':
'apeal', 'consisting': 'consisiting', 'clerk': 'cleark clerck', 'component':
'componant', 'favourable': 'faverable', 'separation': 'seperation', 'search':
'serch', 'receive': 'recieve', 'employees': 'emploies', 'prior': 'piror',
'resulting': 'reulting', 'suggestion': 'sugestion', 'opinion': 'oppinion',
'cancellation': 'cancelation', 'criticism': 'citisum', 'useful': 'usful',
'humour': 'humor', 'anomalies': 'anomolies', 'would': 'whould', 'doubt':
'doupt', 'examination': 'eximination', 'therefore': 'therefoe', 'recommend':
'recomend', 'separated': 'seperated', 'successful': 'sucssuful succesful',
'appare
|
NDykhuis/team-formation-study-software
|
humanagent.py
|
Python
|
gpl-2.0
| 25,571 | 0.013922 |
#
# humanagent.py - provides server-side backend for interaction with
# human players in team formation
#
# Copyright (C) 2015 Nathan Dykhuis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Agent class to allow humans on client computers to play team formation.
The HumanAgent class is the server-side backend of the human interaction
system. It performs all calculation of payoffs, etc, and forwards the info
to the Frontend client over TCP. The Frontend client is a fairly-thin wrapper
which simply takes messages describing what screen to display, with what info,
and receives input from the user.
Numerous functions use send_and_receive to ask for user input, and ensure that
the program (or thread) blocks until it has been received.
"""
import json
import numpy as np
from configuration import Configuration
from agentgroup import Agent
from utils import send_message, receive_message, send_and_receive
CURR = u'\xA7'  # section sign -- presumably the in-game currency symbol; TODO confirm
class HumanAgent(Agent):
"""HumanAgent class implements all functions necessary for team formation
Each function gathers the relevant information, then ships it off to a
Frontend instance over TCP for evaluation and decision by a human player.
Attributes:
slow: always True, since humans take time to decide
type: 'human'
client: the sockets connection to the Frontend
finalpay: total pay, only set after the exit survey is submitted
messages: list accumulator of message strings that are sent to the user as
a summary of the turn during the postprocess stage.
"""
  def __init__(self, cfg, connection, adat=None, skills=None, aid=None):
    """Create a human-backed agent speaking to a Frontend over *connection*."""
    super(HumanAgent, self).__init__(cfg, adat, skills, aid)
    self.client = connection
    self.messages = []
    self.slow = True
    self.type = 'human'
    # THIS IS DUPLICATED FROM FRONTEND.PY - should move to configuration?
    self.gletters = [chr(ord('A')+i) for i in range(Configuration.n)]
    if Configuration.hide_publicgoods:
      self.anames = ['Agent '+str(i) for i in range(Configuration.n)]
    else:
      # Icky hardcoding
      self.anames = ['Cat', 'Dog', 'Bear', 'Monkey', 'Cow', 'Elephant',
                     'Gorilla', 'Fish', 'Sheep', 'Frog', 'Bird', 'Lion',
                     'Owl', 'Panda', 'Penguin', 'Pig', 'Rabbit', 'Rooster',
                     'Bee', 'Donkey']
    self.current_ratings = {}
    self.finalpay = -1   # set for real only after the exit survey completes
    # Tell the client which agent it is playing, then push the configuration.
    send_message(self.client, ('setmyid', self.id))
    self.sendcfg()
def sendcfg(self):
"""Send the current configuration to the client as a dictionary.
Sends only the variables which can be packed with JSON.
Blocks until client confirms that it has received the message
"""
cfgdict = self.cfg.outputcfg(showhidden=True)
# remove all things that are not jsonnable
jsoncfg = {}
for k,v in cfgdict.iteritems():
try: # This is hacky...
json.dumps(v)
jsoncfg[k] = v
except TypeError:
pass
send_and_receive(self.client, ('setconfig', jsoncfg))
# Make sure the config gets set before moving on
  def gname(self, gid):
    """Map an integer group ID to its one-letter display name."""
    return self.gletters[gid]
def aname(self, aid):
"""Get the name of an agent from an integer agent ID"""
if aid == self.id:
return "You"
return self.anames[aid]
def initvideo(self):
"""Tell client to start video capture and open a preview window"""
cfg = self.cfg
# send additional video info here
vdata = (cfg._do_video, cfg._dblog.sessionid, self.id)
send_message(self.client, ('initvideo', vdata))
send_message(self.client, ('startpreview', 0))
  def instructions(self):
    """Tell client to show instructions screen and close preview window"""
    if self.cfg.do_ratings: self.hideratings()
    send_message(self.client, ('endpreview', 0))
    self.sendcfg()
    # Blocks until the participant dismisses the instructions screen.
    send_and_receive(self.client, ('instructions', 0))
    #if self.cfg.do_ratings: self.showratings()
    self.logp(("Instructions done for", self.id))
  def initratings(self, neighbors):
    """Tell client to create the ratings sidebar for the given neighbors."""
    send_message(self.client, ('initratings', neighbors))
  def showratings(self):
    """Tell client to show the ratings sidebar"""
    send_message(self.client, ('showratings', 0))
  def hideratings(self):
    """Tell client to hide the ratings sidebar"""
    send_message(self.client, ('hideratings', 0))
  def disableratings(self):
    """Tell client to make ratings sidebar un-clickable"""
    send_message(self.client, ('disableratings', 0))
  def introsurvey(self):
    """Tell client to present intro survey screen, and record response"""
    # Blocks until the participant submits the survey form.
    gender, college, status = send_and_receive(self.client, ('introsurvey', 0))
    self.cfg._dblog.log_introsurvey(self.id, (gender, college, status))
def exitsurvey(self):
"""Tell client to present exit survey, and after submit, get final pay"""
self.logratings()
send_message(self.client, ('exitsurvey', 0))
# Receive num of questions, and then each question and response
n_qs = receive_mes
|
sage(self.client)
responses = []
for i in range(n_qs):
(qtext, qresponse) = receive_message(self.client)
responses.append( (qtext, qresponse) )
self.finalpay = receive_message(self.client)
self.hideratings()
self.cfg._dblog.log_exitsurvey(self.id, responses)
self.logratings(step='exitsurvey')
self.logratingstatus('final', range(self.cfg.n)) # Log ratings of everyone
|
self.logp(("Agent", self.id, "exit survey submitted"), 0)
  def startcapture(self):
    """Client: start video capture"""
    send_message(self.client, ('startcapture', 0))
  def stopcapture(self):
    """Client: pause video capture"""
    send_message(self.client, ('stopcapture', 0))
  def endcapture(self):
    """Client: terminate video capture"""
    send_message(self.client, ('endcapture', 0))
  def update(self):
    """Recompute current pay and push pay/team/neighbor state to the GUI."""
    if self.cfg.bias:
      self.nowpay = self.nowpaycalc(self.cfg.task(self.group.skills))
    else:
      # Even split of the group's task payoff.
      self.nowpay = self.cfg.task(self.group.skills)/self.group.gsize
    send_message(self.client, ('updatepay', self.nowpay) )
    if self.cfg.show_skills:
      # np.where(...)[0][0] extracts the index of this agent's skill;
      # assumes a one-hot skills vector -- TODO confirm against agentgroup.
      send_message(self.client, ('updatemyteam', (self.group.id, int(np.where(self.skills)[0][0])) ))
    else:
      # -1 tells the client not to display a skill.
      send_message(self.client, ('updatemyteam', (self.group.id, -1)) )
    send_message(self.client, ('updateteam', sorted([a.id for a in self.group.agents])))
    self.updatenbrs()
def updatenbrs(self):
"""Update graphical view of neighbors in the GUI"""
#nbrdata = [(n.id, n.group.id) for n in self.nbrs] # old nbrdata
if self.cfg.show_skills:
nbrdata = [(n.id, n.group.id, int(np.where(n.skills)[0][0])) for n in self.nbrs]
else:
nbrdata = [(n.id, n.group.id, -1) for n in self.nbrs]
send_message(self.client, ('updatenbrs', nbrdata) )
  def getframetimes(self):
    """Get the start and end frame numbers and timestamps from the last event.

    Returns:
      tuple of (start frame, end frame, start time, end time)
      frame numbers are ints, times are Unix timestamps
    """
    # Round-trips to the client; blocks until it answers.
    return send_and_receive(self.client, ('getframetimes', 0))
def logratings(self, simnum = None, iternum = None, step = 'NA'):
"""Get all accumulated ratings from the client and log to database.
Also update self.current_ratings with the most recent rating assigned.
Arguments:
|
flake123p/ProjectH
|
Python/_Basics_/A11_Reference/test.py
|
Python
|
gpl-3.0
| 817 | 0.007344 |
"""
A list is a sequence
1.Can be any type
2.The values in a list are called elements or sometimes items
3.Declare with square brackets: [ ]
4.Can be nested. [x, y, [z1, z2]]
"""
myStr1 = 'aabbcc'
myStr2 = 'aabbcc'
print('myStr1 = ', myStr1)
print('myStr2 = ', myStr2)
print('myStr1 is myStr2 = ', myStr1 is myStr2, ' (Equivalent + Identical)')
myList1 = [10, 20, 30]
myList2 = [10, 20, 30]
print('myList1 = ', myList1)
print('myList2 = ', myList2)
print('myList1 is myList2 = ', myList1 is myList2, ' (Equivalent + Not Identical)')
print('When you pass a list to a function, the
|
function gets a reference
|
to the list.')
t1 = [1, 2]
t2 = t1.append(3)
t3 = t1 + [3]
print('t1 = [1, 2]')
print('t2 = t1.append(3)')
print('t3 = t1 + [3]')
print('t1 now is ', t1)
print('t2 now is ', t2)
print('t3 now is ', t3)
|
roncapat/RWOL
|
rwol-web-src/utilities.py
|
Python
|
gpl-3.0
| 3,772 | 0.012725 |
#!/usr/bin/python
#coding: UTF-8
#COPIRIGHT: Patrick Roncagliolo
#LICENCE: GNU GPL 3
import cgi, json
argsDict = cgi.FieldStorage()
EMPTY_DICT = {}
def getState(init=False):
    """Return (dataDict, devDict), creating and persisting each when *init*.

    When a record is missing and init is False, its slot is returned as None.
    """
    dataDict = getDataDict()
    if dataDict is None and init is True:
        (key, uri) = generateTOTP()
        generateQR(key, uri)
        # NOTE(review): this assignment was corrupted in the source and has
        # been reconstructed.
        dataDict = newDataDict(key, uri)
        setDataDict(dataDict)
    devDict = getDevDict()
    if devDict is None and init is True:
        devDict = newDevDict()
        setDevDict(devDict)
    return (dataDict, devDict)
def generateTOTP():
    """Generate a random TOTP secret and its otpauth provisioning URI."""
    # NOTE(review): the import below was corrupted in the source and has been
    # reconstructed. Also note `random` is not cryptographically secure; the
    # stdlib `secrets` module would be preferable for key generation.
    import string, random
    from otpauth import OtpAuth as otpauth
    key = ''.join((random.choice(string.ascii_uppercase + string.digits)) for x in range(30))
    auth = otpauth(key)
    uri = auth.to_uri('totp', 'patrick@WakeOnLAN', 'WakeOnLAN')
    return (key, uri)
def generateQR(key, uri):
    """Render *uri* as a QR code image named after *key*, dropping old ones."""
    import os, qrcode
    from glob import glob
    image = qrcode.make(uri)
    for stale in glob("data/*.png"):
        os.remove(stale)
    image.save("data/%s.png" % key)
def newDataDict(key, uri):
    """Return a fresh state record for a newly generated TOTP secret."""
    record = {'otp-type': 'totp', 'post-token': '0'}
    record['key'] = key
    record['uri'] = uri
    return record
def getDataDict():
    """Load the persisted state record, or None when it does not exist yet."""
    try:
        with open('data/data.json', 'r') as handle:
            return json.load(handle)
    except IOError:
        return None
def setDataDict(dataDict):
    """Persist the state record to disk."""
    with open('data/data.json', 'w') as out:
        json.dump(dataDict, out)
def newDevDict():
    """Return an empty device registry."""
    return dict()
def getDevDict():
    """Load the device registry, or None when it does not exist yet."""
    try:
        with open('data/devices.json', 'r') as devFile:
            return json.load(devFile)
    except IOError:
        return None
def setDevDict(devDict):
    """Persist the device registry to disk."""
    with open('data/devices.json', 'w') as out:
        json.dump(devDict, out)
def addDevice(devDict, devname, devaddr):
    """Register a device (normalized name and MAC); True unless it exists."""
    name = devname.lower().capitalize()
    addr = devaddr.lower().replace('-', ':')
    if name in devDict:
        return False
    devDict[name] = addr
    setDevDict(devDict)
    return True
def rmvDevice(devDict, devname):
    """Remove *devname* from the registry; True if it was present."""
    name = devname.lower().capitalize()
    if name not in devDict:
        return False
    del devDict[name]
    setDevDict(devDict)
    return True
def checkToken(dataDict):
    """Validate the anti-replay token posted with the current request.

    Returns (accepted, next_token). Raises KeyError when the persisted
    state has no 'post-token' entry.
    """
    if 'post-token' not in dataDict.keys():
        raise KeyError
    data_token = int(dataDict['post-token'])
    token = data_token + 1
    if 'action' not in argsDict.keys() or 'token' not in argsDict.keys():
        return (False, token)
    post_token = int(argsDict['token'].value)
    if post_token <= data_token:
        return (False, token)
    # Strictly newer token: accept it and advance the persisted counter.
    updateToken(dataDict, post_token)
    return (True, post_token + 1)
def updateToken(dataDict, post_token):
    """Record the newest accepted token and persist the state record."""
    dataDict['post-token'] = post_token
    with open('data/data.json', 'w') as out:
        json.dump(dataDict, out)
    return int(dataDict['post-token'])
def printIndexHeader(stylesheets):
    """Emit the CGI response header and shared page <head> markup.

    Python 2 CGI style: trailing commas suppress the newline of the
    print statement.
    """
    print 'Content-type: text/html\n\n',
    print '<!DOCTYPE html>',
    print '<meta name="viewport" content="width=device-width, initial-scale=1.0">',
    print '<title>RWOLS - Remote WakeOnLan Server</title>',
    for stylesheet in stylesheets:
        print '<link rel="stylesheet" type="text/css" href="%s">' % stylesheet,
    print '<script src="https://cdn.jsdelivr.net/clipboard.js/1.5.13/clipboard.min.js"></script>',
    print '<h1>Remote WakeOnLan Server</h1>'
def printBottomButton(label, link):
    """Render a single-button POST form targeting *link* (Python 2 CGI style)."""
    print '<form method="post"'
    print 'action="%s">' % link,
    print '<input type="submit"'
    print 'value="%s">' % label,
    print '</form>'
|
NoctuaNivalis/qutebrowser
|
tests/unit/browser/test_tab.py
|
Python
|
gpl-3.0
| 3,502 | 0 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest
from qutebrowser.browser import browsertab
pytestmark = pytest.mark.usefixtures('redirect_webengine_data')

# Backend availability probes: either import can fail on a system that has
# only one of QtWebKit / QtWebEngine installed; the missing one is left as
# None so the `view` fixture can skip it.
try:
    from PyQt5.QtWebKitWidgets import QWebView
except ImportError:
    QWebView = None

try:
    from PyQt5.QtWebEngineWidgets import QWebEngineView
except ImportError:
    QWebEngineView = None
@pytest.fixture(params=[QWebView, QWebEngineView])
def view(qtbot, config_stub, request):
    """Parametrized fixture yielding a bare web view widget per backend."""
    if request.param is None:
        # Backend import failed above -- skip rather than crash.
        pytest.skip("View not available")
    v = request.param()
    qtbot.add_widget(v)
    return v
@pytest.fixture(params=['webkit', 'webengine'])
def tab(request, qtbot, tab_registry, cookiejar_and_cache, mode_manager):
    """Parametrized fixture yielding a WebKit or WebEngine tab instance."""
    module_by_backend = {
        'webkit': ('qutebrowser.browser.webkit.webkittab', 'WebKitTab'),
        'webengine': ('qutebrowser.browser.webengine.webenginetab',
                      'WebEngineTab'),
    }
    assert request.param in module_by_backend
    modname, clsname = module_by_backend[request.param]
    # importorskip skips the test when the backend module is unavailable.
    tab_class = getattr(pytest.importorskip(modname), clsname)
    t = tab_class(win_id=0, mode_manager=mode_manager)
    qtbot.add_widget(t)
    yield t
class Zoom(browsertab.AbstractZoom):
    """Minimal AbstractZoom stub: factor writes are no-ops, reads are illegal."""
    def _set_factor_internal(self, _factor):
        pass

    def factor(self):
        assert False  # tests never read the zoom factor back
class Tab(browsertab.AbstractTab):
    """Concrete AbstractTab with all sub-objects wired to abstract stubs."""
    # pylint: disable=abstract-method

    def __init__(self, win_id, mode_manager, parent=None):
        super().__init__(win_id=win_id, mode_manager=mode_manager,
                         parent=parent)
        self.history = browsertab.AbstractHistory(self)
        self.scroller = browsertab.AbstractScroller(self, parent=self)
        self.caret = browsertab.AbstractCaret(win_id=self.win_id,
                                              mode_manager=mode_manager,
                                              tab=self, parent=self)
        self.zoom = Zoom(win_id=self.win_id)
        self.search = browsertab.AbstractSearch(parent=self)
        self.printing = browsertab.AbstractPrinting()
        self.elements = browsertab.AbstractElements(self)
        self.action = browsertab.AbstractAction()

    def _install_event_filter(self):
        # Event filtering is irrelevant for these unit tests.
        pass
@pytest.mark.xfail(run=False, reason='Causes segfaults, see #1638')
def test_tab(qtbot, view, config_stub, tab_registry, mode_manager):
    """Smoke-test wiring a widget into a Tab (currently disabled: segfaults)."""
    tab_w = Tab(win_id=0, mode_manager=mode_manager)
    qtbot.add_widget(tab_w)

    assert tab_w.win_id == 0
    assert tab_w._widget is None

    tab_w._set_widget(view)
    assert tab_w._widget is view
    assert tab_w.history._tab is tab_w
    assert tab_w.history._history is view.history()
    assert view.parent() is tab_w

    with qtbot.waitExposed(tab_w):
        tab_w.show()
|
kennethlove/django_bookmarks
|
dj_bookmarks/dj_bookmarks/wsgi.py
|
Python
|
bsd-3-clause
| 402 | 0 |
"""
WSGI config for d
|
j_bookmarks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_bookmarks.settings")
application = get_wsgi_applica
|
tion()
|
fqez/JdeRobot
|
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/test.py
|
Python
|
gpl-3.0
| 31,243 | 0.004161 |
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: test.xml
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, time, json, os, sys, platform
from ...generator.mavcrc import x25crc
import hashlib
# Wire-format constants for this generated dialect.
WIRE_PROTOCOL_VERSION = '2.0'
DIALECT = 'test'
PROTOCOL_MARKER_V1 = 0xFE
PROTOCOL_MARKER_V2 = 0xFD
HEADER_LEN_V1 = 6
HEADER_LEN_V2 = 10
MAVLINK_SIGNATURE_BLOCK_LEN = 13
MAVLINK_IFLAG_SIGNED = 0x01
native_supported = platform.system() != 'Windows' # Not yet supported on other dialects
native_force = 'MAVNATIVE_FORCE' in os.environ # Will force use of native code regardless of what client app wants
native_testing = 'MAVNATIVE_TESTING' in os.environ # Will force both native and legacy code to be used and their results compared
# The C-accelerated parser only handles MAVLink 1; fall back to pure Python
# otherwise, or when the extension module is missing.
if native_supported and float(WIRE_PROTOCOL_VERSION) <= 1:
    try:
        import mavnative
    except ImportError:
        print('ERROR LOADING MAVNATIVE - falling back to python implementation')
        native_supported = False
else:
    # mavnative isn't supported for MAVLink2 yet
    native_supported = False
# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR     = 0
MAVLINK_TYPE_UINT8_T  = 1
MAVLINK_TYPE_INT8_T   = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T  = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T  = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T  = 8
MAVLINK_TYPE_FLOAT    = 9
MAVLINK_TYPE_DOUBLE   = 10
class MAVLink_header(object):
    '''MAVLink message header'''
    def __init__(self, msgId, incompat_flags=0, compat_flags=0, mlen=0, seq=0, srcSystem=0, srcComponent=0):
        self.msgId = msgId
        self.incompat_flags = incompat_flags
        self.compat_flags = compat_flags
        self.mlen = mlen
        self.seq = seq
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent

    def pack(self, force_mavlink1=False):
        """Serialize the header: MAVLink 2 framing unless v1 is forced."""
        if WIRE_PROTOCOL_VERSION != '2.0' or force_mavlink1:
            return struct.pack('<BBBBBB', PROTOCOL_MARKER_V1, self.mlen, self.seq,
                               self.srcSystem, self.srcComponent, self.msgId)
        # v2 framing: magic 0xFD (253), flag bytes, 24-bit little-endian msgId.
        return struct.pack('<BBBBBBBHB', 253, self.mlen,
                           self.incompat_flags, self.compat_flags,
                           self.seq, self.srcSystem, self.srcComponent,
                           self.msgId & 0xFFFF, self.msgId >> 16)
class MAVLink_message(object):
    '''base MAVLink message class'''
    # NOTE(review): two spans of this (generated) class were corrupted in the
    # source (the `__eq__` definition line and one comparison inside it) and
    # have been reconstructed; everything else is unchanged.
    def __init__(self, msgId, name):
        self._header     = MAVLink_header(msgId)
        self._payload    = None
        self._msgbuf     = None
        self._crc        = None
        self._fieldnames = []
        self._type       = name
        self._signed     = False
        self._link_id    = None

    def get_msgbuf(self):
        if isinstance(self._msgbuf, bytearray):
            return self._msgbuf
        return bytearray(self._msgbuf)

    def get_header(self):
        return self._header

    def get_payload(self):
        return self._payload

    def get_crc(self):
        return self._crc

    def get_fieldnames(self):
        return self._fieldnames

    def get_type(self):
        return self._type

    def get_msgId(self):
        return self._header.msgId

    def get_srcSystem(self):
        return self._header.srcSystem

    def get_srcComponent(self):
        return self._header.srcComponent

    def get_seq(self):
        return self._header.seq

    def get_signed(self):
        return self._signed

    def get_link_id(self):
        return self._link_id

    def __str__(self):
        ret = '%s {' % self._type
        for a in self._fieldnames:
            v = getattr(self, a)
            ret += '%s : %s, ' % (a, v)
        ret = ret[0:-2] + '}'
        return ret

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if other == None:
            return False
        if self.get_type() != other.get_type():
            return False
        # We do not compare CRC because native code doesn't provide it
        #if self.get_crc() != other.get_crc():
        #    return False
        if self.get_seq() != other.get_seq():
            return False
        if self.get_srcSystem() != other.get_srcSystem():
            return False
        if self.get_srcComponent() != other.get_srcComponent():
            return False
        for a in self._fieldnames:
            if getattr(self, a) != getattr(other, a):
                return False
        return True

    def to_dict(self):
        d = dict({})
        d['mavpackettype'] = self._type
        for a in self._fieldnames:
            d[a] = getattr(self, a)
        return d

    def to_json(self):
        return json.dumps(self.to_dict())

    def sign_packet(self, mav):
        # MAVLink 2 signature: link id + 48-bit timestamp + truncated sha256.
        h = hashlib.new('sha256')
        self._msgbuf += struct.pack('<BQ', mav.signing.link_id, mav.signing.timestamp)[:7]
        h.update(mav.signing.secret_key)
        h.update(self._msgbuf)
        sig = h.digest()[:6]
        self._msgbuf += sig
        mav.signing.timestamp += 1

    def pack(self, mav, crc_extra, payload, force_mavlink1=False):
        plen = len(payload)
        if WIRE_PROTOCOL_VERSION != '1.0' and not force_mavlink1:
            # in MAVLink2 we can strip trailing zeros off payloads. This allows for simple
            # variable length arrays and smaller packets
            while plen > 1 and payload[plen-1] == chr(0):
                plen -= 1
        self._payload = payload[:plen]
        incompat_flags = 0
        if mav.signing.sign_outgoing:
            incompat_flags |= MAVLINK_IFLAG_SIGNED
        self._header = MAVLink_header(self._header.msgId,
                                      incompat_flags=incompat_flags, compat_flags=0,
                                      mlen=len(self._payload), seq=mav.seq,
                                      srcSystem=mav.srcSystem, srcComponent=mav.srcComponent)
        self._msgbuf = self._header.pack(force_mavlink1=force_mavlink1) + self._payload
        crc = x25crc(self._msgbuf[1:])
        if True: # using CRC extra
            crc.accumulate_str(struct.pack('B', crc_extra))
        self._crc = crc.crc
        self._msgbuf += struct.pack('<H', self._crc)
        if mav.signing.sign_outgoing and not force_mavlink1:
            self.sign_packet(mav)
        return self._msgbuf
# enums
class EnumEntry(object):
    """One enum value: display name, description, and per-parameter docs."""
    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.param = {}  # parameter index -> description, filled by the generator
enums = {}  # enum name -> {value: EnumEntry}; this test dialect defines none

# message IDs
MAVLINK_MSG_ID_BAD_DATA = -1  # pseudo-ID used for unparseable input
MAVLINK_MSG_ID_TEST_TYPES = 0
class MAVLink_test_types_message(MAVLink_message):
'''
Test all field types
'''
id = MAVLINK_MSG_ID_TEST_TYPES
name = 'TEST_TYPES'
fieldnames = ['c', 's', 'u8', 'u16', 'u32', 'u64', 's8', 's16', 's32', 's64', 'f', 'd', 'u8_array', 'u16_array', 'u32_array', 'u64_array', 's8_array', 's16_array', 's32_array', 's64_array', 'f_array', 'd_array']
ordered_fieldnames = [ 'u64', 's64', 'd', 'u64_array', 's64_array', 'd_array', 'u32', 's32', 'f', 'u32_array', 's32_array', 'f_array', 'u16', 's16', 'u16_array', 's16_array', 'c', 's', 'u8', 's8', 'u8_array', 's8_array' ]
format = '<Qqd3Q3q3dIif3I3i3fHh3H3hc10sBb3B3b'
native_format = bytearray('<QqdQqdIifIifHhHhccBbBb', 'ascii')
orders = [16, 17, 18, 12, 6, 0, 19, 13, 7, 1, 8, 2, 20, 14, 9, 3, 21, 15, 10, 4, 11, 5]
lengths = [1, 1, 1, 3, 3, 3, 1, 1, 1, 3, 3, 3, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3]
array_lengths = [0, 0, 0, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 3, 3, 0, 10, 0, 0, 3, 3]
crc_extra = 103
def __init__(self, c, s, u8, u16, u32, u64, s8, s16, s32, s64, f, d, u8_array, u16_array, u32_array, u64_array, s8_array, s16_array, s32_array, s64_array, f_array, d_array):
MAVLink_message.__init__(self, MAVLink_test_types_message.id, MAVLink_test_types_message.name)
self._fieldnames = MAVLink_test_types_message.fieldnames
self.c = c
self.s = s
self.u8 = u8
self.u16 =
|
bfg-repo-cleaner-demos/eclipselink.runtime-bfg-strip-big-blobs
|
jpa/eclipselink.jpa.test/resource/weblogic/wls_composite_reset.py
|
Python
|
epl-1.0
| 998 | 0.018036 |
############################################################################
# Generic script applicable on any Operating Environments (Unix, Windows)
# ScriptName : wls_reset.py
# Properties : weblogic.properties
# Author : Kevin Yuan
############################################################################
#===========================================================================
# Connect to wls server
#===========================================================================
# NOTE(review): the banner and the activate() call below were corrupted in
# the source by line-splitting and have been reconstructed.
connect('%%WL_USR%%','%%WL_PWD%%','t3://%%WL_HOST%%:%%WL_PORT%%')

#===========================================================================
# Remove Data Sources using wlst on-line commands for three composite models
#===========================================================================

edit()
startEdit()
delete('EclipseLinkDS','JDBCSystemResource')
delete('EclipseLinkDS2','JDBCSystemResource')
delete('EclipseLinkDS3','JDBCSystemResource')
save()
activate()
exit()
|
avatartwo/avatar2
|
avatar2/archs/x86.py
|
Python
|
apache-2.0
| 6,401 | 0.010155 |
from capstone import *
from .architecture import Architecture
from avatar2.installer.config import GDB_X86, OPENOCD
class X86(Architecture):
    """Avatar2 architecture description for 32-bit x86 targets."""

    get_gdb_executable = Architecture.resolve(GDB_X86)
    get_oocd_executable = Architecture.resolve(OPENOCD)

    qemu_name = 'i386'
    gdb_name = 'i386'

    # General-purpose register name -> gdb register number.
    registers = {'eax': 0,
                 'ecx': 1,
                 'edx': 2,
                 'ebx': 3,
                 'esp': 4,
                 'ebp': 5,
                 'esi': 6,
                 'edi': 7,
                 'eip': 8,
                 'pc': 8,       # alias for eip
                 'eflags': 9,
                 'cs': 10,
                 'ss': 11,
                 'ds': 12,
                 'es': 13,
                 'fs': 14,
                 'gs': 15, }

    # SSE (xmm0-15) and AVX (ymm0-15) registers are not part of gdb's
    # numbered register file, so they are fetched via expression evaluation.
    # The table was originally written out by hand entry-by-entry; it is
    # generated here instead, which also repaired two entries corrupted in
    # the source. The resulting dict is identical.
    special_registers = {}
    for _i in range(16):
        special_registers['xmm%d' % _i] = {
            'format': '{{{:d}, {:d}, {:d}, {:d}}}',
            'gdb_expression': '$xmm%d.v4_int32' % _i,
        }
        special_registers['ymm%d' % _i] = {
            'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
            'gdb_expression': '$ymm%d.v8_int32' % _i,
        }
    del _i  # keep the loop variable out of the class namespace

    sr_name = 'eflags'
    unemulated_instructions = []
    capstone_arch = CS_ARCH_X86
    capstone_mode = CS_MODE_32
    word_size = 32
class X86_64(X86):
    """x86-64 architecture description; extends :class:`X86`.

    Overrides the register map for the 64-bit register set and switches
    capstone to 64-bit disassembly; the special (vector) registers and
    the status-register name are inherited from X86.
    """

    qemu_name = 'x86_64'
    gdb_name = 'i386:x86-64'

    # GDB register numbers of the 64-bit general-purpose registers.
    # 'pc' is an alias for 'rip'.
    registers = {'rax': 0,
                 'rbx': 1,
                 'rcx': 2,
                 'rdx': 3,
                 'rsi': 4,
                 'rdi': 5,
                 'rbp': 6,
                 'rsp': 7,
                 'r8': 8,
                 'r9': 9,
                 'r10': 10,
                 'r11': 11,
                 'r12': 12,
                 'r13': 13,
                 'r14': 14,
                 'r15': 15,
                 'rip': 16,
                 'pc': 16,
                 'eflags': 17,
                 'cs': 18,
                 'ss': 19,
                 'ds': 20,
                 'es': 21,
                 'fs': 22,
                 'gs': 23,
                 }

    unemulated_instructions = []
    # Fixed: capstone_mode was assigned twice in the original; one
    # assignment is sufficient.
    capstone_mode = CS_MODE_64
    word_size = 64
|
Boussadia/weboob
|
modules/dlfp/browser.py
|
Python
|
agpl-3.0
| 9,155 | 0.005789 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
import re
import hashlib
import lxml
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound, BrowserHTTPError, BrowserIncorrectPassword, BrokenPageError
from weboob.capabilities.messages import CantSendMessage
from .pages.index import IndexPage, LoginPage
from .pages.news import ContentPage, NewCommentPage, NodePage, CommentPage, NewTagPage, RSSComment
from .pages.board import BoardIndexPage
from .pages.wiki import WikiEditPage
from .tools import id2url, url2id
# Browser
class DLFP(BaseBrowser):
DOMAIN = 'linuxfr.org'
PROTOCOL = 'https'
PAGES = {'https?://[^/]*linuxfr\.org/?': IndexPage,
'https?://[^/]*linuxfr\.org/compte/connexion': LoginPage,
'https?://[^/]*linuxfr\.org/news/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/wiki/(?!nouveau)[^/]+': ContentPage,
'https?://[^/]*linuxfr\.org/wiki': WikiEditPage,
'https?://[^/]*linuxfr\.org/wiki/nouveau': WikiEditPage,
'https?://[^/]*linuxfr\.org/wiki/[^\.]+/modifier': WikiEditPage,
'https?://[^/]*linuxfr\.org/suivi/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/sondages/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/users/[^\./]+/journaux/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/forums/[^\./]+/posts/[^\.]+': ContentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments/(\d+)': CommentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments/nouveau': NewCommentPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments': NodePage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/tags/nouveau': NewTagPage,
'https?://[^/]*linuxfr\.org/board/index.xml': BoardIndexPage,
'https?://[^/]*linuxfr\.org/nodes/(\d+)/comments.atom': RSSComment,
}
last_board_msg_id = None
    def parse_id(self, _id):
        """Resolve *_id* into an (url, id) pair.

        Accepts either a weboob content ID or a linuxfr URL.  Returns
        (url, None) for a direct comment URL, (None, None) when the
        value cannot be mapped, and (url, id) otherwise.
        """
        # A full "nodes/<n>/comments/<m>" URL is already usable as-is.
        if re.match('^https?://.*linuxfr.org/nodes/\d+/comments/\d+$', _id):
            return _id, None
        url = id2url(_id)
        if url is None:
            # _id was not an ID; maybe it is already a URL we know.
            if url2id(_id) is not None:
                url = _id
                _id = url2id(url)
            else:
                return None, None
        return url, _id
    def get_wiki_content(self, _id):
        """Fetch the wiki source of page *_id*.

        Returns the page body, '' when the page does not exist yet,
        or None when *_id* is not a valid wiki page identifier.
        """
        url, _id = self.parse_id('W.%s' % _id)
        if url is None:
            return None
        try:
            # The edit form ("/modifier") exposes the raw wiki source.
            self.location('%s/modifier' % url)
        except BrowserHTTPNotFound:
            return ''
        assert self.is_on_page(WikiEditPage)
        return self.page.get_body()
def _go_on_wi
|
ki_edit_page(self, name):
"""
Go on the wiki page named 'name'.
Return True if this is a new page, or False if
the page already exist.
Return None if it isn't a right wiki page name.
"""
url, _id = self.parse_id('W.%s' % name)
if url is None:
return None
try:
self.location('%s/modifier' % url)
except BrowserHTTPNotFound:
self.location('/wiki/nouveau')
|
new = True
else:
new = False
assert self.is_on_page(WikiEditPage)
return new
def set_wiki_content(self, name, content, message):
new = self._go_on_wiki_edit_page(name)
if new is None:
return None
if new:
title = name.replace('-', ' ')
else:
title = None
self.page.post_content(title, content, message)
    def get_wiki_preview(self, name, content):
        """Render a preview of *content* for the wiki page *name*.

        Returns the preview HTML, the article body if the site
        redirected to the content page, or None for a bad page name.
        """
        if self._go_on_wiki_edit_page(name) is None:
            return None
        self.page.post_preview(content)
        if self.is_on_page(WikiEditPage):
            return self.page.get_preview_html()
        elif self.is_on_page(ContentPage):
            return self.page.get_article().body
    def get_hash(self, url):
        """Return an MD5 hex digest of the Atom document at *url*.

        Returns None when the document contains no <entry> element.
        Used to detect feed changes between polls.
        """
        self.location(url)
        if self.page.document.xpath('//entry'):
            myhash = hashlib.md5(lxml.etree.tostring(self.page.document)).hexdigest()
            return myhash
        else:
            return None
    def get_content(self, _id):
        """Fetch the article or comment identified by *_id* (ID or URL).

        Returns a content object, or None when *_id* cannot be resolved.
        Raises BrokenPageError when the fetched page is of an
        unexpected type.
        """
        url, _id = self.parse_id(_id)
        if url is None:
            return None
        self.location(url)
        self.page.url = self.absurl(url)
        if self.is_on_page(CommentPage):
            content = self.page.get_comment()
        elif self.is_on_page(ContentPage):
            # A "#comment-<n>" fragment means a single comment was asked for.
            m = re.match('.*#comment-(\d+)$', url)
            if m:
                content = self.page.get_comment(int(m.group(1)))
            else:
                content = self.page.get_article()
        else:
            raise BrokenPageError('Not on a content or comment page (%r)' % self.page)
        if _id is not None:
            content.id = _id
        return content
def _is_comment_submit_form(self, form):
return 'comment_new' in form.action
    def post_comment(self, thread, reply_id, title, message):
        """Post *message* as a comment on *thread*.

        *reply_id* > 0 makes the comment a reply to that comment.
        Raises CantSendMessage on a bad thread ID, an HTTP error, or
        validation errors reported by the site.  Returns None.
        """
        url = id2url(thread)
        if url is None:
            raise CantSendMessage('%s is not a right ID' % thread)
        self.location(url)
        assert self.is_on_page(ContentPage)
        self.location(self.page.get_post_comment_url())
        assert self.is_on_page(NewCommentPage)
        self.select_form(predicate=self._is_comment_submit_form)
        self.set_all_readonly(False)
        if title is not None:
            self['comment[title]'] = title.encode('utf-8')
        self['comment[wiki_body]'] = message.encode('utf-8')
        # reply_id <= 0 means a new top-level comment (no parent).
        if int(reply_id) > 0:
            self['comment[parent_id]'] = str(reply_id)
        self['commit'] = 'Poster le commentaire'
        try:
            self.submit()
        except BrowserHTTPError as e:
            raise CantSendMessage('Unable to send message to %s.%s: %s' % (thread, reply_id, e))
        # The site redirects back to the node page when validation fails.
        if self.is_on_page(NodePage):
            errors = self.page.get_errors()
            if len(errors) > 0:
                raise CantSendMessage('Unable to send message: %s' % ', '.join(errors))
        return None
    def login(self):
        """Authenticate against linuxfr.org with the configured credentials.

        Does nothing for anonymous usage (no username).  Raises
        BrowserIncorrectPassword when the session is not established.
        Stores the authenticity token needed later for logout.
        """
        if self.username is None:
            return
        # not useful for the moment
        #self.location('/', no_login=True)
        data = {'account[login]': self.username,
                'account[password]': self.password,
                'account[remember_me]': 1,
                #'authenticity_token': self.page.get_login_token(),
                }
        self.location('/compte/connexion', urllib.urlencode(data), no_login=True)
        if not self.is_logged():
            raise BrowserIncorrectPassword()
        # Kept for close_session(): logout requires the CSRF token.
        self._token = self.page.document.xpath('//input[@name="authenticity_token"]')
def is_logged(self):
return (self.username is None or (self.page and self.page.is_logged()))
    def close_session(self):
        """Log out, if an authenticity token was captured at login."""
        if self._token:
            self.openurl('/compte/deconnexion', urllib.urlencode({'authenticity_token': self._token[0].attrib['value']}))
def plusse(self, url):
return self.relevance(url, 'for')
def moinse(self, url):
return self.relevance(url, 'against')
def relevance(self, url, what):
comment = self.get_content(url)
if comment is None:
raise ValueError('The given URL isn\'t a comment.')
if comment.relevance_token is None:
return False
res = self.readurl('%s%s' % (comment.relevance_url, what),
|
akx/shoop
|
shoop_workbench/settings/__init__.py
|
Python
|
agpl-3.0
| 1,393 | 0.000718 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.core.exceptions import ImproperlyConfigured
from shoop.utils.setup import Setup
from . import base_settings
def configure(setup):
    """Populate *setup* with the workbench settings.

    Applies the base settings first, then — when a local settings file
    is available — executes it and lets its ``configure`` function
    override the defaults.

    :param setup: the Setup namespace to configure
    :return: the same *setup* instance
    :raises ImproperlyConfigured: if the local settings file defines no
        ``configure`` function
    """
    base_settings.configure(setup)
    local_settings_file = os.getenv('LOCAL_SETTINGS_FILE')
    # Backward compatibility: Find from current directory, if
    # LOCAL_SETTINGS_FILE environment variable is unset
    if local_settings_file is None:
        cand = os.path.join(os.path.dirname(__file__), 'local_settings.py')
        if os.path.exists(cand):
            local_settings_file = cand
    # Load local settings from file
    if local_settings_file:
        local_settings_ns = {
            '__file__': local_settings_file,
        }
        with open(local_settings_file, 'rb') as fp:
            compiled = compile(fp.read(), local_settings_file, 'exec')
            exec(compiled, local_settings_ns)
        if 'configure' not in local_settings_ns:
            raise ImproperlyConfigured('No configure in local_settings')
        local_configure = local_settings_ns['configure']
        local_configure(setup)
    return setup
globals().update(Setup.configure(configure))
|
dserv01/BackupMailToHTML
|
SavableLazyMail.py
|
Python
|
gpl-3.0
| 10,434 | 0.007188 |
from LazyMail import LazyMail, encode_string
import email
import datetime
import os
import cgi
import re
import logging
from email.header import decode_header
__author__ = 'doms'
class SavableLazyMail(LazyMail):
    def __init__(self, config, mail_connection, uid, stats):
        # Keep the backup configuration and shared statistics object,
        # then defer the (lazy) mail fetching to the LazyMail base class.
        self.STATS = stats
        self.CONFIG = config
        LazyMail.__init__(self, mail_connection, uid)
#Gets a LazyMail and saves it to disk
#It will use the Hashcode as Filename and the date as path
#The Date-Path can be configured
#Returns true if successful. If it returns false there was at least a little failure. No rollback is made
    def saveMailToHardDisk(self):
        """Save this mail (HTML + attachments, optionally .eml) to disk.

        The target directory is derived from the mail's date via
        CONFIG.FOLDER_SYSTEM ("NoDate/" when the date cannot be parsed);
        the mail's hash code is used as the file name.  Returns True on
        full success, False on any (partial) failure; no rollback is made.
        """
        #Getting path from date
        parsed_mail = self.getParsedMail()
        date_raw = email.utils.parsedate_tz(parsed_mail['Date'])
        if date_raw:
            local_date_raw = datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_raw))
            path = local_date_raw.strftime(self.CONFIG.FOLDER_SYSTEM)
        else:
            path = "NoDate/"
        #Save to file
        try:
            #Create Path if not exist
            mail_folder_path = os.path.join(self.CONFIG.BACKUP_FOLDER_PATH, path)
            if not os.path.exists(mail_folder_path):
                os.makedirs(mail_folder_path)
            #save eml file which can be opened with thunderbird (is more or less what the server has returned)
            if self.CONFIG.SAVE_EML:
                eml_path = os.path.join(mail_folder_path, "eml", )
                if not os.path.exists(eml_path):
                    os.makedirs(eml_path)
                self.saveEMLtoFile(eml_path)
            #Save attachments: If there are none, False will be returned
            check_attachments, attachments = self.saveAttachmentsToHardDisk(mail_folder_path)
            #Create HTML-File
            full_path = os.path.join(mail_folder_path, self.getHashcode()) + ".html"
            file_message_without_attachment = open(full_path, 'w')
            check_html = self.writeToHTML(attachments, file_message_without_attachment)
            file_message_without_attachment.close()
        except Exception as e:
            #If anything has failed
            logging.error("Failed to save mail (%s,%s) because of %s", self.getDate(), self.getSubject(), e)
            return False
        # Distinguish full success / partial success / total failure.
        if check_attachments and check_html:
            logging.info("Saved mail (From: %s, Subject: %s) to %s", self.getFrom(), self.getSubject(), full_path)
            return True
        elif check_attachments or check_html:
            logging.info("Partly saved mail (From: %s, Subject: %s) to %s", self.getFrom(), self.getSubject(), full_path)
            return False
        else:
            logging.info("Could not save mail (From: %s, Subject: %s)", self.getFrom(), self.getSubject())
            return False
#Writes a lazy_mail to a given HTML-File
def writeToHTML(self, attachments, html_file):
check = True
try:
#HTML-Header
html_file.write("<!DOCTYPE html> <html lang=\"en\"> <head> <title>")
html_file.write(self.getSubject())
html_file.write("</title> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> </head> <body> <div class=\"row\"> <div class=\"col-md-12\">")
#HTML-Table with To,From,Subject
html_file.write("<table boarder=\"1\">\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>From: </td>\n")
html_file.write("\t\t<td>" + self.getFrom() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>To: </td>\n")
html_file.write("\t\t<td>" + self.getTo() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Subject: </td>\n")
html_file.write("\t\t<td>" + self.getSubject() + "</td>\n")
html_file.write("\t<tr>\n")
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Date: </td>\n")
html_file.write("\t\t<td>" + self.getDate() + "</td>\n")
html_file.write("\t<tr>\n")
#Information in Table if Attachments
if len(attachments) > 0:
html_file.write("\t<tr>\n")
html_file.write("\t\t<td>Attachments: </td><td>")
for attachment in attachments:
html_file.write("<a href=\"" + attachment[0] + "\">" + cgi.escape(encode_string(str(attachment[1]), None)) + "</a>")
if attachment is not attachments[-1]:
html_file.write(", ")
html_file.write("</td>\n")
html_file.write("\t<tr>\n")
html_file.write("</table>\n")
html_file.write("<div class=\"col-md-8 col-md-offset-1 footer\"> <hr /><div style=\"white-space: pre-wrap;\">")
#Write content to File
check, content_of_mail = self.getContent()
if content_of_mail['text']:
html_file.write("<pre>")
strip_header = re.sub(r"(?i)<html>.*?<head>.*?</head>.*?<body>", "", content_of_mail['text'],
flags=re.DOTALL)
strip_header = re.sub(r"(?i)</body>.*?</html>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)<!DOCTYPE.*?>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)POSITION: absolute;", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)TOP: .*?;", "", strip_header, flags=re.DOTALL)
html_file.write(strip_header)
html_file.write("</pre>\n")
if content_of_mail['html']:
strip_header = re.sub(r"(?i)<html>.*?<head>.*?</head>.*?<body>", "", content_of_mail['html'],
flags=re.DOTALL)
strip_header = re.sub(r"(?i)</body>.*?</html>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)<!D
|
OCTYPE.*?>", "", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)POSITION: absolute;",
|
"", strip_header, flags=re.DOTALL)
strip_header = re.sub(r"(?i)TOP: .*?;", "", strip_header, flags=re.DOTALL)
html_file.write(strip_header)
#HTML-Footer
#html_file.write("</div> <div class=\"col-md-8 col-md-offset-1 footer\"> <hr /><div style=\"white-space: pre-wrap;\">")
#html_file.write(lazy_mail.getHeader())
html_file.write("</div></div></body></html>")
except Exception as e:
logging.error("Could not write HTML because of %s", e)
raise e
return check
#Saves the attachments of a LazyMail to disk. Uses the Path 'folder_prefix-filename'
#E.g for folder_prefix="2014/05/03/4a9fd924" and filename="photo.jpg" it will be "2014/05/03/4a9fd924-photo.jpg"
def saveAttachmentsToHardDisk(self, folder):
attachments_tuple_for_html = []
filename_count = dict() #to handle attachments with same name
successful = True
for part in self.getParsedMail().walk():
attachment_filename = "(Could not encode)"
attachment_filename_encoding = None
try:
content_maintype = part.get_content_maintype()
if content_maintype == 'multipart' or content_maintype == 'text' or content_maintype == 'html':
continue
if part.get('Content-Disposition') == None:
continue
try:
attachment_filename = decode_header(part.get_filename())[0][0]
attachment_filename_encoding = decode_header(part.get_filename())[0][1]
except Exception as e:
logging.debug("Workaround Filename Encoding")
logging.debug(str(part))
try:
attachment_filename = encode_string(part.get_filename(), None) #"(could not encode filename)"
|
alexandrovteam/pyImagingMSpec
|
pyImagingMSpec/scripts/process_mz_query.py
|
Python
|
apache-2.0
| 1,468 | 0.017711 |
from __future__ import print_function
import numpy as np
import sys
import bisect
import datetime
import gzip
def my_print(s):
    """Log *s* to stderr, prefixed with the current timestamp."""
    timestamp = datetime.datetime.now()
    print("[{0}] {1}".format(timestamp, s), file=sys.stderr)
if len(sys.argv) < 3:
    print("Usage: process_mz_query.py dump_file[.gz] query_file")
    exit(0)

my_print("Reading dump file from %s..." % sys.argv[1])

# Transparently support gzip-compressed dump files.
if sys.argv[1][-2:] == 'gz':
    f = gzip.open(sys.argv[1], 'rb')
else:
    f = open(sys.argv[1])

spectra = []
arr = []
for line in f:
    # Each dump line is pipe-separated: group_id|intensities|mzs.
    arr = line.strip().split("|")
    if len(arr) < 3:
        continue
    spectra.append( ( arr[0], np.array([ float(x) for x in arr[2].split(" ") ]), np.array([ float(x) for x in arr[1].split(" ") ]) ) )
f.close()

## at this point, spectra array contains triples of the form
## (group_id, list of mzs, list of intensities)

my_print("Reading and processing queries from %s..." % sys.argv[2])
def get_one_group_total(mz_lower, mz_upper, mzs, intensities):
    """Sum the intensities whose (sorted) m/z lies in [mz_lower, mz_upper]."""
    lo = bisect.bisect_left(mzs, mz_lower)
    hi = bisect.bisect_right(mzs, mz_upper)
    return np.sum(intensities[lo:hi])
def get_all_totals(mz, tol, spectra):
    """For every spectrum, sum the intensity within mz +/- tol.

    Returns a list of (group_id, total) tuples, one per spectrum.
    """
    lower = mz - tol
    upper = mz + tol
    return [(group_id, get_one_group_total(lower, upper, mzs, intensities))
            for group_id, mzs, intensities in spectra]
# Answer each query line ("mz,tolerance") with space-separated
# "group_id:total" pairs on stdout.
with open(sys.argv[2]) as f:
    for line in f:
        arr = line.strip().split(",")
        print(" ".join([ "%s:%.3f" % x for x in get_all_totals(float(arr[0]), float(arr[1]), spectra)]))
my_print("All done!")
exit(0)
|
FederatedAI/FATE
|
python/federatedml/feature/binning/quantile_tool.py
|
Python
|
apache-2.0
| 3,447 | 0.001451 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.statistic import data_overview
from federatedml.util import consts, LOGGER
class QuantileBinningTool(QuantileBinning):
    """
    Use for quantile binning data directly.
    """

    def __init__(self, bin_nums=consts.G_BIN_NUM, param_obj: FeatureBinningParam = None,
                 abnormal_list=None, allow_duplicate=False):
        # Build a default parameter object when none is supplied.
        if param_obj is None:
            param_obj = FeatureBinningParam(bin_num=bin_nums)
        super().__init__(params=param_obj, abnormal_list=abnormal_list, allow_duplicate=allow_duplicate)
        self.has_fit = False

    def fit_split_points(self, data_instances):
        # Remember that a fit happened so get_quantile_point() can be guarded.
        res = super(QuantileBinningTool, self).fit_split_points(data_instances)
        self.has_fit = True
        return res

    def fit_summary(self, data_instances, is_sparse=None):
        """Compute per-feature quantile summaries as a distributed table."""
        if is_sparse is None:
            is_sparse = data_overview.is_sparse_data(data_instances)
        LOGGER.debug(f"is_sparse: {is_sparse}")
        f = functools.partial(self.feature_summary,
                              params=self.params,
                              abnormal_list=self.abnormal_list,
                              cols_dict=self.bin_inner_param.bin_cols_map,
                              header=self.header,
                              is_sparse=is_sparse)
        summary_dict_table = data_instances.mapReducePartitions(f, self.copy_merge)
        # summary_dict = dict(summary_dict.collect())
        if is_sparse:
            # Sparse data omits zero entries; inject the total row count so
            # the summaries account for the implicit zeros.
            total_count = data_instances.count()
            summary_dict_table = summary_dict_table.mapValues(lambda x: x.set_total_count(total_count))
        return summary_dict_table

    def get_quantile_point(self, quantile):
        """
        Return the specific quantile point value

        Parameters
        ----------
        quantile : float, 0 <= quantile <= 1
            Specify which column(s) need to apply statistic.

        Returns
        -------
        return a dict of result quantile points.
        eg.
        quantile_point = {"x1": 3, "x2": 5... }
        """
        if not self.has_fit:
            raise RuntimeError("Quantile Binning Tool's split points should be fit before calling"
                               " get quantile points")
        f = functools.partial(self._get_split_points,
                              allow_duplicate=self.allow_duplicate,
                              percentile_rate=[quantile])
        quantile_points = dict(self.summary_dict.mapValues(f).collect())
        # _get_split_points returns a one-element list per feature.
        quantile_points = {k: v[0] for k, v in quantile_points.items()}
        return quantile_points

    def get_median(self):
        """Convenience wrapper: the 0.5 quantile of every feature."""
        return self.get_quantile_point(0.5)
|
MrNuggelz/sklearn-glvq
|
sklearn_lvq/gmlvq.py
|
Python
|
bsd-3-clause
| 12,014 | 0.000083 |
# -*- coding: utf-8 -*-
# Author: Joris Jensen <jjensen@techfak.uni-bielefeld.de>
#
# License: BSD 3 clause
from __future__ import division
import math
from math import log
import numpy as np
from scipy.optimize import minimize
from .glvq import GlvqModel
from sklearn.utils import validation
class GmlvqModel(GlvqModel):
"""Generalized Matrix Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different numbers
per class.
initial_prototypes : array-like,
shape = [n_prototypes, n_features + 1], optional
Prototypes to start with. If not given initialization near the class
means. Class label must be placed as last entry of each prototype
initial_matrix : array-like, shape = [dim, n_features], optional
Relevance matrix to start with.
If not given random initialization for rectangular matrix and unity
for squared matrix.
regularization : float, optional (default=0.0)
Value between 0 and 1. Regularization is done by the log determinant
of the relevance matrix. Without regularization relevances may
degenerate to zero.
dim : int, optional (default=nb_features)
Maximum rank or projection dimensions
max_iter : int, optional (default=2500)
The maximum number of iterations.
gtol : float, optional (default=1e-5)
Gradient norm must be less than gtol before successful
termination of l-bfgs-b.
beta : int, optional (default=2)
Used inside phi.
1 / (1 + np.math.exp(-beta * x))
C : array-like, shape = [2,3] ,optional
Weights for wrong classification of form (y_real,y_pred,weight)
Per default all weights are one, meaning you only need to specify
the weights not equal one.
display : boolean, optional (default=False)
Print information about the bfgs steps.
random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
w_ : array-like, shape = [n_prototypes, n_features]
Prototype vector, where n_prototypes in the number of prototypes and
n_features is the number of features
c_w_ : array-like, shape = [n_prototypes]
        Prototype classes

    classes_ : array-like, shape = [n_classes]
Array containing labels.
dim_ : int
Maximum rank or projection dimensions
omega_ : array-like, shape = [dim, n_features]
Relevance matrix
See also
--------
GlvqModel, GrlvqModel, LgmlvqModel
"""
    def __init__(self, prototypes_per_class=1, initial_prototypes=None,
                 initial_matrix=None, regularization=0.0, dim=None,
                 max_iter=2500, gtol=1e-5, beta=2, C=None, display=False,
                 random_state=None):
        # Delegate the options shared with plain GLVQ to the base class and
        # keep only the matrix-specific ones (relevance matrix seed,
        # regularization strength, projection rank) on this instance.
        super(GmlvqModel, self).__init__(prototypes_per_class,
                                         initial_prototypes, max_iter,
                                         gtol, beta, C, display, random_state)
        self.regularization = regularization
        self.initial_matrix = initial_matrix
        self.initialdim = dim
    def _optgrad(self, variables, training_data, label_equals_prototype,
                 random_state, lr_relevances=0, lr_prototypes=1):
        """Gradient of the GMLVQ cost w.r.t. prototypes and relevance matrix.

        *variables* is the flattened stack of prototypes followed by the
        relevance matrix rows; *label_equals_prototype* is a boolean
        (n_data, n_prototypes) matrix.  lr_prototypes / lr_relevances
        select which part of the gradient is computed.  Returns the
        flattened gradient.
        """
        n_data, n_dim = training_data.shape
        variables = variables.reshape(variables.size // n_dim, n_dim)
        nb_prototypes = self.c_w_.shape[0]
        omega_t = variables[nb_prototypes:].conj().T
        # dist = _squared_euclidean(training_data.dot(omega_t),
        #                           variables[:nb_prototypes].dot(omega_t))
        dist = self._compute_distance(training_data, variables[:nb_prototypes],
                                      omega_t.T)
        # Closest wrong-class and closest correct-class prototype per sample.
        d_wrong = dist.copy()
        d_wrong[label_equals_prototype] = np.inf
        distwrong = d_wrong.min(1)
        pidxwrong = d_wrong.argmin(1)
        d_correct = dist
        d_correct[np.invert(label_equals_prototype)] = np.inf
        distcorrect = d_correct.min(1)
        pidxcorrect = d_correct.argmin(1)
        distcorrectpluswrong = distcorrect + distwrong
        distcorectminuswrong = distcorrect - distwrong
        # mu is the relative distance difference, squashed through phi'.
        mu = distcorectminuswrong / distcorrectpluswrong
        mu = np.vectorize(self.phi_prime)(mu)
        mu *= self.c_[label_equals_prototype.argmax(1), d_wrong.argmin(1)]
        g = np.zeros(variables.shape)
        distcorrectpluswrong = 4 / distcorrectpluswrong ** 2
        if lr_relevances > 0:
            gw = np.zeros(omega_t.T.shape)
        for i in range(nb_prototypes):
            # Samples for which prototype i is the winner (correct/wrong).
            idxc = i == pidxcorrect
            idxw = i == pidxwrong
            dcd = mu[idxw] * distcorrect[idxw] * distcorrectpluswrong[idxw]
            dwd = mu[idxc] * distwrong[idxc] * distcorrectpluswrong[idxc]
            if lr_relevances > 0:
                difc = training_data[idxc] - variables[i]
                difw = training_data[idxw] - variables[i]
                gw -= np.dot(difw * dcd[np.newaxis].T, omega_t).T.dot(difw) - \
                    np.dot(difc * dwd[np.newaxis].T, omega_t).T.dot(difc)
                if lr_prototypes > 0:
                    g[i] = dcd.dot(difw) - dwd.dot(difc)
            elif lr_prototypes > 0:
                g[i] = dcd.dot(training_data[idxw]) - \
                       dwd.dot(training_data[idxc]) + \
                       (dwd.sum(0) - dcd.sum(0)) * variables[i]
        f3 = 0
        if self.regularization:
            # Gradient of the log-determinant regularizer.
            f3 = np.linalg.pinv(omega_t.conj().T).conj().T
        if lr_relevances > 0:
            g[nb_prototypes:] = 2 / n_data \
                * lr_relevances * gw - self.regularization * f3
        if lr_prototypes > 0:
            g[:nb_prototypes] = 1 / n_data * lr_prototypes \
                * g[:nb_prototypes].dot(omega_t.dot(omega_t.T))
        # NOTE(review): the jitter term evaluates to 1 + 0.0001*rand - 0.5,
        # i.e. it roughly halves the gradient; presumably
        # 1 + 0.0001*(rand - 0.5) was intended — verify against upstream.
        g = g * (1 + 0.0001 * random_state.rand(*g.shape) - 0.5)
        return g.ravel()
    def _optfun(self, variables, training_data, label_equals_prototype):
        """GMLVQ cost: sum of phi(mu) over samples, minus the optional
        log-determinant regularizer on the relevance matrix.

        mu = (d_correct - d_wrong) / (d_correct + d_wrong) per sample.
        """
        n_data, n_dim = training_data.shape
        variables = variables.reshape(variables.size // n_dim, n_dim)
        nb_prototypes = self.c_w_.shape[0]
        omega_t = variables[nb_prototypes:]  # .conj().T
        # dist = _squared_euclidean(training_data.dot(omega_t),
        #                           variables[:nb_prototypes].dot(omega_t))
        dist = self._compute_distance(training_data, variables[:nb_prototypes],
                                      omega_t)
        # Distance to the closest wrong-class prototype per sample.
        d_wrong = dist.copy()
        d_wrong[label_equals_prototype] = np.inf
        distwrong = d_wrong.min(1)
        # Distance to the closest correct-class prototype per sample.
        d_correct = dist
        d_correct[np.invert(label_equals_prototype)] = np.inf
        distcorrect = d_correct.min(1)
        distcorrectpluswrong = distcorrect + distwrong
        distcorectminuswrong = distcorrect - distwrong
        mu = distcorectminuswrong / distcorrectpluswrong
        if self.regularization > 0:
            reg_term = self.regularization * log(
                np.linalg.det(omega_t.conj().T.dot(omega_t)))
            return np.vectorize(self.phi)(mu).sum(0) - reg_term  # f
        return np.vectorize(self.phi)(mu).sum(0)
def _optimize(self, x, y, random_state):
if not isinstance(self.regularization,
float) or self.regularization < 0:
raise ValueError("regularization must be a positive float ")
nb_prototypes, nb_features = self.w_.shape
if self.initialdim is None:
self.dim_ = nb_features
elif not isinstance(self.initialdim, int) or self.initialdim <= 0:
raise ValueError("dim must be an positive int")
else:
self.dim_ = self.initialdim
if self.initial_matrix is None:
if self.dim_ == nb_features:
|
earthreader/libearth
|
libearth/sanitizer.py
|
Python
|
gpl-2.0
| 5,176 | 0.000193 |
""":mod:`libearth.sanitizer` --- Sanitize HTML tags
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import cgi
try:
import htmlentitydefs
import HTMLParser
except ImportError:
from html import entities as htmlentitydefs, parser as HTMLParser
import re
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
from .compat import unichr, xrange
__all__ = 'HtmlSanitizer', 'MarkupTagCleaner', 'clean_html', 'sanitize_html'
def clean_html(html):
    """Strip *all* markup tags from ``html`` string.

    That means, it simply makes the given ``html`` document a plain text.

    :param html: html string to clean
    :type html: :class:`str`
    :returns: cleaned plain text
    :rtype: :class:`str`

    """
    cleaner = MarkupTagCleaner()
    cleaner.feed(html)
    return ''.join(cleaner.fed)
def sanitize_html(html, base_uri=None):
    """Sanitize the given ``html`` string.

    Removes tags and attributes that are not secure nor useful for an
    RSS reader layout:

    - ``<script>`` tags
    - ``display: none;`` styles
    - JavaScript event attributes e.g. ``onclick``, ``onload``
    - ``href`` attributes that start with ``javascript:``, ``jscript:``,
      ``livescript:``, ``vbscript:``, ``data:``, ``about:``, or ``mocha:``.

    Also rebases all links on ``base_uri`` if it's given.

    :param html: html string to sanitize
    :type html: :class:`str`
    :param base_uri: an optional base url to be used throughout the document
                     for relative url addresses
    :type base_uri: :class:`str`
    :returns: cleaned plain text
    :rtype: :class:`str`

    .. versionadded:: 0.4.0
       The ``base_uri`` parameter.

    """
    sanitizer = HtmlSanitizer(base_uri)
    sanitizer.feed(html)
    return ''.join(sanitizer.fed)
class MarkupTagCleaner(HTMLParser.HTMLParser):
    """Tag-stripping parser used internally by :func:`clean_html()`.

    Collects every piece of character data into :attr:`fed`; the markup
    tags themselves are simply dropped.
    """

    # HTML entity name -> Unicode code point.
    entity_map = htmlentitydefs.name2codepoint

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.fed = []

    def handle_data(self, d):
        self.fed.append(d)

    def handle_entityref(self, name):
        # Known entities are decoded; unknown ones are silently dropped.
        codepoint = self.entity_map.get(name)
        if codepoint is not None:
            self.fed.append(unichr(codepoint))

    def handle_charref(self, name):
        # Numeric references: "xNN" is hexadecimal, plain digits decimal.
        if name.startswith('x'):
            self.fed.append(unichr(int(name[1:], 16)))
        else:
            self.fed.append(unichr(int(name)))
class HtmlSanitizer(HTMLParser.HTMLParser):
    """HTML parser that is internally used by :func:`sanitize_html()`
    function.

    Drops ``<script>`` elements, JavaScript ``on*`` event attributes,
    links with dangerous URI schemes and ``display:`` styles, and
    optionally rebases relative links on ``base_uri``.
    """

    #: (:class:`re.RegexObject`) The regular expression pattern that matches to
    #: disallowed CSS properties.
    DISALLOWED_STYLE_PATTERN = re.compile(
        r'(^|;)\s*display\s*:\s*[a-z-]+\s*(?:;\s*|$)',
        re.IGNORECASE
    )

    #: (:class:`collections.Set`) The set of disallowed URI schemes e.g.
    #: ``javascript:``.
    DISALLOWED_SCHEMES = frozenset([
        'javascript', 'jscript', 'livescript', 'vbscript', 'data',
        'about', 'mocha'
    ])

    def __init__(self, base_uri):
        HTMLParser.HTMLParser.__init__(self)
        self.base_uri = base_uri
        self.fed = []
        # While True, everything (inside a <script> element) is discarded.
        self.ignore = False

    def handle_starttag(self, tag, attrs):
        if tag == 'script':
            self.ignore = True
            return
        elif self.ignore:
            return
        remove_css = self.DISALLOWED_STYLE_PATTERN.sub
        self.fed.extend(('<', tag))
        disallowed_schemes = tuple(scheme + ':'
                                   for scheme in self.DISALLOWED_SCHEMES)
        if self.base_uri is not None and tag in ('a', 'link') and attrs:
            # Rebase (possibly relative) link targets on the base URI.
            for i, (a, v) in enumerate(attrs):
                if a == 'href':
                    attrs[i] = a, urlparse.urljoin(self.base_uri, v)
        # Re-emit attributes, dropping JavaScript on* handlers, blanking
        # hrefs with dangerous schemes and removing display: styles.
        # quote=True is required so '"' inside a value cannot break out of
        # the quoted attribute.  (cgi.escape was removed in Python 3.8;
        # html.escape is the modern replacement.)
        self.fed.extend(
            chunk
            for name, value in attrs
            if not name.startswith('on')
            for chunk in (
                [' ', name]
                if value is None else
                [
                    ' ', name, '="', cgi.escape(
                        ('' if value.startswith(disallowed_schemes) else value)
                        if name == 'href' else
                        (remove_css('\\1', value) if name == 'style' else value),
                        True
                    ), '"'
                ]
            )
        )
        self.fed.append('>')

    def handle_endtag(self, tag):
        if tag == 'script':
            self.ignore = False
            return
        self.fed.extend(('</', tag, '>'))

    def handle_data(self, d):
        if self.ignore:
            return
        self.fed.append(d)

    def handle_entityref(self, name):
        if self.ignore:
            return
        self.fed.extend(('&', name, ';'))

    def handle_charref(self, name):
        if self.ignore:
            return
        self.fed.extend(('&#' + name + ';'))

    def handle_comment(self, data):
        if self.ignore:
            return
        self.fed.extend(('<!-- ', data, ' -->'))
|
COMP90024CloudComputing/Submit_Cloud_Computing
|
analysis.py
|
Python
|
apache-2.0
| 8,425 | 0.01543 |
import couchdb
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import json
import matplotlib.path as mplPath
import numpy as np
import requests
from textblob import TextBlob
import Queue
import time, socket, threading
import re
from pycorenlp import StanfordCoreNLP
from google.cloud import language
import random
# Suburb boundary polygons (GeoJSON features carrying a 'postcode' property).
with open ('polygon.json') as data_file:
    polygon=json.load(data_file)
# Google Natural Language API client (credentials taken from the environment).
language_client = language.Client()
#Connect to couchdb server
couch = couchdb.Server('http://127.0.0.1:5984')
data_db = couch['twitter_data']   # analyzed tweets are written here
result_db = couch['suburb_data']  # per-suburb keyword counters
count = 0
num = 0
# Keyword lists used to classify tweet topics.
fitness=['fitness','gym','workout', 'push up', 'deadlift','bench press', 'squat','crunch','diets','weight loss','body building','yoga']
sports=['football','basketball','soccer','cricket','baseball','tennis','rugby','golf','badminton','table tennis']
outdoor=['outdoor', 'camping','trekking','swimming','surfing','running','cycling','climbing','hiking','fishing']
keywords={'fitness':fitness,'sports':sports,'outdoor':outdoor}
# Connect to NLP server
nlp = StanfordCoreNLP('http://localhost:9000')
print ' Connect to NLP server '
# One work queue per analyzer thread (consumed by dowork, fed by tcplink).
q1=Queue.Queue()
q2=Queue.Queue()
q3=Queue.Queue()
q4=Queue.Queue()
q5=Queue.Queue()
def dowork(q):
while True:
while not q.empty():
print "Read from queue"
#read from queue
try:
queue_data = q.get()
try:
json_data = json.loads(queue_data)
print " Load data"
except:
print " Fail load data"
continue
postcode = 0
text = json_data['text']
coordinates = json_data['coordinates']
print coordinates
_id = json_data['id']
lang = json_data['lang']
if lang!= "en":
print "Not english"
continue
place = json_data['place']
is_finance = json_data['is_finance']
created_at = json_data['created_at']
encodetext=text.encode("ascii","ignore")
plaintext = re.sub('http.*', '', encodetext) + '.'
# Get postcode
if coordinates!= 'null':
for a in polygon['features']:
bbPath = mplPath.Path(np.array(a['geometry']['coordinates'][0][0]))
#print ("%s in %s" %(bbPath.contains_point(coordinates),a['properties']['postcode']))
if bbPath.contains_point(coordinates):
print "Contains point"
postcode = str(a['properties']['postcode'].encode('ascii'))
print ("%s in %s" %(bbPath.contains_point(coordinates),a['properties']['postcode']))
break
# Search for keywords
for k in keywords:
for b in keywords.get(k):
if b in text.lower():
for suburbs in result_db:
doc = result_db.get(suburbs)
if postcode == doc['postcode']:
doc[k] += 1
result_db.save(doc)
searched_for_brunch = 'true'
print " Found one %s result in %s" %(k, postcode)
break
else:
searched_for_brunch = 'false'
print "Finish postcode and keywords"
# Stanford NLP
res = nlp.annotate(plaintext,
properties={
'annotators': 'sentiment',
'outputFormat': 'json',
'time
|
out': '1000' })
sentiment_value = 0
tweets = ""
count_tweet_sentence = 0
sentiment_desc=""
for s in res["sentences"]:
sentiment_value += int(s['sentimentValue'].encode('ascii'))
tweets += " ".join([t["word"] for t in s["tokens"]])
count_tweet_sentence
|
= s["index"]
if plaintext != '' and count_tweet_sentence == 0:
count_tweet_sentence = 1
if count_tweet_sentence != 0:
# Calculate sentiment value
average_sentiment_value= sentiment_value/count_tweet_sentence
if sentiment_value/count_tweet_sentence == 0:
sentiment_desc = "Very negative"
if sentiment_value/count_tweet_sentence ==1:
sentiment_desc = "Negative"
if sentiment_value/count_tweet_sentence ==2:
sentiment_desc = "Neutral"
if sentiment_value/count_tweet_sentence ==3:
sentiment_desc = "Positive"
if sentiment_value/count_tweet_sentence ==4:
sentiment_desc = "Very positive"
print "tweets: %s has sentiment value %d" % (tweets, sentiment_value/count_tweet_sentence)
google_score=0
magnitude = 0
# Google nature language API
document = language_client.document_from_text(plaintext)
sentiment = document.analyze_sentiment().sentiment
google_score = sentiment.score
magnitude = sentiment.magnitude
print "%s has google score of %s" % (plaintext, str(google_score))
# Textblob
b=TextBlob(plaintext)
polarity = b.sentiment[0]
subjectivity = b.sentiment[1]
print "Save textblob data"
tweet_data = {'id':_id, 'text':plaintext, 'coordinates':coordinates, 'postcode':postcode, 'lang':lang,'city':place, 'is_finance':is_finance, 'created_at':created_at,
'searched_for_brunch':searched_for_brunch, 'sentiment_value':average_sentiment_value, 'sentiment':sentiment_desc, 'sentiment_score_google':google_score,
'magnitude':magnitude, 'polarity':polarity, 'subjectivity':subjectivity}
try:
data_db[str(_id)] = tweet_data
print ' Analyzed and saved one tweet to database'
except:
print "Skip update duplicate"
except Exception as e:
print e
continue
print "None in queue"
def tcplink(sock, addr):
print 'Accept new connection from %s:%s...' % addr
sock.send('Welcome!')
while True:
data = sock.recv(100000)
if data == 'exit' :
break
if data:
# Distribute work to threads
x = random.randint(1,5)
if x == 1:
q1.put(data)
print "Put to queue 1"
if x == 2:
q2.put(data)
print "Put to queue 2"
if x == 3:
q3.put(data)
print "Put to queue 3"
if x == 4:
q4.put(data)
print "Put to queue 4"
if x == 5:
q5.put(data)
print "Put to queue 5"
print "Disconnected"
# TCP server socket: the harvester connects here and streams raw tweet JSON.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0',9999))
s.listen(15)
# start 5 worker threads
a=threading.Thread(target=dowork,args=(q1,))
a.start()
print " Start process 1 analyzing message"
b=threading.Thread(target=dowork,args=(q2,))
b.start()
print " Start process 2 analyzing message"
c=threading.Thread(target=dowork,args=(q3,))
c.start()
print " Start process 3 analyzing message"
d=threading.Thread(target=dowork,args=(q4,))
d.start()
print " Start process 4 analyzing message"
e=threading.Thread(
|
freeitaly/Trading-System
|
vn.trader/dataRecorderAlone/DataRecorder -Paolo版本/uiDataRecorder1.py
|
Python
|
mit
| 13,588 | 0.003854 |
# encoding: UTF-8
'''
1. 合约选择
Contracts_init.json中写入需要订阅的期货品种,如需要订阅pp和IF,写入
{
"pp": "0",
"IF": "0"
}
2. 主力合约判断
运行程序后,点击‘合约初始化’按钮,程序会获取通联的期货数据,自动判断主力合约。并写入Contracts_init.json中。
注:通联选择的是持仓量判断主力,本程序选择的是昨日成交量判断,两者不同时会给出提示。
3. 合约订阅
4. Tick存储
'''
import json
import os
import pymongo
import tushare as ts
# ts.set_token('575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b')
ts.set_token('ced15aa738976abf2136cc9e197fbcd34776e0f8183c7660b7fdcd626a715b3b') # paolo
import time
from uiBasicWidget import QtGui, QtCore, BasicCell
from eventEngine import *
from ctaAlgo.ctaBase import *
from vtConstant import *
from vtGateway import VtSubscribeReq
########################################################################
class DataRecorder(QtGui.QFrame):
"""
用来记录历史数据的工具(基于CTA策略),
可单独运行,
本工具会记录Tick数据。
"""
# 策略的基本参数
name = u'期货合约Tick订阅@存储' # 策略实例名称
tickDbName = TICK_DB_NAME
# barDbName = MINUTE_DB_NAME
signal = QtCore.pyqtSignal(type(Event()))
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(DataRecorder, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.ctaEngine = self.mainEngine.ctaEngine
self.ctpConnected = False # 是否登录CTP
self.contractsDict = {} # 保存订阅symbol主力合约的字典
self.initUi()
self.registerEvent()
# 记录日志
self.writeCtaLog(u'CTA引擎启动成功')
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'CTA@ Tick订阅&存储')
# 按钮
ctpButton = QtGui.QPushButton(u'登录CTP')
mongoButton = QtGui.QPushButton(u'连接数据库')
initButton = QtGui.QPushButton(u'
|
合约初始化 (判断主力合约)')
startButton = QtGui.QPushButton(u'启动订阅')
stopButton = QtGui.QPushButton(u'停止订阅')
ctpButton.clicked.connect(self.ctpConnect)
mongoButton.clicked.connect(self.dbConnect)
|
initButton.clicked.connect(self.contractsInit) # 初始化合约,主力合约判断
startButton.clicked.connect(self.startAll)
stopButton.clicked.connect(self.stopAll)
# 放置订阅合约(由于订阅的合约较多,所以选择了两个monitor展示订阅的合约)
self.symbolMonitor1 = SymbolMonitor()
self.symbolMonitor2 = SymbolMonitor()
# CTA组件的日志监控
self.ctaLogMonitor = QtGui.QTextEdit()
self.ctaLogMonitor.setReadOnly(True)
self.ctaLogMonitor.setMaximumHeight(200)
# 设置布局
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(ctpButton)
hbox1.addWidget(mongoButton)
hbox1.addWidget(initButton)
hbox1.addStretch()
# hbox2 = QtGui.QHBoxLayout()
# hbox2.addWidget(initButton)
# hbox2.addStretch()
hbox3 = QtGui.QHBoxLayout()
hbox3.addWidget(startButton)
hbox3.addWidget(stopButton)
hbox3.addStretch()
hbox4 = QtGui.QHBoxLayout()
hbox4.addWidget(self.symbolMonitor1)
hbox4.addWidget(self.symbolMonitor2)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox1)
# vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
vbox.addWidget(self.ctaLogMonitor)
self.setLayout(vbox)
#----------------------------------------------------------------------
def dbConnect(self):
"""连接MongoDB数据库"""
if not self.mainEngine.dbClient:
try:
self.mainEngine.dbConnect()
self.writeCtaLog(u'MongoDB连接成功')
except pymongo.errors.ConnectionFailure:
self.writeCtaLog(u'MongoDB连接失败')
#----------------------------------------------------------------------
    def ctpConnect(self):
        """Log in to the CTP trading gateway and remember that we did."""
        # Log in to CTP
        self.mainEngine.connect('CTP')
        self.ctpConnected = True
        self.writeCtaLog(u'CTP登录成功')
#----------------------------------------------------------------------
    def contractsInit(self):
        """Load the product list, determine each product's dominant (main)
        contract from yesterday's market data, and display the result."""
        # Load the JSON file listing the products to subscribe
        fileName = 'Contracts_init.json'
        try:
            f = open(fileName, 'r')
        except IOError:
            self.writeCtaLog(u'读取合约初始化信息出错,请检查')
            return
        # Parse the JSON file
        self.contractsDict = json.load(f)
        f.close()
        # Get the previous trading day (lastDate)
        todayDate = time.strftime('%Y-%m-%d',time.localtime())
        mt = ts.Master()
        Cal = mt.TradeCal(exchangeCD='XSGE',beginDate=''.join(todayDate.split('-')),endDate=''.join(todayDate.split('-')),field='')
        lastDate = Cal.at[0, 'prevTradeDate']
        lastDate = ''.join(lastDate.split('-'))
        # Determine the dominant contract for each product
        st = ts.Market()
        for contract in self.contractsDict.keys():
            data = st.MktMFutd(tradeDate=lastDate,contractObject=contract,field='ticker,mainCon,turnoverVol')
            # Datayes (TongLian) main contract, chosen by open interest
            ticker1 = data[data['mainCon'] == 1]['ticker'].values
            # Main contract chosen by yesterday's traded volume
            ticker2 = data.at[data['turnoverVol'].argmax(), 'ticker']
            # Default to the volume-based main contract
            self.contractsDict[contract] = unicode(ticker2)
            # Log a warning when the two "main contract" definitions disagree
            if ticker1 != ticker2:
                self.writeCtaLog(u'期货 %s: 请确认主力合约(默认使用成交量):\n %s -通联持仓主力\n %s -昨日成交量主力' % (contract, ticker1, ticker2))
                print u'期货 %s: 请确认主力合约(默认使用成交量):\n %s -通联持仓主力\n %s -昨日成交量主力' % (contract, ticker1, ticker2)
                print data
        # Write the resolved contracts back to the file
        f = json.dumps(self.contractsDict)
        file = open(fileName, 'w')
        file.write(f)
        file.close()
        self.writeCtaLog(u'合约初始化成功')
        # Split the subscribed contracts over two monitors (many contracts);
        # only the ticker code is shown, no last-tick timestamps.
        contractsDict1 = {}
        contractsDict2 = {}
        total = len(self.contractsDict)
        self.writeCtaLog(u'订阅合约数量: %s' % total)
        for i, (symbol, contract) in enumerate(self.contractsDict.items()):
            if i < (total + 1) / 2:
                contractsDict1[symbol] = contract
            else:
                contractsDict2[symbol] = contract
        # Push to the monitors
        self.symbolMonitor1.contractsDict = contractsDict1
        self.symbolMonitor1.updateTable()
        self.symbolMonitor2.contractsDict = contractsDict2
        self.symbolMonitor2.updateTable()
#----------------------------------------------------------------------
def startAll(self):
if self.ctpConnected is False:
self.writeCtaLog(u'未登录CTP, 期货Tick 订阅失败')
return
if self.mainEngine.dbClient is None:
self.writeCtaLog(u'未连接数据库, 期货Tick 订阅失败')
return
# 订阅合约
print self.contractsDict.values() # 打印所有订阅合约
for contract in self.contractsDict.values():
try:
# print contract
req = VtSubscribeReq()
req.symbol = contract
self.mainEngine.subscribe(req, 'CTP')
except:
self.writeCtaLog(u'期货Tick , 合约%s 订阅失败' %(contract))
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
self.writeCtaLog(u'期货Tick 订阅成功')
#----------------------------------------------------------------------
    def stopAll(self):
        """Stop recording: detach the tick handler from the event engine."""
        # Unsubscribe the tick event handler
        self.eventEngine.unregister(EVENT_TICK, self.procecssTickEvent)
        self.writeCtaLog(u'期货Tick 取消订阅')
#----------------------------------------------------------------------
    def insertTick(self, tick, symbol):
        """Insert one tick record for *symbol* into the tick database."""
        self.ctaEngine.insertData(self.tickDbName, symbol, tick)
#----------------------------------------------------------------------
    def insertBar(self, bar, symbol):
        """Insert one bar record for *symbol* into the bar database.

        NOTE(review): ``self.barDbName`` is commented out at class level
        (``# barDbName = MINUTE_DB_NAME``), so calling this would raise
        AttributeError — confirm before use.
        """
        self.ctaEngine.insertData(self.barDbName, symbol, bar)
#----------------------------------------------------------------------
    def procecssTickEvent(self, event):
        """Handle a market-tick event pushed by the event engine.

        The misspelled name ('procecss') is kept intentionally: startAll and
        stopAll register/unregister this exact attribute.
        """
        tick = event.dict_['data']
        self.onTick(tick)
#--------------------------------------
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/bar/error_x/_arrayminussrc.py
|
Python
|
mit
| 429 | 0 |
import _plotly_utils.basevalidators
class ArrayminussrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``bar.error_x.arrayminussrc`` property.

    (The class statement and ``parent_name`` keyword were split across
    lines by a transcription error; reassembled.)
    """

    def __init__(
        self, plotly_name="arrayminussrc", parent_name="bar.error_x", **kwargs
    ):
        super(ArrayminussrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
lizardsystem/lizard-neerslagradar
|
lizard_neerslagradar/management/commands/create_geotiffs.py
|
Python
|
gpl-3.0
| 2,139 | 0.002338 |
from optparse import make_option
from optparse import OptionParser
import logging
#import os
#import sys
import contextlib
#import hashlib
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
#from django.db.models import Q
import dateutil
import netCDF4
from lizard_neerslagradar import netcdf
logger = logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that renders one geotiff per 5-minute timestep
    from the radar.nc file."""

    args = ""
    help = "Create a geotiff per timestep from the radar.nc file."

    option_list = BaseCommand.option_list + (
        make_option(
            "--from", action="store", type="string",
            dest="from_", default="2011-01-07",
            help="Generate geotiffs starting from this datetime. "
                 "Use a string in the format YYYY-MM-DD HH:MM "
                 "(fuzzy substrings are allowed)"),
        make_option("--skip-existing", action="store_true",
                    dest="skip_existing", default=False,
                    help="Skip existing geotiffs"),
    )

    def handle(self, *args, **options):
        """Build the 5-minute timestep list and write a geotiff per step."""
        parser = OptionParser(option_list=self.option_list)
        (options, args) = parser.parse_args()
        # The options are parsed but deliberately not honored (yet).
        logger.warn("IGNORED from=%s", options.from_)
        logger.warn("IGNORED skip_existing=%s", options.skip_existing)
        time_from = dateutil.parser.parse('2011-01-07T00:00:00.000Z')
        time_to = dateutil.parser.parse('2011-01-08T00:00:00.000Z')
        # One timestep every 5 minutes, inclusive of the start time.
        times_list = [time_from]
        if time_to:
            interval = datetime.timedelta(minutes=5)
            time = time_from
            while time < time_to:
                time += interval
                times_list.append(time)
        nc = netCDF4.Dataset(settings.RADAR_NC_PATH, 'r')
        with contextlib.closing(nc):
            for time in times_list:
                try:
                    path = netcdf.time_2_path(time)
                    netcdf.mk_geotiff(nc, time, path)
                    logger.info('Created geotiff for {}'.format(time))
                except Exception:
                    # Narrowed from a bare ``except:`` so Ctrl-C still works.
                    logger.exception(
                        'While creating geotiff for {}'.format(time))
wexi/python-for-android
|
pythonforandroid/recipes/libsecp256k1/__init__.py
|
Python
|
mit
| 1,139 | 0 |
from pythonforandroid.toolchain import shprint, current_directory
from pythonforandroid.recipe import Recipe
from multiprocessing import cpu_count
from os.path import exists
import sh
class LibSecp256k1Recipe(Recipe):
    """python-for-android recipe that builds libsecp256k1 as a shared
    library with the recovery/experimental/ECDH modules enabled."""

    url = 'https://github.com/bitcoin-core/secp256k1/archive/master.zip'

    def build_arch(self, arch):
        super(LibSecp256k1Recipe, self).build_arch(arch)
        env = self.get_recipe_env(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            # Generate the configure script on a fresh checkout.
            if not exists('configure'):
                shprint(sh.Command('./autogen.sh'), _env=env)
            # ('./configure' and '--host=' were split by a transcription
            # error in the source; reassembled.)
            shprint(
                sh.Command('./configure'),
                '--host=' + arch.toolchain_prefix,
                '--prefix=' + self.ctx.get_python_install_dir(),
                '--enable-shared',
                '--enable-module-recovery',
                '--enable-experimental',
                '--enable-module-ecdh',
                _env=env)
            shprint(sh.make, '-j' + str(cpu_count()), _env=env)
            libs = ['.libs/libsecp256k1.so']
            self.install_libs(arch, *libs)


recipe = LibSecp256k1Recipe()
|
astronomeralex/morphology-software
|
morphology.py
|
Python
|
mit
| 3,255 | 0.009831 |
import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
    """
    Calculates the concentration parameter
    C = 5 * log10(r_80 / r_20)

    Inputs:
    radii -- 1d array of aperture photometry radii
    phot -- 1d array of aperture photometry fluxes
    eta_radius -- eta level that defines the total-flux radius
    eta_radius_factor -- multiple of the eta radius used as the total-flux
        aperture
    interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
        Some options are linear, quadratic, and cubic.
    add_zero -- add a 0 radius and zero flux point to their respective arrays
        to help with interpolation at small radii; should only matter for
        quadratic or cubic interpolation
    """
    assert len(radii) == len(phot)
    assert np.all(radii > 0)
    assert np.all(phot > 0)
    if add_zero:
        radii = np.insert(radii, 0, 0)
        phot = np.insert(phot, 0, 0)
    eta_vals = eta(radii, phot)
    if np.any(eta_vals < 0.2):
        eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
        eta_r = eta_radius_factor * eta_interp(eta_radius)
    else:
        warnings.warn("eta is never less than " + str(eta_radius) +
                      ". Using lowest eta value as proxy")
        eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
    phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
    # Total flux: curve of growth at the eta radius, or the largest measured
    # flux if that radius falls outside the data.  (This conditional and the
    # assert below were split by a transcription error; reassembled.)
    if eta_r < np.max(radii):
        maxphot = phot_interp(eta_r)
    else:
        maxphot = np.max(phot)
    norm_phot = phot / maxphot
    radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
    r20 = radius_interp(0.2)
    r80 = radius_interp(0.8)
    assert r20 < r80 < np.max(radii)
    c = 5 * np.log10(r80 / r20)
    return c
def eta(radii, phot):
    """
    eta = I(r) / \\bar{I}(<r)

    radii -- 1d array of aperture photometry radii
    phot -- 1d array of aperture photometry fluxes

    this is currently calculated quite naively, and probably could be done
    better
    """
    areas = np.pi * radii**2
    ring_areas = np.ediff1d(areas, to_begin=areas[0])
    # Mean surface brightness interior to each radius.
    mean_sb = phot / areas
    # Surface brightness in each annulus, then averaged onto the radii.
    ring_sb = np.ediff1d(phot, to_begin=phot[0]) / ring_areas
    local_sb = np.empty_like(ring_sb)
    local_sb[:-1] = (ring_sb[:-1] + ring_sb[1:]) / 2
    local_sb[-1] = ring_sb[-1]
    return local_sb / mean_sb
def find_eta(eta_val, radii, phot):
    """Invert the eta profile: return the radius at which eta == eta_val."""
    radius_of_eta = interp.interp1d(eta(radii, phot), radii)
    return radius_of_eta(eta_val)
def snr(name):
    """Estimate the signal-to-noise ratio for object *name*, reading
    ``<name>_bs.fits`` (background-subtracted image) and ``<name>.apphot``
    (aperture photometry table)."""
    # Image uncertainty from the median absolute deviation (MAD).
    image = fits.open(name + '_bs.fits')[0].data
    median_level = np.median(image)
    noise = np.median(np.abs(image - median_level))
    # Curve of growth from the aperture photometry file.
    table = np.loadtxt(name + ".apphot", usecols=[0, 1])
    radii = table[:, 0]
    phot = table[:, 1]
    # Total-flux radius: 1.5x the eta=0.2 radius, clamped to the data range;
    # fall back to 1.0 when the eta profile cannot be inverted.
    try:
        eta_rad = find_eta(0.2, radii, phot)
        if eta_rad > np.max(radii) / 1.5:
            eta_rad = np.max(radii) / 1.5
    except ValueError:
        eta_rad = 1.0
    total_phot = interp.interp1d(radii, phot)(1.5 * eta_rad)
    return total_phot / np.sqrt(np.pi * (1.5 * eta_rad)**2 * noise**2)
|
SOCR/HTML5_WebSite
|
HTML5/BrainPainter/X/lib/closure-library/closure/bin/build/depswriter.py
|
Python
|
lgpl-3.0
| 6,203 | 0.009028 |
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates out a Closure deps.js file given a list of JavaScript sources.
Paths can be specified as arguments or (more commonly) specifying trees
with the flags (call with --help for descriptions).
Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""
import logging
import optparse
import os
import posixpath
import shlex
import sys
import source
import treescan
def MakeDepsFile(source_map):
  """Make a generated deps file.

  Args:
    source_map: A dict map of the source path to source.Source object.

  Returns:
    str, A generated deps file source.
  """

  lines = []
  # Write in path alphabetical order.  ``sorted`` replaces the Python-2-only
  # ``keys()`` + in-place ``sort()`` and behaves identically.
  for path in sorted(source_map):
    js_source = source_map[path]
    # We don't need to add entries that don't provide anything.
    if js_source.provides:
      lines.append(_GetDepsLine(path, js_source))

  return ''.join(lines)
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = list(js_source.provides)
provides.sort()
requires = list(js_source.requires)
requires.sort()
return 'goog.addDependency(\'%s\', %s, %s);\n' % (p
|
ath, provides, requires)
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
parser.add_option('--root',
dest='roots',
|
default=[],
action='append',
help='A root directory to scan for JS source files. '
'Paths of JS files in generated deps file will be '
'relative to this path. This flag may be specified '
'multiple times.')
parser.add_option('--root_with_prefix',
dest='roots_with_prefix',
default=[],
action='append',
help='A root directory to scan for JS source files, plus '
'a prefix (if either contains a space, surround with '
'quotes). Paths in generated deps file will be relative '
'to the root, but preceded by the prefix. This flag '
'may be specified multiple times.')
parser.add_option('--path_with_depspath',
dest='paths_with_depspath',
default=[],
action='append',
help='A path to a source file and an alternate path to '
'the file in the generated deps file (if either contains '
'a space, surround with whitespace). This flag may be '
'specified multiple times.')
return parser
def _NormalizePathSeparators(path):
"""Replaces OS-specific path separators with POSIX-style slashes.
Args:
path: str, A file path.
Returns:
str, The path with any OS-specific path separators (such as backslash on
Windows) replaced with URL-compatible forward slashes. A no-op on systems
that use POSIX paths.
"""
return path.replace(os.sep, posixpath.sep)
def _GetRelativePathToSourceDict(root, prefix=''):
  """Scans a top root directory for .js sources.

  Args:
    root: str, Root directory.
    prefix: str, Prefix for returned paths.

  Returns:
    dict, A map of relative paths (with prefix, if given), to source.Source
    objects.
  """
  # Remember and restore the cwd when we're done. We work from the root so
  # that paths are relative from the root.  try/finally guarantees the cwd
  # is restored even when scanning or reading a file raises.
  start_wd = os.getcwd()
  os.chdir(root)
  try:
    path_to_source = {}
    for path in treescan.ScanTreeForJsFiles('.'):
      prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
      path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))
  finally:
    os.chdir(start_wd)

  return path_to_source
def _GetPair(s):
"""Return a string as a shell-parsed tuple. Two values expected."""
try:
# shlex uses '\' as an escape character, so they must be escaped.
s = s.replace('\\', '\\\\')
first, second = shlex.split(s)
return (first, second)
except:
raise Exception('Unable to parse input line as a pair: %s' % s)
def main():
  """CLI frontend to MakeDepsFile."""
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()

  path_to_source = {}

  # Roots without prefixes
  for root in options.roots:
    path_to_source.update(_GetRelativePathToSourceDict(root))

  # Roots with prefixes
  for root_and_prefix in options.roots_with_prefix:
    root, prefix = _GetPair(root_and_prefix)
    path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))

  # Source paths
  for path in args:
    path_to_source[path] = source.Source(source.GetFileContents(path))

  # Source paths with alternate deps paths
  for path_with_depspath in options.paths_with_depspath:
    srcpath, depspath = _GetPair(path_with_depspath)
    path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))

  # Make our output pipe.  Close the file (but never stdout) when done so
  # the output is flushed deterministically even on error.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  try:
    out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
    out.write('// Please do not edit.\n')
    out.write(MakeDepsFile(path_to_source))
  finally:
    if out is not sys.stdout:
      out.close()


if __name__ == '__main__':
  main()
|
chrisndodge/edx-platform
|
common/lib/xmodule/xmodule/capa_base.py
|
Python
|
agpl-3.0
| 62,222 | 0.002588 |
"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
import re
# We don't want to force a dependency on datadog, so make the import conditional
try:
import dogstats_wrapper as dog_stats_api
except ImportError:
dog_stats_api = None
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
from .progress import Progress
from xmodule.exceptions import NotFoundError
from xblock.fields import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER
from django.conf import settings
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20

# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000


def randomization_bin(seed, problem_id):
    """
    Pick a randomization bin for the problem given the user's seed and a problem id.

    We do this because we only want e.g. 20 randomizations of a problem to make analytics
    interesting. To avoid having sets of students that always get the same problems,
    we'll combine the system's per-student seed with the problem id in picking the bin.
    """
    r_hash = hashlib.sha1()
    # Encode explicitly: hashlib requires bytes on Python 3, and this keeps
    # the digest identical to the old str-based behavior on Python 2.
    r_hash.update(str(seed).encode('utf-8'))
    r_hash.update(str(problem_id).encode('utf-8'))
    # get the first few digits of the hash, convert to an int, then mod.
    return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
class Randomization(String):
    """
    Define a field to store how to randomize a problem.
    """
    def from_json(self, value):
        """Translate legacy boolean-ish serialized values onto the
        RANDOMIZATION constants; pass anything else through unchanged."""
        if value == "" or value == "true":
            return RANDOMIZATION.ALWAYS
        if value == "false":
            return RANDOMIZATION.PER_STUDENT
        return value

    to_json = from_json
class ComplexEncoder(json.JSONEncoder):
    """
    Extend the JSON encoder to correctly handle complex numbers
    """
    def default(self, obj):
        """
        Print a nicely formatted complex number, or default to the JSON encoder
        """
        if not isinstance(obj, complex):
            return json.JSONEncoder.default(self, obj)
        # e.g. (1+2j) -> "1+2*j"; %g formatting matches the old str.format.
        return u"%.7g%+.7g*j" % (obj.real, obj.imag)
class CapaFields(object):
"""
Define the possible fields for a Capa problem
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizo
|
ntal navigation at the top of the page."),
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_
|
with_default for those
default=_("Blank Advanced Problem")
)
attempts = Integer(
help=_("Number of attempts taken by the student on this problem"),
default=0,
scope=Scope.user_state)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
showanswer = String(
display_name=_("Show Answer"),
help=_("Defines when to show the answer to the problem. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=SHOWANSWER.FINISHED,
values=[
{"display_name": _("Always"), "value": SHOWANSWER.ALWAYS},
{"display_name": _("Answered"), "value": SHOWANSWER.ANSWERED},
{"display_name": _("Attempted"), "value": SHOWANSWER.ATTEMPTED},
{"display_name": _("Closed"), "value": SHOWANSWER.CLOSED},
{"display_name": _("Finished"), "value": SHOWANSWER.FINISHED},
{"display_name": _("Correct or Past Due"), "value": SHOWANSWER.CORRECT_OR_PAST_DUE},
{"display_name": _("Past Due"), "value": SHOWANSWER.PAST_DUE},
{"display_name": _("Never"), "value": SHOWANSWER.NEVER}]
)
force_save_button = Boolean(
help=_("Whether to force the save button to appear on the page"),
scope=Scope.settings,
default=False
)
reset_key = "DEFAULT_SHOW_RESET_BUTTON"
default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
show_reset_button = Boolean(
display_name=_("Show Reset Button"),
help=_("Determines whether a 'Reset' button is shown so the user may reset their answer. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default=default_reset_button
)
rerandomize = Randomization(
display_name=_("Randomization"),
help=_(
'Defines when to randomize the variables specified in the associated Python script. '
'For problems that do not randomize values, specify \"Never\". '
),
default=RANDOMIZATION.NEVER,
scope=Scope.settings,
values=[
{"display_name": _("Always"), "value": RANDOMIZATION.ALWAYS},
{"display_name": _("On Reset"), "value": RANDOMIZATION.ONRESET},
{"display_name": _("Never"), "value": RANDOMIZATION.NEVER},
{"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT}
]
)
data = String(help=_("XML data for the problem"), scope=Scope.content, default="<problem></problem>")
correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
scope=Scope.user_state, default={})
input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state)
seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state)
last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state)
submission_wait_seconds = Integer(
display_name=_("Timer Between Attempts"),
help=_("Seconds a student must wait between submissions for a problem with multiple attempts."),
scope=Scope.settings,
default=0)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. "
"If the value is not set, each response field in the problem is worth one point."),
values={"min": 0, "step": .1},
scope=Scope.settings
)
markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings)
source_code = String(
help=_("Source code for LaTeX and Word problems. This feature is not well-supported."),
scope=Scope.settings
)
text_customization = Dict(
help=_("String customization substitutions for particular locations"),
scope=Scope.settings
# TODO: someday it should be possible to not duplicate this definition here
# and in inheritance.py
)
use_latex_compiler = Boolean(
help=_("Enable LaTeX templates?"),
default=False,
scope=Scope.settings
)
matlab_api_key = String(
display_name=_("Matlab API key"),
help=_("Enter the API key provided by MathWorks for
|
gramps-project/gramps
|
gramps/gui/widgets/grampletbar.py
|
Python
|
gpl-2.0
| 28,336 | 0.002188 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2011 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Module that implements the gramplet bar fuctionality.
"""
#-------------------------------------------------------------------------
#
# Set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger('.grampletbar')
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import time
import os
import configparser
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import URL_MANUAL_PAGE, URL_WIKISTRING, VERSION_DIR
from gramps.gen.config import config
from gramps.gen.constfunc import win
from ..managedwindow import ManagedWindow
from ..display import display_help, display_url
from .grampletpane import (AVAILABLE_GRAMPLETS,
GET_AVAILABLE_GRAMPLETS,
GET_GRAMPLET_LIST,
get_gramplet_opts,
get_gramplet_options_by_name,
make_requested_gramplet,
GuiGramplet)
from .undoablebuffer import UndoableBuffer
from ..utils import is_right_click
from ..dialog import QuestionDialog
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Wiki manual destinations used by the help entries of the gramplet bar.
# (Reassembled: the WIKI_HELP_GRAMPLETBAR line was split mid-identifier.)
WIKI_HELP_PAGE = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Gramplets'
WIKI_HELP_GRAMPLETBAR = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Main_Window#Gramplet_Bar_Menu'
WIKI_HELP_ABOUT_GRAMPLETS = URL_WIKISTRING + URL_MANUAL_PAGE + '_-_Gramplets#What_is_a_Gramplet'
NL = "\n"
#-------------------------------------------------------------------------
#
# GrampletBar class
#
#-------------------------------------------------------------------------
class GrampletBar(Gtk.Notebook):
"""
A class which defines the graphical representation of the GrampletBar.
"""
    def __init__(self, dbstate, uistate, pageview, configfile, defaults):
        """
        Initialize the gramplet bar and restore its saved layout.

        :param dbstate: database state object
        :param uistate: UI state object
        :param pageview: the page view this bar is attached to
        :param configfile: base name of the .ini file holding the bar layout
        :param defaults: gramplet names used when no config file exists yet
        """
        Gtk.Notebook.__init__(self)
        self.dbstate = dbstate
        self.uistate = uistate
        self.pageview = pageview
        # Per-view configuration file living in the Gramps version directory.
        self.configfile = os.path.join(VERSION_DIR, "%s.ini" % configfile)
        self.defaults = defaults
        self.detached_gramplets = []
        self.empty = False
        self.close_buttons = []
        self.set_group_name("grampletbar")
        self.set_show_border(False)
        self.set_scrollable(True)
        # Drop-down button at the end of the tab row opening the bar menu.
        book_button = Gtk.Button()
        # Arrow is too small unless in a box
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        arrow = Gtk.Arrow(arrow_type=Gtk.ArrowType.DOWN,
                          shadow_type=Gtk.ShadowType.NONE)
        arrow.show()
        box.add(arrow)
        box.show()
        book_button.add(box)
        book_button.set_relief(Gtk.ReliefStyle.NONE)
        book_button.connect('clicked', self.__button_clicked)
        book_button.set_property("tooltip-text", _("Gramplet Bar Menu"))
        book_button.show()
        self.set_action_widget(book_button, Gtk.PackType.END)
        self.connect('page-added', self.__page_added)
        self.connect('page-removed', self.__page_removed)
        self.connect('create-window', self.__create_window)
        # Restore the gramplets recorded in the configuration file
        # (or the defaults when no file exists yet).
        config_settings, opts_list = self.__load(defaults)
        opts_list.sort(key=lambda opt: opt["page"])
        for opts in opts_list:
            if opts["name"] in AVAILABLE_GRAMPLETS():
                all_opts = get_gramplet_opts(opts["name"], opts)
                gramplet = make_requested_gramplet(TabGramplet, self, all_opts,
                                                   self.dbstate, self.uistate)
                if gramplet:
                    self.__add_tab(gramplet)
        if len(opts_list) == 0:
            self.empty = True
            self.__create_empty_tab()
        if config_settings[0]:
            self.show()
        self.set_current_page(config_settings[1])
        uistate.connect('grampletbar-close-changed', self.cb_close_changed)
        # Connect after gramplets added to prevent making them active
        self.connect('switch-page', self.__switch_page)
def _get_config_setting(self, configparser, section, setting, fn=None):
"""
Get a section.setting value from the config parser.
Takes a configparser instance, a section, a setting, and
optionally a post-processing function (typically int).
Always returns a value of the appropriate type.
"""
value = ""
try:
value = configparser.get(section, setting)
value = value.strip()
if fn:
value = fn(value)
except:
if fn:
value = fn()
else:
value = ""
return value
def __load(self, defaults):
"""
Load the gramplets from the configuration file.
"""
retval = []
visible = True
default_page = 0
filename = self.configfile
if filename and os.path.exists(filename):
cp = configparser.ConfigParser()
try:
cp.read(filename, encoding='utf-8')
except:
pass
for sec in cp.sections():
if sec == "Bar Options":
if "visible" in cp.options(sec):
visible = self._get_config_setting(cp, sec, "visible") == "True"
if "page" in cp.options(sec):
default_page = self._get_config_setting(cp, sec, "page", int)
else:
data = {}
for opt in cp.options(sec):
if opt.startswith("data["):
temp = data.get("data", {})
#temp.append(self._get_config_setting(cp, sec, opt))
pos = int(opt[5:-1])
temp[pos] = self._get_config_setting(cp, sec, opt)
data["data"] = temp
else:
data[opt] = self._get_config_setting(cp, sec, opt)
if "data" in data:
data["data"] = [data["data"][key]
for key in sorted(data["data"].keys())]
if "name" not in data:
data["name"] = "Unnamed Gramplet"
data["tname"] = _("Unnamed Gramplet")
retval.append(data)
else:
# give defaults as currently known
for name in defaults:
if name in AVAILABLE_GRAMPLETS():
retval.append(GET_AVAILABLE_GRAMPLETS(name))
return ((visible, default_page), retval)
def __save(self):
|
hbenaouich/Learning-Python
|
class-1/ex10_confParse.py
|
Python
|
apache-2.0
| 760 | 0.009211 |
#!/usr/bin/env python
import re
from ciscoconfparse import CiscoConfParse
def main():
'''
using the ciscoconfparse to
|
find the crypto maps that are not using AES
'''
cisco_file = 'cisco_ipsec.txt'
cisco_cfg = CiscoConfParse(cisco_file)
crypto_maps = cisco_cfg.find_objects_wo_child(parentspec=r"^crypto map CRYPTO", childspec=r"AES")
print "\n Crypto M
|
aps not using AES:"
for entry in crypto_maps:
for child in entry.children:
if 'transform' in child.text:
match = re.search(r"set transform-set (.*)$", child.text)
encryption = match.group(1)
print " {0} >>> {1}".format(entry.text.strip(), encryption)
print
if __name__ == "__main__":
main()
|
nathanhilbert/FPA_Core
|
openspending/forum/management/models.py
|
Python
|
agpl-3.0
| 8,135 | 0 |
# -*- coding: utf-8 -*-
"""
flaskbb.management.models
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains all management related models.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from wtforms import (TextField, IntegerField, FloatField, BooleanField,
SelectField, SelectMultipleField, validators)
from flask_wtf import Form
from openspending.forum._compat import max_integer, text_type, iteritems
from openspending.core import db, cache
from openspending.forum.utils.database import CRUDMixin
class SettingsGroup(db.Model, CRUDMixin):
    """A named group of settings displayed together in the admin UI."""

    __tablename__ = "forum_settingsgroup"

    # Machine-readable identifier of the group.
    key = db.Column(db.String(255), primary_key=True)
    # Human-readable group title.
    name = db.Column(db.String(255), nullable=False)
    description = db.Column(db.Text, nullable=False)
    # One-to-many: deleting a group also deletes its settings.
    settings = db.relationship("Setting", lazy="dynamic", backref="group",
                               cascade="all, delete-orphan")
class Setting(db.Model, CRUDMixin):
    """A single persisted configuration value.

    Values are pickled, so any Python object can be stored. Settings know
    how to render themselves as WTForms fields via :meth:`get_form`.
    """

    __tablename__ = "forum_settings"

    key = db.Column(db.String(255), primary_key=True)
    value = db.Column(db.PickleType, nullable=False)
    settingsgroup = db.Column(db.String,
                              db.ForeignKey('forum_settingsgroup.key',
                                            use_alter=True,
                                            name="fk_settingsgroup"),
                              nullable=False)

    # The name (displayed in the form)
    name = db.Column(db.String(200), nullable=False)

    # The description (displayed in the form)
    description = db.Column(db.Text, nullable=False)

    # Available types: string, integer, float, boolean, select, selectmultiple
    value_type = db.Column(db.String(20), nullable=False)

    # Extra attributes, e.g. validation constraints (min, max length...).
    # For Select*Fields required: choices
    extra = db.Column(db.PickleType)

    @classmethod
    def get_form(cls, group):
        """Returns a Form for all settings found in :class:`SettingsGroup`.

        :param group: The settingsgroup. It is used to get the settings
                      which are in the specified group.
        """
        class SettingsForm(Form):
            pass

        # now parse the settings in this group
        for setting in group.settings:
            field_validators = []

            if setting.value_type in ("integer", "float"):
                validator_class = validators.NumberRange
            elif setting.value_type == "string":
                validator_class = validators.Length

            # generate the validators
            # NOTE(review): for other value types ``validator_class`` is
            # unbound, so "min"/"max" in ``extra`` would raise NameError.
            if "min" in setting.extra:
                # Min number validator
                field_validators.append(
                    validator_class(min=setting.extra["min"])
                )
            if "max" in setting.extra:
                # Max number validator
                field_validators.append(
                    validator_class(max=setting.extra["max"])
                )

            # Generate the fields based on value_type
            # IntegerField
            if setting.value_type == "integer":
                setattr(
                    SettingsForm, setting.key,
                    IntegerField(setting.name, validators=field_validators,
                                 description=setting.description)
                )
            # FloatField
            elif setting.value_type == "float":
                setattr(
                    SettingsForm, setting.key,
                    FloatField(setting.name, validators=field_validators,
                               description=setting.description)
                )
            # TextField
            elif setting.value_type == "string":
                setattr(
                    SettingsForm, setting.key,
                    TextField(setting.name, validators=field_validators,
                              description=setting.description)
                )
            # SelectMultipleField
            elif setting.value_type == "selectmultiple":
                # if no coerce is found, it will fallback to unicode
                if "coerce" in setting.extra:
                    coerce_to = setting.extra['coerce']
                else:
                    coerce_to = text_type
                setattr(
                    SettingsForm, setting.key,
                    SelectMultipleField(
                        setting.name,
                        choices=setting.extra['choices'](),
                        coerce=coerce_to,
                        description=setting.description
                    )
                )
            # SelectField
            elif setting.value_type == "select":
                # if no coerce is found, it will fallback to unicode
                if "coerce" in setting.extra:
                    coerce_to = setting.extra['coerce']
                else:
                    coerce_to = text_type
                setattr(
                    SettingsForm, setting.key,
                    SelectField(
                        setting.name,
                        coerce=coerce_to,
                        choices=setting.extra['choices'](),
                        description=setting.description)
                )
            # BooleanField
            elif setting.value_type == "boolean":
                setattr(
                    SettingsForm, setting.key,
                    BooleanField(setting.name, description=setting.description)
                )

        return SettingsForm

    @classmethod
    def get_all(cls):
        """Return all settings."""
        return cls.query.all()

    @classmethod
    def update(cls, settings, app=None):
        """Updates the cache and stores the changes in the database.

        :param settings: A dictionary with setting items.
        :param app: Unused; kept for backwards compatibility.
        """
        # update the database
        for key, value in iteritems(settings):
            setting = cls.query.filter(Setting.key == key.lower()).first()
            setting.value = value
            db.session.add(setting)
            db.session.commit()

        cls.invalidate_cache()

    @classmethod
    def get_settings(cls, from_group=None):
        """This will return all settings with the key as the key for the dict
        and the values are packed again in a dict which contains
        the remaining attributes.

        :param from_group: Optionally - Returns only the settings from a group.
        """
        result = None
        if from_group is not None:
            result = from_group.settings
        else:
            result = cls.query.all()

        settings = {}
        for setting in result:
            settings[setting.key] = {
                'name': setting.name,
                'description': setting.description,
                'value': setting.value,
                'value_type': setting.value_type,
                'extra': setting.extra
            }

        return settings

    @classmethod
    @cache.memoize(timeout=max_integer)
    def as_dict(cls, from_group=None, upper=True):
        """Returns all settings as a dict. This method is cached. If you want
        to invalidate the cache, simply execute ``self.invalidate_cache()``.

        :param from_group: Returns only the settings from the group as a dict.
        :param upper: If upper is ``True``, the key will use upper-case
                      letters. Defaults to ``True``.
        """
        settings = {}
        result = None
        if from_group is not None:
            result = SettingsGroup.query.filter_by(key=from_group).\
                first_or_404()
            result = result.settings
        else:
            # (Removed a leftover debug ``print(Setting.query)`` here.)
            result = cls.query.all()

        for setting in result:
            if upper:
                setting_key = setting.key.upper()
            else:
                setting_key = setting.key
            settings[setting_key] = setting.value

        return settings

    @classmethod
    def invalidate_cache(cls):
        """Invalidates this objects cached metadata."""
        cache.delete_memoized(cls.as_dict, cls)
|
branchard/ludacity
|
manage.py
|
Python
|
gpl-2.0
| 253 | 0 |
# -*- coding: utf-8 -*-
"""Django management entry point for the ludacity project."""
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ludacity.settings")

    # Imported lazily so DJANGO_SETTINGS_MODULE is set first.
    # (Reassembled: this import line was split mid-identifier.)
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
ingwinlu/python-twitch
|
twitch/api/v3/blocks.py
|
Python
|
gpl-3.0
| 430 | 0 |
# -*- encoding: utf-8 -*-
# https://github.com/justintv/Twitch-API/blob/master/v3_resources/blocks.md
from twitch.queries import query
# Needs authentication
@query
def by_name(user):
    """Fetch the block list for *user*. Not implemented yet."""
    raise NotImplementedError
# Needs authentication; uses HTTP PUT
@query
def add_block(user, target):
    """Add *target* to *user*'s block list. Not implemented yet."""
    raise NotImplementedError
# Needs authentication; uses HTTP DELETE
@query
def del_block(user, target):
    """Remove *target* from *user*'s block list. Not implemented yet."""
    raise NotImplementedError
| |
daicang/Leetcode-solutions
|
087-scramble-string.py
|
Python
|
mit
| 759 | 0.00527 |
class Solution(object):
    def isScramble(self, s1, s2):
        """
        Decide whether *s2* is a scramble of *s1*.

        A scramble is obtained by recursively splitting the string into
        two non-empty parts and optionally swapping them.

        :type s1: str
        :type s2: str
        :rtype: bool
        """
        from collections import Counter

        if len(s1) != len(s2):
            return False

        # Memoize subproblems: without this the recursion is exponential
        # in the worst case; pairs of substrings repeat heavily.
        memo = {}

        def scramble(a, b):
            key = (a, b)
            if key in memo:
                return memo[key]
            if a == b:
                result = True
            elif Counter(a) != Counter(b):
                # Quick reject: scrambling preserves the character multiset.
                result = False
            else:
                result = False
                for i in range(1, len(a)):
                    # Case 1: halves kept in order; case 2: halves swapped.
                    if (scramble(a[:i], b[:i]) and scramble(a[i:], b[i:])) or \
                       (scramble(a[:i], b[-i:]) and scramble(a[i:], b[:-i])):
                        result = True
                        break
            memo[key] = result
            return result

        return scramble(s1, s2)
s = Solution()
inputs = [
["great", "rgeat"],
["abcde"
|
, "caebd"]
]
for i in inputs:
print s.isScramble(*i)
|
dontnod/weblate
|
weblate/wladmin/models.py
|
Python
|
gpl-3.0
| 6,438 | 0.000777 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import dateutil.parser
import requests
from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy
from weblate import USER_AGENT
from weblate.auth.models import User
from weblate.trans.models import Component, Project
from weblate.utils.backup import backup, get_paper_key, initialize, make_password, prune
from weblate.utils.site import get_site_url
from weblate.utils.stats import GlobalStats
from weblate.vcs.ssh import get_key_data
class WeblateModelAdmin(ModelAdmin):
"""Customized Model Admin object."""
delete_confirmation_template = 'wladmin/delete_confirmation.html'
delete_selected_confirmation_template = 'wladmin/delete_selected_confirmation.html'
class ConfigurationErrorManager(models.Manager):
    """Manager keeping ConfigurationError rows in sync with live checks."""

    def add(self, name, message, timestamp=None):
        """Create or refresh the error identified by *name*; return it."""
        when = timezone.now() if timestamp is None else timestamp
        error, created = self.get_or_create(
            name=name, defaults={'message': message, 'timestamp': when}
        )
        if not created and (error.message != message or error.timestamp != when):
            # Existing record: write back only when something changed.
            error.message = message
            error.timestamp = when
            error.save(update_fields=['message', 'timestamp'])
        return error

    def remove(self, name):
        """Delete the stored error identified by *name*, if present."""
        self.filter(name=name).delete()
@python_2_unicode_compatible
class ConfigurationError(models.Model):
    """A stored configuration problem detected at runtime."""

    name = models.CharField(unique=True, max_length=150)
    message = models.TextField()
    timestamp = models.DateTimeField(default=timezone.now)
    # Admins can acknowledge an error without deleting it.
    ignored = models.BooleanField(default=False, db_index=True)

    objects = ConfigurationErrorManager()

    class Meta(object):
        index_together = [('ignored', 'timestamp')]

    def __str__(self):
        return self.name
# Human readable names for the known support package identifiers.
SUPPORT_NAMES = {
    'community': ugettext_lazy('Community support'),
    'hosted': ugettext_lazy('Hosted service'),
    'basic': ugettext_lazy('Basic self-hosted support'),
    'extended': ugettext_lazy('Extended self-hosted support'),
}
class SupportStatusManager(models.Manager):
    """Manager for SupportStatus records."""

    def get_current(self):
        """Return the support status with the latest expiry, or an unsaved
        community-level placeholder when none is stored yet."""
        try:
            return self.latest('expiry')
        except SupportStatus.DoesNotExist:
            return SupportStatus(name='community')
@python_2_unicode_compatible
class SupportStatus(models.Model):
    """Support subscription status as reported by the support service."""

    name = models.CharField(max_length=150)
    # Shared secret identifying this installation to the support API.
    secret = models.CharField(max_length=400)
    expiry = models.DateTimeField(db_index=True, null=True)
    in_limits = models.BooleanField(default=True)

    objects = SupportStatusManager()

    def get_verbose(self):
        """Return a human readable name for this support package."""
        return SUPPORT_NAMES.get(self.name, self.name)

    def __str__(self):
        return '{}:{}'.format(self.name, self.expiry)

    def refresh(self):
        """Submit site telemetry to the support API and store the reply.

        Updates name/expiry/in_limits from the response and makes sure a
        (disabled) BackupService entry exists for the repository the
        server offers.
        """
        stats = GlobalStats()
        data = {
            'secret': self.secret,
            'site_url': get_site_url(),
            'site_title': settings.SITE_TITLE,
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'components': Component.objects.count(),
            'languages': stats.languages,
            'source_strings': stats.source_strings,
        }
        ssh_key = get_key_data()
        if ssh_key:
            data['ssh_key'] = ssh_key['key']
        headers = {'User-Agent': USER_AGENT}
        # Explicit timeout so an unresponsive support server cannot hang
        # this request forever (requests has no default timeout).
        response = requests.request(
            'post', settings.SUPPORT_API_URL, headers=headers, data=data,
            timeout=30
        )
        response.raise_for_status()
        payload = response.json()
        self.name = payload['name']
        self.expiry = dateutil.parser.parse(payload['expiry'])
        self.in_limits = payload['in_limits']
        BackupService.objects.get_or_create(
            repository=payload['backup_repository'], defaults={"enabled": False}
        )
@python_2_unicode_compatible
class BackupService(models.Model):
    """Configuration of a single backup repository."""

    repository = models.CharField(
        max_length=500, default='', verbose_name=ugettext_lazy('Backup repository')
    )
    enabled = models.BooleanField(default=True)
    timestamp = models.DateTimeField(default=timezone.now)
    # Randomly generated passphrase protecting the repository.
    passphrase = models.CharField(max_length=100, default=make_password)
    # Printable recovery key, filled in on first initialization.
    paperkey = models.TextField()

    def __str__(self):
        return self.repository

    def last_logs(self):
        """Return the ten most recent log entries, newest first."""
        return self.backuplog_set.order_by('-timestamp')[:10]

    def ensure_init(self):
        """Initialize the repository once and store its paper key."""
        if not self.paperkey:
            log = initialize(self.repository, self.passphrase)
            self.backuplog_set.create(event='init', log=log)
            self.paperkey = get_paper_key(self.repository)
            self.save()

    def backup(self):
        """Run a backup and record its output."""
        log = backup(self.repository, self.passphrase)
        self.backuplog_set.create(event='backup', log=log)

    def prune(self):
        """Delete the oldest backups and record the output."""
        log = prune(self.repository, self.passphrase)
        self.backuplog_set.create(event='prune', log=log)
@python_2_unicode_compatible
class BackupLog(models.Model):
    """Recorded output of a single backup-service operation."""

    service = models.ForeignKey(BackupService, on_delete=models.deletion.CASCADE)
    timestamp = models.DateTimeField(default=timezone.now)
    # Which operation produced this log entry.
    event = models.CharField(
        max_length=100,
        choices=(
            ('backup', ugettext_lazy('Backup performed')),
            ('prune', ugettext_lazy('Deleted the oldest backups')),
            ('init', ugettext_lazy('Repository initialization')),
        ),
    )
    log = models.TextField()

    def __str__(self):
        return '{}:{}'.format(self.service, self.event)
|
slowrunner/RWPi
|
posie/posie-web/rwpilib/motorsClass.py
|
Python
|
gpl-3.0
| 27,889 | 0.033992 |
#!/usr/bin/python
#
# motorsClass.py MOTORS CLASS
#
# METHODS:
# motors(readingPerSec) # create instance and motor control thread
# cancel() # stop motors, close motor control thread
# drive(driveSpeed) # ramp speed to go fwd(+) or back(-) at 0-100%
# travel(distance.inInches, driveSpeed=MEDIUM) # go fwd(+) or back(-) a distance
# spin(spinSpeed) # ramp spin speed to go ccw(+) or cw(-) at 0-100%
# turn(Motors.DIRECTION) # Turn ccw(+) cw(-) to angle from 0
# stop() # come to graceful stop
# modeToStr(mode=motorsMode) # string version of motorsMode or passed mode constant
# mode() # returns Motors.STOPPED,DRIVE,TRAVEL,SPIN,TURN,STOP
# halt() # immediate stop
# currentSpeed() # numerical speed percent +/- 0-100 of minToMove to max speed
# speedToStr(speed=_currentSpeed) # returns string name or str() of param or currentSpeed
# calibrate() # find minFwdPwr, minBwdPwr,
# # minCCWDPwr, minCWPwr,
# # biasFwd, biasBwd
# waitForStopped(timeout=60) # call to wait for motion to end with timeout
# VARIABLES
# readingsPerSec
# CONSTANTS
#
# Motors.NONE,CW360,CCW360,CW180,CCW180,CW135,CCW135,CW90,CCW90,CW45,CCW45 (TURNDIRS)
# dirToStr() # returns string for Motor.TURNDIRS
#
# ### INTERNAL METHODS
# __init__(readingsPerSec=10) # initialize instance of class
# setup_motor_pins() # set up Pi Droid Alpha and GPIO
#
# ### THREAD METHODS
#
# pollMotors(tSleep=0.1) # motor control thread
#
# rampTgtCurStep(target,current,rampStep) # calculate next speed on ramp to target
# speed2Pwr(s,driveSpeed,spinSpeed) # convert speed (+/- 0-100%) to
# setMotorsPwr(lPwr,rPwr) # Apply power to motors Pwr: +/- (0-255)
# # power between (+/- 255 and minimum to move)
# control() # dispatch to control methods based on motorsMode
# controlDrive() # monitor drive mode
# controlSpin() # monitor spin mode
#
|
controlTravel() # monitor drive until distance reached
# controlTurn() # monitor spin until angle reached
# controlStop() # monitor drive or spin while stopping
# controlStopped() # routine called while motors are not running
#
# motors_off()
#
# motors_fwd()
# motors_bwd()
# motors_ccw()
# motors_cw()
# INTERNAL VARS
#
# motorsMode
# self.debugLevel 0=off 1=basic 99=all
|
import sys
# uncomment when testing below rwpilib\
#sys.path.insert(0,'..')
import PDALib
import myPDALib
import myPyLib
from myPyLib import sign, clamp
import time
import threading
import traceback
import datetime
import encoders
class Motors():
# CLASS VARS (Avail to all instances)
# Access as Motors.class_var_name
pollThreadHandle=None # the SINGLE read sensor thread for the Motors class
tSleep=0.1 # time for read_sensor thread to sleep
debugLevel=0 # set self.debugLevel (or motors.debugLevel) =99 for all, =1 for some
# Empirical settings for minimum drive to turn each wheel
# PWM_frequency dependent, PiDALib default is 490
# PWM_f RMotorMinF LMotorMinF
# 10000 215 185
# 490 83 73 <--
# 100 34 33
# 33 22 20
# RMotorMinF = 83 # no load (takes more to get right going reliably)
# LMotorMinF = 73 # no load
# RMotorMinB = 94 # no load (takes more to get right going reliably)
# LMotorMinB = 86 # no load
# Motor Pins
# SRV 6 Motor 1 Speed (PWM)
# SRV 7 Motor 2 Speed (PWM)
RMotor = 6
LMotor = 7
# DIO 12 (A4) Motor 1 Dir A (0=coast 1=F/Brake)
# DIO 13 (A5) Motor 1 Dir B (0=coast 1=R/Brake)
# DIO 14 (A6) Motor 2 Dir A (0=coast 1=F/Brake)
# DIO 15 (A7) Motor 2 Dir B (0=coast 1=R/Brake)
M1DirA = 12
M1DirB = 13
M2DirA = 14
M2DirB = 15
minFwdPwr = 145 # 83 # minimum to start moving forward
minBwdPwr = 145 # 120 # 94 # minimum to start moving backward
driveSpeed = 0 # target 0 to +/-100% of speed range
_currentSpeed = 0 # current speed at the moment, ramps up or down
rampStep = 13 # amount to change speed each time through control loop
minCCWPwr = 120 # 86 # minimum drive to spin CCW
minCWPwr = 120 # 94 # minimum drive to spin CW
biasFwd = 21 # amount of right more than left needed to go Fwd straight
biasBwd = 0 # amount of right more than left needed to go Bwd straight
maxPwr = 255
driveDistance = 0 # distance in inches fwd(+) or bwd(-)
currentDistance= 0 # how far travelled since told to travel
initialLeftCount = 0 # place to store the counter value when starting motion
initialRightCount = 0
initialMeanCount = 0
targetTime= 0 # time to stop travel (till encoders)
spinSpeed = 0 # speed to spin ccw(+) cw(-)
turnDir = 0
#Modes
STOPPED = 0
DRIVE = 1
TRAVEL = 2
SPIN = 3
TURN = 4
STOP = 5
Modes2Str = { STOPPED : 'STOPPED',
STOP : 'STOP',
DRIVE : 'DRIVE',
TRAVEL : 'TRAVEL',
SPIN : 'SPIN',
TURN : 'TURN' }
motorsMode = STOPPED
  def mode(self):
    """Return the current motors mode constant (STOPPED, DRIVE, ...)."""
    return self.motorsMode
def modeToStr(mode = motorsMode):
return Modes2Str[mode]
lastMotorsMode = STOPPED
#Speeds
NONE = 0
SLOW = 1
WALK = 5
MEDIUM = 50
FAST = 100
SpeedsToStr = {NONE : 'NONE',
SLOW : 'SLOW',
WALK : 'WALK',
MEDIUM : 'MEDIUM',
FAST : 'FAST',
-SLOW : '-SLOW',
-WALK : '-WALK',
-MEDIUM : '-MEDIUM',
-FAST : '-FAST' }
def currentSpeed():
return _currentSpeed
def speedToStr(nSpeed=_currentSpeed):
if (nSpeed in SpeedsToStr):
speedStr=SpeedsToStr[nSpeed]
else:
speedStr=str(nSpeed)
InchesPerSec = { # travel distance per second (for 24")
SLOW : 1.5,
WALK : 2.0,
MEDIUM : 3.1,
FAST : 6.7,
-SLOW : 1.5,
-WALK : 2.0,
-MEDIUM: 3.1,
-FAST : 6.7 }
MotorRampTime = 0.25 # NOT IMPLEMENTED
CCW360 = 3.15 # seconds to turn at Motors.MEDIUM
CCW180 = 1.58
CCW135 = 1.15
CCW90 = 0.84
CCW45 = 0.5
CW360 = -CCW360
CW180 = -CCW180
CW135 = -CCW135
CW90 = -CCW90 * 0.93
CW45 = -CCW45 * 0.9
NOTURN = 0
DirsToStr = {
CCW45 : 'CCW45',
CCW90 : 'CCW90',
CCW135 : 'CCW135',
CCW180 : 'CCW180',
CCW360 : 'CCW360',
CW45 : 'CW45',
CW90 : 'CW90',
CW135 : 'CW135',
CW180 : 'CW180',
CW360 : 'CW360',
NOTURN : 'NO TURN'}
  def dirToStr(self, mDir):
    """Return the name of a turn-direction constant, or '?' if unknown."""
    if (mDir in self.DirsToStr):
      strDir=self.DirsToStr[mDir]
    else:
      strDir='?'
    return strDir
# end of class vars definition
# ### encoder methods
def setInitialCounts(self):
initialLeftCount=encoders.leftCount()
initialRightCount=encoders.rightCount()
initialMeanCount=(initialLeftCount+initialRightCount)/2.0
  def distanceTraveled(self):
    """Return inches traveled (mean of both wheel encoders) since the
    initial counts were latched by setInitialCounts()."""
    currentLeftCount = encoders.leftCount()
    currentRightCount = encoders.rightCount()
    currentMeanCount = ( currentLeftCount + currentRightCount) / 2.0
    countsTraveled = (currentMeanCount - self.initialMeanCount)
    distance=countsTraveled * encoders.InchesPerCount
    if (self.debugLevel > 1):
      print "motorsClass:distanceTraveled: called"
      print "encoder status:"
      encoders.printStatus()
      print "distance traveled:", distance
    return distance
|
KaranToor/MA450
|
google-cloud-sdk/lib/surface/compute/health_checks/create/http.py
|
Python
|
apache-2.0
| 3,471 | 0.004033 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating HTTP health checks."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import health_checks_utils
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class Create(base_classes.BaseAsyncCreator):
  """Create a HTTP health check to monitor load balanced instances."""

  @staticmethod
  def Args(parser):
    # HTTP-specific flags (host, port, request path, ...) plus the flags
    # shared by every health check type (interval, timeout, thresholds).
    health_checks_utils.AddHttpRelatedCreationArgs(parser)
    health_checks_utils.AddProtocolAgnosticCreationArgs(parser, 'HTTP')

  @property
  def service(self):
    # Compute API service the insert request is issued against.
    return self.compute.healthChecks

  @property
  def method(self):
    # API method name invoked on the service.
    return 'Insert'

  @property
  def resource_type(self):
    return 'healthChecks'

  def CreateRequests(self, args):
    """Returns the request necessary for adding the health check."""
    health_check_ref = self.CreateGlobalReference(
        args.name, resource_type='healthChecks')
    proxy_header = self.messages.HTTPHealthCheck.ProxyHeaderValueValuesEnum(
        args.proxy_header)
    request = self.messages.ComputeHealthChecksInsertRequest(
        healthCheck=self.messages.HealthCheck(
            name=health_check_ref.Name(),
            description=args.description,
            type=self.messages.HealthCheck.TypeValueValuesEnum.HTTP,
            httpHealthCheck=self.messages.HTTPHealthCheck(
                host=args.host,
                port=args.port,
                portName=args.port_name,
                requestPath=args.request_path,
                proxyHeader=proxy_header),
            checkIntervalSec=args.check_interval,
            timeoutSec=args.timeout,
            healthyThreshold=args.healthy_threshold,
            unhealthyThreshold=args.unhealthy_threshold,
        ),
        project=self.project)

    return [request]
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(Create):
  """Create a HTTP health check to monitor load balanced instances."""

  @staticmethod
  def Args(parser):
    Create.Args(parser)
    # Alpha additionally allows asserting on the expected response body.
    health_checks_utils.AddHttpRelatedResponseArg(parser)

  def CreateRequests(self, args):
    """Returns the request necessary for adding the health check."""
    requests = super(CreateAlpha, self).CreateRequests(args)
    # (Reassembled: this statement was split mid-identifier in the dump.)
    requests[0].healthCheck.httpHealthCheck.response = args.response
    return requests
Create.detailed_help = {
'brief': ('Create a HTTP health check to monitor load balanced instances'),
'DESCRIPTION': """\
*{command}* is used to create a HTTP health check. HTTP health checks
monitor instances in a load balancer controlled by a target pool. All
arguments to the command are optional except for the name of the health
check. For more information on load balancing, see
[](https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/)
""",
}
|
cako/mpl_grandbudapest
|
grandbudapest.py
|
Python
|
mit
| 1,318 | 0 |
#!/usr/bin/env python
"""Showcase of the 'grandbudapest' matplotlib style on four plot types.

(Reassembled: the axes unpacking and the line-plot loop were split by
extraction artifacts.)
"""
import numpy as np
import matplotlib.pyplot as plt

plt.style.use('grandbudapest')

# Fixing random state for reproducibility
np.random.seed(1)

fig, axes = plt.subplots(ncols=2, nrows=2)
ax1, ax2, ax3, ax4 = axes.ravel()

# scatter plot (Note: `plt.scatter` doesn't use default colors)
x, y = np.random.normal(size=(2, 200))
ax1.plot(x, y, 'o')
ax1.set_title('Scatter plot')

# sinusoidal lines with colors from default color cycle
L = 2*np.pi
x = np.linspace(0, L)
ncolors = len(plt.rcParams['axes.prop_cycle'])
shift = np.linspace(0, L, ncolors, endpoint=False)
for s in shift:
    ax2.plot(x, np.sin(x + s), '-')
ax2.margins(0)
ax2.set_title('Line plot')

# bar graphs
x = np.arange(5)
y1, y2 = np.random.randint(1, 25, size=(2, 5))
width = 0.25
ax3.bar(x, y1, width)
ax3.bar(x + width, y2, width,
        color=list(plt.rcParams['axes.prop_cycle'])[2]['color'])
ax3.set_xticks(x + width)
ax3.set_xticklabels(['a', 'b', 'c', 'd', 'e'])
ax3.set(xlabel='X labels', ylabel='Y labels')

# circles with colors from default color cycle
for i, color in enumerate(plt.rcParams['axes.prop_cycle']):
    xy = np.random.normal(size=2)
    ax4.add_patch(plt.Circle(xy, radius=0.3, color=color['color']))
ax4.axis('equal')
ax4.margins(0)

fig.savefig('grandbudapest.png', bbox_inches='tight')
plt.show()
|
ashwingoldfish/eddy
|
eddy/core/commands/nodes.py
|
Python
|
gpl-3.0
| 14,989 | 0.000267 |
# -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
#                                                                        #
#  This program is distributed in the hope that it will be useful,      #
#  but WITHOUT ANY WARRANTY; without even the implied warranty of       #
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <lembo@dis.uniroma1.it> #
# - Valerio Santarelli <santarelli@dis.uniroma1.it> #
# - Domenico Fabio Savo <savo@dis.uniroma1.it> #
# - Daniele Pantaleone <pantaleone@dis.uniroma1.it> #
# - Marco Console <console@dis.uniroma1.it> #
# #
##########################################################################
from PyQt5 import QtWidgets
from eddy.core.functions.misc import first
from eddy.core.items.common import AbstractItem
class CommandNodeAdd(QtWidgets.QUndoCommand):
    """
    Undo-stack command that inserts a single node into a diagram.
    """
    def __init__(self, diagram, node):
        """
        Initialize the command.
        :type diagram: Diagram
        :type node: AbstractNode
        """
        super().__init__('add {0}'.format(node.name))
        self.diagram = diagram
        self.node = node

    def redo(self):
        """Insert the node into the diagram and notify listeners."""
        diagram = self.diagram
        diagram.addItem(self.node)
        diagram.sgnItemAdded.emit(diagram, self.node)
        diagram.sgnUpdated.emit()

    def undo(self):
        """Remove the node from the diagram and notify listeners."""
        diagram = self.diagram
        diagram.removeItem(self.node)
        diagram.sgnItemRemoved.emit(diagram, self.node)
        diagram.sgnUpdated.emit()
class CommandNodeSetDepth(QtWidgets.QUndoCommand):
    """
    Undo-stack command that alters the stacking order (Z value) of a diagram node.
    """
    def __init__(self, diagram, node, zValue):
        """
        Initialize the command.
        :type diagram: Diagram
        :type node: AbstractNode
        :type zValue: float
        """
        super().__init__('change {0} depth'.format(node.name))
        self.node = node
        self.diagram = diagram
        # Remember both the requested depth and the current one so the
        # command can be replayed in either direction.
        self.depth = {'redo': zValue, 'undo': node.zValue()}

    def redo(self):
        """Apply the new Z value."""
        self._apply('redo')

    def undo(self):
        """Restore the previous Z value."""
        self._apply('undo')

    def _apply(self, key):
        """Set the Z value stored under *key* and refresh attached edges."""
        self.node.setZValue(self.depth[key])
        self.node.updateEdges()
        self.diagram.sgnUpdated.emit()
class CommandNodeRezize(QtWidgets.QUndoCommand):
    """
    This command is used to resize nodes.

    ``data`` is a dict with 'redo' and 'undo' entries; each entry holds the
    node geometry ('background', 'selection', 'polygon'), the per-edge anchor
    positions ('anchors') and the label displacement flag ('moved') for the
    corresponding state.
    """
    def __init__(self, diagram, node, data):
        """
        Initialize the command.
        :type diagram: Diagram
        :type node: AbstractNode
        :type data: dict
        """
        super().__init__('resize {0}'.format(node.name))
        self.diagram = diagram
        self.node = node
        self.data = data
    def redo(self):
        """redo the command"""
        # TURN CACHING OFF
        # Edge pixmap caches would show stale geometry while the node changes.
        for edge in self.node.edges:
            edge.setCacheMode(AbstractItem.NoCache)
        # Restore the 'redo' geometry, then the anchors, then the label.
        self.node.background.setGeometry(self.data['redo']['background'])
        self.node.selection.setGeometry(self.data['redo']['selection'])
        self.node.polygon.setGeometry(self.data['redo']['polygon'])
        for edge, pos in self.data['redo']['anchors'].items():
            self.node.setAnchor(edge, pos)
        self.node.updateTextPos(moved=self.data['redo']['moved'])
        self.node.updateNode()
        self.node.updateEdges()
        self.node.update()
        # TURN CACHING ON
        for edge in self.node.edges:
            edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
        self.diagram.sgnUpdated.emit()
    def undo(self):
        """undo the command"""
        # Mirror of redo() using the saved 'undo' state.
        # TURN CACHING OFF
        for edge in self.node.edges:
            edge.setCacheMode(AbstractItem.NoCache)
        self.node.background.setGeometry(self.data['undo']['background'])
        self.node.selection.setGeometry(self.data['undo']['selection'])
        self.node.polygon.setGeometry(self.data['undo']['polygon'])
        for edge, pos in self.data['undo']['anchors'].items():
            self.node.setAnchor(edge, pos)
        self.node.updateTextPos(moved=self.data['undo']['moved'])
        self.node.updateNode()
        self.node.updateEdges()
        self.node.update()
        # TURN CACHING ON
        for edge in self.node.edges:
            edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
        self.diagram.sgnUpdated.emit()
class CommandNodeMove(QtWidgets.QUndoCommand):
"""
This command is used to move nodes (1 or more).
"""
def __init__(self, diagram, undo, redo):
"""
Initialize the command.
:type diagram: Diagram
:type undo: dict
:type redo: dict
"""
self._diagram = diagram
self._edges = set()
self._redo = redo
self._undo = undo
for node in self._redo['nodes']:
self._edges |= node.edges
if len(self._redo['nodes']) != 1:
name = 'move {0} nodes'.format(len(self._redo['nodes']))
else:
name = 'move {0}'.format(first(self._redo['nodes'].keys()).name)
super().__init__(name)
def redo(self):
"""redo the command"""
# Turn off caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.NoCache)
# Update edges breakpoints.
for edge, breakpoints in self._redo['edges'].items():
for i in range(len(breakpoints)):
edge.breakpoints[i] = breakpoints[i]
# Update nodes positions.
for node, data in self._redo['nodes'].items():
node.setPos(data['pos'])
# Update edge anchors.
for edge, pos in data['anchors'].items():
node.setAnchor(edge, pos)
# Update edges.
for edge in self._edges:
edge.updateEdge()
# Turn on caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.DeviceCoordinateCache)
# Emit updated signal.
self._diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# Turn off caching.
for edge in self._edges:
edge.setCacheMode(AbstractItem.NoCache)
# Update edges breakpoints.
for edge, breakpoints in self._undo['edges'].items():
for i in range(len(breakpoints)):
edge.breakpoints[i] = breakpoints[i]
|
marktoakley/LamarckiAnt
|
SCRIPTS/AMBER/symmetrise_prmtop/perm-prmtop.ff02.py
|
Python
|
gpl-3.0
| 25,578 | 0.020916 |
#!/usr/bin/env python
import os
import os.path
import sys
import string
###############################################################
## #
## Edyta Malolepsza #
## David Wales' group, University of Cambridge #
## in case of problems please send email: em427@cam.ac.uk #
## #
###############################################################
## #
## program finds in prmtop file from LEaP wrong defined order #
## of atoms in IMPROPER, permutes appropriate atoms and write #
## new prmtop file #
## #
## how to use: #
## ./perm-top.py NAME_OF_OLD_PRMTOP NAME_OF_NEW_PRMTOP #
## #
## IMPORTANT: #
## 1. please change names of terminal amino acid residues #
## according to warnings below #
## 2. please change path to libraries #
## 3. program changes the atom order ONLY for amino acid and #
## nucleic residues #
## #
###############################################################
# khs26> changed the path to use the $AMBERHOME environment variable
amberhome = os.environ["AMBERHOME"]
path = os.path.join(amberhome, "dat/leap/lib")
#########################
## some useful functions
#########################
def exchange_atoms(atom_type, a, aa, residue, dihedrals, currentAtomNumber):
    """Swap the first two atom references of every dihedral whose second
    atom is *atom_type* of *residue*.

    ``dihedrals`` is modified in place.  Atom references are stored as
    strings of (atom index)*3, following the prmtop convention; the atom
    index is the position of *atom_type* within the residue's atom list
    plus the running counter *currentAtomNumber*.
    """
    target = str((a[aa.index(residue)].index(atom_type) + currentAtomNumber) * 3)
    for entry in dihedrals:
        if entry[1] == target:
            entry[0], entry[1] = entry[1], entry[0]
def exchange_atoms_nt(atom_type, a, aa, residue, dihedrals):
    # N-terminal variant of exchange_atoms(): the coordinate index of the
    # atom is taken from ``atomIndex`` instead of being derived from a
    # running atom counter.
    # NOTE(review): relies on a module-level ``atomIndex`` sequence set up
    # by the main script before this is called -- confirm against the
    # script body (not visible here).
    find_atom = a[aa.index(residue)].index(atom_type)
    for j in range(len(dihedrals)):
        if (dihedrals[j][1]==str(atomIndex[find_atom])):
            # Swap the first two atom references of the matching dihedral.
            d1 = dihedrals[j][0]
            d2 = dihedrals[j][1]
            dihedrals[j][0] = d2
            dihedrals[j][1] = d1
def exchange_atoms_arg(a, aa, residue, dihedrals, currentAtomNumber):
    """Fix the IMPROPER dihedral of an arginine guanidinium group.

    Swaps the first two atom references of the NE-NH1 improper so that the
    permutation of the NH2 group does not produce a mismatched improper
    term.  ``dihedrals`` is modified in place.

    :param a: per-residue lists of atom names
    :param aa: residue names, parallel to ``a``
    :param residue: residue name to operate on (expects ARG atom naming)
    :param dihedrals: dihedral entries (lists of stringified 3*atom indices)
    :param currentAtomNumber: index of the residue's first atom in the topology
    """
    atoms = a[aa.index(residue)]
    ## IMPROPER responsible for trouble with NH2 group permutation:
    # Stringified coordinate indices (atom index * 3) of NE and NH1.
    ne_index = str((atoms.index('NE') + currentAtomNumber) * 3)
    nh1_index = str((atoms.index('NH1') + currentAtomNumber) * 3)
    # NOTE: the original also computed indices for CZ and NH2 but never
    # used them; that dead code has been removed.
    for entry in dihedrals:
        if entry[0] == ne_index and entry[1] == nh1_index:
            entry[0], entry[1] = entry[1], entry[0]
def exchange_atoms_ring1(a, aa, residue, dihedrals):
    """Cyclically permute the CD1/CD2 IMPROPER of an aromatic ring.

    For every dihedral that starts with CD1 followed by CD2, rotate the
    entry in place: atom 2 moves to slot 1, atom 4 (leading '-' stripped)
    moves to slot 2, and the old atom 1 (negated) moves to slot 4.
    Relies on the module-level ``currentAtomNumber`` counter.
    """
    atoms = a[aa.index(residue)]
    cd1 = str((atoms.index('CD1') + currentAtomNumber) * 3)
    cd2 = str((atoms.index('CD2') + currentAtomNumber) * 3)
    for entry in dihedrals:
        if entry[0] == cd1 and entry[1] == cd2:
            entry[0], entry[1], entry[3] = entry[1], entry[3][1:], '-' + entry[0]
def exchange_atoms_ring2(a, aa, residue, dihedrals):
    # Repair three IMPROPER terms of a six-membered aromatic ring
    # (PHE/TYR-style naming: CG, CD1/CD2, CE1/CE2, CZ).  Matching entries
    # are rotated in place: atom 4 (leading '-' stripped) moves into slot 2
    # (or slot 1 for the CG/CE1 case) and the displaced atom, negated,
    # moves into slot 4.  Relies on the module-level ``currentAtomNumber``.
    find_atom1 = a[aa.index(residue)].index('CG')
    atomNumber1 = find_atom1+currentAtomNumber
    atomNumberIndex1 = atomNumber1*3
    find_atom2 = a[aa.index(residue)].index('CE2')
    atomNumber2 = find_atom2+currentAtomNumber
    atomNumberIndex2 = atomNumber2*3
    find_atom3 = a[aa.index(residue)].index('CZ')
    atomNumber3 = find_atom3+currentAtomNumber
    atomNumberIndex3 = atomNumber3*3
    find_atom4 = a[aa.index(residue)].index('CD2')
    atomNumber4 = find_atom4+currentAtomNumber
    atomNumberIndex4 = atomNumber4*3
    find_atom5 = a[aa.index(residue)].index('CD1')
    atomNumber5 = find_atom5+currentAtomNumber
    atomNumberIndex5 = atomNumber5*3
    find_atom6 = a[aa.index(residue)].index('CE1')
    atomNumber6 = find_atom6+currentAtomNumber
    atomNumberIndex6 = atomNumber6*3
    # for j in range(len(dihedrals)): # this is ok
    #     if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
    # CZ-CD2 improper: pull atom 4 into slot 2, negate old atom 2 into slot 4.
    for j in range(len(dihedrals)):
        if ((dihedrals[j][0]==str(atomNumberIndex3)) and (dihedrals[j][1]==str(atomNumberIndex4))):
            d1 = '-'+dihedrals[j][1]
            d2 = dihedrals[j][3][1:]
            dihedrals[j][1] = d2
            dihedrals[j][3] = d1
    # CD1-CZ improper: same rotation as above.
    for j in range(len(dihedrals)):
        if ((dihedrals[j][0]==str(atomNumberIndex5)) and (dihedrals[j][1]==str(atomNumberIndex3))):
            d1 = '-'+dihedrals[j][1]
            d2 = dihedrals[j][3][1:]
            dihedrals[j][1] = d2
            dihedrals[j][3] = d1
    # CG-CE1 improper: here atom 1 (not atom 2) is rotated out to slot 4.
    for j in range(len(dihedrals)):
        if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex6))):
            ##test: debug prints comparing the IMPROPER before the
            ##test: permutation were here (left out, commented in original).
            d1 = '-'+dihedrals[j][0]
            d2 = dihedrals[j][3][1:]
            dihedrals[j][0] = d2
            dihedrals[j][3] = d1
            ##test: debug prints comparing the IMPROPER after the
            ##test: permutation were here (left out, commented in original).
def exchange_atoms_ring3(a, aa, residue, dihedrals):
    """Cyclically permute the CE1/CE2 IMPROPER of an aromatic ring.

    Same rotation as :func:`exchange_atoms_ring1`, applied to entries that
    start with CE1 followed by CE2: atom 2 moves to slot 1, atom 4
    (leading '-' stripped) to slot 2, and the old atom 1 (negated) to
    slot 4.  Relies on the module-level ``currentAtomNumber`` counter.
    """
    atoms = a[aa.index(residue)]
    ce1 = str((atoms.index('CE1') + currentAtomNumber) * 3)
    ce2 = str((atoms.index('CE2') + currentAtomNumber) * 3)
    for entry in dihedrals:
        if entry[0] == ce1 and entry[1] == ce2:
            entry[0], entry[1], entry[3] = entry[1], entry[3][1:], '-' + entry[0]
####################################
## reading all_amino02.lib library
####################################
print '\nDear user, please notice that only residues from the following libraries are taken into account:'
print ' ions94.lib'
print ' all_amino02.lib'
aalib = open("%s/all_amino02.lib" % path).read()
aa = string.split(aalib, "\n")
q1 = aa.index("!!index array str")
q2 = aa.index("!entry.ALA.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg")
aaNames = [] # amino acid names
aTypes = [] # atom types
aNames = [] # atom names
for i in range(q2-q1-1):
aaNames.append(aa[q1+1+i][2:5])
for i in range(len(aaNames)):
q1 = aa.index("!entry.%s.unit.atoms table str name str type int typex int resx int flags int seq int elmnt dbl chg" % aaNames[i])
q2 = aa.index("!entry.%s.unit.atomspertinfo table str pname str ptype int ptypex int pelmnt dbl pchg" % aaNames[i])
aT = []
aN = []
for j i
|
lhupfeldt/multiconf
|
multiconf/bits.py
|
Python
|
bsd-3-clause
| 515 | 0.001942 |
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is
|
under a BSD license, see LICENSE.TXT.
def int_to_bin_str(value, max_bits=8192):
    """Convert a non-negative int to a string representation of a bitmask
    (binary number), e.g. ``int_to_bin_str(5) == '0b0000000000000101'``.

    The rendered width is the smallest power of two that can represent
    *value*, padded to at least 16 bits (or *max_bits*, if smaller).

    :param value: non-negative integer to render
    :param max_bits: cap on the minimum-width padding
    :return: ``'0b'`` followed by the zero-padded binary digits
    """
    mask = value
    bits = 1
    # Grow the width by doubling until 'value' fits.  The original test was
    # '1 << bits < value', which stopped one doubling short for values that
    # are exact powers of two at a boundary (e.g. 1 << 16) and silently
    # dropped the top bit; '<=' fixes that off-by-one.
    while 1 << bits <= value or bits < 16 and bits < max_bits:
        bits *= 2
    rep = ''
    while bits:
        rep = ('1' if mask & 1 else '0') + rep
        bits = bits - 1
        mask = mask >> 1
    return '0b' + rep
|
pfitzer/youtube2mp3
|
youtube2mp3/__init__.py
|
Python
|
gpl-2.0
| 55 | 0 |
#
__author__ = 'Michael Pfister'
__versi
|
on__ = '1.4.0
|
'
|
SFII/cufcq-new
|
modules/linechart_module.py
|
Python
|
mit
| 5,898 | 0.000848 |
from modules.chart_module import ChartModule
import tornado.web
import logging
class LineChartModule(ChartModule):
    """Tornado UI module that renders FCQ time-series metrics as a
    Chart.js line chart."""
    def render(self, raw_data, keys, chart_id="linechart"):
        """Render the linechart template.

        :param raw_data: dict holding 'fcqs_yearterms' and 'fcqs_overtime'
        :param keys: metric names to plot (one dataset per key)
        :param chart_id: DOM id for the generated chart element
        """
        self.chart_id = chart_id
        self.chart_data = self.overtime_linechart_data(raw_data, keys)
        return self.render_string('modules/linechart.html', chart_id=self.chart_id)
    def overtime_linechart_data(self, raw_data, keys,
                                yearterms_key='fcqs_yearterms',
                                overtime_key='fcqs_overtime'):
        """Build the JSON-encoded Chart.js data object (labels + one styled
        dataset per key) for the metrics in *keys* across all yearterms."""
        def _overtime_builder(overtime_data, key):
            # Return a mapper yearterm -> rounded metric value (or None).
            def _transform_overtime_data(yearterm):
                value = overtime_data[str(yearterm)][key]
                # Grade metrics keep 3 decimals, everything else 1.
                roundto = {
                    'percent_a': 3,
                    'percent_b': 3,
                    'percent_c': 3,
                    'percent_d': 3,
                    'percent_f': 3,
                    'percent_incomplete': 3,
                    'average_grade': 3
                }.get(key, 1)
                if value is not None:
                    return round(value, roundto)
                else:
                    return None
            return _transform_overtime_data
        def _overtime_dataset_builder(key):
            # Per-metric styling: RGB color, target y axis, area fill, label.
            color = {
                'course_howmuchlearned_average': (247, 92, 3),
                'course_challenge_average': (217, 3, 104),
                'courseoverall_average': (130, 2, 99),
                'course_priorinterest_average': (4, 167, 119),
                'instructor_effectiveness_average': (247, 92, 3),
                'instructor_respect_average': (217, 3, 104),
                'instructoroverall_average': (130, 2, 99),
                'instructor_availability_average': (4, 167, 119),
                'TTT_instructoroverall_average': (197, 27, 125),
                'OTH_instructoroverall_average': (233, 163, 201),
                'TA_instructoroverall_average': (253, 224, 239),
                'GR_courseoverall_average': (77, 146, 33),
                'UD_courseoverall_average': (161, 215, 106),
                'LD_courseoverall_average': (230, 245, 106),
                'percent_a': (44, 123, 182),
                'percent_b': (171, 217, 233),
                'percent_c': (255, 255, 191),
                'percent_d': (253, 174, 97),
                'percent_f': (215, 25, 28),
                'percent_incomplete': (48, 48, 48),
                'average_grade': (48, 48, 48),
            }.get(key, (48, 48, 48))
            # Grade distributions go on axis 3, GPA on axis 2, rest on axis 1.
            yaxis_id = {
                'percent_a': 'y-axis-3',
                'percent_b': 'y-axis-3',
                'percent_c': 'y-axis-3',
                'percent_d': 'y-axis-3',
                'percent_f': 'y-axis-3',
                'percent_incomplete': 'y-axis-3',
                'average_grade': 'y-axis-2',
            }.get(key, 'y-axis-1')
            # Only the stacked grade-distribution series are area-filled.
            fill = {
                'percent_a': True,
                'percent_b': True,
                'percent_c': True,
                'percent_d': True,
                'percent_f': True,
                'percent_incomplete': True,
            }.get(key, False)
            label = {
                'course_howmuchlearned_average': 'Amount Learned',
                'course_challenge_average': 'Challenge',
                'courseoverall_average': 'Course Overall',
                'course_priorinterest_average': 'Prior Interest',
                'instructor_effectiveness_average': 'Effectiveness',
                'instructor_respect_average': 'Respect',
                'instructoroverall_average': 'Instructor Overall',
                'instructor_availability_average': 'Availability',
                'TTT_instructoroverall_average': 'TTT instructors',
                'OTH_instructoroverall_average': 'OTH instructors',
                'TA_instructoroverall_average': 'TA instructors',
                'GR_courseoverall_average': 'GR Course Overall',
                'UD_courseoverall_average': 'UD Course Overall',
                'LD_courseoverall_average': 'LD Course Overall',
                'percent_a': 'A Grade',
                'percent_b': 'B Grade',
                'percent_c': 'C Grade',
                'percent_d': 'D Grade',
                'percent_f': 'F Grade',
                'percent_incomplete': 'Incomplete',
                'average_grade': 'Average GPA'
            }.get(key, '???')
            # Filled series use an opaque background, lines a translucent one.
            background_alpha = 1.0 if fill else 0.2
            return {
                'label': label,
                'fill': fill,
                'yAxisID': yaxis_id,
                'backgroundColor': "rgba({0},{1},{2},{background_alpha})".format(*color, background_alpha=background_alpha),
                'borderColor': "rgba({0},{1},{2},1)".format(*color),
                'pointBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
                'pointHoverBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
                'pointHoverBorderColor': "#fff",
                'pointHoverBorderWidth': 2,
                'pointHoverRadius': 5,
                'data': list(map(_overtime_builder(overtime_data, key), yearterms))
            }
        yearterms = raw_data[yearterms_key]
        overtime_data = raw_data[overtime_key]
        labels = list(map(self.convert_date, yearterms))
        datasets = list(map(_overtime_dataset_builder, keys))
        return tornado.escape.json_encode({
            'labels': labels,
            'datasets': datasets,
        })
    def embedded_javascript(self):
        """Return the inline script body that instantiates the chart."""
        options = tornado.escape.json_encode(self.chart_options())
        foo = '''
        new Chart(document.getElementById("{2}").getContext("2d"),{{
            type:'line',
            data:{1},
            options:{0}
        }});
        '''.format(options, self.chart_data, self.chart_id)
        return foo
|
iandmyhand/python-utils
|
DataStructuresAndAlgorithmsInPython/Palindrome.py
|
Python
|
mit
| 595 | 0.011765 |
def firstCharacter(str):
    """Return the first character of *str* ('' when *str* is empty)."""
    # NOTE: the parameter shadows the builtin ``str``; the name is kept
    # for backward compatibility with existing callers.
    return str[:1]

# Comparing strings with ``is`` tests object identity and only passed here
# thanks to CPython's interning of short literals; ``==`` is the correct
# comparison (and ``is`` with a literal is a SyntaxWarning in modern Python).
assert(firstCharacter("abc") == "a")

def lastCharacter(str):
    """Return the last character of *str* ('' when *str* is empty)."""
    return str[-1:]

assert(lastCharacter("abc") == "c")

def middleCharacters(str):
    """Return *str* without its first and last characters."""
    return str[1:-1]

assert(middleCharacters("abc") == "b")
assert(middleCharacters("abcde") == "bcd")

def isPalindrome(str):
    """Return True when *str* reads the same forwards and backwards.

    Recursive definition: empty and single-character strings are
    palindromes; otherwise both ends must match and the middle must
    itself be a palindrome.
    """
    if len(str) <= 1:
        return True
    if firstCharacter(str) != lastCharacter(str):
        return False
    return isPalindrome(middleCharacters(str))

assert(isPalindrome("a") == True)
assert(isPalindrome("taste") == False)
assert(isPalindrome("roror") == True)
|
colinta/MotionLookitup
|
compile.py
|
Python
|
bsd-2-clause
| 4,942 | 0.006273 |
if __name__ == '__main__':
    import os
    from bs4 import BeautifulSoup
    def get_class(cls):
        # Extract a class (or informal protocol) entry from a BridgeSupport
        # <class> tag: its name plus instance and class method signatures.
        class_name = cls['name']
        _instance_methods = cls.find_all('method', recursive=False, class_method=lambda m: m != 'true')
        # NOTE(review): 'retval' is looked up once on the class tag, so the
        # same return type is applied to every method -- confirm intended.
        retval = cls.find('retval')
        if retval:
            if retval.has_attr('declared_type64'):
                return_type = retval['declared_type64']
            else:
                return_type = retval['declared_type']
        else:
            return_type = 'void'
        instance_methods = [
            { 'name': method['selector'], 'args': get_args(method), 'return': return_type }
            for method in _instance_methods
        ]
        _class_methods = cls.find_all('method', recursive=False, class_method='true')
        class_methods = [
            { 'name': method['selector'], 'args': get_args(method), 'return': return_type }
            for method in _class_methods
        ]
        return {
            'name': class_name,
            'methods': instance_methods,
            'class_methods': class_methods
        }
    def get_func(func):
        # Extract a free function entry: name, arguments and return type
        # (64-bit declared type preferred when present).
        retval = func.find('retval')
        if retval:
            if retval.has_attr('declared_type64'):
                return_type = retval['declared_type64']
            else:
                return_type = retval['declared_type']
        else:
            return_type = 'void'
        return { 'name': func['name'], 'args': get_args(func), 'return': return_type }
    def get_args(method):
        # Collect the direct <arg> children of a method/function tag.
        return [
            get_arg_name(selector, index)
            for index, selector in enumerate(method.find_all('arg', recursive=False))
        ]
    def get_arg_name(selector, index):
        # One argument entry: declared type (default 'id') and name
        # (falling back to the tag's text representation).
        if selector.has_attr('declared_type'):
            declared_type = selector['declared_type']
        else:
            declared_type = 'id'
        if selector.has_attr('name'):
            return { 'name': selector['name'], 'type': declared_type}
        else:
            return { 'name': str(selector), 'type': declared_type}
    def get_const_name(const):
        # do this at "output time"
        # return const['name'][0].upper() + const['name'][1:]
        return const['name']
    # Root of the RubyMotion SDK metadata shipped with the installer.
    RUBYMOTION_FOLDER = '/Library/RubyMotion/data/'
    def parse_bridgesupport(prefix):
        """Parse every .bridgesupport file under RUBYMOTION_FOLDER/prefix
        and return {framework name: {classes, protocols, constants,
        functions}}."""
        everything = {}
        for filename in os.listdir(os.path.join(RUBYMOTION_FOLDER, prefix)):
            name, ext = os.path.splitext(filename)
            print((prefix + '/' + name).replace('/BridgeSupport/', '/'))
            bridgesupport = BeautifulSoup(open(os.path.join(RUBYMOTION_FOLDER, prefix, name + '.bridgesupport')), 'xml')
            # Constants and enums are merged into a single name list.
            _constants = bridgesupport.find('signatures').find_all('constant', recursive=False)
            _enums = bridgesupport.find('signatures').find_all('enum', recursive=False)
            constants = [get_const_name(const) for const in _constants]
            constants.extend([get_const_name(const) for const in _enums])
            _functions = bridgesupport.find('signatures').find_all('function', recursive=False)
            functions = [get_func(func) for func in _functions]
            _classes = bridgesupport.find('signatures').find_all('class', recursive=False)
            classes = {}
            for cls in _classes:
                entry = get_class(cls)
                classes[entry['name']] = entry
            # Informal protocols share the <class> structure.
            _protocols = bridgesupport.find('signatures').find_all('informal_protocol', recursive=False)
            protocols = {}
            for proto in _protocols:
                entry = get_class(proto)
                protocols[entry['name']] = entry
            framework = {
                'name': name,
                'classes': classes,
                'protocols': protocols,
                'constants': constants,
                'functions': functions,
            }
            everything[name] = framework
        return everything
all_the_things = { 'ios': None, 'osx': None }
ios_attempt = 'ios/8.0/BridgeSupport', 'ios/7.1/BridgeSupport', 'ios/7.0/BridgeSupport', 'ios/6.1/BridgeSupport', 'ios/6.0/BridgeSupport'
found = None
for version in ios_attempt:
if os.path.exists(os.path.join(RUBYMOTION_FOLDER, version)):
found = version
break
if not found:
raise 'Couldn\'t find an iOS version'
all_the_things['ios'] = parse_bridgesupport(found)
osx_attempt = 'osx/10.10/BridgeSupport', 'osx/10.9/BridgeSupport', 'osx/10.8/BridgeSupport'
found = None
for version in osx_attempt:
if os.path.exists(os.path.join(RUBYMOTION_FOLDER, version)):
found = version
break
if not found:
raise 'Couldn\'t find an OS X version'
all_the_things['osx'] = parse_bridgesupport(version)
import json
with open('all_the_things.json', 'w') as fptr:
print('Writing all_the_things.json')
json.dump(all_the_things, fptr)
|
zachriggle/idapython
|
hrdoc.py
|
Python
|
bsd-3-clause
| 3,025 | 0.00562 |
import os
import sys
import shutil
from glob import glob
# --------------------------------------------------------------------------
DOC_DIR = 'hr-html'
PYWRAPS_FN = 'idaapi.py'
# --------------------------------------------------------------------------
def add_footer(lines):
    """Insert an SSI footer include right after the Epydoc credits table.

    Finds the 'Generated by Epydoc' marker and the '</table>' tag that
    follows it, then splices the include directive in after the tag.
    Returns the patched HTML, or None when either marker is missing.
    """
    marker = lines.find('Generated by Epydoc')
    if marker == -1:
        return None
    table_end = lines.find('</table>', marker)
    if table_end == -1:
        return None
    cut = table_end + len('</table>')
    return lines[:cut] + '\n<!--#include virtual="/footer.shtml" -->' + lines[cut:]
# --------------------------------------------------------------------------
def define_idaapi_resolver():
    """
    Whenever a module named \"idaapi_<something>\" is
    spotted, turn it into \"idaapi\".

    Works by monkey-patching epydoc's DottedName constructor so every
    name component is rewritten before the name is built.
    """
    import epydoc.apidoc
    # Keep a reference to the original constructor for delegation.
    dn = epydoc.apidoc.DottedName.__init__
    def resolver(piece):
        # Map any 'idaapi_*' component to plain 'idaapi'; pass everything
        # else through unchanged.  (``basestring``: this file is Python 2.)
        if piece is not None and isinstance(piece, basestring) and piece.startswith("idaapi_"):
            return "idaapi"
        else:
            return piece
    def wrapper(self, *pieces, **options):
        return dn(self, *map(resolver, pieces), **options);
    epydoc.apidoc.DottedName.__init__ = wrapper
# --------------------------------------------------------------------------
def gen_docs():
    """Generate the API documentation with epydoc after regenerating the
    SWIG wrapper docs."""
    import epydoc.cli
    import swigdocs
    define_idaapi_resolver()
    swigdocs.gen_docs(outfn = 'pywraps.py')
    # append obj/x86_win_vc_32/idaapi.py to it
    # os.system(r'copy /b idaapi.py+..\obj\x86_win_vc_32\idaapi.py idaapi.py')
    # delete all output files
    for fn in glob('hr-html/*'):
        os.unlink(fn)
    # epydoc reads its arguments from sys.argv; fake a command line.
    epydoc.cli.optparse.sys.argv = [ 'epydoc',
                                     '--config', '../hrdoc.cfg',
                                     '--simple-term'
                                     ]
    # Generate the documentation
    epydoc.cli.cli()
# --------------------------------------------------------------------------
def patch_docs():
    # Post-process the generated HTML: install the site stylesheet and
    # append the shared footer include to every page.
    # (Python 2 print statements: this script targets Python 2.)
    shutil.copy('../../hrdoc.css', 'epydoc.css')
    os.system('chmod +w epydoc.css')
    for fn in glob('*.html'):
        f = open(fn, 'r')
        lines = f.read()
        f.close()
        r = add_footer(lines)
        if not r:
            # Page had no Epydoc credits table; left untouched.
            print "-",
            continue
        f = open(fn, 'w')
        f.write(r)
        f.close()
        print "+",
    print "\nDocumentation patched!"
# -------
|
-------------------------------------------------------------------
def main():
    """Generate and patch the documentation, restoring the original
    working directory afterwards."""
    # Save old directory and adjust import path
    curdir = os.getcwd() + os.sep
    sys.path.append(curdir + 'python')
    sys.path.append(curdir + 'tools')
    sys.path.append(curdir + 'docs')
    old_dir = os.getcwd()
    try:
        print "Generating documentation....."
        os.chdir('docs')
        gen_docs()
        os.chdir(DOC_DIR)
        patch_docs()
        print "Documentation generated!"
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_dir)
# --------------------------------------------------------------------------
if __name__ == '__main__':
    main()
    # NOTE(review): ``Exit`` is not defined in this file; it looks like the
    # IDA scripting environment's Exit() -- confirm this script only runs
    # inside IDA before invoking it standalone.
    Exit(0)
|
JoaquimPatriarca/senpy-for-gis
|
gasp/ine/__init__.py
|
Python
|
gpl-3.0
| 41 | 0.02439 |
"""
Tools to put to g
|
ood use
|
INE data
"""
|
hyperized/ansible
|
lib/ansible/modules/cloud/amazon/aws_netapp_cvs_FileSystems.py
|
Python
|
gpl-3.0
| 12,011 | 0.001915 |
#!/usr/bin/python
# (c) 2019, NetApp Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""AWS Cloud Volumes Services - Manage fileSystem"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_netapp_cvs_FileSystems
short_description: NetApp AWS Cloud Volumes Service Manage FileSystem.
extends_documentation_fragment:
- netapp.awscvs
version_added: '2.9'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create,
|
Update, Delete fileSystem on AWS Cloud Volumes Servi
|
ce.
options:
state:
description:
- Whether the specified fileSystem should exist or not.
required: true
choices: ['present', 'absent']
type: str
region:
description:
- The region to which the filesystem belongs to.
required: true
type: str
creationToken:
description:
- Name of the filesystem
required: true
type: str
quotaInBytes:
description:
- Size of the filesystem
- Required for create
type: int
serviceLevel:
description:
- Service Level of a filesystem.
choices: ['standard', 'premium', 'extreme']
type: str
exportPolicy:
description:
- The policy rules to export the filesystem
type: dict
suboptions:
rules:
description:
- Set of rules to export the filesystem
- Requires allowedClients, access and protocol
type: list
suboptions:
allowedClients:
description:
- Comma separated list of ip address blocks of the clients to access the fileSystem
- Each address block contains the starting IP address and size for the block
type: str
cifs:
description:
- Enable or disable cifs filesystem
type: bool
nfsv3:
description:
- Enable or disable nfsv3 fileSystem
type: bool
nfsv4:
description:
- Enable or disable nfsv4 filesystem
type: bool
ruleIndex:
description:
- Index number of the rule
type: int
unixReadOnly:
description:
- Should fileSystem have read only permission or not
type: bool
unixReadWrite:
description:
- Should fileSystem have read write permission or not
type: bool
'''
EXAMPLES = """
- name: Create FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Update FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 200000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Delete FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
"""
RETURN = """
"""
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils.netapp import AwsCvsRestAPI
class AwsCvsNetappFileSystem(object):
"""
Contains methods to parse arguments,
derive details of AWS_CVS objects
and send requests to AWS CVS via
the restApi
"""
    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        # Start from the shared AWS CVS host spec and add module options.
        self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            region=dict(required=True, type='str'),
            creationToken=dict(required=True, type='str'),
            quotaInBytes=dict(required=False, type='int'),
            serviceLevel=dict(required=False, choices=['standard', 'premium', 'extreme']),
            # Nested export-policy structure: a list of access rules.
            exportPolicy=dict(
                type='dict',
                options=dict(
                    rules=dict(
                        type='list',
                        options=dict(
                            allowedClients=dict(required=False, type='str'),
                            cifs=dict(required=False, type='bool'),
                            nfsv3=dict(required=False, type='bool'),
                            nfsv4=dict(required=False, type='bool'),
                            ruleIndex=dict(required=False, type='int'),
                            unixReadOnly=dict(required=False, type='bool'),
                            unixReadWrite=dict(required=False, type='bool')
                        )
                    )
                )
            ),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # Creating a filesystem additionally requires region/token/quota.
            required_if=[
                ('state', 'present', ['region', 'creationToken', 'quotaInBytes']),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic AWSCVS restApi class
        self.restApi = AwsCvsRestAPI(self.module)
        # Request payload mirrors the module parameters.
        self.data = {}
        for key in self.parameters.keys():
            self.data[key] = self.parameters[key]
def get_filesystemId(self):
# Check given FileSystem is exists
# Return fileSystemId is found, None otherwise
list_filesystem, error = self.restApi.get('FileSystems')
if error:
self.module.fail_json(msg=error)
for FileSystem in list_filesystem:
if FileSystem['creationToken'] == self.parameters['creationToken']:
return FileSystem['fileSystemId']
return None
def get_filesystem(self, fileSystemId):
# Get FileSystem information by fileSystemId
# Return fileSystem Information
filesystemInfo, error = self.restApi.get('FileSystems/%s' % fileSystemId)
if error:
self.module.fail_json(msg=error)
else:
return filesystemInfo
return None
def is_job_done(self, response):
# check jobId is present and equal to 'done'
# return True on success, False otherwise
try:
job_id = response['jobs'][0]['jobId']
except TypeError:
job_id = None
if job_id is not None and self.restApi.get_state(job_id) == 'done':
return True
return False
def create_fileSystem(self):
# Create fileSystem
api = 'FileSystems'
response, error = self.restApi.post(api, self.data)
if not error:
if self.is_job_done(response):
return
|
AtsushiSakai/PyAdvancedControl
|
finite_horizon_optimal_control/main.py
|
Python
|
mit
| 2,743 | 0.032446 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
u"""
Finite Horizon Optimal Control
author Atsushi Sakai
"""
import numpy as np
import scipy.linalg as la
def CalcFiniteHorizonOptimalInput(A,B,Q,R,P,N,x0):
    u"""
    Calc Finite Horizon Optimal Input

    Solves the finite-horizon LQR problem in batch form:

        min x_N' P x_N + sum_{k<N} (x_k' Q x_k + u_k' R u_k)
        s.t. x_{k+1} = A x_k + B u_k,  x_0 given

    :param A: (n, n) state matrix (np.matrix)
    :param B: (n, m) input matrix (np.matrix)
    :param Q: (n, n) stage state cost
    :param R: (m, m) stage input cost
    :param P: (n, n) terminal state cost
    :param N: horizon length
    :param x0: (n, 1) initial state
    :return: (N*m, 1) stacked optimal input sequence, or None on bad input
    """
    # print("CalcFiniteHorizonOptimalInput start")
    # data check
    # NOTE: the original compared dimensions with 'is not', which tests
    # object identity and only worked through CPython's small-int cache;
    # '!=' is the correct comparison.
    if A.shape[1] != x0.shape[0]:
        print("Data Error: A's col == x0's row")
        print("A shape:")
        print(A.shape)
        print("x0 shape:")
        print(x0.shape)
        return None
    elif B.shape[1] != R.shape[1]:
        print("Data Error: B's col == R's row")
        print("B shape:")
        print(B.shape)
        print("R's shape:")
        print(R.shape)
        return None

    # Was np.eye(A.ndim): 'ndim' of a 2-D matrix is always 2, so the code
    # only worked for 2-state systems.  A.shape[0] is the state dimension.
    nstate = A.shape[0]
    sx = np.eye(nstate)
    su = np.zeros((nstate, B.shape[1]*N))

    # Build the batch prediction matrices so that X = sx*x0 + su*U,
    # where X stacks x0..xN and U stacks the inputs (latest first).
    for i in range(N):
        # sx block: A^(i+1)
        An = np.linalg.matrix_power(A, i+1)
        sx = np.r_[sx, An]
        # su block row: [A^i B, A^(i-1) B, ..., B, 0, ..., 0]
        tmp = None
        for ii in range(i+1):
            tm = np.linalg.matrix_power(A, ii)*B
            if tmp is None:
                tmp = tm
            else:
                tmp = np.c_[tm, tmp]
        for ii in np.arange(i, N-1):
            tm = np.zeros(B.shape)
            if tmp is None:
                tmp = tm
            else:
                tmp = np.c_[tmp, tm]
        su = np.r_[su, tmp]

    # Block-diagonal state cost: Q on the first N blocks, P on the last.
    tm1 = np.eye(N+1)
    tm1[N, N] = 0
    tm2 = np.zeros((N+1, N+1))
    tm2[N, N] = 1
    Qbar = np.kron(tm1, Q) + np.kron(tm2, P)
    Rbar = np.kron(np.eye(N), R)

    # Closed-form batch solution: U* = -(su'Qbar su + Rbar)^-1 su'Qbar sx x0
    uopt = -(su.T*Qbar*su+Rbar).I*su.T*Qbar*sx*x0
    # (the unused optimal-cost computation 'costBa' was removed)
    return uopt
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Demo: drive a 2-state, single-input discrete-time system to the
    # origin and plot the optimal input plus the resulting trajectories.
    A=np.matrix([[0.77,-0.35],[0.49,0.91]])
    print("A:")
    print(A)
    B=np.matrix([0.04,0.15]).T
    print("B:")
    print(B)
    x0=np.matrix([1,-1]).T
    print("x0")
    print(x0)
    Q=np.matrix([[500,0.0],[0.0,100]])
    print("Q")
    print(Q)
    R=np.matrix([1.0])
    print("R")
    print(R)
    P=np.matrix([[1500,0.0],[0.0,100]])
    print("P")
    print(P)
    N=20#Number of horizon
    uopt=CalcFiniteHorizonOptimalInput(A,B,Q,R,P,N,x0)
    #simulation
    # Roll the open-loop optimal input sequence through the dynamics.
    u_history=[]
    x1_history=[]
    x2_history=[]
    x=x0
    for u in uopt:
        u_history.append(float(u[0]))
        x=A*x+B*u
        x1_history.append(float(x[0]))
        x2_history.append(float(x[1]))
    plt.plot(u_history,"-r",label="input")
    plt.plot(x1_history,"-g",label="x1")
    plt.plot(x2_history,"-b",label="x2")
    plt.grid(True)
    plt.legend()
    plt.show()
|
wcmitchell/insights-core
|
insights/parsers/sysconfig.py
|
Python
|
apache-2.0
| 7,321 | 0.000137 |
"""
Sysconfig - files in ``/etc/sysconfig/``
========================================
This is a collection of parsers that all deal with the system's configuration
files under the ``/etc/sysconfig/`` folder. Parsers included in this module
are:
ChronydSysconfig - file ``/etc/sysconfig/chronyd``
--------------------------------------------------
DockerSysconfig - file ``/etc/sysconfig/docker``
------------------------------------------------
HttpdSysconfig - file ``/etc/sysconfig/httpd``
----------------------------------------------
IrqbalanceSysconfig - file ``/etc/sysconfig/irqbalance``
--------------------------------------------------------
KdumpSysconfig - file ``/etc/sysconfig/kdump``
----------------------------------------------
MongodSysconfig - file ``/etc/sysconfig/mongod``
------------------------------------------------
NtpdSysconfig - file ``/etc/sysconfig/ntpd``
--------------------------------------------
VirtWhoSysconfig - file ``/etc/sysconfig/virt-who``
---------------------------------------------------
"""
from .. import parser, SysconfigOptions
from insights.specs import docker_sysconfig
from insights.specs import sysconfig_chronyd
from insights.specs import sysconfig_httpd
from insights.specs import sysconfig_irqbalance
from insights.specs import sysconfig_kdump
from insights.specs import sysconfig_mongod
from insights.specs import sysconfig_ntpd
from insights.specs import sysconfig_virt_who
@parser(sysconfig_chronyd)
class ChronydSysconfig(SysconfigOptions):
    """
    Parse the ``chronyd`` service configuration from
    ``/etc/sysconfig/chronyd``.

    Values are exposed through dict-style access; commented-out
    variables are not included.

    Sample Input::

        OPTIONS="-d"
        #HIDE="me"

    Examples:
        >>> service_opts = shared[ChronydSysconfig]
        >>> 'OPTIONS' in service_opts
        True
        >>> 'HIDE' in service_opts
        False
        >>> service_opts['OPTIONS']
        '-d'
    """
    pass
@parser(sysconfig_ntpd)
class NtpdSysconfig(SysconfigOptions):
    """
    A parser for analyzing the ``ntpd`` service config file in the
    ``/etc/sysconfig`` directory

    Sample Input::

        OPTIONS="-x -g"
        #HIDE="me"

    Examples:
        >>> service_opts = shared[NtpdSysconfig]
        >>> 'OPTIONS' in service_opts
        True
        >>> 'HIDE' in service_opts
        False
        >>> service_opts['OPTIONS']
        '-x -g'
    """
    pass
@parser(docker_sysconfig)
class DockerSysconfig(SysconfigOptions):
    """
    Parse ``/etc/sysconfig/docker`` using the standard ``SysconfigOptions``
    parser class.  The common 'OPTIONS' variable is additionally exposed
    through the ``options`` property for convenience.

    Examples:
        >>> conf = shared[DockerSysconfig]
        >>> 'OPTIONS' in conf
        True
        >>> conf['OPTIONS']
        '--selinux-enabled'
        >>> conf.options
        '--selinux-enabled'
        >>> conf['DOCKER_CERT_PATH']
        '/etc/docker'
    """

    @property
    def options(self):
        """The 'OPTIONS' variable, or '' when it is not defined."""
        return self.data.get('OPTIONS', '')
@parser(sysconfig_httpd)
class HttpdSysconfig(SysconfigOptions):
    """
    Parse the ``httpd`` service configuration from
    ``/etc/sysconfig/httpd``.

    Sample Input::

        # The default processing model (MPM) is the process-based
        # 'prefork' model.  A thread-based model, 'worker', is also
        # available, but does not work with some modules (such as PHP).
        # The service must be stopped before changing this variable.
        #
        HTTPD=/usr/sbin/httpd.worker
        #
        # To pass additional options (for instance, -D definitions) to the
        # httpd binary at startup, set OPTIONS here.
        #
        OPTIONS=

    Examples:
        >>> httpd_syscfg = shared[HttpdSysconfig]
        >>> httpd_syscfg['HTTPD']
        '/usr/sbin/httpd.worker'
        >>> httpd_syscfg.get('OPTIONS')
        ''
        >>> 'NOOP' in httpd_syscfg
        False
    """
    pass
@parser(sysconfig_irqbalance)
class IrqbalanceSysconfig(SysconfigOptions):
    """
    A parser for analyzing the ``irqbalance`` service config file in the
    ``/etc/sysconfig`` directory.

    Sample Input::

        #IRQBALANCE_ONESHOT=yes
        #
        # IRQBALANCE_BANNED_CPUS
        # 64 bit bitmask which allows you to indicate which cpu's should
        # be skipped when reblancing irqs. Cpu numbers which have their
        # corresponding bits set to one in this mask will not have any
        # irq's assigned to them on rebalance
        #
        IRQBALANCE_BANNED_CPUS=f8
        IRQBALANCE_ARGS="-d"

    Examples:
        >>> irqb_syscfg = shared[IrqbalanceSysconfig]
        >>> irqb_syscfg['IRQBALANCE_BANNED_CPUS']
        'f8'
        >>> irqb_syscfg.get('IRQBALANCE_ARGS')  # quotes will be stripped
        '-d'
        >>> irqb_syscfg.get('IRQBALANCE_ONESHOT')
        None
        >>> 'ONESHOT' in irqb_syscfg
        False
    """
    pass
@parser(sysconfig_kdump)
class KdumpSysconfig(SysconfigOptions):
    """
    Read data from the ``/etc/sysconfig/kdump`` file.

    For ease of access, each of the following variables is also set as an
    attribute of the parser, holding its value from the file or '' when the
    variable is absent:

    * KDUMP_COMMANDLINE
    * KDUMP_COMMANDLINE_REMOVE
    * KDUMP_COMMANDLINE_APPEND
    * KDUMP_KERNELVER
    * KDUMP_IMG
    * KDUMP_IMG_EXT
    * KEXEC_ARGS
    """

    # variables promoted to instance attributes by parse_content()
    KDUMP_KEYS = [
        'KDUMP_COMMANDLINE',
        'KDUMP_COMMANDLINE_REMOVE',
        'KDUMP_COMMANDLINE_APPEND',
        'KDUMP_KERNELVER',
        'KDUMP_IMG',
        'KDUMP_IMG_EXT',
        'KEXEC_ARGS',
    ]

    def parse_content(self, content):
        """Parse the file, then mirror each known key onto the instance."""
        super(KdumpSysconfig, self).parse_content(content)
        for name in self.KDUMP_KEYS:
            setattr(self, name, self.data.get(name, ''))
@parser(sysconfig_virt_who)
class VirtWhoSysconfig(SysconfigOptions):
    """
    Parse the ``virt-who`` service configuration from
    ``/etc/sysconfig/virt-who``.

    Sample Input::

        # Register ESX machines using vCenter
        # VIRTWHO_ESX=0
        # Register guests using RHEV-M
        VIRTWHO_RHEVM=1

        # Options for RHEV-M mode
        VIRTWHO_RHEVM_OWNER=

        TEST_OPT="A TEST"

    Examples:
        >>> vwho_syscfg = shared[VirtWhoSysconfig]
        >>> vwho_syscfg['VIRTWHO_RHEVM']
        '1'
        >>> vwho_syscfg.get('VIRTWHO_RHEVM_OWNER')
        ''
        >>> vwho_syscfg.get('NO_SUCH_OPTION')
        None
        >>> 'NOSUCHOPTION' in vwho_syscfg
        False
        >>> vwho_syscfg.get('TEST_OPT')  # quotes are stripped
        'A TEST'
    """
    pass
@parser(sysconfig_mongod)
class MongodSysconfig(SysconfigOptions):
    """
    A parser for analyzing the ``mongod`` service configuration file in
    the ``etc/sysconfig`` directory, contains 'etc/sysconfig/mongod' and
    '/etc/opt/rh/rh-mongodb26/sysconfig/mongod'.

    Sample Input::

        OPTIONS="--quiet -f /etc/mongod.conf"

    Examples:
        >>> mongod_syscfg = shared[MongodSysconfig]
        >>> mongod_syscfg.get('OPTIONS')
        '--quiet -f /etc/mongod.conf'
        >>> mongod_syscfg.get('NO_SUCH_OPTION')
        None
        >>> 'NOSUCHOPTION' in mongod_syscfg
        False
    """
    pass
|
timvideos/flumotion
|
flumotion/test/test_worker_medium.py
|
Python
|
lgpl-2.1
| 2,617 | 0 |
# -*- Mode: Python; test-case-name:flumotion.test.test_worker_worker
|
-*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
|
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from twisted.internet import defer
from twisted.spread import pb
from flumotion.common import testsuite
from flumotion.test import realm
from flumotion.twisted import pb as fpb
from flumotion.worker import medium
class TestWorkerAvatar(fpb.PingableAvatar):
    """Avatar handed to a logging-in worker; stores the remote mind."""

    def __init__(self, avatarId, mind):
        fpb.PingableAvatar.__init__(self, avatarId)
        self.setMind(mind)
class TestWorkerRealm(realm.TestRealm):
    """Test realm exposing deferreds that fire on worker login/logout."""

    deferredAvatar = None
    deferredLogout = None

    def getDeferredAvatar(self):
        """Deferred fired with the avatar when a worker logs in."""
        d = self.deferredAvatar
        if d is None:
            d = self.deferredAvatar = defer.Deferred()
        return d

    def getDeferredLogout(self):
        """Deferred fired with the avatar when a worker logs out."""
        d = self.deferredLogout
        if d is None:
            d = self.deferredLogout = defer.Deferred()
        return d

    def requestAvatar(self, avatarId, keycard, mind, *ifaces):
        avatar = TestWorkerAvatar(avatarId, mind)
        self.getDeferredAvatar().callback(avatar)

        def logout():
            self.avatarLogout(avatar)

        return (pb.IPerspective, avatar, logout)

    def avatarLogout(self, avatar):
        self.debug('worker logged out: %s', avatar.avatarId)
        self.getDeferredLogout().callback(avatar)
class TestWorkerMedium(testsuite.TestCase):
    """Exercise WorkerMedium's connect/disconnect handshake against a
    test realm."""

    def setUp(self):
        self.realm = TestWorkerRealm()

    def tearDown(self):
        # shutdown() returns a deferred; the test runner waits on it
        return self.realm.shutdown()

    def testConnect(self):
        m = medium.WorkerMedium(None)
        connectionInfo = self.realm.getConnectionInfo()
        connectionInfo.authenticator.avatarId = 'foo'
        m.startConnecting(connectionInfo)

        def connected(avatar):
            # once logged in, drop the connection and wait for the logout
            m.stopConnecting()
            return self.realm.getDeferredLogout()

        def disconnected(avatar):
            self.assertEquals(avatar.avatarId, 'foo')

        d = self.realm.getDeferredAvatar()
        d.addCallback(connected)
        d.addCallback(disconnected)
        return d
|
eshandas/simple_django_logger
|
simple_django_logger/admin.py
|
Python
|
mit
| 1,281 | 0.000781 |
from django.contrib import admin
from .models import (
Log,
RequestLog,
EventLog,
)
class LogAdmin(admin.ModelAdmin):
    # Logs are an audit trail: every field is read-only in the admin.
    readonly_fields = [
        'log_level', 'request_url', 'request_method', 'get_data',
        'request_body', 'cookies', 'meta',
        'exception_type', 'message', 'stack_trace', 'user_id',
        'user_name', 'request_browser', 'request_os', 'request_device',
        'response_body', 'response_status', 'response_headers', 'response_content_type',
        'is_mobile', 'is_tablet', 'is_touch_capable', 'is_pc',
        'is_bot', 'created_on']

    def has_add_permission(self, request):
        # Entries are created by the logger, never by hand in the admin.
        return False
class RequestLogAdmin(admin.ModelAdmin):
    # Outgoing-request logs are read-only in the admin.
    readonly_fields = [
        'method', 'url', 'request_data', 'request_headers',
        'response_text', 'response_status', 'response_reason',
        'response_time', 'created_on']

    def has_add_permission(self, request):
        # Entries are created by the logger, never by hand in the admin.
        return False
class EventLogAdmin(admin.ModelAdmin):
    # Event logs are read-only in the admin.
    readonly_fields = [
        'log_level', 'message', 'stack_trace', 'tag',
        'created_on']

    def has_add_permission(self, request):
        # Entries are created by the logger, never by hand in the admin.
        return False
# Expose the three log models in the Django admin with their read-only
# ModelAdmin configurations.
admin.site.register(Log, LogAdmin)
admin.site.register(RequestLog, RequestLogAdmin)
admin.site.register(EventLog, EventLogAdmin)
|
apache/bloodhound
|
bloodhound_multiproduct/tests/versioncontrol/api.py
|
Python
|
apache-2.0
| 4,299 | 0.001163 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's repository API in product environments"""
import unittest
from trac.resource import Resource, get_resource_description, get_resource_url
from trac.versioncontrol.api import Repository
from trac.versioncontrol.tests.api import ResourceManagerTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceManagerTestCase(ResourceManagerTestCase,
                                     MultiproductTestCase):
    """Re-run Trac's resource-manager tests inside a product environment,
    checking that resource URLs gain the /products/<prefix>/ segment."""

    @property
    def env(self):
        # Lazily build the global test environment plus a product
        # environment the first time self.env is read; the inherited
        # upstream tests then use it transparently.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                    self.global_env, self.default_product)
            self._load_default_data(env)
        return env

    @env.setter
    def env(self, value):
        # Upstream setUp() assigns self.env; ignore the assignment so the
        # lazy property above stays authoritative.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None

    def test_resource_changeset(self):
        res = Resource('changeset', '42')
        self.assertEqual('Changeset 42', get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/changeset/42',
                         get_resource_url(self.env, res, self.env.href))

        repo = Resource('repository', 'repo')
        res = Resource('changeset', '42', parent=repo)
        self.assertEqual('Changeset 42 in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/changeset/42/repo',
                         get_resource_url(self.env, res, self.env.href))

    def test_resource_source(self):
        res = Resource('source', '/trunk/src')
        self.assertEqual('path /trunk/src',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/trunk/src',
                         get_resource_url(self.env, res, self.env.href))

        repo = Resource('repository', 'repo')
        res = Resource('source', '/trunk/src', parent=repo)
        self.assertEqual('path /trunk/src in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src',
                         get_resource_url(self.env, res, self.env.href))

        repo = Resource('repository', 'repo')
        res = Resource('source', '/trunk/src', version=42, parent=repo)
        self.assertEqual('path /trunk/src@42 in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src?rev=42',
                         get_resource_url(self.env, res, self.env.href))

    def test_resource_repository(self):
        res = Resource('repository', 'testrepo')
        self.assertEqual('Repository testrepo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/testrepo',
                         get_resource_url(self.env, res, self.env.href))
def test_suite():
    """Aggregate this module's test cases into a single suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductResourceManagerTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/CacheStore.py
|
Python
|
gpl-3.0
| 3,059 | 0.006538 |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
set only if the server already have this key
get(key, val)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
import time
from Cheetah.Utils.memcache import Client as MemcachedClient
class Error(Exception):
    """Base error raised by the cache store backends."""
    pass
class AbstractCacheStore(object):
    """Interface for cache store backends; see the module docstring for
    the exact set/add/replace/get/delete semantics."""

    def set(self, key, val, time=None):
        raise NotImplementedError

    def add(self, key, val, time=None):
        raise NotImplementedError

    def replace(self, key, val, time=None):
        raise NotImplementedError

    def delete(self, key):
        raise NotImplementedError

    def get(self, key):
        raise NotImplementedError
class MemoryCacheStore(AbstractCacheStore):
    """In-process, dict-backed cache store.

    Entries are stored as (value, expiryTime) tuples; an expiryTime of 0
    means 'never expires'.  Semantics follow the python-memcached API as
    described in the module docstring: add() only sets missing keys,
    replace() only overwrites existing keys.
    """

    def __init__(self):
        self._data = {}

    def set(self, key, val, time=0):
        # unconditional set
        self._data[key] = (val, time)

    def add(self, key, val, time=0):
        # set only if the key is NOT already cached
        if key in self._data:
            raise Error('a value for key %r is already in the cache'%key)
        self._data[key] = (val, time)

    def replace(self, key, val, time=0):
        # set only if the key IS already cached.  The original test was
        # inverted (it raised when the key existed), making replace()
        # behave like add(), contrary to the documented semantics.
        if key not in self._data:
            raise Error('no value for key %r is in the cache'%key)
        self._data[key] = (val, time)

    def delete(self, key):
        # raises KeyError when the key is absent
        del self._data[key]

    def get(self, key):
        # return the cached value, lazily evicting expired entries
        (val, exptime) = self._data[key]
        if exptime and time.time() > exptime:
            del self._data[key]
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
    """Cache store backed by one or more memcached servers."""

    # NOTE: must be a *sequence* of 'host:port' strings; the original
    # default ('127.0.0.1:11211') was a plain string (missing comma).
    servers = ('127.0.0.1:11211',)

    def __init__(self, servers=None, debug=False):
        if servers is None:
            servers = self.servers
        self._client = MemcachedClient(servers, debug)

    def set(self, key, val, time=0):
        # unconditional set
        self._client.set(key, val, time)

    def add(self, key, val, time=0):
        # memcached add() returns a false value when the key already
        # exists.  (The original also wrote to self._data afterwards,
        # which does not exist on this class and raised AttributeError.)
        res = self._client.add(key, val, time)
        if not res:
            raise Error('a value for key %r is already in the cache'%key)

    def replace(self, key, val, time=0):
        # memcached replace() returns a false value when the key is missing
        res = self._client.replace(key, val, time)
        if not res:
            raise Error('no value for key %r is in the cache'%key)

    def delete(self, key):
        # raises KeyError when the server had no such key
        res = self._client.delete(key, time=0)
        if not res:
            raise KeyError(key)

    def get(self, key):
        # raises KeyError when missing; note a stored value of None is
        # indistinguishable from a miss with this client API
        val = self._client.get(key)
        if val is None:
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._client.flush_all()
|
grundprinzip/sublemacspro
|
lib/mark_ring.py
|
Python
|
bsd-3-clause
| 2,686 | 0.004468 |
import sublime, sublime_plugin
#
# Classic emacs mark ring with multi-cursor support. Each entr
|
y in t
|
he ring is implemented
# with a named view region with an index, so that the marks are adjusted automatically by
# Sublime. The special region called "jove_mark" is used to display the current mark. It's
# a copy of the current mark with gutter display properties turned on.
#
# Each entry is an array of 1 or more regions.
#
class MarkRing:
    """Classic emacs mark ring with multi-cursor support.

    Each ring entry is a named view region ("jove_mark:<index>") so that
    Sublime adjusts the marks automatically as the buffer changes.  The
    special region "jove_mark" mirrors the current mark with gutter
    display properties turned on (a dot).  Each entry is an array of 1 or
    more regions (one per cursor).
    """

    MARK_RING_SIZE = 16

    def __init__(self, view):
        self.view = view
        self.index = 0

        # clear any regions left over from a previous session
        self.view.erase_regions("jove_mark")
        for i in range(self.MARK_RING_SIZE):
            self.view.erase_regions(self.get_key(i))

    def get_key(self, index):
        """Region-name key for ring slot *index*."""
        return "jove_mark:" + str(index)

    def clear(self):
        """Hide the visible mark indicator."""
        self.view.erase_regions("jove_mark")

    def has_visible_mark(self):
        """True if the "jove_mark" indicator currently holds any regions."""
        # fetch once -- the original called get_regions() twice and
        # compared against None with "!=" instead of "is not"
        regions = self.view.get_regions("jove_mark")
        return regions is not None and len(regions) > 0

    def display(self):
        """Update the gutter display to show the current mark."""
        regions = self.get()
        if regions is not None:
            self.view.add_regions("jove_mark", regions, "mark", "dot", sublime.HIDDEN)

    def get(self):
        """Return the current mark's region list."""
        return self.view.get_regions(self.get_key(self.index))

    def set(self, regions, reuse_index=False):
        """Set the mark to *regions*.

        When reuse_index is True the current slot is overwritten;
        otherwise the ring advances to the next slot first.
        """
        if self.get() == regions:
            # don't set another mark in the same place
            return
        if not reuse_index:
            self.index = (self.index + 1) % self.MARK_RING_SIZE
        self.view.add_regions(self.get_key(self.index), regions, "mark", "", sublime.HIDDEN)
        self.display()

    def exchange(self, regions):
        """Exchange the current mark with *regions*; return the old mark
        (None when there was no current mark)."""
        current = self.get()
        if current is not None:
            self.set(regions, True)
            return current

    def pop(self):
        """Pop the current mark from the ring and return it.

        The caller sets point to the returned value.  The new current
        mark becomes the previous non-empty slot on the ring (or the
        starting slot after a full lap).
        """
        regions = self.get()

        # walk backwards to the first non-empty slot, wrapping around
        start = self.index
        while True:
            self.index -= 1
            if self.index < 0:
                self.index = self.MARK_RING_SIZE - 1
            if self.get() or self.index == start:
                break
        self.display()
        return regions
|
chrissly31415/amimanera
|
keras_tools.py
|
Python
|
lgpl-3.0
| 10,881 | 0.01351 |
#!/usr/bin/python
# coding: utf-8
import numpy as np
from keras.models import Sequential, Model, load_model
from keras.optimizers import SGD,Adagrad,RMSprop,Adam
from keras.layers import Dense, Input, Activation
from keras.layers import BatchNormalization, Add, Dropout
from keras import optimizers
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU,LeakyReLU
from keras.utils import np_utils, generic_utils
from sklearn.base import BaseEstimator
import types
import tempfile
import keras.models
from keras import callbacks
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, largers layers
- with more layers, less layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
'''
From kaggle forum:
NN is the average of 30 neural networks with the same parameters fed by x^(2/3) transformed features and by results of KNN with N = 27 (KNN gained .002 for my best solution). NN was implemented on Keras, I've found this library very nice and fast (with CUDA-enabled Theano). Layers were (512,256,128), the score was .428
Dropout(.15) -> Dense(n_in, l1, activation='tanh') -> BatchNormalization((l1,)) -> Dropout(.5) -> Dense(l1, l2) -> PReLU((l2,)) -> BatchNormalization((l2,)) -> Dropout(.3) -> Dense(l2, l3) -> PReLU((l3,)) -> BatchNormalization((l3,)) -> Dropout(.1) -> Dense(l3, n_out) -> Activation('softmax')
sgd = SGD(lr=0.004, decay=1e-7, momentum=0.99, nesterov=True)
Rossmann 3d place: https://github.com/entron/category-embedding-rossmann/blob/master/models.py "categorical embedding"
avito challenge https://www.kaggle.com/rightfit/avito-duplicate-ads-detection/get-hash-from-images/code
'''
def RMSE(y_true, y_pred):
    # Root-mean-square-error loss over the last axis.
    # NOTE(review): `T` (presumably theano.tensor) is never imported in
    # this module, so calling this function raises NameError -- confirm
    # the intended backend before use.
    loss = T.sqrt(T.sqr(y_true - y_pred).mean(axis=-1))
    #print(loss)
    return loss
def make_keras_picklable():
    """Monkey-patch keras.models.Model so instances can be pickled.

    Pickling serializes the model to a temporary HDF5 file and stores the
    raw bytes under 'model_str'; unpickling writes the bytes back to a
    temporary file and restores via keras.models.load_model.
    """
    def __getstate__(self):
        model_str = ""
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            keras.models.save_model(self, fd.name, overwrite=True)
            model_str = fd.read()
        d = { 'model_str': model_str }
        return d

    def __setstate__(self, state):
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            fd.write(state['model_str'])
            fd.flush()
            model = keras.models.load_model(fd.name)
        # adopt the restored model's attributes wholesale
        self.__dict__ = model.__dict__

    cls = keras.models.Model
    cls.__getstate__ = __getstate__
    cls.__setstate__ = __setstate__
#https://gist.github.com/MaxHalford/9bfaa8daf8b4bc17a7fb7ba58c880675#file-fit-py
# Shared callback: stop training after one epoch without val_loss improvement.
early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=1, verbose=0, mode='auto')
def create_classification_model(input_dim=64, learning_rate=0.001, activation='relu',
                                batchnorm=False, layers=[256, 256], dropouts=[0.0, 0.0],
                                optimizer=None):
    """Build and compile a dense binary classifier.

    Args:
        input_dim: number of input features.
        learning_rate: learning rate passed to the chosen optimizer.
        activation: activation applied after every hidden layer.
        batchnorm: insert BatchNormalization before each activation (and
            once more before the output layer).
        layers: hidden layer widths (iterated in parallel with *dropouts*).
        dropouts: dropout rate applied after each hidden layer.
        optimizer: None / 'adam' / 'adadelta' / 'adagrad'; anything else
            falls back to plain SGD.

    Returns:
        A compiled keras Sequential model with a single sigmoid output.
    """
    model = Sequential()
    for i, (layer, dropout) in enumerate(zip(layers, dropouts)):
        # only the first Dense needs input_dim; later layers infer it
        if i == 0:
            model.add(Dense(layer, input_dim=input_dim, kernel_initializer='uniform'))
        else:
            model.add(Dense(layer, kernel_initializer='uniform'))
        if batchnorm:
            model.add(BatchNormalization())
        model.add(Activation(activation))
        model.add(Dropout(dropout))

    if batchnorm:
        model.add(BatchNormalization())
    model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

    # Choose the optimizer.  The original adagrad branch referenced
    # self.learning_rate inside a plain function -- a NameError.
    if optimizer is None:
        optimizer = optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)
    elif 'adam' in optimizer:
        optimizer = optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    elif 'adadelta' in optimizer:
        optimizer = optimizers.Adadelta(lr=learning_rate, rho=0.95, epsilon=1e-08, decay=0.0)
    elif 'adagrad' in optimizer:
        optimizer = Adagrad(lr=learning_rate)
    else:
        optimizer = optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)

    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
def create_regression_model_old(input_dim=64,learning_rate=0.001,activation='sigmoid',layers=[256,256],dropouts=[0.0,0.0],loss='mean_absolute_error',optimizer=None):
    """Build and compile a dense regression model (Sequential API).

    NOTE(review): apparently superseded by create_regression_model();
    note that Dropout is applied *before* each Dense layer here,
    including before the very first (input) layer.
    """
    # create model
    model = Sequential()
    for i,(layer,dropout) in enumerate(zip(layers,dropouts)):
        if i==0:
            model.add(Dropout(dropout))
            # first layer carries the input dimension
            model.add(Dense(layer, input_dim=input_dim, kernel_initializer='normal', activation=activation))
        else:
            model.add(Dropout(dropout))
            model.add(Dense(layer, kernel_initializer='normal', activation=activation))
    # single linear output unit for regression
    model.add(Dense(1, kernel_initializer='normal',activation='linear'))
    # Compile model
    #model.compile(loss='mean_squared_error', optimizer=optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0))
    #model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=self.learning_rate) # 0.01
    if optimizer is None:
        optimizer = optimizers.RMSprop(lr=learning_rate)
    model.compile(loss=loss,optimizer=optimizer)
    return model
def create_regression_model(input_dim=64, learning_rate=0.001, layers=[256, 256], dropouts=[0.0, 0.0], loss='mean_absolute_error', optimizer=None):
    """Build and compile a dense regression model (functional API).

    Each hidden block is Dense -> BatchNorm -> LeakyReLU -> Dropout.

    Bug fix: the original wrote ``x = Dense(layer)(inp)`` inside the loop,
    re-connecting every hidden layer directly to the input and discarding
    all previous layers, so only the *last* hidden layer fed the output.
    The layers are now chained properly.
    """
    inp = Input(shape=(input_dim,))
    x = inp
    for layer, dropout in zip(layers, dropouts):
        x = Dense(layer)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = Dropout(dropout)(x)
    out = Dense(1, activation="linear")(x)
    model = Model(inputs=inp, outputs=[out])
    if optimizer is None:
        # optimizer = optimizers.RMSprop(lr=learning_rate)
        optimizer = Adam()
    model.compile(loss=loss, optimizer=optimizer)
    return model
class KerasNN(BaseEstimator):
def __init__(self, dims=66, nb_classes=1, nb_epoch=30, learning_rate=0.5, validation_split=0.0, batch_size=64,
loss='categorical_crossentropy', layers=[32,32], activation='relu', dropout=[0.2,0.2],verbose=1):
self.dims = dims
self.nb_classes = nb_classes
self.classes_ = None # list containing classes
self.nb_epoch = nb_epoch
self.learning_rate = learning_rate
self.validation_split = validation_split
self.batch_size = batch_size
self.loss = loss
self.layers = layers
self.activation = activation
self.dropout = dropout
self.verbose = verbose
self.hist = ""
self.model = Sequential()
# Keras model
for i,dropout in enumerate(self.dropout):
if i>0:
dims = self.layers[i-1]
if 'maxout' in self.activation:
print("Currently not implemented...")
#self.model.add(MaxoutDense(output_dim=layers[i], nb_feature=4, input_dim=dims))
else:
self.model.add(Dense(output_dim=layers[i], input_dim=dims, init='glorot_uniform'))
#https://www.reddit.com/r/MachineLearning/comments/22u1yt/is_deep_learning_basically_just_neural_networks/
#https://www.kaggle.com/c/job-salary-prediction/forums/t/4208/congratulations-to-the-preliminary-winners?page=2
if 'PReLU' in self.activation:
self.model.add(PReLU())
elif 'LeakyReLU' in self.activation:
|
stephtdouglas/k2spin
|
k2io.py
|
Python
|
mit
| 2,475 | 0.006061 |
"""Read in lightcurve files."""
import logging
import numpy as np
import astropy.io.ascii as at
def read_single_aperture(filename):
    """Read in one of AMC's K2 light curves.

    inputs
    ------
    filename: string
        should look like EPIC_205030103_xy_ap1.5_fixbox_cleaned.dat

    outputs
    -------
    time, flux, unc_flux, x_pos, y_pos, qual_flux: arrays
    aperture: float
    """
    # Read in the file
    lc = at.read(filename, delimiter=' ', data_start=1)

    # Pull the aperture size out of the filename.
    # (assumes the "ap<size>" token is the 4th underscore field even when
    #  the EPIC_ prefix is absent -- TODO confirm for non-EPIC names)
    split_filename = filename.split("/")[-1].split('_')
    logging.debug(split_filename)
    if split_filename[0] == "EPIC":
        epicID = split_filename[1]
    else:
        epicID = split_filename[0]
    aperture = split_filename[3]
    if aperture.startswith("ap"):
        aperture = aperture[2:]
    if aperture.endswith(".dat"):
        aperture = aperture[:-4]

    # Extract the useful columns.  Missing columns raise KeyError; the
    # original used bare "except:", which would also swallow unrelated
    # errors (including KeyboardInterrupt).
    time = lc["Dates"]
    flux = lc["Flux"]
    try:
        unc_flux = lc["Uncert{}".format(aperture)]
    except KeyError:
        unc_flux = np.ones_like(flux)
    x_pos = lc["Xpos"]
    y_pos = lc["Ypos"]
    try:
        qual_flux = lc["Quality"]
    except KeyError:
        qual_flux = np.ones_like(flux)

    aperture = float(aperture)

    # Return the columns
    return time, flux, unc_flux, x_pos, y_pos, qual_flux, aperture
def read_double_aperture(filename):
    """Read one of AMC's K2 light-curve files containing two aperture
    extractions (5px and 3px).

    inputs
    ------
    filename: string
        should look like EPIC_205030103_xy_ap#.#_#.#_fixbox.dat

    outputs
    -------
    time: array
    fluxes, unc_fluxes: arrays, shape=(2, n_datapoints)
        one flux / uncertainty row per aperture in the file
    x_pos, y_pos, qual_flux: arrays
    apertures: array, length=2
        the aperture sizes contained in the file
    """
    # Load the whitespace-delimited table
    table = at.read(filename, delimiter=' ', data_start=1)

    name_parts = filename.split("/")[-1].split('_')
    logging.debug(name_parts)
    epicID = name_parts[1]

    # Stack the two extractions, 5px first, then 3px
    fluxes = np.array([table["Flux5"], table["Flux3"]])
    unc_fluxes = np.array([table["Uncert5"], table["Uncert3"]])
    apertures = np.array([5., 3.])

    return (table["Dates"], fluxes, unc_fluxes,
            table["Xpos"], table["Ypos"], table["Quality"], apertures)
def read_list(file_list):
    """Read in a list of lightcurve filenames.

    Not implemented yet; placeholder for batch processing.
    """
    pass
|
patrikhuber/eos
|
share/scripts/convert-bfm2017-to-eos.py
|
Python
|
apache-2.0
| 3,067 | 0.005546 |
import numpy as np
import eos
import h5py
# This script converts the Basel Face Model 2017 (BFM2017, [1]) to the eos model format,
# specifically the files model2017-1_face12_nomouth.h5 and model2017-1_bfm_nomouth.h5 from the BFM2017 download.
#
# The BFM2017 does not come with texture (uv-) coordinates. If you have texture coordinates for the BFM, they can be
# added to the eos.morphablemodel.MorphableModel(...) constructor in the third argument. Note that eos only supports one
# uv-coordinate per vertex.
#
# [1]: Morphable Face Models - An Open Framework,
# T. Gerig, A. Morel-Forster, C. Blumer, B. Egger, M. Lüthi, S. Schönborn and T. Vetter,
# arXiv preprint, 2017.
# http://faces.cs.unibas.ch/bfm/bfm2017.html
# Set this to the path of the model2017-1_bfm_nomouth.h5 or model2017-1_face12_nomouth.h5 file from the BFM2017 download:
bfm2017_file = r"./model2017-1_bfm_nomouth.h5"

with h5py.File(bfm2017_file, 'r') as hf:
    # The PCA shape model:
    shape_mean = np.array(hf['shape/model/mean'])
    shape_orthogonal_pca_basis = np.array(hf['shape/model/pcaBasis'])
    # Their basis is unit norm: np.linalg.norm(shape_pca_basis[:,0]) == ~1.0
    # And the basis vectors are orthogonal: np.dot(shape_pca_basis[:,0], shape_pca_basis[:,0]) == 1.0
    # np.dot(shape_pca_basis[:,0], shape_pca_basis[:,1]) == 1e-10
    shape_pca_variance = np.array(hf['shape/model/pcaVariance'])  # the PCA variances (eigenvalues)

    # Triangle topology, shared by the shape, colour and expression models
    triangle_list = np.array(hf['shape/representer/cells'])

    shape_model = eos.morphablemodel.PcaModel(shape_mean, shape_orthogonal_pca_basis, shape_pca_variance,
                                              triangle_list.transpose().tolist())

    # PCA colour model:
    color_mean = np.array(hf['color/model/mean'])
    color_orthogonal_pca_basis = np.array(hf['color/model/pcaBasis'])
    color_pca_variance = np.array(hf['color/model/pcaVariance'])

    color_model = eos.morphablemodel.PcaModel(color_mean, color_orthogonal_pca_basis, color_pca_variance,
                                              triangle_list.transpose().tolist())

    # PCA expression model:
    expression_mean = np.array(hf['expression/model/mean'])
    expression_pca_basis = np.array(hf['expression/model/pcaBasis'])
    expression_pca_variance = np.array(hf['expression/model/pcaVariance'])

    expression_model = eos.morphablemodel.PcaModel(expression_mean, expression_pca_basis, expression_pca_variance,
                                                   triangle_list.transpose().tolist())

    # Construct and save an eos model from the BFM data:
    model = eos.morphablemodel.MorphableModel(shape_model, expression_model, color_model, vertex_definitions=None,
                                              texture_coordinates=[],
                                              texture_triangle_indices=[])  # uv-coordinates can be added here
    eos.morphablemodel.save_model(model, "bfm2017-1_bfm_nomouth.bin")
    print("Converted and saved model as bfm2017-1_bfm_nomouth.bin.")
|
JackDanger/sentry
|
src/sentry/templatetags/sentry_assets.py
|
Python
|
bsd-3-clause
| 1,943 | 0.003603 |
from __future__ import absolute_
|
import
from django.conf import settings
from django.template import Library
from sen
|
try import options
from sentry.utils.assets import get_asset_url
from sentry.utils.http import absolute_uri
register = Library()

# Expose get_asset_url directly as the {% asset_url %} template tag.
register.simple_tag(get_asset_url, name='asset_url')


@register.simple_tag
def absolute_asset_url(module, path):
    """
    Returns a versioned absolute asset URL (located within Sentry's static files).

    Example:
      {% absolute_asset_url 'sentry' 'dist/sentry.css' %}
      =>  "http://sentry.example.com/_static/74d127b78dc7daf2c51f/sentry/dist/sentry.css"
    """
    return absolute_uri(get_asset_url(module, path))
@register.simple_tag
def crossorigin():
    """
    Returns an additional crossorigin="anonymous" snippet for use in a <script> tag if
    our asset urls are from a different domain than the system.url-prefix.
    """
    static_origin = absolute_uri(settings.STATIC_URL)
    same_origin = static_origin.startswith(options.get('system.url-prefix'))
    # Assets served from the same domain prefix need no CORS attribute.
    return '' if same_origin else ' crossorigin="anonymous"'
@register.simple_tag(takes_context=True)
def locale_js_include(context):
"""
If the user has a non-English locale set, returns a <script> tag pointing
to the relevant locale JavaScript file
"""
request = context['request']
try:
lang_code = request.LANGUAGE_CODE
except AttributeError:
# it's possible that request at this point, LANGUAGE_CODE hasn't be bound
# to the Request object yet. This specifically happens when rendering our own
# 500 error page, resulting in yet another error trying to render our error.
return ''
if lang_code == 'en' or lang_code not in settings.SUPPORTED_LANGUAGES:
return ''
href = get_asset_url("sentry", "dist/locale/" + lang_code + ".js")
return "<script src=\"{0}\"{1}></script>".format(href, crossorigin())
|
barrachri/epcon
|
p3/admin.py
|
Python
|
bsd-2-clause
| 27,045 | 0.00281 |
# -*- coding: UTF-8 -*-
from collections import defaultdict
from decimal import Decimal
from django import forms
from django import http
from
|
django.conf import settings
from django.conf.urls import url, patterns
from django.contrib import admin
from django.core import urlresolvers
from dj
|
ango.db.models import Q
from django.contrib.auth.models import User
from assopy import admin as aadmin
from assopy import models as amodels
from assopy import stats as astats
from assopy import utils as autils
from conference import admin as cadmin
from conference import models as cmodels
from conference import forms as cforms
from p3 import models
from p3 import dataaccess
from p3 import utils
### Customg list filters
class DiscountListFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = 'discounts'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'discounts'
def lookups(self, request, model_admin):
return (
('yes', 'With discounts'),
('no', 'Regular order'),
)
def queryset(self, request, queryset):
if self.value() == 'yes':
return queryset.filter(orderitem__price__lt=0)
elif self.value() == 'no':
return queryset.exclude(orderitem__price__lt=0)
###
_TICKET_CONFERENCE_COPY_FIELDS = ('shirt_size', 'python_experience', 'diet', 'tagline', 'days', 'badge_image')
def ticketConferenceForm():
class _(forms.ModelForm):
class Meta:
model = models.TicketConference
fields = '__all__'
fields = _().fields
class TicketConferenceForm(forms.ModelForm):
shirt_size = fields['shirt_size']
python_experience = fields['python_experience']
diet = fields['diet']
tagline = fields['tagline']
days = fields['days']
badge_image = fields['badge_image']
class Meta:
model = cmodels.Ticket
fields = '__all__'
def __init__(self, *args, **kw):
if 'instance' in kw:
o = kw['instance']
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
pass
else:
if p3c:
initial = kw.pop('initial', {})
for k in _TICKET_CONFERENCE_COPY_FIELDS:
initial[k] = getattr(p3c, k)
kw['initial'] = initial
return super(TicketConferenceForm, self).__init__(*args, **kw)
return TicketConferenceForm
class TicketConferenceAdmin(cadmin.TicketAdmin):
list_display = cadmin.TicketAdmin.list_display + (
'frozen',
'_order',
'_order_date',
'_assigned',
'_shirt_size',
'_diet',
'_python_experience',
#'_tagline',
)
list_select_related = True
list_filter = cadmin.TicketAdmin.list_filter + (
'fare__code',
'orderitem__order___complete',
'frozen',
'p3_conference__shirt_size',
'p3_conference__diet',
'p3_conference__python_experience',
'orderitem__order__created',
)
search_fields = cadmin.TicketAdmin.search_fields + (
'orderitem__order__code',
'fare__code',
)
actions = cadmin.TicketAdmin.actions + (
'do_assign_to_buyer',
'do_update_ticket_name',
)
form = ticketConferenceForm()
class Media:
js = ('p5/j/jquery-flot/jquery.flot.js',)
def _order(self, obj):
url = urlresolvers.reverse('admin:assopy_order_change',
args=(obj.orderitem.order.id,))
return '<a href="%s">%s</a>' % (url, obj.orderitem.order.code)
_order.allow_tags = True
def _order_date(self, o):
return o.orderitem.order.created
_order_date.admin_order_field = 'orderitem__order__created'
def _assigned(self, ticket):
if ticket.p3_conference:
assigned_to = ticket.p3_conference.assigned_to
if assigned_to:
comment = ''
user = None
try:
user = autils.get_user_account_from_email(assigned_to)
except User.MultipleObjectsReturned:
comment = ' (email not unique)'
except User.DoesNotExist:
try:
user = autils.get_user_account_from_email(assigned_to,
active_only=False)
except User.DoesNotExist:
comment = ' (does not exist)'
else:
comment = ' (user inactive)'
if user is not None:
url = urlresolvers.reverse('admin:auth_user_change',
args=(user.id,))
user_name = ('%s %s' %
(user.first_name, user.last_name)).strip()
if not user_name:
user_name = assigned_to
comment += ' (no name set)'
return '<a href="%s">%s</a>%s' % (url, user_name, comment)
elif not comment:
comment = ' (missing user account)'
return '%s%s' % (assigned_to, comment)
else:
return '(not assigned)'
else:
return '(old style ticket)'
_assigned.allow_tags = True
_assigned.admin_order_field = 'p3_conference__assigned_to'
def do_assign_to_buyer(self, request, queryset):
if not queryset:
self.message_user(request, 'no tickets selected', level='error')
return
for ticket in queryset:
# Assign to buyer
utils.assign_ticket_to_user(ticket, ticket.user)
do_assign_to_buyer.short_description = 'Assign to buyer'
def do_update_ticket_name(self, request, queryset):
if not queryset:
self.message_user(request, 'no tickets selected')
return
for ticket in queryset:
# Find selected user
if not ticket.p3_conference:
continue
assigned_to = ticket.p3_conference.assigned_to
try:
user = autils.get_user_account_from_email(assigned_to)
except User.MultipleObjectsReturned:
self.message_user(request,
'found multiple users with '
'email address %s' % assigned_to,
level='error')
return
except User.DoesNotExist:
self.message_user(request,
'no user record found or user inactive for '
' email address %s' % assigned_to,
level='error')
return
if user is None:
self.message_user(request,
'no user record found for '
' email address %s' % assigned_to,
level='error')
# Reassign to selected user
utils.assign_ticket_to_user(ticket, user)
do_update_ticket_name.short_description = 'Update ticket name'
def _shirt_size(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.shirt_size
def _diet(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.diet
def _python_experience(self, o):
try:
p3c = o.p3_conference
except models.TicketConference.DoesNotExist:
return ''
return p3c.python_experience
_python_experience.admin_order_field = 'p3_conference__python_experience'
def
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/generic/versionindependentpythonpackage.py
|
Python
|
gpl-2.0
| 4,223 | 0.002368 |
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.
Python installs libraries by defailt in site-packages/python-xxx/
But packages that are not dependend on the python version can be installed in a different prefix, e.g. lib
as long as we add this folder to the pythonpath.
@author: Kenneth Hoste, Jens Timmerman (Ghent University)
"""
import os
import re
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.pythonpackage import EASY_INSTALL_CMD, PythonPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class VersionIndependentPythonPackage(PythonPackage):
"""Support for building/installing python packages without requ
|
iring a specific python package."""
def build_step(self):
|
"""No build procedure."""
pass
def prepare_step(self):
"""Set pylibdir"""
self.pylibdir = 'lib'
super(VersionIndependentPythonPackage, self).prepare_step()
def install_step(self):
"""Custom install procedure to skip selection of python package versions."""
full_pylibdir = os.path.join(self.installdir, self.pylibdir)
env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))
try:
os.mkdir(full_pylibdir)
except OSError, err:
# this will raise an error and not return
raise EasyBuildError("Failed to install: %s", err)
if self.install_cmd.startswith(EASY_INSTALL_CMD):
self.cfg.update('installopts', '--install-dir=%s' % full_pylibdir)
else:
extra_installopts = [
'--install-lib=%s' % full_pylibdir,
'--single-version-externally-managed',
'--record %s' % os.path.join(self.builddir, 'record'),
'--no-compile',
]
self.cfg.update('installopts', ' '.join(extra_installopts))
cmd = self.compose_install_command(self.installdir)
run_cmd(cmd, log_all=True, simple=True, log_output=True)
# setuptools stubbornly replaces the shebang line in scripts with
# the full path to the Python interpreter used to install;
# we change it (back) to '#!/usr/bin/env python' here
shebang_re = re.compile("^#!/.*python")
bindir = os.path.join(self.installdir, 'bin')
if os.path.exists(bindir):
for script in os.listdir(bindir):
script = os.path.join(bindir, script)
if os.path.isfile(script):
try:
txt = open(script, 'r').read()
if shebang_re.search(txt):
new_shebang = "#!/usr/bin/env python"
self.log.debug("Patching shebang header line in %s to '%s'" % (script, new_shebang))
txt = shebang_re.sub(new_shebang, txt)
open(script, 'w').write(txt)
except IOError, err:
raise EasyBuildError("Failed to patch shebang header line in %s: %s", script, err)
|
mozaik-association/mozaik
|
mozaik_mandate/models/res_partner.py
|
Python
|
agpl-3.0
| 3,660 | 0.000273 |
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
_allowed_inactive_link_models = ["res.partner"]
_inactive_cascade = True
sta_mandate_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
sta_mandate_inactive_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates (Inactive)",
domain=[("active", "=", False)],
)
int_mandate_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
int_mandate_inactive_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
ext_mandate_inactive_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates (Inactive)",
domain=[("active", "=
|
", False)],
)
ext_mandate_count = fields.Integer(
string="External Mandates Nbr", compute="_compute_mandate_assembly_co
|
unt"
)
ext_assembly_count = fields.Integer(
string="External Assemblies", compute="_compute_mandate_assembly_count"
)
def get_mandate_action(self):
"""
return an action for an ext.mandate contains into the domain a
specific tuples to get concerned mandates
"""
self.ensure_one()
res_ids = self._get_assemblies()._get_mandates().ids
domain = [("id", "in", res_ids)]
# get model's action to update its domain
action = self.env["ir.actions.act_window"]._for_xml_id(
"mozaik_mandate.ext_mandate_action"
)
action["domain"] = domain
return action
def _get_assemblies(self):
"""
return the assemblies of the current partner
"""
self.ensure_one()
assembly_model = "ext.assembly"
if self.is_assembly:
field = "partner_id"
else:
field = "ref_partner_id"
domain = [(field, "=", self.id)]
assembly_obj = self.env[assembly_model]
assemblies = assembly_obj.search(domain)
return assemblies
def _compute_mandate_assembly_count(self):
"""
count the number of assemblies linked to the current partner
count the number of mandates linked to the assemblies of the
current partner
"""
for partner in self:
assemblies = partner._get_assemblies()
partner.ext_assembly_count = len(assemblies)
partner.ext_mandate_count = len(assemblies._get_mandates())
def add_mandate_action(self):
self.ensure_one()
return {
"type": "ir.actions.act_window",
"name": _("Add a new mandate"),
"res_model": self._context.get("mandate_model"),
"context": {"default_partner_id": self.id},
"view_mode": "form",
"target": "new",
}
|
ricotabor/opendrop
|
opendrop/app/common/image_acquisition/configurator/__init__.py
|
Python
|
gpl-2.0
| 1,447 | 0 |
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant dr
|
op tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what
|
purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from . import configurator, usb_camera, genicam
|
formicablu/digischool
|
schools/migrations/0004_auto__add_field_metrictype_unit.py
|
Python
|
gpl-2.0
| 2,337 | 0.007274 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MetricType.unit'
db.add_column(u'schools_metrictype', 'unit',
self.gf('django.db.models.fields.CharField')(default='', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MetricType.unit'
db.delete_column(u'schools_metrictype', 'unit')
models = {
u'schools.metric': {
'Meta': {'object_name': 'Metric'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['schools.MetricType']"}),
'metric_value': ('django.db.models.fields.TextField', [], {}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metrics'", 'to': u"orm['schools.School']"}),
'year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['schools.Year']"})
},
u'schools.metrictype': {
'Meta': {'object_name': 'MetricType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mtype': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'schools.school': {
'Meta': {'object_name': 'School'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.contrib.gis.db.models.fields.PointField', [], {})
},
u'schools.year
|
': {
'Meta': {'object_name': 'Year'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['schools']
|
MagicSolutions/django-email-subscription
|
setup.py
|
Python
|
mit
| 888 | 0 |
import os
from setuptools import setup, find_packages
R
|
EADME = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
os
|
.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-email-subscription',
url='https://github.com/MagicSolutions/django-email-subscription',
version='0.0.1',
description='Django app for creating subcription accoutns.',
long_description=README,
install_requires=[
'django-simple-captcha>=0.4.2',
],
packages=find_packages(),
package_data={'': ['LICENSE']},
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
)
|
EPiCS/soundgates
|
hardware/tools/to_rawdata.py
|
Python
|
mit
| 1,000 | 0.019 |
#!/usr/bin/env python
# -*- coding: ascii -*-
"""
package.module
~~~~~~~~~~~~~
A description which can be long and explain the complete
functionality of this module even with indented code examples.
Class/Function however should not be documented here.
:copyright: year by my name, see AUTHORS for more details
:license: license_name, see LICENSE for more details
"""
import struct
import sys
outputfilename = 'raw_audio.out'
def do_convert(filename):
""" """
try:
f_in = open(filename,
|
'r')
f_out = open(outputfilename, 'wb')
sample = 0
for line in f_in:
try:
sample = int(line)
data = struct.pack("i", sample) # pack integer in a binary string
f_out
|
.write(data)
except:
print "Cannot convert: " + line
finally:
f_in.close()
f_out.close()
if __name__=='__main__':
print "Converting..."
do_convert(sys.argv[1])
print "done. Written to " + outputfilename
|
varlib1/servermall
|
jenkins/test_jenkins.py
|
Python
|
bsd-3-clause
| 5,889 | 0.000849 |
# stdlib
from collections import defaultdict
import datetime
import logging
import os
import shutil
import tempfile
# 3p
import xml.etree.ElementTree as ET
# project
from tests.checks.common import AgentCheckTest
logger = logging.getLogger(__file__)
DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'
LOG_DATA = 'Finished: SUCCESS'
SUCCESSFUL_BUILD = {'number': '99', 'result': 'SUCCESS', 'duration': '60'}
NO_RESULTS_YET = {'number': '99'
|
, 'duration': '60'}
UNSUCCESSFUL_BUILD = {'number': '99', 'result': 'ABORTED', 'duration': '60'}
CONFIG = """
init_config:
instances:
- name: default
jenkins_home: <JENKINS_HOME>
"""
def dict_to_xml(metadata_dict):
""" Convert a dict to xml for use in a build.xml file """
build = ET.Element('build')
for k, v in metadata_dict.iteritems():
node = ET.SubElem
|
ent(build, k)
node.text = v
return ET.tostring(build)
def write_file(file_name, log_data):
with open(file_name, 'w') as log_file:
log_file.write(log_data)
class TestJenkins(AgentCheckTest):
CHECK_NAME = 'jenkins'
def setUp(self):
super(TestJenkins, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.config = {
'init_config': {},
'instances': [{
'name': 'default',
'jenkins_home': self.tmp_dir
}]
}
self.instance = self.config['instances'][0]
self.config_yaml = CONFIG.replace('<JENKINS_HOME>', self.tmp_dir)
self._create_old_build()
def tearDown(self):
super(TestJenkins, self).tearDown()
# Clean up the temp directory
shutil.rmtree(self.tmp_dir)
def _create_old_build(self):
# As coded, the jenkins dd agent needs more than one result
# in order to get the last valid build.
# Create one for yesterday.
metadata = dict_to_xml(SUCCESSFUL_BUILD)
yesterday = datetime.date.today() - datetime.timedelta(days=1)
self._populate_build_dir(metadata, yesterday)
def _create_check(self):
# Create the jenkins check
self.load_check(self.config)
def _populate_build_dir(self, metadata, time=None):
# The jenkins dd agent requires the build metadata file and a log file of results
time = time or datetime.datetime.now()
datestring = time.strftime(DATETIME_FORMAT)
build_dir = os.path.join(self.tmp_dir, 'jobs', 'foo', 'builds', datestring)
os.makedirs(build_dir)
log_file = os.path.join(build_dir, 'log')
log_data = LOG_DATA
write_file(log_file, log_data)
metadata_file = os.path.join(build_dir, 'build.xml')
build_metadata = metadata
write_file(metadata_file, build_metadata)
def testParseBuildLog(self):
"""
Test doing a jenkins check. This will parse the logs but since there was no
previous high watermark no event will be created.
"""
metadata = dict_to_xml(SUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
self.run_check(self.config)
# The check method does not return anything, so this testcase passes
# if the high_watermark was set and no exceptions were raised.
self.assertTrue(self.check.high_watermarks[self.instance['name']]['foo'] > 0)
def testCheckSuccessfulEvent(self):
"""
Test that a successful build will create the correct metrics.
"""
metadata = dict_to_xml(SUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
metrics_names = [m[0] for m in self.metrics]
assert len(metrics_names) == 2
assert 'jenkins.job.success' in metrics_names
assert 'jenkins.job.duration' in metrics_names
metrics_tags = [m[3] for m in self.metrics]
for tag in metrics_tags:
assert 'job_name:foo' in tag.get('tags')
assert 'result:SUCCESS' in tag.get('tags')
assert 'build_number:99' in tag.get('tags')
def testCheckUnsuccessfulEvent(self):
"""
Test that an unsuccessful build will create the correct metrics.
"""
metadata = dict_to_xml(UNSUCCESSFUL_BUILD)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
metrics_names = [m[0] for m in self.metrics]
assert len(metrics_names) == 2
assert 'jenkins.job.failure' in metrics_names
assert 'jenkins.job.duration' in metrics_names
metrics_tags = [m[3] for m in self.metrics]
for tag in metrics_tags:
assert 'job_name:foo' in tag.get('tags')
assert 'result:ABORTED' in tag.get('tags')
assert 'build_number:99' in tag.get('tags')
def testCheckWithRunningBuild(self):
"""
Test under the conditions of a jenkins build still running.
The build.xml file will exist but it will not yet have a result.
"""
metadata = dict_to_xml(NO_RESULTS_YET)
self._populate_build_dir(metadata)
self._create_check()
# Set the high_water mark so that the next check will create events
self.check.high_watermarks['default'] = defaultdict(lambda: 0)
self.run_check(self.config)
# The check method does not return anything, so this testcase passes
# if the high_watermark was NOT updated and no exceptions were raised.
assert self.check.high_watermarks[self.instance['name']]['foo'] == 0
|
arcapix/gpfsapi-examples
|
type_sizes_piechart.py
|
Python
|
mit
| 519 | 0.001927 |
from collections import Counter
from os.path import splitext
import matplotlib.pyplot as plt
from arcapix.fs.gpfs import ListProcessingRule, ManagementPolicy
def type_sizes(file_list):
c = Counter()
for f in file_list:
|
c.update({splitext(f.name): f.filesize})
|
return c
p = ManagementPolicy()
r = p.rules.new(ListProcessingRule, 'types', type_sizes)
result = p.run('mmfs1')['types']
plt.pie(list(result.values()), labels=list(result.keys()), autopct='%1.1f%%')
plt.axis('equal')
plt.show()
|
rdmorganiser/rdmo
|
rdmo/questions/migrations/0043_django2.py
|
Python
|
apache-2.0
| 1,023 | 0.00391 |
# Generated by Django 2.2rc1 on 2019-03-26 13:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0042_remove_null_true'),
]
operations = [
migrations.AlterModelOptions(
name='catalog',
options={'ordering': ('order',), 'verbose_name': 'Catalog', 'verbose_name_plural': 'Catalogs'},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ('questionset', 'order'), 'verbose_name': 'Question', 'verbose_name_plural': 'Questions'},
),
|
migrations.AlterModelOptions(
name='questionset',
options={'ordering': ('section', 'order'), 'verbose_name': 'Question set', 'verbose_name_plural': 'Question set'},
),
migrations.AlterM
|
odelOptions(
name='section',
options={'ordering': ('catalog__order', 'order'), 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.