text (string, 6-947k) | repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
import numpy as np
from astropy.coordinates import EarthLocation, SkyCoord
__all__ = ['MWA_LOC', 'MWA_FIELD_EOR0', 'MWA_FIELD_EOR1', 'MWA_FIELD_EOR2',
'MWA_FREQ_EOR_ALL_40KHZ', 'MWA_FREQ_EOR_ALL_80KHZ',
'MWA_FREQ_EOR_HI_40KHZ', 'MWA_FREQ_EOR_HI_80KHZ',
'MWA_FREQ_EOR_LOW_40KHZ', 'MWA_FREQ_EOR_LOW_80KHZ',
'HERA_ANT_DICT', 'F21']
# Rest frequency of the neutral hydrogen 21 cm line, in Hz.
F21 = 1420.405751786e6
# MWA site location (height in metres).
MWA_LOC = EarthLocation(lat='-26d42m11.94986s', lon='116d40m14.93485s',
height=377.827)
MWA_FIELD_EOR0 = SkyCoord(ra='0.0h', dec='-30.0d')
MWA_FIELD_EOR1 = SkyCoord(ra='4.0h', dec='-30.0d')
MWA_FIELD_EOR2 = SkyCoord(ra='10.33h', dec='-10.0d')
MWA_FREQ_EOR_LOW_40KHZ = np.arange(138.895, 167.055, 0.04)
MWA_FREQ_EOR_HI_40KHZ = np.arange(167.055, 195.255, 0.04)
MWA_FREQ_EOR_ALL_40KHZ = np.arange(138.895, 195.255, 0.04)
MWA_FREQ_EOR_LOW_80KHZ = np.arange(138.915, 167.075, 0.08)
MWA_FREQ_EOR_HI_80KHZ = np.arange(167.075, 195.275, 0.08)
MWA_FREQ_EOR_ALL_80KHZ = np.arange(138.915, 195.275, 0.08)
HERA_ANT_DICT = {'hera19': 3, 'hera37': 4, 'hera61': 5, 'hera91': 6,
'hera127': 7, 'hera169': 8, 'hera217': 9, 'hera271': 10,
'hera331': 11}
| piyanatk/sim | opstats/utils/settings.py | Python | mit | 1,196 | 0.000838 |
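
# Hypothetical usage sketch for the opstats/utils/settings.py module above
# (not part of the original file). It re-declares the two relevant constants
# so it runs stand-alone, and assumes the MWA_FREQ_* arrays are channel
# centres in MHz.
import numpy as np

F21 = 1420.405751786e6                         # 21 cm rest frequency [Hz]
band_mhz = np.arange(138.915, 195.275, 0.08)   # same values as MWA_FREQ_EOR_ALL_80KHZ
redshift = F21 / (band_mhz * 1e6) - 1.0        # z = f_rest / f_obs - 1
print('EoR band spans z ~ %.1f to %.1f' % (redshift.min(), redshift.max()))
# -> EoR band spans z ~ 6.3 to 9.2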
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import i18n
def test_sets_locale(monkeypatch):
locale_obj = pretend.stub()
locale_cls = pretend.stub(
parse=pretend.call_recorder(lambda l: locale_obj),
)
monkeypatch.setattr(i18n, "Locale", locale_cls)
request = pretend.stub(locale_name=pretend.stub())
assert i18n._locale(request) is locale_obj
assert locale_cls.parse.calls == [pretend.call(request.locale_name)]
def test_includeme():
config_settings = {}
config = pretend.stub(
add_request_method=pretend.call_recorder(lambda f, name, reify: None),
get_settings=lambda: config_settings,
)
i18n.includeme(config)
assert config.add_request_method.calls == [
pretend.call(i18n._locale, name="locale", reify=True),
]
assert config_settings == {
"jinja2.filters": {
"format_date": "warehouse.i18n.filters:format_date",
"format_datetime": "warehouse.i18n.filters:format_datetime",
"format_rfc822_datetime":
"warehouse.i18n.filters:format_rfc822_datetime",
"format_number": "warehouse.i18n.filters:format_number",
},
}
| alex/warehouse | tests/unit/i18n/test_init.py | Python | apache-2.0 | 1,717 | 0 |
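
# Hypothetical aside for the warehouse i18n tests above (not part of the
# original file): the `pretend` pattern they rely on. stub() builds an object
# with the given attributes, and call_recorder() records every invocation so
# a test can assert on the exact calls afterwards.
import pretend

fake = pretend.stub(greet=pretend.call_recorder(lambda name: "hi " + name))
assert fake.greet("bob") == "hi bob"
assert fake.greet.calls == [pretend.call("bob")]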
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
# Responsible for making a remote call to rhic_serve to fetch data for RHIC to Product mapping
#
import gzip
import httplib
import json
import logging
import time
import urllib
import StringIO
from django.conf import settings
from splice.common import config
from splice.common.connect import BaseConnection
from splice.common.exceptions import RequestException
_LOG = logging.getLogger(__name__)
def get_connection(host, port, cert, key, accept_gzip=False):
# Note: this method will be mocked out in unit tests
return BaseConnection(host, port, handler="", https=True, cert_file=cert, key_file=key, accept_gzip=accept_gzip)
def _form_url(url, last_sync=None, offset=None, limit=None):
query_params = {}
if last_sync:
query_params["modified_date__gt"] = last_sync,
if offset is not None:
query_params["offset"] = offset
if limit is not None:
query_params["limit"] = limit
if query_params:
data = urllib.urlencode(query_params, True)
url = url +"?" + data
return url
def get_single_rhic(host, port, url, uuid):
cfg = config.get_rhic_serve_config_info()
url = url + uuid + "/"
try:
conn = get_connection(host, port, cfg["client_cert"], cfg["client_key"])
return conn.GET(url)
except Exception, e:
_LOG.exception("Caught exception from 'get_single_rhic' with config info: %s" % (cfg))
raise
def get_all_rhics(host, port, url, last_sync=None, offset=None, limit=None, accept_gzip=True):
cfg = config.get_rhic_serve_config_info()
try:
conn = get_connection(host, port, cfg["client_cert"], cfg["client_key"], accept_gzip=accept_gzip)
url_with_params = _form_url(url, last_sync, offset, limit)
status, data = conn.GET(url_with_params)
if status == 200:
return data["objects"], data["meta"]
raise RequestException(status, data)
except Exception, e:
_LOG.exception("Caught exception from 'get_all_rhics' with config info: %s" % (cfg))
raise
if __name__ == "__main__":
from datetime import timedelta
from datetime import datetime
from dateutil.tz import tzutc
last_sync = datetime.now(tzutc()) - timedelta(days=30)
config.init(settings.SPLICE_CONFIG_FILE)
cfg = config.get_rhic_serve_config_info()
data, meta = get_all_rhics(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"],
offset=0, limit=1000,
last_sync=last_sync, accept_gzip=True)
print "--- Test Sync all RHICs ---"
print data
if len(data) > 0:
uuid = data[0]["uuid"]
print "\n---Test A Single RHIC ---\n"
print get_single_rhic(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"], uuid=uuid)
print "\n -- Test an unknown RHIC ---\n"
uuid = "1a1aa1aa-f6f4-45be-9d86-deb97a79d181"
print get_single_rhic(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"], uuid=uuid)
| splice/splice-server | src/splice/common/rhic_serve_client.py | Python | gpl-2.0 | 3,508 | 0.005703 |
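
# Hypothetical sketch for rhic_serve_client._form_url() above (not part of the
# original file). Host and path are made up; only the Python 2 standard
# library is used so the snippet runs on its own.
import urllib

params = {"offset": 0, "limit": 1000, "modified_date__gt": "2012-06-01T00:00:00"}
url = "https://rhic-serve.example/api/v1/rhics/" + "?" + urllib.urlencode(params, True)
print(url)
# e.g. https://rhic-serve.example/api/v1/rhics/?offset=0&modified_date__gt=2012-06-01T00%3A00%3A00&limit=1000
# (parameter order depends on dict iteration order)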
from __future__ import unicode_literals
import copy
from dateutil.relativedelta import relativedelta
import six
from dash.utils import get_month_range
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import fields as filter_fields
from . import utils
class FilterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.org = kwargs.pop('org')
super(FilterForm, self).__init__(*args, **kwargs)
# Create a shallow copy of the data to ensure that it is
# mutable. Some filters need the ability to overwrite the
# data that was passed in.
if self.data is not None:
self.data = copy.copy(self.data)
class Filter(six.with_metaclass(DeclarativeFieldsMetaclass, object)):
# The metaclass is what does the work to set up fields
# that are declared as attributes of the class.
pass
class DateRangeFilter(Filter):
DATE_WINDOW_CHOICES = (
('', ''),
('month', _("Current month")),
('30-days', _("Last 30 days")),
('60-days', _("Last 60 days")),
('90-days', _("Last 90 days")),
('6-months', _("Last 6 months")),
('12-months', _("Last 12 months")),
('custom', _("Custom range...")),
)
date_range = forms.ChoiceField(
label=_("Date range"),
choices=DATE_WINDOW_CHOICES)
start_date = filter_fields.FilterDateField(
label=_("Start date"),
required=False)
end_date = filter_fields.FilterDateField(
label=_("End date"),
required=False)
def clean(self):
self.cleaned_data = super(DateRangeFilter, self).clean()
window = self.cleaned_data.get('date_range')
if window == 'custom':
# Only apply additional checks if data did not have errors.
if 'start_date' not in self.errors and 'end_date' not in self.errors:
start_date = self.cleaned_data.get('start_date')
end_date = self.cleaned_data.get('end_date')
# Require at least one date filter.
if not start_date and not end_date:
self.add_error(
forms.ALL_FIELDS,
_("Please choose a start date or an end date."))
# Ensure date filter order makes sense.
elif (start_date and end_date) and start_date > end_date:
self.add_error(
'end_date',
_("End date must be after start date."))
# Set default values for start date and end date.
else:
self.cleaned_data.setdefault('start_date', None)
self.cleaned_data.setdefault('end_date', None)
self.data.setdefault('start_date', None)
self.data.setdefault('end_date', None)
else:
# Throw out user-submitted dates.
self.cleaned_data.pop('start_date', None)
self.cleaned_data.pop('end_date', None)
self.data.pop('start_date', None)
self.data.pop('end_date', None)
self._errors.pop('start_date', None)
self._errors.pop('end_date', None)
# Calculate the correct date window.
if window:
if window == 'month':
                # get_month_range() returns a tuple with datetimes representing
# midnight of the first day of the current month, and
# midnight of the first day of the following month.
start_date, end_date = get_month_range()
# Show the user the last day of the month,
# e.g., show June 1 to June 30 rather than June 1 to July 1.
end_date = end_date - relativedelta(days=1)
else:
number, unit = window.split('-') # e.g., 6-months
end_date = utils.midnight(timezone.now())
start_date = end_date - relativedelta(**{unit: int(number)})
self.cleaned_data['start_date'] = start_date
self.cleaned_data['end_date'] = end_date
self.data['start_date'] = start_date
self.data['end_date'] = end_date
# Pad the end_date by one day so that results for all times during
# the end_date are accounted for in the query.
end_date = self.cleaned_data.get('end_date')
if end_date is not None:
self.cleaned_data['end_date'] = end_date + relativedelta(days=1)
return self.cleaned_data
class DataFieldFilter(Filter):
def __init__(self, *args, **kwargs):
super(DataFieldFilter, self).__init__(*args, **kwargs)
self.contact_fields = []
for data_field in self.org.datafield_set.visible():
field_name = 'contact_{}'.format(data_field.key)
self.contact_fields.append((field_name, data_field))
self.fields[field_name] = forms.CharField(
label='Contact: {}'.format(data_field.display_name),
required=False)
def filter_contacts(self, queryset=None):
"""Filter queryset to match all contact field search input."""
contacts = queryset if queryset is not None else self.org.contacts.all()
for name, data_field in self.contact_fields:
value = self.cleaned_data.get(name)
if value:
contacts = contacts.filter(
contactfield__field=data_field,
contactfield__value__icontains=value)
return contacts
| xkmato/tracpro | tracpro/charts/filters.py | Python | bsd-3-clause | 5,748 | 0.000696 |
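
# Hypothetical sketch for DateRangeFilter.clean() above (not part of the
# original file): the "<number>-<unit>" window strings map directly onto
# dateutil.relativedelta keyword arguments.
from datetime import datetime
from dateutil.relativedelta import relativedelta

window = "6-months"                      # e.g. from DATE_WINDOW_CHOICES
number, unit = window.split("-")         # -> "6", "months"
end_date = datetime(2015, 7, 1)          # stand-in for midnight "now"
start_date = end_date - relativedelta(**{unit: int(number)})
print(start_date)                        # 2015-01-01 00:00:00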
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .notebook import run_notebook_hook
from .state import curstate
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'output_file',
'output_notebook',
'reset_output',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def output_file(filename, title="Bokeh Plot", mode=None, root_dir=None):
'''Configure the default output state to generate output saved
to a file when :func:`show` is called.
Does not change the current ``Document`` from ``curdoc()``. File and notebook
output may be active at the same time, so e.g., this does not clear the
effects of ``output_notebook()``.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked.
'''
curstate().output_file(
filename,
title=title,
mode=mode,
root_dir=root_dir
)
def output_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):
''' Configure the default output state to generate output in notebook cells
    when :func:`show` is called. Note that :func:`show` may be called multiple
times in a single cell to display multiple objects in the output cell. The
objects will be displayed in order.
Args:
resources (Resource, optional) :
How and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
load_timeout (int, optional) :
Timeout in milliseconds when plots assume load timed out (default: 5000)
notebook_type (string, optional):
Notebook type (default: jupyter)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
# verify notebook_type first in curstate().output_notebook
curstate().output_notebook(notebook_type)
run_notebook_hook(notebook_type, 'load', resources, verbose, hide_banner, load_timeout)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
curstate().reset()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ericmjl/bokeh | bokeh/io/output.py | Python | bsd-3-clause | 4,608 | 0.006293 |
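
# Hypothetical usage sketch for bokeh/io/output.py above (not part of the
# original file); assumes a standard Bokeh install, with figure() and show()
# coming from bokeh.plotting rather than this module.
from bokeh.plotting import figure, show
from bokeh.io import output_file, reset_output

output_file("lines.html", title="Demo", mode="cdn")
p = figure(title="simple line")
p.line([1, 2, 3, 4], [4, 7, 2, 5])
show(p)          # saves lines.html and opens it in a browser
reset_output()   # clear the file-output state again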
"""The tests for the pilight component."""
from datetime import timedelta
import logging
import socket
from unittest.mock import patch
from voluptuous import MultipleInvalid
from homeassistant.components import pilight
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
_LOGGER = logging.getLogger(__name__)
class PilightDaemonSim:
"""Class to fake the interface of the pilight python package.
    It is used in an asyncio loop, so the mock cannot be accessed
    to determine whether its methods were called.
This is solved here in a hackish way by printing errors
that can be checked using logging.error mocks.
"""
callback = None
called = None
test_message = {
"protocol": "kaku_switch",
"uuid": "1-2-3-4",
"message": {"id": 0, "unit": 0, "off": 1},
}
def __init__(self, host, port):
"""Init pilight client, ignore parameters."""
def send_code(self, call): # pylint: disable=no-self-use
"""Handle pilight.send service callback."""
_LOGGER.error("PilightDaemonSim payload: %s", call)
def start(self):
"""Handle homeassistant.start callback.
Also sends one test message after start up
"""
_LOGGER.error("PilightDaemonSim start")
# Fake one code receive after daemon started
if not self.called:
self.callback(self.test_message)
self.called = True
def stop(self): # pylint: disable=no-self-use
"""Handle homeassistant.stop callback."""
_LOGGER.error("PilightDaemonSim stop")
def set_callback(self, function):
"""Handle pilight.pilight_received event callback."""
self.callback = function
_LOGGER.error("PilightDaemonSim callback: %s", function)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_failed_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket error."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.error) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_timeout_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket timeout."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.timeout) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_no_protocol(hass):
"""Try to send data without protocol information, should give error."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call without protocol info, should raise an error
try:
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data={"noprotocol": "test", "value": 42},
blocking=True,
)
await hass.async_block_till_done()
except MultipleInvalid as error:
assert "required key not provided @ data['protocol']" in str(error)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code(mock_pilight_error, hass):
"""Try to send proper data."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
service_data["protocol"] = [service_data["protocol"]]
assert str(service_data) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_send_code_fail(mock_pilight_error, hass):
"""Check IOError exception error message."""
with assert_setup_component(4):
with patch("pilight.pilight.Client.send_code", side_effect=IOError):
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "Pilight send failed" in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_delay(mock_pilight_error, hass):
"""Try to send proper data with delay afterwards."""
with assert_setup_component(4):
assert await async_setup_component(
hass,
pilight.DOMAIN,
{pilight.DOMAIN: {pilight.CONF_SEND_DELAY: 5.0}},
)
# Call with protocol info, should not give error
service_data1 = {"protocol": "test11", "value": 42}
service_data2 = {"protocol": "test22", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data1,
blocking=True,
)
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data2,
blocking=True,
)
service_data1["protocol"] = [service_data1["protocol"]]
service_data2["protocol"] = [service_data2["protocol"]]
async_fire_time_changed(hass, dt_util.utcnow())
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data1) in str(error_log_call)
new_time = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, new_time)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data2) in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_start_stop(mock_pilight_error, hass):
"""Check correct startup and stop of pilight daemon."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
await hass.async_start()
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-2]
assert "PilightDaemonSim callback" in str(error_log_call)
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim start" in str(error_log_call)
# Test stop
with patch.object(hass.loop, "stop"):
await hass.async_stop()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim stop" in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_receive_code(mock_debug, hass):
"""Check if code receiving via pilight daemon works."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_exact_match(mock_debug, hass):
"""Check whitelist filter with matched data."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"uuid": [PilightDaemonSim.test_message["uuid"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
"unit": [PilightDaemonSim.test_message["message"]["unit"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_partial_match(mock_debug, hass):
"""Check whitelist filter with partially matched data, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_or_match(mock_debug, hass):
"""Check whitelist filter with several subsection, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [
PilightDaemonSim.test_message["protocol"],
"other_protocol",
],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_no_match(mock_debug, hass):
"""Check whitelist filter with unmatched data, should not work."""
with assert_setup_component(4):
whitelist = {
"protocol": ["wrong_protocol"],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
debug_log_call = mock_debug.call_args_list[-3]
assert not ("Event pilight_received" in debug_log_call)
async def test_call_rate_delay_throttle_enabled(hass):
"""Test that throttling actually work."""
runs = []
delay = 5.0
limit = pilight.CallRateDelayThrottle(hass, delay)
action = limit.limited(lambda x: runs.append(x))
for i in range(3):
await hass.async_add_executor_job(action, i)
await hass.async_block_till_done()
assert runs == [0]
exp = []
now = dt_util.utcnow()
for i in range(3):
exp.append(i)
shifted_time = now + (timedelta(seconds=delay + 0.1) * i)
async_fire_time_changed(hass, shifted_time)
await hass.async_block_till_done()
assert runs == exp
def test_call_rate_delay_throttle_disabled(hass):
"""Test that the limiter is a noop if no delay set."""
runs = []
limit = pilight.CallRateDelayThrottle(hass, 0.0)
action = limit.limited(lambda x: runs.append(x))
for i in range(3):
action(i)
assert runs == [0, 1, 2]
| partofthething/home-assistant | tests/components/pilight/test_init.py | Python | apache-2.0 | 14,678 | 0.000409 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of OpenSND.
# OpenSND is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenSND is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenSND. If not, see <http://www.gnu.org/licenses/>.
from common import *
from objects import *
from images import *
from fonts import *
from sounds import *
from PySide.QtCore import Qt
import os
class Frame2(Frame):
name = 'Login'
index = 1
width = 800
height = 600
background = (0, 0, 0)
def initialize(self):
self.create_object(ValidChar_22, 123, 620)
self.create_object(Msg_16, 303, 699)
self.create_object(VerCheck_25, 337, 712)
self.create_object(IrcNick_27, 466, -94)
self.create_object(Newid_28, -2, -66)
self.create_object(Channel1_36, -232, 435)
self.create_object(Title_4, 0, 0)
self.create_object(GlobalName_6, 3, -48)
self.create_object(CheckVersion_8, 180, -42)
self.create_object(Ip2_9, 432, -109)
self.create_object(Port2_10, 462, -72)
self.create_object(Connect_11, 400, 455)
self.create_object(String2_12, 9, 575)
self.create_object(MooSock_13, 567, -65)
self.create_object(CheckUser_14, 666, -34)
self.create_object(StringParser_15, 700, -62)
self.create_object(Timeout_18, 371, -24)
self.create_object(RemoteIP_20, 155, -95)
self.create_object(EigeneIP_21, 173, -121)
self.create_object(ScreenshotNr_23, 588, 621)
self.create_object(Version_26, 532, 659)
self.create_object(SvrKills_29, 68, -112)
self.create_object(SvrDeaths_30, 66, -92)
self.create_object(SvrPoints_31, 63, -73)
self.create_object(SvrKills2_32, 116, -112)
self.create_object(SvrDeaths2_33, 113, -90)
self.create_object(SvrPoints2_34, 113, -71)
self.create_object(String12_37, 51, 515)
self.create_object(String11_17, 50, 514)
self.create_object(BinaryObject_38, 312, -123)
self.create_object(Name_5, 280, 394)
self.create_object(Ini_7, 150, -71)
self.create_object(Edit_19, 31, 641)
self.create_object(Edit2_24, 294, 655)
self.groups = {
'links' : True,
'Check name' : False,
'Get Info' : False,
'Access' : False,
'Check version' : False,
}
def on_start(self):
self.set_event_id(1)
self.get(Name_5).limit_size(15)
self.get(Name_5).set_focus(True)
self.get(Ini_7).set_filename((os.getcwd()+'\\')+'data.ini')
self.get(Ini_7).set_group('Data')
self.get(Ini_7).set_item('Name')
self.get(Name_5).set_value(left_string(self.get(Ini_7).get(), 15))
self.get(Ini_7).set_item('UID')
self.get(Newid_28).set_value(self.get(Ini_7).get())
self.values[10] = 0
self.values[6] = 0
self.get(Connect_11).values[0] = 0
self.show_cursor()
self.get(StringParser_15).add_delimiter('\r\n')
self.get(Edit2_24).load_file('Screenshot.pak')
add_encryption_key('\xf88\xfa2J\xdb\xae\x91=\xd5.\x99\xb3_y\x7f/U%0C\xd9')
self.set_event_id(2)
self.get(ScreenshotNr_23).set_value(self.get(Edit2_24).get_number())
self.set_event_id(3)
if self.get(Newid_28).text == '':
self.get(Newid_28).set_value('0')
self.set_event_id(4)
if self.get(Ini_7).get_value_item('Footsteps') == 1:
self.players[1].lives = 1
self.set_event_id(5)
if self.get(Ini_7).get_value_item('Footsteps') != 1:
self.players[1].lives = 0
self.set_event_id(6)
if (self.get(Ini_7).get_value_item('Music') == 1 and
self.get_global_value(0) == 0):
self.values[0] = 1
self.values[12] = 1
self.set_mod_volume(0, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(1, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(2, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(3, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(4, self.get(Ini_7).get_value_item('MusicVolume'))
self.cross_fade_mod(0, 0, 3000)
self.set_event_id(7)
if self.get(Name_5).get_value() == '':
self.get(Name_5).set_value('Player')
pass
def loop_name(self):
self.set_event_id(21)
if (self.groups['Check name'] and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '[' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != ']' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '!' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '$' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '+' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '*' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != "'" and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '#' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '/' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '\\' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '|'):
self.get(IrcNick_27).set_value(self.get(IrcNick_27).text+mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1))
self.set_event_id(22)
for loop_index in xrange(len(self.get(ValidChar_22).text)):
self.loop_indexes['ValidChar'] = loop_index
if self.loop_valid_char() == False: break
pass
def loop_valid_char(self):
self.set_event_id(23)
if (self.groups['Check name'] and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) == mid_string(self.get(ValidChar_22).text, self.get_loop_index('ValidChar'), 1)):
return False # 'ValidChar'
self.set_event_id(24)
if (self.groups['Check name'] and
self.get_loop_index('ValidChar') == len(self.get(ValidChar_22).text)-1 and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) == ' '):
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value('Space is an invalid character, use _ instead')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value('Space is an invalid character, use _ instead')
self.set_event_id(25)
if (self.groups['Check name'] and
self.get_loop_index('ValidChar') == len(self.get(ValidChar_22).text)-1 and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != ' '):
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value(mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1)+' is an invalid character')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value(mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1)+' is an invalid character')
pass
def on_mouse_press(self, x, y, button):
if self.get(Connect_11).is_over(x, y):
self.set_event_id(9)
if (select(self.get(CheckUser_14).get_value() == 0) and
select(self.get(Connect_11).values.get(0, 0) == 0) and
self.get(Name_5).get_value() != ''):
self.get(String11_17).set_value('')
self.get(Connect_11).values[0] = 1
self.get(CheckUser_14).set_value(3)
self.get(Connect_11).restore_animation()
self.get(Connect_11).set_transparency(70)
self.get(Edit_19).set_value('GET http://www.seekanddread.de/Game/login.php'+' HTTP/1.0'+'\r\n'+'Host: www.seekanddread.de'+'\r\n'+'From: webmaster@seekanddread.de'+'\r\n'+'User-Agent: HTTPTool/1.0'+'\r\n'+'\r\n')
self.groups['Check name'] = True
self.get(Name_5).set_read_only(True)
self.get(IrcNick_27).set_value('')
self.get(String12_37).set_value('')
self.set_event_id(10)
if (select(self.get(CheckUser_14).get_value() == 0) and
select(self.get(Connect_11).values.get(0, 0) == 0) and
self.get(Name_5).get_value() == ''):
self.get(String11_17).set_value('Enter a name')
self.get(String12_37).set_value('Enter a name')
pass
def on_sock_receive(self, instance):
if type(instance) == MooSock_13:
self.set_event_id(16)
self.get(Edit_19).set_value(self.get(MooSock_13).get_bytes(1024))
self.groups['Get Info'] = True
self.get(Timeout_18).set_value(4)
pass
def on_sock_connect(self, instance):
if type(instance) == MooSock_13:
self.set_event_id(13)
self.get(MooSock_13).send_text(self.get(Edit_19).get_value())
pass
def on_sock_disconnect(self, instance):
if type(instance) == MooSock_13:
self.set_event_id(15)
self.get(Connect_11).flags[1] = False
pass
def on_sock_connection(self, instance):
if type(instance) == MooSock_13:
self.set_event_id(14)
self.get(Connect_11).flags[1] = True
self.get(MooSock_13).accept()
pass
def update(self, dt):
self.set_event_id(8)
if (Qt.Key_Escape in self.scene.key_presses and
select(self.get(CheckUser_14).get_value() == 0)):
self.end_application()
self.set_event_id(11)
if (self.every(1.0) and
select(self.get(Timeout_18).get_value() > 0)):
self.get(Timeout_18).subtract_value(1)
self.set_event_id(12)
if select(self.get(Timeout_18).get_value() == 0):
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value("Can't connect to server please try again later")
self.get(Timeout_18).set_value(-1)
self.get(MooSock_13).disconnect()
self.get(CheckUser_14).set_value(0)
self.get(String12_37).set_value("Can't connect to server please try again later")
if self.groups['Check name']:
self.set_event_id(18)
if len(self.get(Name_5).get_value()) < 5:
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value('Your name must have at least 5 characters')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value('Your name must have at least 5 characters')
self.set_event_id(19)
if self.get(Name_5).get_value() == 'Admin':
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value('Name is reserved')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value('Name is reserved')
self.set_event_id(20)
if True:
for loop_index in xrange(len(self.get(Name_5).get_value())):
self.loop_indexes['Name'] = loop_index
if self.loop_name() == False: break
self.set_event_id(26)
if True:
self.get(MooSock_13).connect('www.seekanddread.de', 80)
self.groups['Check name'] = False
self.get(Timeout_18).set_value(10)
if self.groups['links']:
self.set_event_id(29)
if (self.get(Connect_11).mouse_over() and
select(self.get(Connect_11).values.get(0, 0) == 0)):
self.get(Connect_11).force_animation('User defined 1')
self.set_event_id(30)
if (negate(self.get(Connect_11).mouse_over()) and
select(self.get(Connect_11).values.get(0, 0) == 0)):
self.get(Connect_11).restore_animation()
if self.groups['Get Info']:
self.set_event_id(33)
if (self.every(1.0) and
select(self.get(Timeout_18).get_value() > 1)):
self.get(Timeout_18).subtract_value(1)
self.set_event_id(34)
if select(self.get(Timeout_18).get_value() == 1):
self.get(StringParser_15).set_value(self.get(Edit_19).get_value())
self.get(Msg_16).set_value(self.get(StringParser_15).get_element(-1))
self.get(BinaryObject_38).insert(self.get(Msg_16).text, 0)
self.get(BinaryObject_38).replace('-', '+')
self.get(BinaryObject_38).replace('_', '/')
self.get(BinaryObject_38).replace('.', '=')
self.get(BinaryObject_38).replace('\n', '')
self.get(BinaryObject_38).replace('\r', '')
self.get(BinaryObject_38).decode_base64()
self.get(Msg_16).set_value(self.get(BinaryObject_38).get_string(0, self.get(BinaryObject_38).get_size()))
self.get(StringParser_15).clear_delimiters()
self.get(StringParser_15).add_delimiter(',')
self.get(StringParser_15).set_value(self.get(Msg_16).text)
self.get(RemoteIP_20).set_value(self.get(StringParser_15).get_element(-1 + 1))
self.get(Ip2_9).set_value(self.get(StringParser_15).get_element(-1 + 2))
self.get(Port2_10).set_value(self.get(StringParser_15).get_element(-1 + 3))
self.get(VerCheck_25).set_value(self.get(StringParser_15).get_element(-1 + 5))
self.get(Ini_7).set_group_item_value('Data', 'UID', self.get(StringParser_15).get_element(-1 + 6))
self.get(GlobalName_6).set_value(self.get(Name_5).get_value())
self.get(EigeneIP_21).set_value(self.get(MooSock_13).get_local_ip())
self.get(SvrKills_29).set_value(to_number(self.get(StringParser_15).get_element(-1 + 7)))
self.get(SvrKills2_32).set_value(to_number(self.get(StringParser_15).get_element(-1 + 7)))
self.get(SvrDeaths_30).set_value(to_number(self.get(StringParser_15).get_element(-1 + 8)))
self.get(SvrDeaths2_33).set_value(to_number(self.get(StringParser_15).get_element(-1 + 8)))
self.get(SvrPoints_31).set_value(to_number(self.get(StringParser_15).get_element(-1 + 9)))
self.get(SvrPoints2_34).set_value(to_number(self.get(StringParser_15).get_element(-1 + 9)))
self.get(Newid_28).set_value(self.get(StringParser_15).get_element(-1 + 6))
self.get(Channel1_36).set_value(self.get(StringParser_15).get_element(-1 + 10))
self.get(MooSock_13).disconnect()
self.groups['Get Info'] = False
self.groups['Check version'] = True
if self.groups['Access']:
self.set_event_id(37)
if (self.every(1.0) and
select(self.get(Timeout_18).get_value() > 1)):
self.get(Timeout_18).subtract_value(1)
self.set_event_id(38)
if select(self.get(Timeout_18).get_value() == 1):
self.values[3] = self.get(SvrKills2_32).get_value()
self.values[14] = self.get(SvrDeaths2_33).get_value()
self.values[15] = self.get(SvrPoints2_34).get_value()
self.get(Ini_7).set_group_item_value('Data', 'Name', self.get(Name_5).get_value())
self.set_frame(2)
if self.groups['Check version']:
self.set_event_id(41)
if self.get(VerCheck_25).text == '1.44':
self.groups['Access'] = True
self.get(Timeout_18).set_value(4)
self.get(String11_17).set_color((107, 199, 103))
self.get(String11_17).set_value(self.get(StringParser_15).get_element(-1 + 4))
self.groups['Check version'] = False
self.get(String12_37).set_value(self.get(StringParser_15).get_element(-1 + 4))
self.set_event_id(42)
if self.get(VerCheck_25).text != '1.44':
self.groups['Check version'] = False
self.get(String11_17).set_color((255, 0, 0))
self.get(String11_17).set_value("You don't have the latest version, download version "+self.get(StringParser_15).get_element(-1 + 5)+' at http://www.seekanddread.de !')
self.get(Timeout_18).set_value(-1)
self.get(CheckUser_14).set_value(0)
self.get(String12_37).set_value("You don't have the latest version, download version "+self.get(StringParser_15).get_element(-1 + 5)+' at http://www.seekanddread.de !')
pass
| matpow2/opensnd | frame2.py | Python | gpl-3.0 | 18,104 | 0.005689 |
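
# Hypothetical plain-Python sketch of the name validation that
# loop_name()/loop_valid_char() in frame2.py above implement with nested
# event loops (not part of the original file). The whitelist contents are an
# assumption; the real ValidChar_22 text is defined elsewhere in the game data.
VALID_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"

def clean_name(name):
    for ch in name:
        if ch == " ":
            return None, "Space is an invalid character, use _ instead"
        if ch not in VALID_CHARS:
            return None, "%s is an invalid character" % ch
    return name, ""

print(clean_name("Player_1"))   # ('Player_1', '')
print(clean_name("Bad Name"))   # (None, 'Space is an invalid character, use _ instead')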
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os, cv2, csv_utils, shutil
import numpy as np
import collections
# logging
import logging
logger = logging.getLogger(__name__)
from file_methods import save_object, load_object
from version_utils import VersionFormat
from version_utils import read_rec_version
def correlate_data(data,timestamps):
'''
data: list of data :
each datum is a dict with at least:
timestamp: float
timestamps: timestamps list to correlate data to
    This takes a data list and a timestamps list and makes a new list
    with the length of the number of timestamps.
    Each slot contains a list that will have 0, 1 or more associated data points.
    Finally we add an index field to the datum with the associated index.
'''
timestamps = list(timestamps)
data_by_frame = [[] for i in timestamps]
frame_idx = 0
data_index = 0
data.sort(key=lambda d: d['timestamp'])
while True:
try:
datum = data[data_index]
# we can take the midpoint between two frames in time: More appropriate for SW timestamps
ts = ( timestamps[frame_idx]+timestamps[frame_idx+1] ) / 2.
            # or the time of the next frame: More appropriate for Start of Exposure timestamps (HW timestamps).
# ts = timestamps[frame_idx+1]
except IndexError:
            # we might lose a data point at the end, but we don't care
break
if datum['timestamp'] <= ts:
datum['index'] = frame_idx
data_by_frame[frame_idx].append(datum)
data_index +=1
else:
frame_idx+=1
return data_by_frame
def update_recording_to_recent(rec_dir):
meta_info = load_meta_info(rec_dir)
update_meta_info(rec_dir,meta_info)
# Reference format: v0.7.4
rec_version = read_rec_version(meta_info)
# Convert python2 to python3
if rec_version < VersionFormat('0.8.7'):
update_recording_bytes_to_unicode(rec_dir)
if rec_version >= VersionFormat('0.7.4'):
pass
elif rec_version >= VersionFormat('0.7.3'):
update_recording_v073_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.5'):
update_recording_v05_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.4'):
update_recording_v04_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.3'):
update_recording_v03_to_v074(rec_dir)
else:
        logger.error("This recording is too old. Sorry.")
return
# Incremental format updates
if rec_version < VersionFormat('0.8.2'):
update_recording_v074_to_v082(rec_dir)
if rec_version < VersionFormat('0.8.3'):
update_recording_v082_to_v083(rec_dir)
if rec_version < VersionFormat('0.8.6'):
update_recording_v083_to_v086(rec_dir)
if rec_version < VersionFormat('0.8.7'):
update_recording_v086_to_v087(rec_dir)
if rec_version < VersionFormat('0.9.1'):
update_recording_v087_to_v091(rec_dir)
# How to extend:
# if rec_version < VersionFormat('FUTURE FORMAT'):
# update_recording_v081_to_FUTURE(rec_dir)
def load_meta_info(rec_dir):
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
return meta_info
def update_meta_info(rec_dir, meta_info):
logger.info('Updating meta info')
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'w',newline='') as csvfile:
csv_utils.write_key_value_file(csvfile,meta_info)
def update_recording_v074_to_v082(rec_dir):
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.2'
update_meta_info(rec_dir,meta_info)
def update_recording_v082_to_v083(rec_dir):
logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
for d in pupil_data['gaze_positions']:
if 'base' in d:
d['base_data'] = d.pop('base')
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.3'
update_meta_info(rec_dir,meta_info)
def update_recording_v083_to_v086(rec_dir):
logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
for topic in pupil_data.keys():
for d in pupil_data[topic]:
d['topic'] = topic
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.6'
update_meta_info(rec_dir,meta_info)
def update_recording_v086_to_v087(rec_dir):
logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
def _clamp_norm_point(pos):
        '''Realistic values for norm_pos should be in this range.
        Grossly bigger or smaller numbers are the result of bad extrapolation
        and can cause an overflow error when denormalized and cast as int32.
'''
return min(100,max(-100,pos[0])),min(100,max(-100,pos[1]))
for g in pupil_data.get('gaze_positions', []):
if 'topic' not in g:
#we missed this in one gaze mapper
g['topic'] = 'gaze'
g['norm_pos'] = _clamp_norm_point(g['norm_pos'])
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.7'
update_meta_info(rec_dir,meta_info)
def update_recording_v087_to_v091(rec_dir):
logger.info("Updating recording from v0.8.7 format to v0.9.1 format")
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.9.1'
update_meta_info(rec_dir,meta_info)
def update_recording_bytes_to_unicode(rec_dir):
logger.info("Updating recording from bytes to unicode.")
# update to python 3
meta_info_path = os.path.join(rec_dir, "info.csv")
def convert(data):
if isinstance(data, bytes):
return data.decode()
elif isinstance(data, str) or isinstance(data, np.ndarray):
return data
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
for file in os.listdir(rec_dir):
if file.startswith('.') or os.path.splitext(file)[1] == '.mp4':
continue
rec_file = os.path.join(rec_dir, file)
try:
rec_object = load_object(rec_file)
converted_object = convert(rec_object)
if converted_object != rec_object:
logger.info('Converted `{}` from bytes to unicode'.format(file))
save_object(rec_object, rec_file)
except (ValueError, IsADirectoryError):
continue
with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
with open(meta_info_path, 'w', newline='') as csvfile:
csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v073_to_v074(rec_dir):
logger.info("Updating recording from v0.7x format to v0.7.4 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
modified = False
for p in pupil_data['pupil_positions']:
if p['method'] == "3D c++":
p['method'] = "3d c++"
try:
p['projected_sphere'] = p.pop('projectedSphere')
except:
p['projected_sphere'] = {'center':(0,0),'angle':0,'axes':(0,0)}
p['model_confidence'] = p.pop('modelConfidence')
p['model_id'] = p.pop('modelID')
p['circle_3d'] = p.pop('circle3D')
p['diameter_3d'] = p.pop('diameter_3D')
modified = True
if modified:
save_object(load_object(os.path.join(rec_dir, "pupil_data")),os.path.join(rec_dir, "pupil_data_old"))
try:
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
except IOError:
pass
def update_recording_v05_to_v074(rec_dir):
logger.info("Updating recording from v0.5x/v0.6x/v0.7x format to v0.7.4 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
save_object(pupil_data,os.path.join(rec_dir, "pupil_data_old"))
for p in pupil_data['pupil_positions']:
p['method'] = '2d python'
try:
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
except IOError:
pass
def update_recording_v04_to_v074(rec_dir):
logger.info("Updating recording from v0.4x format to v0.7.4 format")
gaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
pupil_array = np.load(os.path.join(rec_dir,'pupil_positions.npy'))
gaze_list = []
pupil_list = []
for datum in pupil_array:
ts, confidence, id, x, y, diameter = datum[:6]
pupil_list.append({'timestamp':ts,'confidence':confidence,'id':id,'norm_pos':[x,y],'diameter':diameter,'method':'2d python'})
pupil_by_ts = dict([(p['timestamp'],p) for p in pupil_list])
for datum in gaze_array:
ts,confidence,x,y, = datum
gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[x,y],'base':[pupil_by_ts.get(ts,None)]})
pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
try:
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
except IOError:
pass
def update_recording_v03_to_v074(rec_dir):
logger.info("Updating recording from v0.3x format to v0.7.4 format")
pupilgaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
gaze_list = []
pupil_list = []
for datum in pupilgaze_array:
gaze_x,gaze_y,pupil_x,pupil_y,ts,confidence = datum
#some bogus size and confidence as we did not save it back then
pupil_list.append({'timestamp':ts,'confidence':confidence,'id':0,'norm_pos':[pupil_x,pupil_y],'diameter':50,'method':'2d python'})
gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[gaze_x,gaze_y],'base':[pupil_list[-1]]})
pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
try:
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
except IOError:
pass
ts_path = os.path.join(rec_dir,"world_timestamps.npy")
ts_path_old = os.path.join(rec_dir,"timestamps.npy")
if not os.path.isfile(ts_path) and os.path.isfile(ts_path_old):
os.rename(ts_path_old, ts_path)
def is_pupil_rec_dir(rec_dir):
if not os.path.isdir(rec_dir):
logger.error("No valid dir supplied")
return False
try:
meta_info = load_meta_info(rec_dir)
meta_info["Capture Software Version"] # Test key existence
except:
logger.error("Could not read info.csv file: Not a valid Pupil recording.")
return False
return True
def transparent_circle(img,center,radius,color,thickness):
center = tuple(map(int,center))
rgb = [255*c for c in color[:3]] # convert to 0-255 scale for OpenCV
alpha = color[-1]
radius = int(radius)
if thickness > 0:
pad = radius + 2 + thickness
else:
pad = radius + 3
roi = slice(center[1]-pad,center[1]+pad),slice(center[0]-pad,center[0]+pad)
try:
overlay = img[roi].copy()
cv2.circle(img,center,radius,rgb, thickness=thickness, lineType=cv2.LINE_AA)
opacity = alpha
cv2.addWeighted(src1=img[roi], alpha=opacity, src2=overlay, beta=1. - opacity, gamma=0, dst=img[roi])
except:
logger.debug("transparent_circle would have been partially outside of img. Did not draw it.")
def transparent_image_overlay(pos,overlay_img,img,alpha):
"""
Overlay one image with another with alpha blending
In player this will be used to overlay the eye (as overlay_img) over the world image (img)
Arguments:
        pos: (x,y) position of the top left corner, in numpy row,column format (numpy coord system)
overlay_img: image to overlay
img: destination image
alpha: 0.0-1.0
"""
roi = slice(pos[1],pos[1]+overlay_img.shape[0]),slice(pos[0],pos[0]+overlay_img.shape[1])
try:
cv2.addWeighted(overlay_img,alpha,img[roi],1.-alpha,0,img[roi])
except:
logger.debug("transparent_image_overlay was outside of the world image and was not drawn")
pass
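
# --- Hypothetical usage sketch added for illustration; not part of the
# original player_methods module. It exercises correlate_data() with the
# midpoint binning described in its docstring.
if __name__ == '__main__':
    frame_ts = [0.0, 1.0, 2.0, 3.0]
    gaze = [{'timestamp': 0.4}, {'timestamp': 0.6}, {'timestamp': 2.9}]
    print(correlate_data(gaze, frame_ts))
    # -> [[{'timestamp': 0.4, 'index': 0}], [{'timestamp': 0.6, 'index': 1}], [], []]
    # The 2.9 datum is dropped: there is no later frame to form a midpoint with.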
| fsxfreak/esys-pbi | src/pupil/pupil_src/player/player_methods.py | Python | mit | 13,738 | 0.012957 |
import errno
import os
from setuptools import Extension
from paver.easy import *
from paver.path import path
from paver.setuputils import setup
setup(
name="notifier",
description="A pre-connected mesh of servers for fast internal RPC",
version="1.0",
license="bsd",
author="Libor Michalek",
author_email="libor@pobox.com",
packages=["notifier"],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python",
"Topic :: System :: Networking",
],
install_requires=['gogreen', 'wirebin'],
)
MANIFEST = (
"LICENSE",
"setup.py",
"paver-minilib.zip",
)
@task
def manifest():
path('MANIFEST.in').write_lines('include %s' % x for x in MANIFEST)
@task
@needs('generate_setup', 'minilib', 'manifest', 'setuptools.command.sdist')
def sdist():
pass
@task
def clean():
for p in map(path, ('notifier.egg-info', 'dist', 'build', 'MANIFEST.in')):
if p.exists():
if p.isdir():
p.rmtree()
else:
p.remove()
for p in path(__file__).abspath().parent.walkfiles():
if p.endswith(".pyc") or p.endswith(".pyo"):
try:
p.remove()
except OSError, exc:
if exc.args[0] == errno.EACCES:
continue
raise
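
# Usage note (added for illustration, not part of the original pavement file):
# with Paver installed, these tasks are run from this directory on the command
# line, e.g. ``paver sdist`` to build the source distribution or ``paver clean``
# to remove build artifacts.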
| slideinc/notifier | pavement.py | Python | bsd-3-clause | 1,537 | 0.003253 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# Copyright 2022 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manifest-required-author
{
"name": "HR Timesheet - Workflow Policy",
"version": "8.0.1.0.0",
"category": "Human Resource",
"website": "https://simetri-sinergi.id",
"author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr_attendance_configuration_page",
"hr_timesheet_sheet",
],
"data": [
"views/hr_attendance_config_setting_views.xml",
"views/hr_timesheet_sheet_views.xml",
],
}
| open-synergy/opnsynid-hr | hr_timesheet_workflow_policy/__openerp__.py | Python | agpl-3.0 | 733 | 0 |
# -*- Python -*-
#
# Jiao Lin <jiao.lin@gmail.com>
#
plots_table = """
exp exp-sqe.h5
sim-singlephonon sp-sqe.h5
sim-multiphonon mp-sqe.h5
sim-multiple-scattering ms-sqe.h5
sim-correction sqe_correction.h5
exp-corrected-single-phonon corrected_sqe.h5
sim-total-inel total-inel-sqe.h5
exp-residual residual-sqe.h5
"""
plot_intermediate_result_sqe_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,9
from multiphonon.backward.plotutils import plot_intermediate_result_sqe as plot
plot(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
plot_intermediate_result_se_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,9
from multiphonon.backward.plotutils import plot_intermediate_result_se as plot
plot(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
plot_dos_iteration_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 6,4.5
from multiphonon.backward.plotutils import plot_dos_iteration
plot_dos_iteration(curdir, %(total_rounds)d)
from matplotlib import pyplot as plt
plt.show()
"""
plot_residual_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
from multiphonon.backward.plotutils import plot_residual
plot_residual(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
| sns-chops/multiphonon | multiphonon/backward/_sqe2dos_script_templates.py | Python | mit | 1,493 | 0 |
class Solution:
"""
@param k: The number k.
@return: The kth prime number as description.
"""
def kthPrimeNumber(self, k):
# write your code here
q = [1]
i3 = i5 = i7 = 0
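        # Ugly-number style merge: q holds, in increasing order, 1 followed by the
        # numbers whose only prime factors are 3, 5 and 7; i3/i5/i7 index the next
        # element of q still to be multiplied by 3, 5 and 7 respectively.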
while len(q)-1 < k:
m3, m5, m7 = q[i3] * 3, q[i5] * 5, q[i7] * 7
m = min(m3, m5, m7)
if m == m3:
i3 += 1
if m == m5:
i5 += 1
if m == m7:
i7 += 1
q.append(m)
return q[-1]
a = Solution()
print a.kthPrimeNumber(5)
| quake0day/oj | kthPrimeNumber.py | Python | mit | 527 | 0.043643
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value, it's hinted from the activation distribution
# that the majority of activation values are in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
@contrib_framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@contrib_framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@contrib_framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(input_tensor=x, axis=[1, 2])
else:
return tf.reduce_mean(input_tensor=x, axis=[2, 3])
@contrib_framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.compat.v2.nn.avg_pool2d(
input=net,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.compat.v2.nn.avg_pool2d(
input=path2,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
@contrib_framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(input=net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
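    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob and 0 otherwise,
    # so each example in the batch is kept or dropped as a whole.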
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
  An example would be 'separable_3x3_4' -> (4, 3).
Args:
operation: String that corresponds to convolution operation.
Returns:
    Tuple of (num layers, filter shape).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._use_bounded_activation = use_bounded_activation
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
if curr_filter_shape != prev_filter_shape:
prev_layer = activation_fn(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = activation_fn(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
# num_or_size_splits=1
net = [net]
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.compat.v1.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.compat.v1.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.compat.v1.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.compat.v1.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.compat.v1.variable_scope('combine'):
h = h1 + h2
if self._use_bounded_activation:
h = tf.nn.relu6(h)
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.compat.v1.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
# Dont stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size,
self._use_bounded_activation)
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif operation in ['none']:
if self._use_bounded_activation:
net = tf.nn.relu6(net)
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
if not self._use_bounded_activation:
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif 'pool' in operation:
net = _pooling(net, stride, operation, self._use_bounded_activation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.compat.v1.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@contrib_framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
defaults to tf.train.get_or_create_global_step() properly casted.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.compat.v1.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.compat.v1.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.compat.v1.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.compat.v1.summary.scalar('drop_path_keep_prob',
drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
| alexgorban/models | research/slim/nets/nasnet/nasnet_utils.py | Python | apache-2.0 | 20,826 | 0.007395 |
"""Auto-generated file, do not edit by hand. IE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IE = PhoneMetadata(id='IE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[159]\\d{2,5}', possible_number_pattern='\\d{3,6}'),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|1(?:11|23))', possible_number_pattern='\\d{6}', example_number='116000'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='112|999', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='11(?:2|6(?:00[06]|1(?:11|23)))|51210|999', possible_number_pattern='\\d{3,6}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='51210', possible_number_pattern='\\d{5}'),
short_data=True)
| roubert/python-phonenumbers | python/phonenumbers/shortdata/region_IE.py | Python | apache-2.0 | 1,071 | 0.008403 |
#Embedded file name: ACEStream\Core\Statistics\VideoPlaybackCrawler.pyo
from time import strftime
import cPickle
import sys
import threading
import zlib
from ACEStream.Core.BitTornado.BT1.MessageID import CRAWLER_VIDEOPLAYBACK_INFO_QUERY, CRAWLER_VIDEOPLAYBACK_EVENT_QUERY
from ACEStream.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
from ACEStream.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_TENTH
from ACEStream.Core.Statistics.Crawler import Crawler
from ACEStream.Core.Utilities.utilities import show_permid, show_permid_short
DEBUG = False
class VideoPlaybackCrawler:
__single = None
lock = threading.Lock()
@classmethod
def get_instance(cls, *args, **kargs):
if cls.__single is None:
cls.lock.acquire()
try:
if cls.__single is None:
cls.__single = cls(*args, **kargs)
finally:
cls.lock.release()
return cls.__single
def __init__(self):
if VideoPlaybackCrawler.__single is not None:
raise RuntimeError, 'VideoPlaybackCrawler is singleton'
crawler = Crawler.get_instance()
if crawler.am_crawler():
self._file = open('videoplaybackcrawler.txt', 'a')
self._file.write(''.join(('# ',
'*' * 80,
'\n# ',
strftime('%Y/%m/%d %H:%M:%S'),
' Crawler started\n')))
self._file.flush()
self._event_db = None
else:
self._file = None
self._event_db = VideoPlaybackDBHandler.get_instance()
def query_initiator(self, permid, selversion, request_callback):
if selversion >= OLPROTO_VER_TENTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_initiator', show_permid_short(permid), 'version', selversion
request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, 'SELECT key, timestamp, event FROM playback_event; DELETE FROM playback_event;', callback=self._after_event_request_callback)
elif selversion >= OLPROTO_VER_EIGHTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_initiator', show_permid_short(permid), 'version', selversion
request_callback(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, 'SELECT key, timestamp, piece_size, num_pieces, bitrate, nat FROM playback_info ORDER BY timestamp DESC LIMIT 50', callback=self._after_info_request_callback)
elif DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_info_initiator', show_permid_short(permid), 'unsupported overlay version'
def _after_info_request_callback(self, exc, permid):
if not exc:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: request send to', show_permid_short(permid)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
'INFO REQUEST',
show_permid(permid),
'\n')))
self._file.flush()
def handle_info_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_info_crawler_request', show_permid_short(permid), message
try:
cursor = self._event_db._db.execute_read(message)
except Exception as e:
reply_callback(str(e), error=1)
else:
if cursor:
reply_callback(zlib.compress(cPickle.dumps(list(cursor), 2), 9))
else:
reply_callback('error', error=2)
def handle_info_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
if error:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', error, message
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' INFO REPLY',
show_permid(permid),
str(error),
message,
'\n')))
self._file.flush()
else:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), cPickle.loads(message)
info = cPickle.loads(message)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' INFO REPLY',
show_permid(permid),
str(error),
str(info),
'\n')))
self._file.flush()
i = 0
for key, timestamp, piece_size, num_pieces, bitrate, nat in info:
i += 1
if i == 1:
sql = "\nSELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;\nDELETE FROM playback_event WHERE key = '%s';\n" % (key, key)
else:
sql = "\nSELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;\nDELETE FROM playback_event WHERE key = '%s';\nDELETE FROM playback_info WHERE key = '%s';\n" % (key, key, key)
request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, sql, channel_data=key, callback=self._after_event_request_callback, frequency=0)
def _after_event_request_callback(self, exc, permid):
if not exc:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: request send to', show_permid_short(permid)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REQUEST',
show_permid(permid),
'\n')))
self._file.flush()
def handle_event_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
if error:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', error, message
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
message,
'\n')))
self._file.flush()
elif selversion >= OLPROTO_VER_TENTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), len(message), 'bytes zipped'
info = cPickle.loads(zlib.decompress(message))
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
str(info),
'\n')))
self._file.flush()
elif selversion >= OLPROTO_VER_EIGHTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), cPickle.loads(message)
info = cPickle.loads(message)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
str(info),
'\n')))
self._file.flush()
def handle_event_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_event_crawler_request', show_permid_short(permid), message
try:
cursor = self._event_db._db.execute_read(message)
except Exception as e:
reply_callback(str(e), error=1)
else:
if cursor:
reply_callback(zlib.compress(cPickle.dumps(list(cursor), 2), 9))
else:
reply_callback('error', error=2)
| aplicatii-romanesti/allinclusive-kodi-pi | .kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Core/Statistics/VideoPlaybackCrawler.py | Python | apache-2.0 | 7,854 | 0.005857 |
import sys
import collections
class GeocoderResult(collections.Iterator):
"""
A geocoder resultset to iterate through address results.
Exemple:
results = Geocoder.geocode('paris, us')
for result in results:
print(result.formatted_address, result.location)
Provide shortcut to ease field retrieval, looking at 'types' in each
'address_components'.
Example:
result.country
result.postal_code
You can also choose a different property to display for each lookup type.
Example:
result.country__short_name
By default, use 'long_name' property of lookup type, so:
result.country
and:
result.country__long_name
are equivalent.
"""
attribute_mapping = {
"state": "administrative_area_level_1",
"province": "administrative_area_level_1",
"city": "locality",
"county": "administrative_area_level_2",
}
def __init__(self, data):
"""
Creates instance of GeocoderResult from the provided JSON data array
"""
self.data = data
self.len = len(self.data)
self.current_index = 0
self.current_data = self.data[0]
def __len__(self):
return self.len
def __iter__(self):
return self
def return_next(self):
if self.current_index >= self.len:
raise StopIteration
self.current_data = self.data[self.current_index]
self.current_index += 1
return self
def __getitem__(self, key):
"""
Accessing GeocoderResult by index will return a GeocoderResult
with just one data entry
"""
return GeocoderResult([self.data[key]])
def __unicode__(self):
return self.formatted_address
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
def __next__(self):
return self.return_next()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
def next(self):
return self.return_next()
@property
def count(self):
return self.len
@property
def coordinates(self):
"""
Return a (latitude, longitude) coordinate pair of the current result
"""
location = self.current_data['geometry']['location']
return location['lat'], location['lng']
@property
def latitude(self):
return self.coordinates[0]
@property
def longitude(self):
return self.coordinates[1]
@property
def raw(self):
"""
Returns the full result set in dictionary format
"""
return self.data
@property
def valid_address(self):
"""
Returns true if queried address is valid street address
"""
return self.current_data['types'] == ['street_address']
@property
def formatted_address(self):
return self.current_data['formatted_address']
def __getattr__(self, name):
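        # Supports lookups such as result.country__short_name: the part before '__'
        # selects the address component type (via attribute_mapping), the part after
        # selects the property to return ('long_name' by default).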
lookup = name.split('__')
attribute = lookup[0]
if (attribute in GeocoderResult.attribute_mapping):
attribute = GeocoderResult.attribute_mapping[attribute]
try:
prop = lookup[1]
except IndexError:
prop = 'long_name'
for elem in self.current_data['address_components']:
if attribute in elem['types']:
return elem[prop]
class GeocoderError(Exception):
"""Base class for errors in the :mod:`pygeocoder` module.
Methods of the :class:`Geocoder` raise this when something goes wrong.
"""
#: See http://code.google.com/apis/maps/documentation/geocoding/index.html#StatusCodes
#: for information on the meaning of these status codes.
G_GEO_OK = "OK"
G_GEO_ZERO_RESULTS = "ZERO_RESULTS"
G_GEO_OVER_QUERY_LIMIT = "OVER_QUERY_LIMIT"
G_GEO_REQUEST_DENIED = "REQUEST_DENIED"
G_GEO_MISSING_QUERY = "INVALID_REQUEST"
def __init__(self, status, url=None, response=None):
"""Create an exception with a status and optional full response.
:param status: Either a ``G_GEO_`` code or a string explaining the
exception.
:type status: int or string
:param url: The query URL that resulted in the error, if any.
:type url: string
:param response: The actual response returned from Google, if any.
:type response: dict
"""
Exception.__init__(self, status) # Exception is an old-school class
self.status = status
self.url = url
self.response = response
def __str__(self):
"""Return a string representation of this :exc:`GeocoderError`."""
return 'Error %s\nQuery: %s' % (self.status, self.url)
def __unicode__(self):
"""Return a unicode representation of this :exc:`GeocoderError`."""
return unicode(self.__str__())
| zoeren/pygeocoder | pygeolib.py | Python | bsd-3-clause | 4,972 | 0.000603 |
from peewee import * # no other way to reach playhouse :(
from playhouse import flask_utils as peewee_flask_utils
from playhouse import signals as peewee_signals
database = peewee_flask_utils.FlaskDB()
| thedod/boilerplate-peewee-flask | application/sitepack/db.py | Python | gpl-3.0 | 204 | 0 |
from django.apps import AppConfig
class CirculoConfig(AppConfig):
name = 'circulo'
| jstitch/gift_circle | GiftCircle/circulo/apps.py | Python | gpl-3.0 | 89 | 0 |
# -*- coding: utf-8 -*-
"""
Contains classes to handle images related things
# Requires PIL
"""
from hyde.plugin import Plugin
import re
import Image
class ImageSizerPlugin(Plugin):
"""
Each HTML page is modified to add width and height for images if
they are not already specified.
"""
def __init__(self, site):
super(ImageSizerPlugin, self).__init__(site)
self.cache = {}
def _handle_img(self, resource, src, width, height):
"""Determine what should be added to an img tag"""
if height is not None and width is not None:
return "" # Nothing
if src is None:
self.logger.warn("[%s] has an img tag without src attribute" % resource)
return "" # Nothing
if src not in self.cache:
if src.startswith(self.site.config.media_url):
path = src[len(self.site.config.media_url):].lstrip("/")
path = self.site.config.media_root_path.child(path)
image = self.site.content.resource_from_relative_deploy_path(path)
elif re.match(r'([a-z]+://|//).*', src):
# Not a local link
return "" # Nothing
elif src.startswith("/"):
# Absolute resource
path = src.lstrip("/")
image = self.site.content.resource_from_relative_deploy_path(path)
else:
# Relative resource
path = resource.node.source_folder.child(src)
image = self.site.content.resource_from_path(path)
if image is None:
self.logger.warn(
"[%s] has an unknown image" % resource)
return "" # Nothing
if image.source_file.kind not in ['png', 'jpg', 'jpeg', 'gif']:
self.logger.warn(
"[%s] has an img tag not linking to an image" % resource)
return "" # Nothing
# Now, get the size of the image
try:
self.cache[src] = Image.open(image.path).size
except IOError:
self.logger.warn(
"Unable to process image [%s]" % image)
self.cache[src] = (None, None)
return "" # Nothing
self.logger.debug("Image [%s] is %s" % (src,
self.cache[src]))
new_width, new_height = self.cache[src]
if new_width is None or new_height is None:
return "" # Nothing
if width is not None:
return 'height="%d" ' % (int(width)*new_height/new_width)
elif height is not None:
return 'width="%d" ' % (int(height)*new_width/new_height)
return 'height="%d" width="%d" ' % (new_height, new_width)
def text_resource_complete(self, resource, text):
"""
When the resource is generated, search for img tag and specify
their sizes.
Some img tags may be missed, this is not a perfect parser.
"""
try:
mode = self.site.config.mode
except AttributeError:
mode = "production"
if not resource.source_file.kind == 'html':
return
if mode.startswith('dev'):
self.logger.debug("Skipping sizer in development mode.")
return
pos = 0 # Position in text
img = None # Position of current img tag
state = "find-img"
while pos < len(text):
if state == "find-img":
img = text.find("<img", pos)
if img == -1:
break # No more img tag
pos = img + len("<img")
if not text[pos].isspace():
continue # Not an img tag
pos = pos + 1
tags = {"src": "",
"width": "",
"height": ""}
state = "find-attr"
continue
if state == "find-attr":
if text[pos] == ">":
# We get our img tag
insert = self._handle_img(resource,
tags["src"] or None,
tags["width"] or None,
tags["height"] or None)
img = img + len("<img ")
text = "".join([text[:img], insert, text[img:]])
state = "find-img"
pos = pos + 1
continue
attr = None
for tag in tags:
if text[pos:(pos+len(tag)+1)] == ("%s=" % tag):
attr = tag
pos = pos + len(tag) + 1
break
if not attr:
pos = pos + 1
continue
if text[pos] in ["'", '"']:
pos = pos + 1
state = "get-value"
continue
if state == "get-value":
if text[pos] == ">":
state = "find-attr"
continue
if text[pos] in ["'", '"'] or text[pos].isspace():
# We got our value
pos = pos + 1
state = "find-attr"
continue
tags[attr] = tags[attr] + text[pos]
pos = pos + 1
continue
return text
| Valloric/hyde | hyde/ext/plugins/images.py | Python | mit | 5,652 | 0.000885 |
from bases import *
from inanimate import Bullet
class Player(AnimateObject):
def __init__(self, x=0, y=0, hits=MAX_HEALTH, timer=TIMER, damage=1, score=0):
super(Player, self).__init__(x, y, hits, timer)
self.damage = damage
self.score = score
def shoot(self):
if self.x - 1 >= 0:
return Bullet(self.x - 1, self.y, "player", self.damage)
def read(self, data):
super(Player, self).read(data)
values = data.split(" ")
        # Mirror write(): damage is written before score.
        self.damage = int(values[6])
        self.score = int(values[7])
def write(self):
data = super(Player, self).write()
data += " " + str(self.damage)
data += " " + str(self.score)
return data
| elenaod/PythonScrollingShooter | player.py | Python | gpl-3.0 | 743 | 0.004038 |
from midx.notify.common import iter_modified_files
| mikeboers/midx | midx/notify/__init__.py | Python | bsd-3-clause | 51 | 0 |
try:
from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
# url(r'^admin/', include(admin.site.urls)),
)
except ImportError:
from django.conf.urls import include, url
urlpatterns = (
# url(r'^admin/', include(admin.site.urls)),
)
| Ubiwhere/django-dbbackup | dbbackup/tests/testapp/urls.py | Python | bsd-3-clause | 305 | 0 |
"""Configuration for pytest."""
import json
def pytest_generate_tests(metafunc):
"""Configure pytest to call each of the tests once for each test case."""
if "test_case" in metafunc.fixturenames:
tests = json.load(open("tests/test_data.json"))["tests"]
metafunc.parametrize("test_case", tests)
| rowanphipps/Cerberus | tests/conftest.py | Python | mit | 320 | 0 |
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from .. import views
class Common(object):
"""
Stuff shared by the two views.
"""
def get_return_url(self):
return reverse('openid_callback')
def failure(self, message):
return HttpResponse(message)
class Begin(Common, views.Begin):
template_name = 'le_social/openid/openid.html'
begin = Begin.as_view()
class Callback(Common, views.Callback):
def success(self):
openid_url = self.openid_response.identity_url
return HttpResponse('OpenID association: %s' % openid_url)
callback = Callback.as_view()
| brutasse/django-le-social | le_social/openid/tests/views.py | Python | bsd-3-clause | 714 | 0.002801 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('emailer', '0004_auto_20150128_2202'),
]
operations = [
migrations.RemoveField(
model_name='email',
name='creation_date',
),
]
| JustinWingChungHui/okKindred | emailer/migrations/0005_remove_email_creation_date.py | Python | gpl-2.0 | 358 | 0 |
# -*- coding: utf-8 -*-
# Description: litespeed netdata python.d module
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
import glob
import os
import re
from collections import namedtuple
from bases.FrameworkServices.SimpleService import SimpleService
update_every = 10
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'net_throughput_http', # net throughput
'net_throughput_https', # net throughput
'connections_http', # connections
'connections_https', # connections
'requests', # requests
'requests_processing', # requests
'pub_cache_hits', # cache
'private_cache_hits', # cache
'static_hits', # static
]
CHARTS = {
'net_throughput_http': {
'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
'litespeed.net_throughput', 'area'],
'lines': [
['bps_in', 'in', 'absolute'],
['bps_out', 'out', 'absolute', -1]
]
},
'net_throughput_https': {
'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
'litespeed.net_throughput', 'area'],
'lines': [
['ssl_bps_in', 'in', 'absolute'],
['ssl_bps_out', 'out', 'absolute', -1]
]
},
'connections_http': {
'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
'lines': [
['conn_free', 'free', 'absolute'],
['conn_used', 'used', 'absolute']
]
},
'connections_https': {
'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
'lines': [
['ssl_conn_free', 'free', 'absolute'],
['ssl_conn_used', 'used', 'absolute']
]
},
'requests': {
'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
'lines': [
['requests', None, 'absolute', 1, 100]
]
},
'requests_processing': {
'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
'lines': [
['requests_processing', 'processing', 'absolute']
]
},
'pub_cache_hits': {
'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
'lines': [
['pub_cache_hits', 'hits', 'absolute', 1, 100]
]
},
'private_cache_hits': {
'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
'lines': [
['private_cache_hits', 'hits', 'absolute', 1, 100]
]
},
'static_hits': {
'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
'lines': [
['static_hits', 'hits', 'absolute', 1, 100]
]
}
}
t = namedtuple('T', ['key', 'id', 'mul'])
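# Maps .rtreport keys to chart dimension ids; 'mul' compensates for the chart
# divisors above (x100 for the per-second rates) and, presumably, converts
# bytes to bits (x8) for the throughput charts.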
T = [
t('BPS_IN', 'bps_in', 8),
t('BPS_OUT', 'bps_out', 8),
t('SSL_BPS_IN', 'ssl_bps_in', 8),
t('SSL_BPS_OUT', 'ssl_bps_out', 8),
t('REQ_PER_SEC', 'requests', 100),
t('REQ_PROCESSING', 'requests_processing', 1),
t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
t('STATIC_HITS_PER_SEC', 'static_hits', 100),
t('PLAINCONN', 'conn_used', 1),
t('AVAILCONN', 'conn_free', 1),
t('SSLCONN', 'ssl_conn_used', 1),
t('AVAILSSL', 'ssl_conn_free', 1),
]
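# Extracts "KEY: value" pairs from a report line, e.g. "BPS_IN: 12.5" -> ('BPS_IN', '12.5').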
RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
ZERO_DATA = {
'bps_in': 0,
'bps_out': 0,
'ssl_bps_in': 0,
'ssl_bps_out': 0,
'requests': 0,
'requests_processing': 0,
'pub_cache_hits': 0,
'private_cache_hits': 0,
'static_hits': 0,
'conn_used': 0,
'conn_free': 0,
'ssl_conn_used': 0,
'ssl_conn_free': 0,
}
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.path = self.configuration.get('path', '/tmp/lshttpd/')
self.files = list()
def check(self):
if not self.path:
self.error('"path" not specified')
return False
fs = glob.glob(os.path.join(self.path, '.rtreport*'))
if not fs:
self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
return None
self.debug('stats files:', fs)
for f in fs:
if not is_readable_file(f):
self.error('{0} is not readable'.format(f))
continue
self.files.append(f)
return bool(self.files)
def get_data(self):
"""
Format data received from http request
:return: dict
"""
data = dict(ZERO_DATA)
for f in self.files:
try:
with open(f) as b:
lines = b.readlines()
except (OSError, IOError) as err:
self.error(err)
return None
else:
parse_file(data, lines)
return data
def parse_file(data, lines):
for line in lines:
if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
continue
m = dict(RE.findall(line))
for v in T:
if v.key in m:
data[v.id] += float(m[v.key]) * v.mul
def is_readable_file(v):
return os.path.isfile(v) and os.access(v, os.R_OK)
| 3cky/netdata | collectors/python.d.plugin/litespeed/litespeed.chart.py | Python | gpl-3.0 | 5,647 | 0.001948 |
input('Anouk Aimée is a pseudonym of the actress Françoise Judith Sorya Dreyfus')
| Mariaanisimova/pythonintask | ISTp/2014/VOLKO_S_D/task-1-46.py | Python | apache-2.0 | 123 | 0.013889 |
from __future__ import division
from _init_paths import *
import os
import os.path as osp
import sg_utils as utils
import numpy as np
import skimage.io
import skimage.transform
import h5py
import pickle
import json
import math
import argparse
import time
import cv2
from collections import Counter
from json import encoder
"""
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
"""
encoder.FLOAT_REPR = lambda o: format(o, '.2f')
mean = np.array([[[ 103.939, 116.779, 123.68]]])
functional_words = ['a', 'on', 'of', 'the', 'in', 'with', 'and', 'is', 'to', 'an', 'two', 'at', 'next', 'are', 'it']
def scaleandtranspose(im, base_image_size):
    # Subtract the ILSVRC mean pixel value
new_im = im - mean
# Upsample the image and swap the axes to Color x height x width
new_im = upsample_image(new_im, base_image_size, square=True)
return new_im.transpose((2,0,1))
def BGR2RGB(img):
assert img.shape[2] == 3
new_img = img.copy()
new_img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
return new_img
def clip(lo, x, hi):
return lo if x <= lo else hi if x >= hi else x
def data_crop(im, boxes):
# Make sure the larger edge is 720 in length
H, W = im.shape[0], im.shape[1]
bbox_img = im.copy()
crop_list = []
for box in boxes:
# Careful that the order is height precede width
leftup_x = clip(0, box[0], W)
leftup_y = clip(0, box[1], H)
rightbot_x = clip(0, box[0] + box[2], W)
rightbot_y = clip(0, box[1] + box[3], H)
crop_list.append(im[leftup_y:rightbot_y, leftup_x:rightbot_x, :])
cv2.rectangle(bbox_img, (leftup_x, leftup_y), (rightbot_x, rightbot_y), (0, 255, 0), 2)
return crop_list, bbox_img
def upsample_image(im, upsample_size, square=False):
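    # Resize so the longer edge equals upsample_size (preserving aspect ratio), then
    # paste the result into the top-left corner of a zero-padded canvas; with
    # square=True the canvas is upsample_size x upsample_size.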
h, w = im.shape[0], im.shape[1]
s = max(h, w)
if square:
I_out = np.zeros((upsample_size, upsample_size, 3), dtype=np.float)
else:
new_h = math.ceil(h/w * upsample_size) if w>=h else upsample_size
new_w = math.ceil(w/h * upsample_size) if h>=w else upsample_size
I_out = np.zeros((new_h, new_w, 3), dtype=np.float)
im = cv2.resize(im, None, None, fx = upsample_size/s, fy = upsample_size/s, interpolation=cv2.INTER_CUBIC)
I_out[:im.shape[0], :im.shape[1], :] = im
return I_out
def filter_out(concepts):
rank = Counter()
for concept in concepts:
rank.update(concept)
words = map(lambda arg: arg[0], rank.most_common(20))
return words
class DataLoader(object):
def __init__(self, coco_h5, coco_json):
self.h5 = h5py.File(coco_h5)
self.label_start_ix = self.h5['label_start_ix']
self.label_end_ix = self.h5['label_end_ix']
self.json_image = json.load(open(coco_json))['images']
self.image_num = len(json.load(open(coco_json))['images'])
self.ix_to_word = json.load(open(coco_json))['ix_to_word']
self.split_ix = {}
self.seq_length = 16
self.iterator = {}
for i, info in enumerate(self.json_image):
if info['split'] not in self.split_ix:
self.split_ix[info['split']] = [i]
else:
self.split_ix[info['split']].append(i)
self.reset_iterator()
def get_image_num(self, split):
if split == 'train':
return self.image_num - 10000
else:
return 5000
def reset_iterator(self):
for k in self.split_ix.keys():
self.iterator[k] = 0
def get_batch(self, split, batch_size=1, seq_per_img=5, seq_length=16):
images = np.zeros((batch_size, 256, 256, 3))
seq = np.zeros((seq_per_img, seq_length))
split_ix = self.split_ix[split]
max_ix = self.h5['labels'].shape[0]
max_index = len(split_ix)
wrapped = False
info = []
for i in range(batch_size):
ri = self.iterator[split]
ri_next = ri + 1
if ri_next >= max_index:
ri_next = 0
wrapped = True
self.iterator[split] = ri_next
ix = split_ix[ri]
ix1 = self.h5['label_start_ix'][ix]
ix2 = self.h5['label_end_ix'][ix]
ncaps = ix2 - ix1 + 1
assert ncaps > 0
if ncaps >= seq_per_img:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=False)
else:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=True)
for j, j_ix in enumerate(rand_ix):
if j_ix >= max_ix:
seq[j] = self.h5['labels'][-1, :seq_length]
else:
seq[j] = self.h5['labels'][j_ix, :seq_length]
im = self.h5['images'][ix].astype(np.float32)
images[i] = np.transpose(im, axes = (1, 2, 0))
info.append({'id': self.json_image[ix]['id'], 'file_path': self.json_image[ix]['file_path']})
return images, seq, info, wrapped
class TestModel(object):
def __init__(self, vocab_file):
# Set threshold_metric_name and output_metric_name
self.base_image_size = 565
self.vocab = utils.load_variables(vocab_file)
self.is_functional = np.array([x not in functional_words for x in self.vocab['words']])
self.threshold = 0.5
def load(self, prototxt_deploy, model_file):
self.net = caffe.Net(prototxt_deploy, model_file, caffe.TEST)
def forward(self, im, order):
# Make sure the image passed in are BGR order and height x width x channel order
self.net.forward(data=im)
# Retrieve the mil probability of the word
mil_probs = self.net.blobs['mil'].data
mil_probs = mil_probs.reshape((mil_probs.shape[0], mil_probs.shape[1]))
top_ind = np.argsort(-mil_probs, axis=-1)[:, :order + len(functional_words)]
# If not for regional features, just return the distribution
if order == 1000:
return self.net.blobs['mil'].data
# Retrive the sigmoid data from the sigmoid layer
fc8_conv_probs = self.net.blobs['fc8-conv-sigmoid'].data
fc8_conv = fc8_conv_probs.reshape((fc8_conv_probs.shape[0], fc8_conv_probs.shape[1], -1))
fc8_conv_arg = fc8_conv.argmax(axis=-1)
# Retrive the correponding feature maps
feat_map = self.net.blobs['fc7-conv'].data
concepts, prob = [], []
att_feat = np.zeros((feat_map.shape[0], order, feat_map.shape[1]), dtype='float32')
feat_probs = np.zeros((feat_map.shape[0], order, 12, 12), dtype='float32')
# Loop over all the sorted indexes
indexes = []
for i in range(top_ind.shape[0]):
tmp_concepts = []
for j in range(top_ind.shape[1]):
word_idx = top_ind[i, j]
prob_map = fc8_conv_probs[i, word_idx, :, :]
index = fc8_conv_arg[i, word_idx]
word = self.vocab['words'][word_idx]
if word not in functional_words:
if index not in indexes:
i1, i2 = divmod(index, 12)
att_feat[i, len(indexes)] = feat_map[i,:,i1,i2]
indexes.append(index)
feat_probs[i, len(tmp_concepts)] = prob_map
tmp_concepts.append(word)
if len(tmp_concepts) >= order:
break
concepts.append(tmp_concepts)
prob.append(mil_probs[i, top_ind[i]].tolist())
return concepts, prob, att_feat, feat_probs
if __name__ == "__main__":
parser = argparse.ArgumentParser("run visual concept extraction")
parser.add_argument("--test_json", type=str, required=True, help="test image json")
parser.add_argument("--dataset", type=str, required=True, help="the dataset to use")
parser.add_argument("--split", type=str, required=True, help="Choose a split to evaluate")
parser.add_argument("--order", type=int, default=20, help="test image json")
parser.add_argument("--gpuid", type=int, default=0, help="GPU id to run")
parser.add_argument("--salient_grt", type=str, default='../Data/coco/salient_grt.json', help="Groundtruth")
parser.add_argument("--batch_size", type=int, default=1, help="Verbose the results")
parser.add_argument("--verbose", action='store_true', help="Verbose the results")
args = parser.parse_args()
# Caffe setting
caffe.set_mode_gpu()
caffe.set_device(args.gpuid)
prototxt = 'output/vgg/mil_finetune.prototxt.deploy'
model_file = 'output/vgg/snapshot_iter_240000.caffemodel'
vocab_file = 'vocabs/vocab_train.pkl'
basedir = '../Data/%s'%args.dataset
    prefix = coco_image_base if args.dataset == 'coco' else flickr_image_base
#prototxt = '/home/thes0193/code/output/v2/mil_finetune.prototxt.deploy'
#model_file = '/home/thes0193/code/output/v2/snapshot_iter_240000.caffemodel'
#vocab_file = '/home/thes0193/code/vocabs/new_train_vocab.pkl'
# Load the model
model = TestModel(vocab_file)
with open(args.salient_grt) as f:
salient_grt_map = {item['id']:item['words'] for item in json.load(f)}
model.load(prototxt, model_file)
# Open h5 file, if not exist then create one, if exists just load it
image_f = json.load(open(args.test_json))
result, prec_set = [], []
h5_name = osp.join(basedir, 'Feats_%s.h5'%(args.split))
if osp.exists(h5_name):
h5_f = h5py.File(h5_name, 'r+')
else:
h5_f = h5py.File(h5_name, 'w')
if 'regional_feats' not in h5_f.keys():
feats = h5_f.create_dataset('regional_feats', shape=(len(image_f), args.order*2048), dtype='float32')
else:
feats = h5_f['/regional_feats']
# Start generate results, i.e. visual concepts and regionl features
for start in range(0, len(image_f), args.batch_size):
end = min(start+args.batch_size, len(image_f))
img_batch = np.zeros((args.batch_size, 3, 565, 565), 'float32')
for i in range(start, end):
img = cv2.imread(osp.join(prefix, image_f[i]['file_name']))
img_batch[i-start] = scaleandtranspose(img, 565)
results = model.forward(img_batch, args.order)
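        # results = (visual concepts, their probabilities, regional features, per-concept probability maps)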
# Calculate the precision and recall
for i in range(start, end):
# Calculate precision
if image_f[i]['id'] in salient_grt_map:
ref = salient_grt_map[image_f[i]['id']]
prec = len(set(ref) & set(results[0][i-start]))/len(ref)
prec_set.append(prec)
print "Precision: %0.2f"%(sum(prec_set)/len(prec_set))
# Form results
            result.append({'id': image_f[i]['id'], 'text': results[0][i-start], 'prob': results[1][i-start]})
feats[start:end] = results[2][:,:,::2].reshape((args.batch_size, -1))
"""
img_fig = plt.figure()
plt.imshow(BGR2RGB(origin_img[i]))
plt.axis('off')
fig = plt.figure(figsize=(10, 6), facecolor='white')
for j in range(12):
img = (batch_img[i].transpose((1,2,0)) + mean)/255
ax = fig.add_subplot(3, 4, j+1)
#ax.set_axis_bgcolor('white')
ax.imshow(BGR2RGB(img))
alpha_img = skimage.transform.resize(feat_probs[i,j], [img.shape[0], img.shape[1]])
ax.imshow(alpha_img, cmap=cm.Greys_r, alpha=0.8)
ax.set_title(visual_concepts[i][j])
ax.axis('off')
plt.show()
raw_input("Press Enter to continue...")
"""
if start % 100 == 0 and start > 0:
print "Finished %d images"%start
h5_f.close()
# Dump it into the visual concept files for next step
with open(osp.join(basedir,'visual_concept_%s.json'%args.split), 'w') as f:
pickle.dump(result, f)
| wenhuchen/ETHZ-Bootstrapped-Captioning | visual-concepts/eval.py | Python | bsd-3-clause | 11,962 | 0.006437 |
#!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from capstone import CS_GRP_CALL, CS_GRP_RET
from capstone.arm import (ARM_CC_EQ, ARM_CC_NE, ARM_CC_HS, ARM_CC_LO,
ARM_CC_MI, ARM_CC_PL, ARM_CC_VS, ARM_CC_VC, ARM_CC_HI,
ARM_CC_LS, ARM_CC_GE, ARM_CC_LT, ARM_CC_GT, ARM_CC_LE, ARM_CC_AL,
ARM_INS_EOR, ARM_INS_ADD, ARM_INS_ORR, ARM_INS_AND, ARM_INS_MOV,
ARM_INS_CMP, ARM_INS_SUB, ARM_INS_LDR, ARM_INS_B, ARM_INS_BLX,
ARM_INS_BL, ARM_INS_BX, ARM_REG_LR, ARM_OP_REG, ARM_REG_PC, ARM_INS_POP,
ARM_OP_IMM, ARM_OP_MEM, ARM_REG_SP)
JUMPS = {ARM_INS_B, ARM_INS_BX}
JUMPS_LINK = {ARM_INS_BL, ARM_INS_BLX}
OP_IMM = ARM_OP_IMM
OP_MEM = ARM_OP_MEM
OP_REG = ARM_OP_REG
# Warning: before adding new prolog check in lib.analyzer.has_prolog
PROLOGS = [
[b"\xe9\x2d"], # push registers
]
def is_cmp(i):
return i.id == ARM_INS_CMP
def is_jump(i):
# Suppose that the written register is the first operand
op = i.operands[0]
if op.type == ARM_OP_REG and op.value.reg == ARM_REG_PC:
return True
if i.id == ARM_INS_POP:
for o in i.operands:
if o.type == ARM_OP_REG and o.value.reg == ARM_REG_PC:
return True
return False
return i.id in JUMPS and not (op.type == ARM_OP_REG and \
op.value.reg == ARM_REG_LR)
def is_cond_jump(i):
return is_jump(i) and i.cc != ARM_CC_AL
def is_uncond_jump(i):
return is_jump(i) and i.cc == ARM_CC_AL
def is_ret(i):
op = i.operands[0]
return i.group(CS_GRP_RET) or i.id == ARM_INS_BX and \
op.type == ARM_OP_REG and op.value.reg == ARM_REG_LR
def is_call(i):
return i.group(CS_GRP_CALL) or i.id in JUMPS_LINK
OPPOSITES = [
[ARM_CC_EQ, ARM_CC_NE],
[ARM_CC_GE, ARM_CC_LT],
[ARM_CC_LE, ARM_CC_GT],
[ARM_CC_HI, ARM_CC_LS],
[ARM_CC_HS, ARM_CC_LO],
[ARM_CC_PL, ARM_CC_MI],
[ARM_CC_VS, ARM_CC_VC],
]
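# Make the table symmetric so invert_cond works in both directions (EQ<->NE, GE<->LT, ...).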
OPPOSITES = dict(OPPOSITES + [i[::-1] for i in OPPOSITES])
def invert_cond(i):
return OPPOSITES.get(i.cc, -1)
def get_cond(i):
return i.cc
COND_SYMB = {
ARM_CC_EQ: "==",
ARM_CC_NE: "!=",
ARM_CC_GE: ">=",
ARM_CC_LT: "<",
ARM_CC_LE: "<=",
ARM_CC_GT: ">",
ARM_CC_HI: "(unsigned) >",
ARM_CC_LS: "(unsigned) <=",
ARM_CC_HS: "(unsigned) >=",
ARM_CC_LO: "(unsigned) <",
ARM_CC_VS: "overflow",
ARM_CC_VC: "!overflow",
ARM_CC_PL: ">=",
ARM_CC_MI: "<",
}
INST_SYMB = {
ARM_INS_EOR: "^",
ARM_INS_ORR: "|",
ARM_INS_AND: "&",
ARM_INS_ADD: "+",
ARM_INS_MOV: "=",
ARM_INS_SUB: "-",
ARM_INS_CMP: "cmp",
ARM_INS_LDR: "=",
}
def cond_symbol(ty):
return COND_SYMB.get(ty, "UNKNOWN")
def inst_symbol(i):
return INST_SYMB.get(i.id, "UNKNOWN")
def guess_frame_size(analyzer, ad):
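    # Walk forward from ad, simulating only register effects, until the first
    # "sub sp, sp, #imm" reveals the frame size; give up on ret/call/cond-jump.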
regsctx = analyzer.arch_analyzer.new_regs_context()
if regsctx is None:
return -1
while 1:
i = analyzer.disasm(ad)
if i is None or is_ret(i) or is_call(i) or is_cond_jump(i):
return 0
# Do only registers simulation
analyzer.arch_analyzer.analyze_operands(analyzer, regsctx, i, None, True)
if i.id == ARM_INS_SUB:
op = i.operands[0]
if op.type == ARM_OP_REG and op.value.reg == ARM_REG_SP:
return - analyzer.arch_analyzer.get_sp(regsctx)
ad += i.size
return -1
def search_jmptable_addr(analyzer, jump_i, inner_code):
return None
| chubbymaggie/reverse | plasma/lib/arch/arm/utils.py | Python | gpl-3.0 | 4,197 | 0.004289 |
from share.normalize import ctx, tools
from share.normalize.parsers import Parser
from share.normalize.utils import format_address
class WorkIdentifier(Parser):
uri = tools.RunPython('get_ncar_identifier', ctx)
class Extra:
description = tools.Try(ctx.Related_URL.Description)
url_content_type = tools.Try(ctx.Related_URL.URL_Content_Type.Type)
def get_ncar_identifier(self, ctx):
return 'https://www.earthsystemgrid.org/dataset/{}.html'.format(ctx['Entry_ID'])
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = tools.Delegate(Tag, ctx)
class PersonnelAgent(Parser):
schema = tools.GuessAgentType(
tools.RunPython('combine_first_last_name', ctx)
)
name = tools.RunPython('combine_first_last_name', ctx)
location = tools.RunPython('get_address', ctx['Contact_Address'])
class Extra:
role = tools.Try(ctx.Role)
url = tools.Try(ctx.Data_Center_URL)
def combine_first_last_name(self, ctx):
return ctx['First_Name'] + ' ' + ctx['Last_Name']
def get_address(self, ctx):
address = ctx['Address']
if isinstance(address, list):
address1 = address[0]
address2 = address[1]
return format_address(
self,
address1=address1,
address2=address2,
city=ctx['City'],
state_or_province=ctx['Province_or_State'],
postal_code=ctx['Postal_Code'],
country=ctx['Country']
)
return format_address(
self,
address1=ctx['Address'],
            address2='',
city=ctx['City'],
state_or_province=ctx['Province_or_State'],
postal_code=ctx['Postal_Code'],
country=ctx['Country']
)
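    # Illustrative note (values invented, only the key names come from the code
    # above): the Contact_Address block may carry 'Address' either as a single
    # string or as a two-element list, e.g.
    #   {"Address": "P.O. Box 3000", "City": "Boulder",
    #    "Province_or_State": "CO", "Postal_Code": "80307", "Country": "USA"}
    #   {"Address": ["1850 Table Mesa Dr", "Suite 100"], "City": "Boulder", ...}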
class IsAffiliatedWith(Parser):
related = tools.Delegate(PersonnelAgent, ctx)
class DataCenterAgent(Parser):
schema = tools.GuessAgentType(
ctx.Data_Center_Name.Long_Name,
default='organization'
)
name = ctx.Data_Center_Name.Long_Name
related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Try(ctx.Personnel))
class Extra:
data_center_short_name = ctx.Data_Center_Name.Short_Name
class AgentWorkRelation(Parser):
agent = tools.Delegate(DataCenterAgent, ctx)
class DataSet(Parser):
title = tools.Join(tools.Try(ctx.record.metadata.DIF.Entry_Title))
description = tools.Try(ctx.record.metadata.DIF.Summary.Abstract)
related_agents = tools.Map(
tools.Delegate(AgentWorkRelation),
tools.Try(ctx.record.metadata.DIF.Data_Center)
)
tags = tools.Map(
tools.Delegate(ThroughTags),
tools.Try(ctx.record.metadata.DIF.Metadata_Name),
tools.Try(ctx.record.header.setSpec)
)
identifiers = tools.Map(tools.Delegate(WorkIdentifier), tools.Try(ctx.record.metadata.DIF))
date_updated = tools.ParseDate(ctx.record.header.datestamp)
is_deleted = tools.RunPython('check_status', tools.Try(ctx.record.header['@status']))
class Extra:
status = tools.Try(ctx.record.header['@status'])
entry_id = tools.Try(ctx.record.metadata.DIF.Entry_ID)
metadata_name = tools.Try(ctx.record.metadata.DIF.Metadata_Name)
metadata_version = tools.Try(ctx.record.metadata.DIF.Metadata_Version)
last_dif_revision_date = tools.Try(ctx.record.metadata.DIF.Last_DIF_Revision_Date)
set_spec = ctx.record.header.setSpec
def check_status(self, status):
if status == 'deleted':
return True
return False
| zamattiac/SHARE | providers/org/ncar/normalizer.py | Python | apache-2.0 | 3,649 | 0.00137 |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description:
#
# This test covers the situation where a node attaches to a parent that uses a different mesh-local prefix. It verifies
# that the attaching node adopts the parent's mesh-local prefix and the RLOC addresses on the node are correctly
# filtered (by wpantund).
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
node1 = wpan.Node()
node2 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
NET_NAME = 'ml-change'
CHANNEL = 11
PANID = '0x1977'
XPANID = '1020031510006016'
KEY = '0123456789abcdeffecdba9876543210'
ML_PREFIX_1 = 'fd00:1::'
ML_PREFIX_2 = 'fd00:2::'
# Form a network on node1
node1.form(
NET_NAME,
channel=CHANNEL,
panid=PANID,
xpanid=XPANID,
key=KEY,
mesh_local_prefix=ML_PREFIX_1,
)
# On node2, form a network with same parameters but a different mesh-local
# prefix
node2.form(
NET_NAME,
channel=CHANNEL,
panid=PANID,
xpanid=XPANID,
key=KEY,
mesh_local_prefix=ML_PREFIX_2,
)
# Node 2 is expected to attach to node1 and adopt the mesh-local prefix
# from node1
verify(node2.is_associated())
verify(
node2.get(wpan.WPAN_IP6_MESH_LOCAL_PREFIX) == node1.get(
wpan.WPAN_IP6_MESH_LOCAL_PREFIX))
# Ensure that there are only two addresses on node2 (the link-local and mesh-local addresses) and that the RLOC
# address is correctly filtered out (by wpantund).
verify(len(wpan.parse_list(node2.get(wpan.WPAN_IP6_ALL_ADDRESSES))) == 2)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| librasungirl/openthread | tests/toranj/test-033-mesh-local-prefix-change.py | Python | bsd-3-clause | 3,956 | 0.000758 |
#!/usr/bin/python
import json
class Tests(object):
def __init__(self, tests, name):
self.tests = tests
self.name = name
self.export_tests()
def export_tests(self):
with open(self.name + ".ptdf", "w+") as ptdf_file:
ptdf_file.write("Application " + self.name + '\n')
ptdf_file.write('Resource "wyeast cluster" grid|machine\n')
ptdf_file.write('Resource "v3.3.1" build\n')
ptdf_file.write('Resource "Linux version 3.17.4-301.fc21.x86_64" environment\n')
ptdf_file.write('Resource "self generated" dataFiles\n')
ptdf_file.write('Resource "whole time" time\n')
ptdf_file.write('Resource ext4 fileSystem\n')
ptdf_file.write('Resource "self instrumentation" perfToolName\n')
ptdf_file.write('Resource "time in seconds" metric\n')
for test_dictionary in self.tests:
execution = self.name.lower() + '-' + str(test_dictionary['START_TIME'])
ptdf_file.write("Execution " + execution + ' ' + self.name + '\n')
for key in test_dictionary:
if key != 'TIME_IN_SECONDS':
ptdf_file.write("ResourceAttribute " + execution + ' ' +
key.lower() + ' "' + str(test_dictionary[key]) + '" string\n')
ptdf_file.write('PerfResult ' + execution +
' "wyeast cluster,v3.3.1,Linux version 3.17.4-301.fc21.x86_64,self generated,' +
execution + ',whole time,ext4" "self instrumentation" "time in seconds" ' +
str(test_dictionary['TIME_IN_SECONDS']) + ' s ' +
str(test_dictionary['START_TIME']) + ' noValue\n')
class Matrix:
def __init__(self, tests):
self.all_data = tests
self.process_matrix()
def process_matrix(self):
for test_type in self.all_data:
test_name = test_type.upper()
data = self.all_data[test_type]
Tests(data, test_name)
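# Illustrative note (test-type key and values assumed, not taken from the
# project): data.txt is expected to map each test type to a list of run
# dictionaries, each holding at least START_TIME and TIME_IN_SECONDS, e.g.
#   {"smm_bench": [{"START_TIME": 1417551600, "TIME_IN_SECONDS": 12.5}]}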
with open("data.txt") as json_file:
json_data = json.load(json_file)
Matrix(json_data)
| kfrye/SMM-MPI | utilities/results/ptdf_export.py | Python | gpl-3.0 | 2,213 | 0.004067 |
# Update or remove for 2.0.0
from ..v20.grammars.STIXPatternListener import * # noqa: F401 | chisholm/cti-pattern-validator | stix2patterns/grammars/STIXPatternListener.py | Python | bsd-3-clause | 91 | 0.010989 |
"""Update a task in maniphest.
you can use the 'task id' output from the 'arcyon task-create' command as input
to this command.
usage examples:
update task '99' with a new title, only show id:
$ arcyon task-update 99 -t 'title' --format-id
99
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_taskupdate
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import phlcon_maniphest
import phlcon_project
import phlcon_user
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
# make a list of priority names in increasing order of importance
priority_name_list = phlcon_maniphest.PRIORITIES.keys()
priority_name_list.sort(
key=lambda x: phlcon_maniphest.PRIORITIES[x])
priorities = parser.add_argument_group(
'optional priority arguments',
'use any of ' + textwrap.fill(
str(priority_name_list)))
output_group = parser.add_argument_group(
'Output format arguments',
'Mutually exclusive, defaults to "--format-summary"')
output = output_group.add_mutually_exclusive_group()
opt = parser.add_argument_group(
'Optional task arguments',
'You can supply these later via the web interface if you wish')
priorities.add_argument(
'--priority',
'-p',
choices=priority_name_list,
metavar="PRIORITY",
default=None,
type=str,
help="the priority or importance of the task")
parser.add_argument(
'id',
metavar='INT',
help='the id of the task',
type=str)
parser.add_argument(
'--title',
'-t',
metavar='STRING',
help='the short title of the task',
default=None,
type=str)
opt.add_argument(
'--description',
'-d',
metavar='STRING',
help='the long description of the task',
default=None,
type=str)
opt.add_argument(
'--owner',
'-o',
metavar='USER',
help='the username of the owner',
type=str)
opt.add_argument(
'--ccs',
'-c',
nargs="*",
metavar='USER',
help='a list of usernames to cc on the task',
type=str)
opt.add_argument(
'--projects',
nargs="*",
metavar='PROJECT',
default=[],
help='a list of project names to add the task to',
type=str)
opt.add_argument(
'--comment',
'-m',
metavar='STRING',
help='an optional comment to make on the task',
default=None,
type=str)
output.add_argument(
'--format-summary',
action='store_true',
help='will print a human-readable summary of the result.')
output.add_argument(
'--format-id',
action='store_true',
help='will print just the id of the new task, for scripting.')
output.add_argument(
'--format-url',
action='store_true',
help='will print just the url of the new task, for scripting.')
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
if args.title and not args.title.strip():
print('you must supply a non-empty title', file=sys.stderr)
return 1
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
# create_task expects an integer
priority = None
if args.priority is not None:
priority = phlcon_maniphest.PRIORITIES[args.priority]
# conduit expects PHIDs not plain usernames
user_phids = phlcon_user.UserPhidCache(conduit)
if args.owner:
user_phids.add_hint(args.owner)
if args.ccs:
user_phids.add_hint_list(args.ccs)
owner = user_phids.get_phid(args.owner) if args.owner else None
ccs = [user_phids.get_phid(u) for u in args.ccs] if args.ccs else None
# conduit expects PHIDs not plain project names
projects = None
if args.projects:
project_to_phid = phlcon_project.make_project_to_phid_dict(conduit)
projects = [project_to_phid[p] for p in args.projects]
result = phlcon_maniphest.update_task(
conduit,
args.id,
args.title,
args.description,
priority,
owner,
ccs,
projects,
args.comment)
if args.format_id:
print(result.id)
elif args.format_url:
print(result.uri)
else: # args.format_summary:
message = (
"Updated task '{task_id}', you can view it at this URL:\n"
" {url}"
).format(
task_id=result.id,
url=result.uri)
print(message)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| kjedruczyk/phabricator-tools | py/aon/aoncmd_taskupdate.py | Python | apache-2.0 | 5,958 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Subscriber'
db.create_table(u'subscribers_subscriber', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('source_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
))
db.send_create_signal(u'subscribers', ['Subscriber'])
def backwards(self, orm):
# Deleting model 'Subscriber'
db.delete_table(u'subscribers_subscriber')
models = {
u'subscribers.subscriber': {
'Meta': {'ordering': "['-created']", 'object_name': 'Subscriber'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'source_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'})
}
}
complete_apps = ['subscribers'] | jcalazan/glucose-tracker | glucosetracker/subscribers/migrations/0001_initial.py | Python | mit | 1,627 | 0.00799 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from testtools import content
from pbr.tests import base
class TestCommands(base.BaseTestCase):
def test_custom_build_py_command(self):
"""Test custom build_py command.
Test that a custom subclass of the build_py command runs when listed in
the commands [global] option, rather than the normal build command.
"""
stdout, stderr, return_code = self.run_setup('build_py')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Running custom build_py command.', stdout)
self.assertEqual(0, return_code)
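    # Illustrative sketch (module path assumed, not verified against the test
    # package): the custom command would be advertised via setup.cfg, e.g.
    #
    #   [global]
    #   commands = pbr_testpackage._setup_hooks.test_command
    #
    # so distutils runs the subclass instead of the stock build_py.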
def test_custom_deb_version_py_command(self):
"""Test custom deb_version command."""
stdout, stderr, return_code = self.run_setup('deb_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting deb version', stdout)
self.assertEqual(0, return_code)
def test_custom_rpm_version_py_command(self):
"""Test custom rpm_version command."""
stdout, stderr, return_code = self.run_setup('rpm_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting rpm version', stdout)
self.assertEqual(0, return_code)
def test_freeze_command(self):
"""Test that freeze output is sorted in a case-insensitive manner."""
stdout, stderr, return_code = self.run_pbr('freeze')
self.assertEqual(0, return_code)
pkgs = []
for l in stdout.split('\n'):
pkgs.append(l.split('==')[0].lower())
pkgs_sort = sorted(pkgs[:])
self.assertEqual(pkgs_sort, pkgs)
| cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/pbr/tests/test_commands.py | Python | gpl-3.0 | 3,688 | 0.000271 |
# -*- coding: utf-8 -*-
# Roastero, released under GPLv3
import errno
import os
import json
import time
import functools
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from openroast import tools
from openroast.views import customqtwidgets
from openroast import utils as utils
class RecipeEditor(QtWidgets.QDialog):
def __init__(self, recipeLocation=None):
super(RecipeEditor, self).__init__()
# Define main window for the application.
self.setWindowTitle('Openroast')
self.setMinimumSize(800, 600)
self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.create_ui()
self.recipe = {}
self.recipe["steps"] = [{'fanSpeed': 5, 'targetTemp': 150,
'sectionTime': 0}]
if recipeLocation:
self.load_recipe_file(recipeLocation)
self.preload_recipe_information()
else:
self.preload_recipe_steps(self.recipeSteps)
def create_ui(self):
"""A method used to create the basic ui for the Recipe Editor Window"""
# Create main layout for window.
self.layout = QtWidgets.QGridLayout(self)
self.layout.setRowStretch(1, 3)
# Create input fields.
self.create_input_fields()
self.layout.addLayout(self.inputFieldLayout, 0, 0, 1, 2)
# Create big edit boxes.
self.create_big_edit_boxes()
self.layout.addLayout(self.bigEditLayout, 1, 0, 1, 2)
# Create Bottom Buttons.
self.create_bottom_buttons()
self.layout.addLayout(self.bottomButtonLayout, 2, 0, 1, 2)
def create_input_fields(self):
"""Creates all of the UI components for the top of the Recipe Editor
Window."""
# Create layout for section.
self.inputFieldLayout = QtWidgets.QGridLayout()
# Create labels for fields.
recipeNameLabel = QtWidgets.QLabel("Recipe Name: ")
recipeCreatorLabel = QtWidgets.QLabel("Created by: ")
recipeRoastTypeLabel = QtWidgets.QLabel("Roast Type: ")
beanRegionLabel = QtWidgets.QLabel("Bean Region: ")
beanCountryLabel = QtWidgets.QLabel("Bean Country: ")
beanLinkLabel = QtWidgets.QLabel("Bean Link: ")
beanStoreLabel = QtWidgets.QLabel("Bean Store Name: ")
# Create input fields.
self.recipeName = QtWidgets.QLineEdit()
self.recipeCreator = QtWidgets.QLineEdit()
self.recipeRoastType = QtWidgets.QLineEdit()
self.beanRegion = QtWidgets.QLineEdit()
self.beanCountry = QtWidgets.QLineEdit()
self.beanLink = QtWidgets.QLineEdit()
self.beanStore = QtWidgets.QLineEdit()
# Remove focus from input boxes.
self.recipeName.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeCreator.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeRoastType.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanRegion.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanCountry.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanLink.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanStore.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
# Add objects to the inputFieldLayout
self.inputFieldLayout.addWidget(recipeNameLabel, 0, 0)
self.inputFieldLayout.addWidget(self.recipeName, 0, 1)
self.inputFieldLayout.addWidget(recipeCreatorLabel, 1, 0)
self.inputFieldLayout.addWidget(self.recipeCreator, 1, 1)
self.inputFieldLayout.addWidget(recipeRoastTypeLabel, 2, 0)
self.inputFieldLayout.addWidget(self.recipeRoastType, 2, 1)
self.inputFieldLayout.addWidget(beanRegionLabel, 3, 0)
self.inputFieldLayout.addWidget(self.beanRegion, 3, 1)
self.inputFieldLayout.addWidget(beanCountryLabel, 4, 0)
self.inputFieldLayout.addWidget(self.beanCountry, 4, 1)
self.inputFieldLayout.addWidget(beanLinkLabel, 5, 0)
self.inputFieldLayout.addWidget(self.beanLink, 5, 1)
self.inputFieldLayout.addWidget(beanStoreLabel, 6, 0)
self.inputFieldLayout.addWidget(self.beanStore, 6, 1)
def create_big_edit_boxes(self):
"""Creates the Bottom section of the Recipe Editor Window. This method
creates the Description box and calls another method to make the
recipe steps table."""
# Create big edit box layout.
self.bigEditLayout = QtWidgets.QGridLayout()
# Create labels for the edit boxes.
recipeDescriptionBoxLabel = QtWidgets.QLabel("Description: ")
recipeStepsLabel = QtWidgets.QLabel("Steps: ")
# Create widgets.
self.recipeDescriptionBox = QtWidgets.QTextEdit()
self.recipeSteps = self.create_steps_spreadsheet()
# Add widgets to layout.
self.bigEditLayout.addWidget(recipeDescriptionBoxLabel, 0, 0)
self.bigEditLayout.addWidget(self.recipeDescriptionBox, 1, 0)
self.bigEditLayout.addWidget(recipeStepsLabel, 0, 1)
self.bigEditLayout.addWidget(self.recipeSteps, 1, 1)
def create_bottom_buttons(self):
"""Creates the button panel on the bottom of the Recipe Editor
Window."""
# Set bottom button layout.
self.bottomButtonLayout = QtWidgets.QHBoxLayout()
self.bottomButtonLayout.setSpacing(0)
# Create buttons.
self.saveButton = QtWidgets.QPushButton("SAVE")
self.closeButton = QtWidgets.QPushButton("CLOSE")
# Assign object names to the buttons.
self.saveButton.setObjectName("smallButton")
self.saveButton.clicked.connect(self.save_recipe)
self.closeButton.setObjectName("smallButton")
self.closeButton.clicked.connect(self.close_edit_window)
# Create Spacer.
self.spacer = QtWidgets.QWidget()
self.spacer.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# Add widgets to the layout.
self.bottomButtonLayout.addWidget(self.spacer)
self.bottomButtonLayout.addWidget(self.closeButton)
self.bottomButtonLayout.addWidget(self.saveButton)
def create_steps_spreadsheet(self):
"""Creates Recipe Steps table. It does not populate the table in this
method."""
recipeStepsTable = QtWidgets.QTableWidget()
recipeStepsTable.setShowGrid(False)
recipeStepsTable.setAlternatingRowColors(True)
recipeStepsTable.setCornerButtonEnabled(False)
recipeStepsTable.horizontalHeader().setSectionResizeMode(1)
recipeStepsTable.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
# Steps spreadsheet
recipeStepsTable.setColumnCount(4)
recipeStepsTable.setHorizontalHeaderLabels(["Temperature",
"Fan Speed", "Section Time", "Modify"])
return recipeStepsTable
def close_edit_window(self):
"""Method used to close the Recipe Editor Window."""
self.close()
def preload_recipe_steps(self, recipeStepsTable):
"""Method that just calls load_recipe_steps() with a table specified and
uses the pre-existing loaded recipe steps in the object."""
steps = self.recipe["steps"]
self.load_recipe_steps(recipeStepsTable, steps)
def load_recipe_steps(self, recipeStepsTable, steps):
"""Takes two arguments. One being the table and the second being the
rows you'd like to add. It does not clear the table and simply adds the
rows on the bottom if there are exiting rows."""
# Create spreadsheet choices
fanSpeedChoices = [str(x) for x in range(1,10)]
targetTempChoices = ["Cooling"] + [str(x) for x in range(150, 551, 10)]
# loop through recipe and load each step
for row in range(len(steps)):
recipeStepsTable.insertRow(recipeStepsTable.rowCount())
# Temperature Value
sectionTempWidget = customqtwidgets.ComboBoxNoWheel()
sectionTempWidget.setObjectName("recipeEditCombo")
sectionTempWidget.addItems(targetTempChoices)
sectionTempWidget.insertSeparator(1)
if 'targetTemp' in steps[row]:
sectionTemp = steps[row]["targetTemp"]
# Accommodate for temperature not fitting in 10 increment list
if str(steps[row]["targetTemp"]) in targetTempChoices:
sectionTempWidget.setCurrentIndex(
targetTempChoices.index(
str(steps[row]["targetTemp"]))+1)
else:
roundedNumber = steps[row]["targetTemp"] - (steps[row]["targetTemp"] % 10)
sectionTempWidget.insertItem(targetTempChoices.index(str(roundedNumber))+2, str(steps[row]["targetTemp"]))
sectionTempWidget.setCurrentIndex(targetTempChoices.index(str(roundedNumber))+2)
elif 'cooling' in steps[row]:
sectionTemp = "Cooling"
sectionTempWidget.setCurrentIndex(targetTempChoices.index("Cooling"))
# Time Value
sectionTimeWidget = customqtwidgets.TimeEditNoWheel()
sectionTimeWidget.setObjectName("recipeEditTime")
sectionTimeWidget.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
sectionTimeWidget.setDisplayFormat("mm:ss")
# Set QTimeEdit to the right time from recipe
sectionTimeStr = time.strftime("%M:%S", time.gmtime(steps[row]["sectionTime"]))
sectionTime = QtCore.QTime().fromString(sectionTimeStr, "mm:ss")
sectionTimeWidget.setTime(sectionTime)
# Fan Speed Value
sectionFanSpeedWidget = customqtwidgets.ComboBoxNoWheel()
sectionFanSpeedWidget.setObjectName("recipeEditCombo")
sectionFanSpeedWidget.addItems(fanSpeedChoices)
sectionFanSpeedWidget.setCurrentIndex(fanSpeedChoices.index(str(steps[row]["fanSpeed"])))
# Modify Row field
upArrow = QtWidgets.QPushButton()
upArrow.setObjectName("upArrow")
#upArrow.setIcon(QtGui.QIcon('static/images/upSmall.png'))
upArrow.setIcon(
QtGui.QIcon(
utils.get_resource_filename(
'static/images/upSmall.png'
)
)
)
upArrow.clicked.connect(functools.partial(self.move_recipe_step_up, row))
downArrow = QtWidgets.QPushButton()
downArrow.setObjectName("downArrow")
#downArrow.setIcon(QtGui.QIcon('static/images/downSmall.png'))
downArrow.setIcon(
QtGui.QIcon(
utils.get_resource_filename(
'static/images/downSmall.png'
)
)
)
downArrow.clicked.connect(functools.partial(self.move_recipe_step_down, row))
deleteRow = QtWidgets.QPushButton()
# deleteRow.setIcon(QtGui.QIcon('static/images/delete.png'))
deleteRow.setIcon(
QtGui.QIcon(
utils.get_resource_filename(
'static/images/delete.png'
)
)
)
deleteRow.setObjectName("deleteRow")
deleteRow.clicked.connect(functools.partial(self.delete_recipe_step, row))
insertRow = QtWidgets.QPushButton()
# insertRow.setIcon(QtGui.QIcon('static/images/plus.png'))
insertRow.setIcon(
QtGui.QIcon(
utils.get_resource_filename(
'static/images/plus.png'
)
)
)
insertRow.setObjectName("insertRow")
insertRow.clicked.connect(functools.partial(self.insert_recipe_step, row))
# Create a grid layout to add all the widgets to
modifyRowWidgetLayout = QtWidgets.QHBoxLayout()
modifyRowWidgetLayout.setSpacing(0)
modifyRowWidgetLayout.setContentsMargins(0,0,0,0)
modifyRowWidgetLayout.addWidget(upArrow)
modifyRowWidgetLayout.addWidget(downArrow)
modifyRowWidgetLayout.addWidget(deleteRow)
modifyRowWidgetLayout.addWidget(insertRow)
# Assign Layout to a QWidget to add to a single column
modifyRowWidget = QtWidgets.QWidget()
modifyRowWidget.setObjectName("buttonTable")
modifyRowWidget.setLayout(modifyRowWidgetLayout)
# Add widgets
recipeStepsTable.setCellWidget(row, 0, sectionTempWidget)
recipeStepsTable.setCellWidget(row, 1, sectionFanSpeedWidget)
recipeStepsTable.setCellWidget(row, 2, sectionTimeWidget)
recipeStepsTable.setCellWidget(row, 3, modifyRowWidget)
def load_recipe_file(self, recipeFile):
"""Takes a file location and opens that file. It then loads the contents
which should be JSON and makes a python dictionary from the contents.
The python dictionary is created as self.recipe."""
# Load recipe file
recipeFileHandler = open(recipeFile)
self.recipe = json.load(recipeFileHandler)
self.recipe["file"] = recipeFile
recipeFileHandler.close()
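    # Illustrative sketch (field names taken from the accessors in this class,
    # values invented): a minimal recipe file this editor can load looks like
    #   {"roastName": "Sample", "creator": "someone",
    #    "roastDescription": {"roastType": "City", "description": "..."},
    #    "bean": {"region": "Yirgacheffe", "country": "Ethiopia",
    #             "source": {"reseller": "Some Store", "link": "http://example.com"}},
    #    "steps": [{"fanSpeed": 5, "targetTemp": 300, "sectionTime": 60},
    #              {"fanSpeed": 9, "cooling": true, "sectionTime": 120}]}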
def preload_recipe_information(self):
"""Loads information from self.recipe and prefills all the fields in the
form."""
self.recipeName.setText(self.recipe["roastName"])
self.recipeCreator.setText(self.recipe["creator"])
self.recipeRoastType.setText(self.recipe["roastDescription"]["roastType"])
self.beanRegion.setText(self.recipe["bean"]["region"])
self.beanCountry.setText(self.recipe["bean"]["country"])
self.beanLink.setText(self.recipe["bean"]["source"]["link"])
self.beanStore.setText(self.recipe["bean"]["source"]["reseller"])
self.recipeDescriptionBox.setText(self.recipe["roastDescription"]["description"])
self.preload_recipe_steps(self.recipeSteps)
def move_recipe_step_up(self, row):
"""This method will take a row and swap it the row above it."""
if row != 0:
steps = self.get_current_table_values()
newSteps = steps
# Swap the steps
newSteps[row], newSteps[row-1] = newSteps[row-1], newSteps[row]
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def move_recipe_step_down(self, row):
"""This method will take a row and swap it the row below it."""
if row != self.recipeSteps.rowCount()-1:
steps = self.get_current_table_values()
newSteps = steps
# Swap the steps
newSteps[row], newSteps[row+1] = newSteps[row+1], newSteps[row]
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def delete_recipe_step(self, row):
"""This method will take a row delete it."""
steps = self.get_current_table_values()
newSteps = steps
# Delete step
newSteps.pop(row)
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def insert_recipe_step(self, row):
"""Inserts a row below the specified row wit generic values."""
steps = self.get_current_table_values()
newSteps = steps
# insert step
newSteps.insert(row+1, {'fanSpeed': 5, 'targetTemp': 150, 'sectionTime': 0})
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def get_current_table_values(self):
"""Used to read all the current table values from the recipeSteps table
        and build a list of dictionaries with the values of each row."""
recipeSteps = []
for row in range(0, self.recipeSteps.rowCount()):
currentRow = {}
currentRow["sectionTime"] = QtCore.QTime(0, 0, 0).secsTo(self.recipeSteps.cellWidget(row, 2).time())
currentRow["fanSpeed"] = int(self.recipeSteps.cellWidget(row, 1).currentText())
# Get Temperature or cooling
if self.recipeSteps.cellWidget(row, 0).currentText() == "Cooling":
currentRow["cooling"] = True
else:
currentRow["targetTemp"] = int(self.recipeSteps.cellWidget(row, 0).currentText())
recipeSteps.append(currentRow)
# Return copied rows
return recipeSteps
def rebuild_recipe_steps_table(self, newSteps):
"""Used to reload all the rows in the recipe steps table with new steps.
"""
# Alert user if they try to delete all the steps
if len(newSteps) < 1:
alert = QtWidgets.QMessageBox()
alert.setWindowTitle('openroast')
alert.setStyleSheet(self.style)
alert.setText("You must have atleast one step!")
alert.exec_()
else:
# Delete all the current rows
while self.recipeSteps.rowCount() > 0:
self.recipeSteps.removeRow(0)
# Add the new step sequence
self.load_recipe_steps(self.recipeSteps, newSteps)
def save_recipe(self):
"""Pulls in all of the information in the window and creates a new
recipe file with the specified contents."""
# Determine Recipe File Name
if "file" in self.recipe:
filePath = self.recipe["file"]
else:
filePath = os.path.expanduser('~/Documents/Openroast/Recipes/My Recipes/') + tools.format_filename(self.recipeName.text()) + ".json"
# TODO: Account for existing file with same name
# Create Dictionary with all the new recipe information
self.newRecipe = {}
self.newRecipe["roastName"] = self.recipeName.text()
self.newRecipe["steps"] = self.get_current_table_values()
self.newRecipe["roastDescription"] = {}
self.newRecipe["roastDescription"]["roastType"] = self.recipeRoastType.text()
self.newRecipe["roastDescription"]["description"] = self.recipeDescriptionBox.toPlainText()
self.newRecipe["creator"] = self.recipeCreator.text()
self.newRecipe["bean"] = {}
self.newRecipe["bean"]["region"] = self.beanRegion.text()
self.newRecipe["bean"]["country"] = self.beanCountry.text()
self.newRecipe["bean"]["source"] = {}
self.newRecipe["bean"]["source"]["reseller"] = self.beanStore.text()
self.newRecipe["bean"]["source"]["link"] = self.beanLink.text()
self.newRecipe["totalTime"] = 0
for step in self.newRecipe["steps"]:
self.newRecipe["totalTime"] += step["sectionTime"]
# Write the recipe to a file
jsonObject = json.dumps(self.newRecipe, indent=4)
# will need to create dir if it doesn't exist
# note that this should never happen because this folder is created
# at OpenroastApp.__init__() time.
if not os.path.exists(os.path.dirname(filePath)):
try:
os.makedirs(os.path.dirname(filePath))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
file = open(filePath, 'w')
file.write(jsonObject)
file.close()
| Roastero/Openroast | openroast/views/recipeeditorwindow.py | Python | gpl-3.0 | 19,366 | 0.001962 |
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyqualtrics import Qualtrics
import os
user = None # os.environ["QUALTRICS_USER"]
token = None # os.environ["QUALTRICS_TOKEN"]
if __name__ == "__main__":
print "This is an example of panel import"
print "Make sure you have set QUALTRICS_USER, QUALTRICS_TOKEN and QUALTRICS_LIBRARY_ID enviroment variable"
# Note is user and token are None, QUALTRICS_USER and QUALTRICS_TOKEN environment variables will be used instead
qualtrics = Qualtrics(user, token)
library_id = os.environ["QUALTRICS_LIBRARY_ID"]
panel_id = qualtrics.importJsonPanel(
library_id,
Name="New Panel Created by PyQualtrics library (DELETE ME)",
panel=[
{"Email": "pyqualtrics+1@gmail.com", "FirstName": "PyQualtrics", "LastName": "Library", "SubjectID": "123"},
{"Email": "pyqualtrics+2@gmail.com", "FirstName": "PyQualtrics2", "LastName": "Library2"}
],
headers=["Email", "FirstName", "LastName", "ExternalRef", "SubjectID"],
AllED=1)
if qualtrics.last_error_message:
print "Error creating panel: " + qualtrics.last_error_message
else:
print "Panel created successfully, PanelID: " + panel_id
| Baguage/pyqualtrics | examples/import_panel_example.py | Python | apache-2.0 | 1,983 | 0.002017 |
# -*- coding: utf-8 -*-
"""
models
~~~~~~~~~~~~~~~~~~~~
Top-level models for the entire app.
:copyright: 2011 by Google, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
# standard Python library imports
import datetime
import logging
import urllib
# App Engine imports
from google.appengine.ext import db
from google.appengine.ext import blobstore
from django.utils import simplejson
# local imports
import timesince
# roles the system distinguishes for each user
USER_ROLES = ('Applicant', 'Permit Approver')
# Cases may be ordered lexicographically by state; the first three characters
# of the state string (the value in the dict) will be stripped before display.
CASE_STATES = {'incomplete': '00 Incomplete',
'submitted': '10 Submitted For Review',
'under_review': '20 Review Under Way',
'needs_work': '30 Needs Work',
'approved': '40 Approved',
'denied': '50 Rejected',
}
# the case states in which an applicant can upload files and/or notes
APPLICANT_EDITABLE = set(CASE_STATES[x]
for x in 'incomplete submitted needs_work'.split())
# the kind of actions that cause a case to change
CASE_ACTIONS = ('Create', 'Update', 'Submit',
'Review', 'Reassign', 'Comment', 'Approve', 'Deny')
# documents an applicant must upload to submit a case for approver review
PURPOSES = (
'Site Diagram',
'Electrical Diagram',
'Diagram Notes'
)
class ModelEncoder(simplejson.JSONEncoder):
def default(self, obj):
"""Allow JSON encoding of a db.Model instance."""
try:
return obj.json()
except (AttributeError, TypeError):
return simplejson.JSONEncoder.default(self, obj)
class JurisModel(db.Model):
"""A db.Model with a jurisdiction attached (abstract base class)."""
juris = db.StringProperty(required=True)
timestamp = db.DateTimeProperty(auto_now_add=True, required=True)
@classmethod
def get_all(cls):
return cls.all().order('-timestamp')
@property
def timesince(self):
"""Readable form for this object's timestamp."""
return timesince.timesince(self.timestamp)
class LatropMessage(JurisModel):
"""A message received by the latrop."""
msg = db.StringProperty(required=True)
@classmethod
def create(cls, juris, msg):
obj = cls(juris=juris, msg=msg)
obj.put()
# TODO: the other models must be changed to be appropriate for the latrop
# (mandatory juris, factories, different methods, and so on).
class User(JurisModel):
"""A user of this permiting application."""
# TODO: add authentication mechanisms / tokens
# email works as the "primary key" to identify a user
email = db.EmailProperty(required=True)
# application logic ensures a role gets assigned when a new user logs in
# for the first time, but the User object is first created w/o a role
role = db.StringProperty(choices=USER_ROLES, required=False)
def json(self):
"""Return JSON-serializable form."""
return {'cls': 'User', 'email': self.email, 'role': self.role}
@classmethod
def get_by_email(cls, email):
return cls.all().filter('email = ', email).get()
@property
def can_upload(self):
return self.role == 'Applicant'
@property
def can_approve(self):
return self.role == 'Permit Approver'
def __eq__(self, other):
return other is not None and self.email == other.email
def __ne__(self, other):
return other is None or self.email != other.email
class Case(JurisModel):
"""A project for which approval is requested."""
address = db.StringProperty(required=True)
creation_date = db.DateProperty(required=True, auto_now_add=True)
owner = db.ReferenceProperty(User, required=True)
state = db.StringProperty(required=True, choices=CASE_STATES.values())
def json(self):
"""Return JSON-serializable form."""
return {'cls': 'Case', 'address': self.address,
'owner': self.owner.json(), 'state': self.state}
@classmethod
def query_by_owner(cls, user):
"""Returns a db.Query for all cases owned by this user."""
return cls.all().filter('owner = ', user)
@classmethod
def query_under_review(cls):
"""Returns a db.Query for all cases under review."""
return cls.all().filter('state = ', CASE_STATES['under_review'])
@classmethod
def query_submitted(cls):
"""Returns a db.Query for all cases in the submitted state."""
return cls.all().filter('state = ', CASE_STATES['submitted'])
@classmethod
def reviewed_by(cls, user):
"""Returns two lists: cases being reviewed by the user vs by other users."""
these_cases, other_cases = [], []
for case in cls.query_under_review().run():
if case.reviewer == user:
these_cases.append(case)
else:
other_cases.append(case)
return these_cases, other_cases
@classmethod
def create(cls, owner, **k):
"""Creates and returns a new case."""
case = cls(state=CASE_STATES['incomplete'], owner=owner, **k)
case.put()
CaseAction.make(action='Create', case=case, actor=owner)
return case
def submit(self, actor, notes):
"""Submits the case for review."""
self.state = CASE_STATES['submitted']
self.put()
CaseAction.make(action='Submit', case=self, actor=actor, notes=notes)
def review(self, approver):
"""Assigns the case for review by the given approver."""
previous_reviewer = self.reviewer
if previous_reviewer == approver:
# case was already under review by the given approver, no-op
return
# reviewer assignment or change requires actual action, state change
self.state = CASE_STATES['under_review']
self.put()
CaseAction.make(action='Review', case=self, actor=approver)
def approve(self, actor, notes):
"""Marks the case as approved."""
self.state = CASE_STATES['approved']
self.put()
CaseAction.make(action='Approve', case=self, actor=actor, notes=notes)
def comment(self, actor, notes):
"""Returns the case to the applicant requesting changes."""
self.state = CASE_STATES['needs_work']
self.put()
CaseAction.make(action='Comment', case=self, actor=actor, notes=notes)
@property
def visible_state(self):
"""Returns the display form of this case's state."""
return self.state[3:]
@property
def latest_action(self):
"""Returns the latest action recorded on this case."""
return CaseAction.query_by_case(self).order('-timestamp').get()
@property
def last_modified(self):
"""Returns the timestamp at which this case was last modified."""
return datetime.datetime.now() - self.latest_action.timestamp
@property
def applicant_can_edit(self):
"""True iff an applicant can currently modify this case."""
return self.state in APPLICANT_EDITABLE
@property
def reviewer(self):
"""Returns the case's current reviewer, or None."""
if self.state != CASE_STATES['under_review']:
return None
return CaseAction.query_by_case(self, 'Review').get().actor
@property
def submit_blockers(self):
"""Returns a list of the reasons the case may not yet be submitted (an
empty list if the case may be submitted).
"""
blockers = []
for purpose in PURPOSES:
if not self.get_document(purpose):
blockers.append('Missing %s' % purpose)
return blockers
def get_document(self, purpose):
"""Returns the document from this case for the given purpose."""
q = CaseAction.query_by_case(self, 'Update').filter('purpose =', purpose)
return q.get()
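# Illustrative lifecycle sketch (construction simplified, values invented):
#   case = Case.create(owner=applicant, juris='demo', address='1 Main St')
#   case.submit(actor=applicant, notes='ready for review')     # -> Submitted For Review
#   case.review(approver)                                       # -> Review Under Way
#   case.comment(actor=approver, notes='add a site diagram')    # -> Needs Work
#   case.approve(actor=approver, notes='all good')              # -> Approved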
class CaseAction(JurisModel):
"""Immutable once fully created (by the `make` classmethod)."""
action = db.StringProperty(required=True, choices=CASE_ACTIONS)
case = db.ReferenceProperty(Case, required=True)
actor = db.ReferenceProperty(User, required=True)
purpose = db.StringProperty(required=False, choices=PURPOSES)
notes = db.TextProperty(required=False)
upload = blobstore.BlobReferenceProperty(required=False)
def json(self):
"""Return JSON-serializable form."""
d = {'cls': 'Action', 'case': self.case.json(), 'actor': self.actor.json()}
if self.purpose:
d['purpose'] = self.purpose
if self.notes:
d['notes'] = self.notes
if self.upload:
d['upload'] = str(self.upload.key())
d['timestamp'] = self.timestamp.isoformat()
return d
@classmethod
def make(cls, **k):
"""Create and put an action, and log information about it."""
# TODO: send info about the action to the latrop
logging.info('********** ')
logging.info('********** NEW ACTION: %s', k)
logging.info('********** JSON: %r',
simplejson.dumps(k, skipkeys=True, cls=ModelEncoder))
logging.info('********** ')
action = cls(**k)
action.put()
@classmethod
def query_by_case(cls, case, action=None):
"""Returns a db.Query for actions on the given case. If action is not None,
    only actions of the given kind are included. The query is ordered by
reverse timestamp (i.e., more recent actions first).
"""
q = cls.all().filter('case = ', case)
if action is not None:
q.filter('action = ', action)
return q.order('-timestamp')
@classmethod
def upload_document_action(cls, case, purpose, user, blob_info, notes):
"""Create and put an action of uploading a document and/or notes."""
cls.make(action='Update', case=case, actor=user, purpose=purpose,
notes=notes, upload=blob_info)
@property
def download_url(self):
"""URL for downloading this action's document, or empty string if none."""
if not self.upload:
return ''
return '/document/serve/%s/%s' % (
urllib.quote(str(self.upload.key())),
urllib.quote(self.upload.filename))
| aleaxit/pysolper | latrop/models.py | Python | apache-2.0 | 10,073 | 0.014196 |
import threading
class CloseableThread(threading.Thread):
    running = False
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group=group, target=target, name=name, args=args, kwargs=kwargs)
    def start(self):
        # Mark the thread as running before starting execution.
        self.running = True
        threading.Thread.start(self)
    def stop(self, onStop=None):
        # Signal the target to stop, then run the optional callback.
        self.running = False
        if onStop:
            onStop()
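# Illustrative usage sketch (function and variable names invented): a target
# can poll the running flag so that stop() ends it cooperatively, e.g.
#
#   def poll_loop():
#       while worker.running:
#           handle_requests()
#
#   worker = CloseableThread(target=poll_loop)
#   worker.start()
#   ...
#   worker.stop()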
| zaquestion/vendttp | server/CloseableThread.py | Python | gpl-2.0 | 447 | 0.035794 |
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import Config
from synapse.util.check_dependencies import check_requirements
class RedisConfig(Config):
section = "redis"
def read_config(self, config, **kwargs):
redis_config = config.get("redis") or {}
self.redis_enabled = redis_config.get("enabled", False)
if not self.redis_enabled:
return
check_requirements("redis")
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
self.redis_password = redis_config.get("password")
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
# Configuration for Redis when using workers. This *must* be enabled when
# using workers (unless using old style direct TCP configuration).
#
redis:
# Uncomment the below to enable Redis support.
#
#enabled: true
# Optional host and port to use to connect to redis. Defaults to
# localhost and 6379
#
#host: localhost
#port: 6379
# Optional password if configured on the Redis instance
#
#password: <secret_password>
"""
| matrix-org/synapse | synapse/config/redis.py | Python | apache-2.0 | 1,857 | 0.000539 |
import bench
class Stats(bench.Bench):
def __init__(self, league):
bench.Bench.__init__(self)
self.league = league
self.type = 'stats'
def list(self, team=False, player=False):
"""
        Lists all stats for the current season to date. Can be filtered by team or by player. By default, returns the stat
        dump for the whole league.
:param team: Unique ID of the team to filter for
:param player: Unique ID of the player to filter for
:return:
"""
def get_player_stats(self, player, week=False):
"""
Lists the stat breakdown by week for a given player. Can also be filtered to only return a specific week or a
range of weeks
:param player: Unique ID of the player to filter for
:param week: Optional. Can be a single week or a range ex: 1-4. If blank will default to season to date
:return:
"""
| sroche0/mfl-pyapi | modules/stats.py | Python | gpl-3.0 | 929 | 0.003229 |
from distutils.core import setup
setup(
name='amenu',
version='1.0.3',
author='ludbek',
author_email='sth.srn@gmail.com',
packages= ['amenu', 'amenu.migrations', 'amenu.templatetags'],
scripts=[],
url='https://github.com/ludbek/amenu',
license='LICENSE.txt',
description='A menu plugin for DjangoCMS.',
long_description=open('README.md').read(),
install_requires=[
"South == 1.0.1",
"django-cms >= 3.0.7",
"django-classy-tags == 0.5.2",
],
include_package_data=True,
)
| ludbek/amenu | setup.py | Python | mit | 546 | 0.001832 |
# imports - compatibility imports
from __future__ import absolute_import
# imports - standard imports
import os
# imports - module imports
from spockpy.app.config import BaseConfig
class ServerConfig(BaseConfig):
class Path(BaseConfig.Path):
ABSPATH_TEMPLATES = os.path.join(BaseConfig.Path.ABSPATH_VIEWS, 'templates')
HOST = '0.0.0.0'
PORT = int(os.getenv('PORT', 3000))
| achillesrasquinha/spockpy | spockpy/app/config/server.py | Python | apache-2.0 | 381 | 0.013123 |
"""Module grouping session scoped PyTest fixtures."""
import datetime
import glob
import os
import tempfile
import owslib
import pytest
from _pytest.monkeypatch import MonkeyPatch
from owslib.etree import etree
from owslib.feature.schema import _construct_schema, _get_elements
from owslib.iso import MD_Metadata
from owslib.util import ResponseWrapper, findall
from owslib.wfs import WebFeatureService
import pydov
from pydov import Hooks
from pydov.util import owsutil
from pydov.util.caching import GzipTextFileCache, PlainTextFileCache
from pydov.util.dovutil import build_dov_url
def pytest_runtest_setup():
pydov.hooks = Hooks()
def pytest_configure(config):
config.addinivalue_line("markers",
"online: mark test that requires internet access")
@pytest.fixture(scope='module')
def monkeymodule():
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
@pytest.fixture(scope='module')
def mp_wfs(monkeymodule):
"""Monkeypatch the call to the remote GetCapabilities request.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
"""
def read(*args, **kwargs):
with open('tests/data/util/owsutil/wfscapabilities.xml', 'r',
encoding='utf-8') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeymodule.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture(scope='module')
def wfs(mp_wfs):
"""PyTest fixture providing an instance of a WebFeatureService based on
a local copy of a GetCapabilities request.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
Returns
-------
owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
return WebFeatureService(
url=build_dov_url('geoserver/wfs'), version="1.1.0")
@pytest.fixture()
def mp_remote_fc_notfound(monkeypatch):
"""Monkeypatch the call to get an inexistent remote featurecatalogue.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def __get_remote_fc(*args, **kwargs):
with open('tests/data/util/owsutil/fc_featurecatalogue_notfound.xml',
'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.owsutil, '__get_remote_fc', __get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_md(wfs, monkeymodule, request):
"""Monkeypatch the call to get the remote metadata of the layer.
This monkeypatch requires a module variable ``location_md_metadata``
with the path to the md_metadata file on disk.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_md(*args, **kwargs):
file_path = getattr(request.module, "location_md_metadata")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return MD_Metadata(etree.fromstring(data).find(
'./{http://www.isotc211.org/2005/gmd}MD_Metadata'))
monkeymodule.setattr(pydov.util.owsutil, 'get_remote_metadata',
__get_remote_md)
@pytest.fixture(scope='module')
def mp_remote_fc(monkeymodule, request):
"""Monkeypatch the call to get the remote feature catalogue.
This monkeypatch requires a module variable
``location_fc_featurecatalogue`` with the path to the fc_featurecatalogue
file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_fc(*args, **kwargs):
file_path = getattr(request.module, "location_fc_featurecatalogue")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil, '__get_remote_fc',
__get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_describefeaturetype(monkeymodule, request):
"""Monkeypatch the call to a remote DescribeFeatureType.
This monkeypatch requires a module variable
``location_wfs_describefeaturetype`` with the path to the
wfs_describefeaturetype file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_describefeaturetype(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_describefeaturetype")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil,
'__get_remote_describefeaturetype',
__get_remote_describefeaturetype)
@pytest.fixture(scope='module')
def mp_get_schema(monkeymodule, request):
def __get_schema(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_describefeaturetype")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
root = etree.fromstring(data)
typename = root.find(
'./{http://www.w3.org/2001/XMLSchema}element').get('name')
if ":" in typename:
typename = typename.split(":")[1]
type_element = findall(
root,
"{http://www.w3.org/2001/XMLSchema}element",
attribute_name="name",
attribute_value=typename,
)[0]
complex_type = type_element.attrib["type"].split(":")[1]
elements = _get_elements(complex_type, root)
nsmap = None
if hasattr(root, "nsmap"):
nsmap = root.nsmap
return _construct_schema(elements, nsmap)
monkeymodule.setattr(pydov.search.abstract.AbstractSearch, '_get_schema',
__get_schema)
@pytest.fixture(scope='module')
def wfs_getfeature(request):
"""PyTest fixture providing a WFS GetFeature response.
This monkeypatch requires a module variable ``location_wfs_getfeature``
with the path to the wfs_getfeature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
str
WFS response of a GetFeature call to the dov-pub:Boringen layer.
"""
file_path = getattr(request.module, "location_wfs_getfeature")
with open(file_path, 'r') as f:
data = f.read()
return data
@pytest.fixture(scope='module')
def wfs_feature(request):
"""PyTest fixture providing an XML of a WFS feature element.
This monkeypatch requires a module variable ``location_wfs_feature``
with the path to the wfs_feature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
etree.Element
XML element representing a single record of the Boring WFS layer.
"""
file_path = getattr(request.module, "location_wfs_feature")
with open(file_path, 'r') as f:
return etree.fromstring(f.read())
@pytest.fixture(scope='module')
def mp_remote_wfs_feature(monkeymodule, request):
"""Monkeypatch the call to get WFS features.
This monkeypatch requires a module variable ``location_wfs_getfeature``
with the path to the wfs_getfeature file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_wfs_feature(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_getfeature")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil,
'wfs_get_feature',
__get_remote_wfs_feature)
@pytest.fixture(scope='module')
def mp_dov_xml(monkeymodule, request):
"""Monkeypatch the call to get the remote XML data.
This monkeypatch requires a module variable ``location_dov_xml``
with the path to the dov_xml file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def _get_xml_data(*args, **kwargs):
file_path = getattr(request.module, "location_dov_xml")
with open(file_path, 'r', encoding="utf-8") as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.types.abstract.AbstractDovType,
'_get_xml_data', _get_xml_data)
@pytest.fixture()
def mp_dov_xml_broken(monkeypatch):
"""Monkeypatch the call to break the fetching of remote XML data.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def _get_xml_data(*args, **kwargs):
raise RuntimeError
monkeypatch.setattr(pydov.types.abstract.AbstractDovType,
'_get_xml_data', _get_xml_data)
@pytest.fixture()
def mp_remote_xsd(monkeymodule, request):
"""Monkeypatch the call to get the remote XSD schemas.
This monkeypatch requires a module variable ``location_xsd_base``
with a glob expression to the XSD file(s) on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def _get_remote_xsd(*args, **kwargs):
xsd_base_path = getattr(request.module, "location_xsd_base")
schemas = []
for xsd_file in glob.glob(xsd_base_path):
with open(xsd_file, 'r', encoding="utf-8") as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
schemas.append(etree.fromstring(data))
return schemas
monkeymodule.setattr(pydov.search.abstract.AbstractSearch,
'_get_remote_xsd_schemas', _get_remote_xsd)
@pytest.fixture
def md_metadata(wfs, mp_remote_md):
"""PyTest fixture providing a MD_Metadata instance of the
dov-pub:Boringen layer.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
mp_remote_md : pytest.fixture
Monkeypatch the call to get the remote metadata of the
dov-pub:Boringen layer.
Returns
-------
owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
contentmetadata = wfs.contents['dov-pub:Boringen']
return owsutil.get_remote_metadata(contentmetadata)
@pytest.fixture
def mp_remote_xml(monkeypatch):
"""Monkeypatch the call to get the remote Boring XML data.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def _get_remote_data(*args, **kwargs):
with open('tests/data/types/boring/boring.xml', 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.caching.AbstractFileCache,
'_get_remote', _get_remote_data)
@pytest.fixture
def plaintext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
Parameters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
plaintext_cache = PlainTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = plaintext_cache
yield plaintext_cache
plaintext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def gziptext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
Parameters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
gziptext_cache = GzipTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = gziptext_cache
yield gziptext_cache
gziptext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def nocache():
"""Fixture to temporarily disable caching."""
orig_cache = pydov.cache
pydov.cache = None
yield
pydov.cache = orig_cache
@pytest.fixture(autouse=True, scope='function')
def patch_owslib_openURL(monkeypatch):
"""Fixture to patch OWSLib's openURL function in favor of a GET request
using our pydov requests session."""
def _openURL(*args, **kwargs):
"""Patch function for owslib.util.openURL using our custom pydov
requests session.
Parameters
----------
url : str
URL to open.
Returns
-------
ResponseWrapper
Wrapped response of the request.
"""
url = args[0]
return ResponseWrapper(pydov.session.get(url))
monkeypatch.setattr('owslib.util.openURL', _openURL)
monkeypatch.setattr('owslib.feature.common.openURL', _openURL)
monkeypatch.setattr('owslib.feature.schema.openURL', _openURL)
monkeypatch.setattr('owslib.feature.wfs110.openURL', _openURL)
| DOV-Vlaanderen/pydov | tests/conftest.py | Python | mit | 15,105 | 0 |
import libtcod.libtcodpy as libtcod
from random import randint
SCREEN_WIDTH = 40
SCREEN_HEIGHT = 20
LIMIT_FPS = 20
MAP_WIDTH = 80
MAP_HEIGHT = 45
ROOM_MAX_SIZE = 10
ROOM_MIN_SIZE = 10
MAX_ROOMS = 30
MAX_ROOM_MONSTERS = 3
FOV_ALGO = libtcod.FOV_SHADOW
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 8
void_color = libtcod.Color(0, 0, 0)
color_pairs = {
"void": (libtcod.Color(0, 0, 0), libtcod.Color(0, 0, 0)),
"bg_wall": (libtcod.Color(25, 25, 25), libtcod.Color(50, 50, 25)),
"fg_wall": (libtcod.Color(50, 50, 50), libtcod.Color(75, 75, 50)),
"bg_floor": (libtcod.Color(50, 50, 50), libtcod.Color(75, 75, 50)),
"fg_floor": (libtcod.Color(75, 75, 75), libtcod.Color(100, 100, 75)),
"fg_stairs": (libtcod.Color(100, 100, 75), libtcod.Color(125, 125, 75)),
}
libtcod.console_set_custom_font('consolas_unicode_12x12.png', libtcod.FONT_LAYOUT_ASCII_INROW | libtcod.FONT_TYPE_GREYSCALE, nb_char_horiz=32, nb_char_vertic=64)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'rbowl', False)
libtcod.sys_set_fps(LIMIT_FPS)
class Game:
def __init__(self):
self.state = 'playing'
self.player_action = None
self.map = Map()
self.player = Object(self.map,
self.map.start_x,
self.map.start_y,
'@', 'player', blocks=True)
self.screen = Screen(self, self.map)
self.screen.move(self.map.start_x - SCREEN_WIDTH/2,
self.map.start_y - SCREEN_HEIGHT/2)
self.fov_recompute = True
self.con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
libtcod.console_set_default_foreground(self.con, libtcod.white)
self.pressed = set()
def run(self):
while not libtcod.console_is_window_closed():
self.screen.display(self.con)
for obj in self.map.objects:
obj.clear(self.con)
#handle keys and exit game if needed
action = self.handle_keys()
if action == 'exit':
break
if action is not None:
pass
def handle_keys(self):
key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED | libtcod.KEY_RELEASED)
if key.vk == libtcod.KEY_ESCAPE:
return 'exit'
elif key.vk == libtcod.KEY_CHAR:
if key.pressed:
self.pressed.add(key.c)
else:
try:
self.pressed.remove(key.c)
except KeyError:
pass
if ord('w') in self.pressed:
self.screen.move(0, -1)
elif ord('s') in self.pressed:
self.screen.move(0, 1)
elif ord('a') in self.pressed:
self.screen.move(-1, 0)
elif ord('d') in self.pressed:
self.screen.move(1, 0)
if self.state == 'playing':
if libtcod.console_is_key_pressed(libtcod.KEY_UP):
self.player.move(0, -1)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_DOWN):
self.player.move(0, 1)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_LEFT):
self.player.move(-1, 0)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_RIGHT):
self.player.move(1, 0)
self.fov_recompute = True
else:
return None
return 'action'
class Rect:
"A rectangle on the map."
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x+w
self.y2 = y+h
def center(self):
center_x = (self.x1 + self.x2) / 2
center_y = (self.y1 + self.y2) / 2
return (center_x, center_y)
def intersect(self, other):
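        # True if this rectangle overlaps the other one (touching edges count).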
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y1)
class TileType:
"Types for tiles of the map, and its properties"
def __init__(self, char, fg_color_pair, bg_color_pair, blocked, block_sight=None):
self.char = char
self.fg_color, self.fg_color_lit = fg_color_pair
self.bg_color, self.bg_color_lit = bg_color_pair
self.blocked = blocked
self.block_sight = block_sight or blocked
tiletypes = {
'void': TileType(' ', color_pairs["void"], color_pairs["void"], True),
'floor': TileType('.', color_pairs["fg_floor"], color_pairs["bg_floor"], False),
'wall': TileType('#', color_pairs["fg_wall"], color_pairs["bg_wall"], True),
'up_stairs': TileType('<', color_pairs["fg_stairs"], color_pairs["bg_floor"], False),
'down_stairs': TileType('>', color_pairs["fg_stairs"], color_pairs["bg_floor"], False),
}
class Tile:
"Tile of the map, and its properties"
def __init__(self, type):
self.type = tiletypes[type]
self.explored = False
class Map:
def __init__(self, width=MAP_WIDTH, height=MAP_HEIGHT):
self.width = width
self.height = height
self.tiles = [[Tile('wall')
for y in range(self.height)]
for x in range(self.width)]
self.fov_map = libtcod.map_new(self.width, self.height)
self.objects = []
self.rooms = []
self.num_rooms = 0
for r in range(MAX_ROOMS):
w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
new_room = Rect(x, y, w, h)
failed = False
for other_room in self.rooms:
if new_room.intersect(other_room):
failed = True
break
if not failed:
self.create_room(new_room)
self.generate_room_objects(new_room)
(new_x, new_y) = new_room.center()
if self.num_rooms == 0:
self.start_x = new_x
self.start_y = new_y
else:
self.join_rooms(new_room, self.rooms[-1])
self.end_x = new_x
self.end_y = new_y
self.rooms.append(new_room)
self.num_rooms += 1
self.tiles[self.start_x][self.start_y].type = tiletypes['up_stairs']
self.tiles[self.end_x][self.end_y].type = tiletypes['down_stairs']
for y in range(self.height):
for x in range(self.width):
libtcod.map_set_properties(self.fov_map,
x, y,
not self.tiles[x][y].type.block_sight,
not self.tiles[x][y].type.blocked)
def add_object(self, obj):
self.objects.append(obj)
def remove_object(self, obj):
self.objects.remove(obj)
def generate_room_objects(self, room):
num_monsters = libtcod.random_get_int(0, 0, MAX_ROOM_MONSTERS)
for i in range(num_monsters):
x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
y = libtcod.random_get_int(0, room.y1+1, room.y2-1)
if self.is_blocked(x, y):
continue
pick = libtcod.random_get_int(0, 0, 100)
if pick < 50:
monster = Object(self, x, y, 'g', 'goblin', color=libtcod.green, blocks=True)
            elif pick < 80:
monster = Object(self, x, y, 'o', 'orc', color=libtcod.desaturated_green, blocks=True)
else:
monster = Object(self, x, y, 'T', 'troll', color=libtcod.darker_green, blocks=True)
self.objects.append(monster)
def in_bounds(self, x, y):
return ((0 <= x < self.width) and (0 <= y < self.height))
def is_blocked(self, x, y):
if not self.in_bounds(x, y):
return True
if self.tiles[x][y].type.blocked:
return True
for object in self.objects:
if object.blocks and object.x == x and object.y == y:
return True
return False
def is_sightblocked(self, x, y):
if not self.in_bounds(x, y):
return True
return self.tiles[x][y].type.block_sight
def create_room(self, room):
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
if self.in_bounds(x, y):
self.tiles[x][y].type = tiletypes['floor']
def join_rooms(self, room1, room2):
cx1, cy1 = room1.center()
cx2, cy2 = room2.center()
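        # Connect the two room centers with an L-shaped corridor, choosing at
        # random whether to carve horizontally or vertically first.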
if libtcod.random_get_int(0, 0, 1) == 1:
self.create_h_tunnel(cx1, cx2, cy1)
self.create_v_tunnel(cx2, cy1, cy2)
else:
self.create_v_tunnel(cx1, cy1, cy2)
self.create_h_tunnel(cx1, cx2, cy2)
def create_h_tunnel(self, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
if self.in_bounds(x, y):
self.tiles[x][y].type = tiletypes['floor']
def create_v_tunnel(self, x, y1, y2):
for y in range(min(y1, y2), max(y1, y2) + 1):
if self.in_bounds(x, y):
self.tiles[x][y].type = tiletypes['floor']
def find_clear_space(self):
if len(self.rooms) == 0:
            return (libtcod.random_get_int(0, 0, self.width - 1),
                    libtcod.random_get_int(0, 0, self.height - 1))
room = self.rooms[libtcod.random_get_int(0, 0, len(self.rooms) - 1)]
return (libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1),
libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1))
class Screen:
def __init__(self, game, map, width=SCREEN_WIDTH, height=SCREEN_HEIGHT):
self.game = game
self.map = map
self.width = width
self.height = height
self.x_offset = 0
self.y_offset = 0
def move(self, dx, dy):
new_x = self.x_offset + dx
new_y = self.y_offset + dy
half_width = self.width/2
half_height = self.height/2
if -half_width < new_x < self.map.width - half_width:
self.x_offset = new_x
if -half_height < new_y < self.map.height - half_height:
self.y_offset = new_y
def display(self, con):
if self.game.fov_recompute:
#recompute FOV if needed (the player moved or something)
self.game.fov_recompute = False
libtcod.map_compute_fov(self.map.fov_map,
self.game.player.x, self.game.player.y,
TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO)
for y in range(self.height):
for x in range(self.width):
map_x, map_y = x + self.x_offset, y + self.y_offset
if self.map.in_bounds(map_x, map_y):
visible = libtcod.map_is_in_fov(self.map.fov_map, map_x, map_y)
tt = self.map.tiles[map_x][map_y].type
if visible:
libtcod.console_set_char_background(con, x, y, tt.bg_color_lit, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(con, tt.fg_color_lit)
libtcod.console_put_char(con, x, y, tt.char, libtcod.BKGND_NONE)
# TODO: Doing this here bugs it if the player's light radius is not on screen
self.map.tiles[map_x][map_y].explored = True
elif self.map.tiles[map_x][map_y].explored:
libtcod.console_set_char_background(con, x, y, tt.bg_color, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(con, tt.fg_color)
libtcod.console_put_char(con, x, y, tt.char, libtcod.BKGND_NONE)
else:
libtcod.console_set_char_background(con, x, y, void_color, libtcod.BKGND_SET)
libtcod.console_put_char(con, x, y, ' ', libtcod.BKGND_NONE)
else:
libtcod.console_set_char_background(con, x, y, void_color, libtcod.BKGND_SET)
libtcod.console_put_char(con, x, y, ' ', libtcod.BKGND_NONE)
for object in self.map.objects:
object.draw(con, self.x_offset, self.y_offset)
libtcod.console_blit(con, 0, 0, self.width, self.height, 0, 0, 0)
libtcod.console_flush()
class Object:
def __init__(self, map, x, y, char, name, blocks=False, color=None):
"""
Fill with defaults
"""
self.map = map
self.name = name
map.add_object(self)
self.x = x
self.y = y
self.char = char
self.color = color or libtcod.white
self.blocks = blocks
def draw(self, con, x_off, y_off):
if libtcod.map_is_in_fov(self.map.fov_map, self.x, self.y):
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x - x_off, self.y - y_off, self.char, libtcod.BKGND_NONE)
def move(self, dx, dy):
if self.map.is_blocked(self.x + dx, self.y + dy):
return
if 0 <= self.x + dx < MAP_WIDTH:
self.x += dx
if 0 <= self.y + dy < MAP_HEIGHT:
self.y += dy
def clear(self, con):
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
Game().run()
| gambl0r/roguebowl | rbowl.py | Python | unlicense | 14,250 | 0.007509 |
import cv, cv2
import rectangle
from numpy import concatenate
import logging
class Face(object):
def __init__(self, config = {}):
self.config = {
"top_offset" : 1.0,
"bottom_offset" : 1.0,
"left_offset" : 0.0,
"right_offset" : 0.0,
"haar_confidence" : 3,
"min_face_size" : (70,70),
"cascade_frontal" : "cascades/haarcascade_frontalface_default.xml",
"cascade_profile" : "cascades/haarcascade_profileface.xml"
}
self.set_config(config)
# Create the cascades. We use both, a frontal- and a profile face cascade
self.cascade_frontal = cv2.CascadeClassifier(self.config["cascade_frontal"])
self.cascade_profile = cv2.CascadeClassifier(self.config["cascade_profile"])
# Initially, we have no valid face detection.
self.face_positions = []
# In order to improve perfomance,
# keep the face position for a couple of frames.
# Find face again after a certain number of frames.
self.face_delay = 100
# Count how many frames have passed,
# since we last did a face detection
self.frames_passed = 0
def positions(self, img):
"""
Get all faces in an image.
Also apply some padding to remove the area next to the faces.
        This improves both performance and robustness of the hand search.
"""
self.frames_passed += 1
# Speedup. Only redetect after a certain delay.
if self.faces_invalid():
self.recalculate(img)
return self.face_positions
def faces_invalid(self):
"""
Check if we can still use the old face positions or
if the delay is over and we need to find the face again in the image.
"""
if not self.face_positions:
# No previous face detection. Declare invalid.
return True
if self.frames_passed > self.face_delay:
# The delay has passed. Invalidate previous detection
return True
# Everything ok. We can use the old detection.
return False
def recalculate(self, img):
"""
Try to redetect the face position.
"""
logging.debug("Face detector: Scanning...")
# Reset the frame counter
self.frames_passed = 0
# Invalidate previous detections
self.face_positions = None
rects = self.detect(img)
for r in rects:
x1, y1, x2, y2 = r
logging.info("Face detector: Found face at %s", r)
        if rects is not None:
self.face_positions = rects
def detect(self, img):
"""
Find blobs which match a given HAAR cascade.
"""
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#gray = cv2.equalizeHist(gray)
# Accumulate all detections in a list
r = self.detect_frontal(img) + self.detect_profile(img)
return r
def detect_frontal(self, img):
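        """Detect frontal faces using the frontal HAAR cascade.
        Returns a (possibly empty) list of rectangles in the same
        (x1, y1, x2, y2) form used elsewhere in this module.
        """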
rects_frontal = self.cascade_frontal.detectMultiScale(img,
scaleFactor=1.1,
minNeighbors=self.config["haar_confidence"],
minSize=self.config["min_face_size"])
if len(rects_frontal) != 0:
# We found a frontal faces.
rects_frontal = rectangle.convert_from_wh(rects_frontal)
return rects_frontal.tolist()
else:
return []
def detect_profile(self, img):
# Detect faces turned sidewards
rects_profile = self.cascade_profile.detectMultiScale(img,
scaleFactor=1.2,
minNeighbors=self.config["haar_confidence"],
minSize=self.config["min_face_size"])
if len(rects_profile) != 0:
# OK, found profile faces.
rects_profile = rectangle.convert_from_wh(rects_profile)
return rects_profile.tolist()
else:
return []
def set_config(self, config):
"""
Load new settings at runtime
"""
for key in config:
self.config[key] = config[key]
| mre/tracker | face.py | Python | lgpl-3.0 | 3,699 | 0.012976 |
from .base import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!i%7s@1+v&293zcy*kljuke=_l176nqpj2-3dtms()pw^et!we'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| acdh-oeaw/dig_ed_cat | digital_editions/settings/dev.py | Python | mit | 533 | 0 |
class Strings:
ADD_SUPERVISOR = "You are now connected with "
START_MONITORING = " wants to monitor you, click to allow"
    ACCEPTED_MONITORING = "You started monitoring "
STOP_MONITORING_SUPERVISE = " canceled monitoring"
STOP_MONITORING_SUPERVISOR = " is no longer monitoring you"
APPLICATION_ANOMALY = " opened an unauthorized application"
    SPEED_ANOMALY = " drives faster than authorized. Speed: "
MONITOR_ACCEPTED = "Monitor accepted"
registration_id = "cRLRNiCkFPQ:APA91bENV-BxmM3iXhZ_DwlitVpI5nTvdqGhClq5K1M5sLIZ8aAca_EJnkX3MRl9p_tLGBGoUtvROR2gOVI5bDeTIegS-55C8DM-GAnGI0xdlHVTPM5P9fkSYEslS-EcOsK6Y6dAsPca"
registration_other = "fWsYNQNkFfM:APA91bH_Rq5A1rYLapfmii62coTWgNvCMnqq1q8LIxsvNNByT-iPrU-EledwiKHyT7zzCFbPMkbqbZvdb-YVidkQq0u6jvOk_1RZsvK-Q1_XuM3gavyU63GvbgjNcgd5_Ws1Z-H4Xs_l"
| tweiss1234/Cras | strings.py | Python | apache-2.0 | 824 | 0.003641 |
#!/usr/bin/env python
import fluidity.diagnostics.annulus_mesh as mesh
import fluidity.diagnostics.triangletools as tt
div = mesh.SliceCoordsConstant(0.0, 1.0, 3)
m = mesh.GenerateRectangleMesh(div, div)
tt.WriteTriangle(m, "square-structured-linear")
| FluidityProject/multifluids | tests/square-convection-parallel-trivial/src/genmesh.py | Python | lgpl-2.1 | 254 | 0 |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
from collections import defaultdict, OrderedDict
from email.header import Header
from hashlib import sha1, sha256
import hmac
import re
import six
# pylint: disable-msg=import-error
from six.moves.urllib.parse import quote, unquote, parse_qsl
import string
from swift.common.utils import split_path, json, get_swift_info, \
close_if_possible, md5
from swift.common import swob
from swift.common.http import HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED, \
HTTP_NO_CONTENT, HTTP_UNAUTHORIZED, HTTP_FORBIDDEN, HTTP_NOT_FOUND, \
HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_PARTIAL_CONTENT, HTTP_NOT_MODIFIED, HTTP_PRECONDITION_FAILED, \
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE, HTTP_LENGTH_REQUIRED, \
HTTP_BAD_REQUEST, HTTP_REQUEST_TIMEOUT, HTTP_SERVICE_UNAVAILABLE, \
HTTP_TOO_MANY_REQUESTS, HTTP_RATE_LIMITED, is_success
from swift.common.constraints import check_utf8
from swift.proxy.controllers.base import get_container_info
from swift.common.request_helpers import check_path_header
from swift.common.middleware.s3api.controllers import ServiceController, \
ObjectController, AclController, MultiObjectDeleteController, \
LocationController, LoggingStatusController, PartController, \
UploadController, UploadsController, VersioningController, \
UnsupportedController, S3AclController, BucketController, \
TaggingController
from swift.common.middleware.s3api.s3response import AccessDenied, \
InvalidArgument, InvalidDigest, BucketAlreadyOwnedByYou, \
RequestTimeTooSkewed, S3Response, SignatureDoesNotMatch, \
BucketAlreadyExists, BucketNotEmpty, EntityTooLarge, \
InternalError, NoSuchBucket, NoSuchKey, PreconditionFailed, InvalidRange, \
MissingContentLength, InvalidStorageClass, S3NotImplemented, InvalidURI, \
MalformedXML, InvalidRequest, RequestTimeout, InvalidBucketName, \
BadDigest, AuthorizationHeaderMalformed, SlowDown, \
AuthorizationQueryParametersError, ServiceUnavailable
from swift.common.middleware.s3api.exception import NotS3Request, \
BadSwiftRequest
from swift.common.middleware.s3api.utils import utf8encode, \
S3Timestamp, mktime, MULTIUPLOAD_SUFFIX
from swift.common.middleware.s3api.subresource import decode_acl, encode_acl
from swift.common.middleware.s3api.utils import sysmeta_header, \
validate_bucket_name
from swift.common.middleware.s3api.acl_utils import handle_acl_header
# List of sub-resources that must be maintained as part of the HMAC
# signature string.
ALLOWED_SUB_RESOURCES = sorted([
'acl', 'delete', 'lifecycle', 'location', 'logging', 'notification',
'partNumber', 'policy', 'requestPayment', 'torrent', 'uploads', 'uploadId',
'versionId', 'versioning', 'versions', 'website',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'response-content-language',
'response-content-type', 'response-expires', 'cors', 'tagging', 'restore'
])
MAX_32BIT_INT = 2147483647
SIGV2_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'
SIGV4_X_AMZ_DATE_FORMAT = '%Y%m%dT%H%M%SZ'
SERVICE = 's3' # useful for mocking out in tests
def _header_strip(value):
# S3 seems to strip *all* control characters
if value is None:
return None
stripped = _header_strip.re.sub('', value)
if value and not stripped:
# If there's nothing left after stripping,
# behave as though it wasn't provided
return None
return stripped
_header_strip.re = re.compile('^[\x00-\x20]*|[\x00-\x20]*$')
def _header_acl_property(resource):
"""
Set and retrieve the acl in self.headers
"""
def getter(self):
return getattr(self, '_%s' % resource)
def setter(self, value):
self.headers.update(encode_acl(resource, value))
setattr(self, '_%s' % resource, value)
def deleter(self):
self.headers[sysmeta_header(resource, 'acl')] = ''
return property(getter, setter, deleter,
doc='Get and set the %s acl property' % resource)
class HashingInput(object):
"""
wsgi.input wrapper to verify the hash of the input as it's read.
"""
def __init__(self, reader, content_length, hasher, expected_hex_hash):
self._input = reader
self._to_read = content_length
self._hasher = hasher()
self._expected = expected_hex_hash
def read(self, size=None):
chunk = self._input.read(size)
self._hasher.update(chunk)
self._to_read -= len(chunk)
short_read = bool(chunk) if size is None else (len(chunk) < size)
if self._to_read < 0 or (short_read and self._to_read) or (
self._to_read == 0 and
self._hasher.hexdigest() != self._expected):
self.close()
# Since we don't return the last chunk, the PUT never completes
raise swob.HTTPUnprocessableEntity(
                'The X-Amz-Content-SHA256 you specified did not match '
'what we received.')
return chunk
def close(self):
close_if_possible(self._input)
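# Illustrative note (assumption, not part of the original module): HashingInput
# is used further down to wrap the WSGI input so a SHA-256 mismatch surfaces
# while the body is being read, e.g.:
#   env['wsgi.input'] = HashingInput(env['wsgi.input'], content_length,
#                                    sha256, hashed_payload.lower())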
class SigV4Mixin(object):
"""
A request class mixin to provide S3 signature v4 functionality
"""
def check_signature(self, secret):
secret = utf8encode(secret)
user_signature = self.signature
derived_secret = b'AWS4' + secret
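        # AWS Signature V4 key derivation: fold each credential scope piece
        # (date, region, service, 'aws4_request') into the 'AWS4' + secret
        # seed via HMAC-SHA256, then sign the string-to-sign with that key.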
for scope_piece in self.scope.values():
derived_secret = hmac.new(
derived_secret, scope_piece.encode('utf8'), sha256).digest()
valid_signature = hmac.new(
derived_secret, self.string_to_sign, sha256).hexdigest()
return user_signature == valid_signature
@property
def _is_query_auth(self):
return 'X-Amz-Credential' in self.params
@property
def timestamp(self):
"""
Return timestamp string according to the auth type
The difference from v2 is v4 have to see 'X-Amz-Date' even though
it's query auth type.
"""
if not self._timestamp:
try:
if self._is_query_auth and 'X-Amz-Date' in self.params:
# NOTE(andrey-mp): Date in Signature V4 has different
# format
timestamp = mktime(
self.params['X-Amz-Date'], SIGV4_X_AMZ_DATE_FORMAT)
else:
if self.headers.get('X-Amz-Date'):
timestamp = mktime(
self.headers.get('X-Amz-Date'),
SIGV4_X_AMZ_DATE_FORMAT)
else:
timestamp = mktime(self.headers.get('Date'))
except (ValueError, TypeError):
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
if timestamp < 0:
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
try:
self._timestamp = S3Timestamp(timestamp)
except ValueError:
# Must be far-future; blame clock skew
raise RequestTimeTooSkewed()
return self._timestamp
def _validate_expire_param(self):
"""
Validate X-Amz-Expires in query parameter
:raises: AccessDenied
:raises: AuthorizationQueryParametersError
"""
err = None
try:
expires = int(self.params['X-Amz-Expires'])
except KeyError:
raise AccessDenied()
except ValueError:
err = 'X-Amz-Expires should be a number'
else:
if expires < 0:
err = 'X-Amz-Expires must be non-negative'
elif expires >= 2 ** 63:
err = 'X-Amz-Expires should be a number'
elif expires > 604800:
err = ('X-Amz-Expires must be less than a week (in seconds); '
'that is, the given X-Amz-Expires must be less than '
'604800 seconds')
if err:
raise AuthorizationQueryParametersError(err)
if int(self.timestamp) + expires < S3Timestamp.now():
raise AccessDenied('Request has expired')
def _parse_credential(self, credential_string):
parts = credential_string.split("/")
# credential must be in following format:
# <access-key-id>/<date>/<AWS-region>/<AWS-service>/aws4_request
if not parts[0] or len(parts) != 5:
raise AccessDenied()
return dict(zip(['access', 'date', 'region', 'service', 'terminal'],
parts))
def _parse_query_authentication(self):
"""
Parse v4 query authentication
- version 4:
'X-Amz-Credential' and 'X-Amz-Signature' should be in param
:raises: AccessDenied
:raises: AuthorizationHeaderMalformed
"""
if self.params.get('X-Amz-Algorithm') != 'AWS4-HMAC-SHA256':
raise InvalidArgument('X-Amz-Algorithm',
self.params.get('X-Amz-Algorithm'))
try:
cred_param = self._parse_credential(
swob.wsgi_to_str(self.params['X-Amz-Credential']))
sig = swob.wsgi_to_str(self.params['X-Amz-Signature'])
if not sig:
raise AccessDenied()
except KeyError:
raise AccessDenied()
try:
signed_headers = swob.wsgi_to_str(
self.params['X-Amz-SignedHeaders'])
except KeyError:
            # TODO: make sure whether this is a malformed request
raise AuthorizationHeaderMalformed()
self._signed_headers = set(signed_headers.split(';'))
invalid_messages = {
'date': 'Invalid credential date "%s". This date is not the same '
'as X-Amz-Date: "%s".',
'region': "Error parsing the X-Amz-Credential parameter; "
"the region '%s' is wrong; expecting '%s'",
'service': 'Error parsing the X-Amz-Credential parameter; '
'incorrect service "%s". This endpoint belongs to "%s".',
'terminal': 'Error parsing the X-Amz-Credential parameter; '
'incorrect terminal "%s". This endpoint uses "%s".',
}
for key in ('date', 'region', 'service', 'terminal'):
if cred_param[key] != self.scope[key]:
kwargs = {}
if key == 'region':
# Allow lowercase region name
# for AWS .NET SDK compatibility
if not self.scope[key].islower() and \
cred_param[key] == self.scope[key].lower():
self.location = self.location.lower()
continue
kwargs = {'region': self.scope['region']}
raise AuthorizationQueryParametersError(
invalid_messages[key] % (cred_param[key], self.scope[key]),
**kwargs)
return cred_param['access'], sig
def _parse_header_authentication(self):
"""
Parse v4 header authentication
- version 4:
'X-Amz-Credential' and 'X-Amz-Signature' should be in param
:raises: AccessDenied
:raises: AuthorizationHeaderMalformed
"""
auth_str = swob.wsgi_to_str(self.headers['Authorization'])
cred_param = self._parse_credential(auth_str.partition(
"Credential=")[2].split(',')[0])
sig = auth_str.partition("Signature=")[2].split(',')[0]
if not sig:
raise AccessDenied()
signed_headers = auth_str.partition(
"SignedHeaders=")[2].split(',', 1)[0]
if not signed_headers:
            # TODO: make sure whether this should be Malformed
raise AuthorizationHeaderMalformed()
invalid_messages = {
'date': 'Invalid credential date "%s". This date is not the same '
'as X-Amz-Date: "%s".',
'region': "The authorization header is malformed; the region '%s' "
"is wrong; expecting '%s'",
'service': 'The authorization header is malformed; incorrect '
'service "%s". This endpoint belongs to "%s".',
'terminal': 'The authorization header is malformed; incorrect '
'terminal "%s". This endpoint uses "%s".',
}
for key in ('date', 'region', 'service', 'terminal'):
if cred_param[key] != self.scope[key]:
kwargs = {}
if key == 'region':
# Allow lowercase region name
# for AWS .NET SDK compatibility
if not self.scope[key].islower() and \
cred_param[key] == self.scope[key].lower():
self.location = self.location.lower()
continue
kwargs = {'region': self.scope['region']}
raise AuthorizationHeaderMalformed(
invalid_messages[key] % (cred_param[key], self.scope[key]),
**kwargs)
self._signed_headers = set(signed_headers.split(';'))
return cred_param['access'], sig
def _canonical_query_string(self):
return '&'.join(
'%s=%s' % (swob.wsgi_quote(key, safe='-_.~'),
swob.wsgi_quote(value, safe='-_.~'))
for key, value in sorted(self.params.items())
if key not in ('Signature', 'X-Amz-Signature')).encode('ascii')
def _headers_to_sign(self):
"""
Select the headers from the request that need to be included
in the StringToSign.
:return : dict of headers to sign, the keys are all lower case
"""
if 'headers_raw' in self.environ: # eventlet >= 0.19.0
# See https://github.com/eventlet/eventlet/commit/67ec999
headers_lower_dict = defaultdict(list)
for key, value in self.environ['headers_raw']:
headers_lower_dict[key.lower().strip()].append(
' '.join(_header_strip(value or '').split()))
headers_lower_dict = {k: ','.join(v)
for k, v in headers_lower_dict.items()}
else: # mostly-functional fallback
headers_lower_dict = dict(
(k.lower().strip(), ' '.join(_header_strip(v or '').split()))
for (k, v) in six.iteritems(self.headers))
if 'host' in headers_lower_dict and re.match(
'Boto/2.[0-9].[0-2]',
headers_lower_dict.get('user-agent', '')):
# Boto versions < 2.9.3 strip the port component of the host:port
# header, so detect the user-agent via the header and strip the
# port if we detect an old boto version.
headers_lower_dict['host'] = \
headers_lower_dict['host'].split(':')[0]
headers_to_sign = [
(key, value) for key, value in sorted(headers_lower_dict.items())
if swob.wsgi_to_str(key) in self._signed_headers]
if len(headers_to_sign) != len(self._signed_headers):
# NOTE: if we are missing the header suggested via
# signed_header in actual header, it results in
# SignatureDoesNotMatch in actual S3 so we can raise
# the error immediately here to save redundant check
# process.
raise SignatureDoesNotMatch()
return headers_to_sign
def _canonical_uri(self):
"""
It won't require bucket name in canonical_uri for v4.
"""
return swob.wsgi_to_bytes(self.environ.get('RAW_PATH_INFO', self.path))
def _canonical_request(self):
# prepare 'canonical_request'
# Example requests are like following:
#
# GET
# /
# Action=ListUsers&Version=2010-05-08
# content-type:application/x-www-form-urlencoded; charset=utf-8
# host:iam.amazonaws.com
# x-amz-date:20150830T123600Z
#
# content-type;host;x-amz-date
# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
#
# 1. Add verb like: GET
cr = [swob.wsgi_to_bytes(self.method.upper())]
# 2. Add path like: /
path = self._canonical_uri()
cr.append(path)
# 3. Add query like: Action=ListUsers&Version=2010-05-08
cr.append(self._canonical_query_string())
# 4. Add headers like:
# content-type:application/x-www-form-urlencoded; charset=utf-8
# host:iam.amazonaws.com
# x-amz-date:20150830T123600Z
headers_to_sign = self._headers_to_sign()
cr.append(b''.join(swob.wsgi_to_bytes('%s:%s\n' % (key, value))
for key, value in headers_to_sign))
# 5. Add signed headers into canonical request like
# content-type;host;x-amz-date
cr.append(b';'.join(swob.wsgi_to_bytes(k) for k, v in headers_to_sign))
# 6. Add payload string at the tail
if 'X-Amz-Credential' in self.params:
# V4 with query parameters only
hashed_payload = 'UNSIGNED-PAYLOAD'
elif 'X-Amz-Content-SHA256' not in self.headers:
msg = 'Missing required header for this request: ' \
'x-amz-content-sha256'
raise InvalidRequest(msg)
else:
hashed_payload = self.headers['X-Amz-Content-SHA256']
if hashed_payload != 'UNSIGNED-PAYLOAD':
if self.content_length == 0:
if hashed_payload.lower() != sha256().hexdigest():
raise BadDigest(
                            'The X-Amz-Content-SHA256 you specified did not '
'match what we received.')
elif self.content_length:
self.environ['wsgi.input'] = HashingInput(
self.environ['wsgi.input'],
self.content_length,
sha256,
hashed_payload.lower())
# else, length not provided -- Swift will kick out a
# 411 Length Required which will get translated back
# to a S3-style response in S3Request._swift_error_codes
cr.append(swob.wsgi_to_bytes(hashed_payload))
return b'\n'.join(cr)
@property
def scope(self):
return OrderedDict([
('date', self.timestamp.amz_date_format.split('T')[0]),
('region', self.location),
('service', SERVICE),
('terminal', 'aws4_request'),
])
def _string_to_sign(self):
"""
Create 'StringToSign' value in Amazon terminology for v4.
"""
return b'\n'.join([
b'AWS4-HMAC-SHA256',
self.timestamp.amz_date_format.encode('ascii'),
'/'.join(self.scope.values()).encode('utf8'),
sha256(self._canonical_request()).hexdigest().encode('ascii')])
def signature_does_not_match_kwargs(self):
kwargs = super(SigV4Mixin, self).signature_does_not_match_kwargs()
cr = self._canonical_request()
kwargs.update({
'canonical_request': cr,
'canonical_request_bytes': ' '.join(
format(ord(c), '02x') for c in cr.decode('latin1')),
})
return kwargs
def get_request_class(env, s3_acl):
"""
    Helper function to select the request class to use, based on the
    signature version of the request and whether S3 ACL handling is enabled.
"""
if s3_acl:
request_classes = (S3AclRequest, SigV4S3AclRequest)
else:
request_classes = (S3Request, SigV4Request)
req = swob.Request(env)
if 'X-Amz-Credential' in req.params or \
req.headers.get('Authorization', '').startswith(
'AWS4-HMAC-SHA256 '):
# This is an Amazon SigV4 request
return request_classes[1]
else:
# The others using Amazon SigV2 class
return request_classes[0]
class S3Request(swob.Request):
"""
S3 request object.
"""
bucket_acl = _header_acl_property('container')
object_acl = _header_acl_property('object')
def __init__(self, env, conf, app=None):
# NOTE: app is not used by this class, need for compatibility of S3acl
swob.Request.__init__(self, env)
self.conf = conf
self.location = self.conf.location
self._timestamp = None
self.access_key, self.signature = self._parse_auth_info()
self.bucket_in_host = self._parse_host()
self.container_name, self.object_name = self._parse_uri()
self._validate_headers()
# Lock in string-to-sign now, before we start messing with query params
self.string_to_sign = self._string_to_sign()
self.environ['s3api.auth_details'] = {
'access_key': self.access_key,
'signature': self.signature,
'string_to_sign': self.string_to_sign,
'check_signature': self.check_signature,
}
self.account = None
self.user_id = None
# Avoids that swift.swob.Response replaces Location header value
# by full URL when absolute path given. See swift.swob for more detail.
self.environ['swift.leave_relative_location'] = True
def check_signature(self, secret):
secret = utf8encode(secret)
user_signature = self.signature
valid_signature = base64.b64encode(hmac.new(
secret, self.string_to_sign, sha1).digest()).strip()
if not six.PY2:
valid_signature = valid_signature.decode('ascii')
return user_signature == valid_signature
@property
def timestamp(self):
"""
S3Timestamp from Date header. If X-Amz-Date header specified, it
will be prior to Date header.
:return : S3Timestamp instance
"""
if not self._timestamp:
try:
if self._is_query_auth and 'Timestamp' in self.params:
# If Timestamp specified in query, it should be prior
# to any Date header (is this right?)
timestamp = mktime(
self.params['Timestamp'], SIGV2_TIMESTAMP_FORMAT)
else:
timestamp = mktime(
self.headers.get('X-Amz-Date',
self.headers.get('Date')))
except ValueError:
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
if timestamp < 0:
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
try:
self._timestamp = S3Timestamp(timestamp)
except ValueError:
# Must be far-future; blame clock skew
raise RequestTimeTooSkewed()
return self._timestamp
@property
def _is_header_auth(self):
return 'Authorization' in self.headers
@property
def _is_query_auth(self):
return 'AWSAccessKeyId' in self.params
def _parse_host(self):
storage_domain = self.conf.storage_domain
if not storage_domain:
return None
if not storage_domain.startswith('.'):
storage_domain = '.' + storage_domain
if 'HTTP_HOST' in self.environ:
given_domain = self.environ['HTTP_HOST']
elif 'SERVER_NAME' in self.environ:
given_domain = self.environ['SERVER_NAME']
else:
return None
port = ''
if ':' in given_domain:
given_domain, port = given_domain.rsplit(':', 1)
if given_domain.endswith(storage_domain):
return given_domain[:-len(storage_domain)]
return None
def _parse_uri(self):
if not check_utf8(swob.wsgi_to_str(self.environ['PATH_INFO'])):
raise InvalidURI(self.path)
if self.bucket_in_host:
obj = self.environ['PATH_INFO'][1:] or None
return self.bucket_in_host, obj
bucket, obj = self.split_path(0, 2, True)
if bucket and not validate_bucket_name(
bucket, self.conf.dns_compliant_bucket_names):
# Ignore GET service case
raise InvalidBucketName(bucket)
return (bucket, obj)
def _parse_query_authentication(self):
"""
Parse v2 authentication query args
        TODO: make sure whether versions 0, 1 and 3 are supported
- version 0, 1, 2, 3:
'AWSAccessKeyId' and 'Signature' should be in param
:return: a tuple of access_key and signature
:raises: AccessDenied
"""
try:
access = swob.wsgi_to_str(self.params['AWSAccessKeyId'])
expires = swob.wsgi_to_str(self.params['Expires'])
sig = swob.wsgi_to_str(self.params['Signature'])
except KeyError:
raise AccessDenied()
if not all([access, sig, expires]):
raise AccessDenied()
return access, sig
def _parse_header_authentication(self):
"""
Parse v2 header authentication info
:returns: a tuple of access_key and signature
:raises: AccessDenied
"""
auth_str = swob.wsgi_to_str(self.headers['Authorization'])
if not auth_str.startswith('AWS ') or ':' not in auth_str:
raise AccessDenied()
# This means signature format V2
access, sig = auth_str.split(' ', 1)[1].rsplit(':', 1)
return access, sig
def _parse_auth_info(self):
"""Extract the access key identifier and signature.
:returns: a tuple of access_key and signature
:raises: NotS3Request
"""
if self._is_query_auth:
self._validate_expire_param()
return self._parse_query_authentication()
elif self._is_header_auth:
self._validate_dates()
return self._parse_header_authentication()
else:
# if this request is neither query auth nor header auth
# s3api regard this as not s3 request
raise NotS3Request()
def _validate_expire_param(self):
"""
Validate Expires in query parameters
:raises: AccessDenied
"""
# Expires header is a float since epoch
try:
ex = S3Timestamp(float(self.params['Expires']))
except (KeyError, ValueError):
raise AccessDenied()
if S3Timestamp.now() > ex:
raise AccessDenied('Request has expired')
if ex >= 2 ** 31:
raise AccessDenied(
'Invalid date (should be seconds since epoch): %s' %
self.params['Expires'])
def _validate_dates(self):
"""
Validate Date/X-Amz-Date headers for signature v2
:raises: AccessDenied
:raises: RequestTimeTooSkewed
"""
date_header = self.headers.get('Date')
amz_date_header = self.headers.get('X-Amz-Date')
if not date_header and not amz_date_header:
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
# Anyways, request timestamp should be validated
epoch = S3Timestamp(0)
if self.timestamp < epoch:
raise AccessDenied()
# If the standard date is too far ahead or behind, it is an
# error
delta = abs(int(self.timestamp) - int(S3Timestamp.now()))
if delta > self.conf.allowable_clock_skew:
raise RequestTimeTooSkewed()
def _validate_headers(self):
if 'CONTENT_LENGTH' in self.environ:
try:
if self.content_length < 0:
raise InvalidArgument('Content-Length',
self.content_length)
except (ValueError, TypeError):
raise InvalidArgument('Content-Length',
self.environ['CONTENT_LENGTH'])
value = _header_strip(self.headers.get('Content-MD5'))
if value is not None:
if not re.match('^[A-Za-z0-9+/]+={0,2}$', value):
# Non-base64-alphabet characters in value.
raise InvalidDigest(content_md5=value)
try:
self.headers['ETag'] = binascii.b2a_hex(
binascii.a2b_base64(value))
except binascii.Error:
# incorrect padding, most likely
raise InvalidDigest(content_md5=value)
if len(self.headers['ETag']) != 32:
raise InvalidDigest(content_md5=value)
if self.method == 'PUT' and any(h in self.headers for h in (
'If-Match', 'If-None-Match',
'If-Modified-Since', 'If-Unmodified-Since')):
raise S3NotImplemented(
'Conditional object PUTs are not supported.')
if 'X-Amz-Copy-Source' in self.headers:
try:
check_path_header(self, 'X-Amz-Copy-Source', 2, '')
except swob.HTTPException:
msg = 'Copy Source must mention the source bucket and key: ' \
'sourcebucket/sourcekey'
raise InvalidArgument('x-amz-copy-source',
self.headers['X-Amz-Copy-Source'],
msg)
if 'x-amz-metadata-directive' in self.headers:
value = self.headers['x-amz-metadata-directive']
if value not in ('COPY', 'REPLACE'):
err_msg = 'Unknown metadata directive.'
raise InvalidArgument('x-amz-metadata-directive', value,
err_msg)
if 'x-amz-storage-class' in self.headers:
# Only STANDARD is supported now.
if self.headers['x-amz-storage-class'] != 'STANDARD':
raise InvalidStorageClass()
if 'x-amz-mfa' in self.headers:
raise S3NotImplemented('MFA Delete is not supported.')
sse_value = self.headers.get('x-amz-server-side-encryption')
if sse_value is not None:
if sse_value not in ('aws:kms', 'AES256'):
raise InvalidArgument(
'x-amz-server-side-encryption', sse_value,
'The encryption method specified is not supported')
encryption_enabled = get_swift_info(admin=True)['admin'].get(
'encryption', {}).get('enabled')
if not encryption_enabled or sse_value != 'AES256':
raise S3NotImplemented(
'Server-side encryption is not supported.')
if 'x-amz-website-redirect-location' in self.headers:
raise S3NotImplemented('Website redirection is not supported.')
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
# describes some of what would be required to support this
if any(['aws-chunked' in self.headers.get('content-encoding', ''),
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' == self.headers.get(
'x-amz-content-sha256', ''),
'x-amz-decoded-content-length' in self.headers]):
            raise S3NotImplemented('Transferring payloads in multiple chunks '
'using aws-chunked is not supported.')
if 'x-amz-tagging' in self.headers:
raise S3NotImplemented('Object tagging is not supported.')
@property
def body(self):
"""
swob.Request.body is not secure against malicious input. It consumes
too much memory without any check when the request body is excessively
large. Use xml() instead.
"""
raise AttributeError("No attribute 'body'")
def xml(self, max_length):
"""
Similar to swob.Request.body, but it checks the content length before
creating a body string.
"""
te = self.headers.get('transfer-encoding', '')
te = [x.strip() for x in te.split(',') if x.strip()]
if te and (len(te) > 1 or te[-1] != 'chunked'):
raise S3NotImplemented('A header you provided implies '
'functionality that is not implemented',
header='Transfer-Encoding')
ml = self.message_length()
if ml and ml > max_length:
raise MalformedXML()
if te or ml:
# Limit the read similar to how SLO handles manifests
try:
body = self.body_file.read(max_length)
except swob.HTTPException as err:
if err.status_int == HTTP_UNPROCESSABLE_ENTITY:
# Special case for HashingInput check
raise BadDigest(
                        'The X-Amz-Content-SHA256 you specified did not '
'match what we received.')
raise
else:
# No (or zero) Content-Length provided, and not chunked transfer;
# no body. Assume zero-length, and enforce a required body below.
return None
return body
def check_md5(self, body):
if 'HTTP_CONTENT_MD5' not in self.environ:
raise InvalidRequest('Missing required header for this request: '
'Content-MD5')
digest = base64.b64encode(md5(
body, usedforsecurity=False).digest()).strip().decode('ascii')
if self.environ['HTTP_CONTENT_MD5'] != digest:
raise BadDigest(content_md5=self.environ['HTTP_CONTENT_MD5'])
def _copy_source_headers(self):
env = {}
for key, value in self.environ.items():
if key.startswith('HTTP_X_AMZ_COPY_SOURCE_'):
env[key.replace('X_AMZ_COPY_SOURCE_', '')] = value
return swob.HeaderEnvironProxy(env)
def check_copy_source(self, app):
"""
check_copy_source checks the copy source existence and if copying an
object to itself, for illegal request parameters
:returns: the source HEAD response
"""
try:
src_path = self.headers['X-Amz-Copy-Source']
except KeyError:
return None
src_path, qs = src_path.partition('?')[::2]
parsed = parse_qsl(qs, True)
if not parsed:
query = {}
elif len(parsed) == 1 and parsed[0][0] == 'versionId':
query = {'version-id': parsed[0][1]}
else:
raise InvalidArgument('X-Amz-Copy-Source',
self.headers['X-Amz-Copy-Source'],
'Unsupported copy source parameter.')
src_path = unquote(src_path)
src_path = src_path if src_path.startswith('/') else ('/' + src_path)
src_bucket, src_obj = split_path(src_path, 0, 2, True)
headers = swob.HeaderKeyDict()
headers.update(self._copy_source_headers())
src_resp = self.get_response(app, 'HEAD', src_bucket, src_obj,
headers=headers, query=query)
if src_resp.status_int == 304: # pylint: disable-msg=E1101
raise PreconditionFailed()
if (self.container_name == src_bucket and
self.object_name == src_obj and
self.headers.get('x-amz-metadata-directive',
'COPY') == 'COPY' and
not query):
raise InvalidRequest("This copy request is illegal "
"because it is trying to copy an "
"object to itself without "
"changing the object's metadata, "
"storage class, website redirect "
"location or encryption "
"attributes.")
# We've done some normalizing; write back so it's ready for
# to_swift_req
self.headers['X-Amz-Copy-Source'] = quote(src_path)
if query:
self.headers['X-Amz-Copy-Source'] += \
'?versionId=' + query['version-id']
return src_resp
def _canonical_uri(self):
"""
Require bucket name in canonical_uri for v2 in virtual hosted-style.
"""
raw_path_info = self.environ.get('RAW_PATH_INFO', self.path)
if self.bucket_in_host:
raw_path_info = '/' + self.bucket_in_host + raw_path_info
return raw_path_info
def _string_to_sign(self):
"""
Create 'StringToSign' value in Amazon terminology for v2.
"""
amz_headers = {}
buf = [swob.wsgi_to_bytes(wsgi_str) for wsgi_str in [
self.method,
_header_strip(self.headers.get('Content-MD5')) or '',
_header_strip(self.headers.get('Content-Type')) or '']]
if 'headers_raw' in self.environ: # eventlet >= 0.19.0
# See https://github.com/eventlet/eventlet/commit/67ec999
amz_headers = defaultdict(list)
for key, value in self.environ['headers_raw']:
key = key.lower()
if not key.startswith('x-amz-'):
continue
amz_headers[key.strip()].append(value.strip())
amz_headers = dict((key, ','.join(value))
for key, value in amz_headers.items())
else: # mostly-functional fallback
amz_headers = dict((key.lower(), value)
for key, value in self.headers.items()
if key.lower().startswith('x-amz-'))
if self._is_header_auth:
if 'x-amz-date' in amz_headers:
buf.append(b'')
elif 'Date' in self.headers:
buf.append(swob.wsgi_to_bytes(self.headers['Date']))
elif self._is_query_auth:
buf.append(swob.wsgi_to_bytes(self.params['Expires']))
else:
# Should have already raised NotS3Request in _parse_auth_info,
# but as a sanity check...
raise AccessDenied()
for key, value in sorted(amz_headers.items()):
buf.append(swob.wsgi_to_bytes("%s:%s" % (key, value)))
path = self._canonical_uri()
if self.query_string:
path += '?' + self.query_string
params = []
if '?' in path:
path, args = path.split('?', 1)
for key, value in sorted(self.params.items()):
if key in ALLOWED_SUB_RESOURCES:
params.append('%s=%s' % (key, value) if value else key)
if params:
buf.append(swob.wsgi_to_bytes('%s?%s' % (path, '&'.join(params))))
else:
buf.append(swob.wsgi_to_bytes(path))
return b'\n'.join(buf)
def signature_does_not_match_kwargs(self):
return {
'a_w_s_access_key_id': self.access_key,
'string_to_sign': self.string_to_sign,
'signature_provided': self.signature,
'string_to_sign_bytes': ' '.join(
format(ord(c), '02x')
for c in self.string_to_sign.decode('latin1')),
}
@property
def controller_name(self):
return self.controller.__name__[:-len('Controller')]
@property
def controller(self):
if self.is_service_request:
return ServiceController
if not self.conf.allow_multipart_uploads:
multi_part = ['partNumber', 'uploadId', 'uploads']
if len([p for p in multi_part if p in self.params]):
raise S3NotImplemented("Multi-part feature isn't support")
if 'acl' in self.params:
return AclController
if 'delete' in self.params:
return MultiObjectDeleteController
if 'location' in self.params:
return LocationController
if 'logging' in self.params:
return LoggingStatusController
if 'partNumber' in self.params:
return PartController
if 'uploadId' in self.params:
return UploadController
if 'uploads' in self.params:
return UploadsController
if 'versioning' in self.params:
return VersioningController
if 'tagging' in self.params:
return TaggingController
unsupported = ('notification', 'policy', 'requestPayment', 'torrent',
'website', 'cors', 'restore')
if set(unsupported) & set(self.params):
return UnsupportedController
if self.is_object_request:
return ObjectController
return BucketController
@property
def is_service_request(self):
return not self.container_name
@property
def is_bucket_request(self):
return self.container_name and not self.object_name
@property
def is_object_request(self):
return self.container_name and self.object_name
@property
def is_authenticated(self):
return self.account is not None
def to_swift_req(self, method, container, obj, query=None,
body=None, headers=None):
"""
Create a Swift request based on this request's environment.
"""
if self.account is None:
account = self.access_key
else:
account = self.account
env = self.environ.copy()
env['swift.infocache'] = self.environ.setdefault('swift.infocache', {})
def sanitize(value):
if set(value).issubset(string.printable):
return value
value = Header(value, 'UTF-8').encode()
if value.startswith('=?utf-8?q?'):
return '=?UTF-8?Q?' + value[10:]
elif value.startswith('=?utf-8?b?'):
return '=?UTF-8?B?' + value[10:]
else:
return value
if 'headers_raw' in env: # eventlet >= 0.19.0
# See https://github.com/eventlet/eventlet/commit/67ec999
for key, value in env['headers_raw']:
if not key.lower().startswith('x-amz-meta-'):
continue
# AWS ignores user-defined headers with these characters
if any(c in key for c in ' "),/;<=>?@[\\]{}'):
# NB: apparently, '(' *is* allowed
continue
# Note that this may have already been deleted, e.g. if the
# client sent multiple headers with the same name, or both
# x-amz-meta-foo-bar and x-amz-meta-foo_bar
env.pop('HTTP_' + key.replace('-', '_').upper(), None)
# Need to preserve underscores. Since we know '=' can't be
# present, quoted-printable seems appropriate.
key = key.replace('_', '=5F').replace('-', '_').upper()
key = 'HTTP_X_OBJECT_META_' + key[11:]
if key in env:
env[key] += ',' + sanitize(value)
else:
env[key] = sanitize(value)
else: # mostly-functional fallback
for key in self.environ:
if not key.startswith('HTTP_X_AMZ_META_'):
continue
# AWS ignores user-defined headers with these characters
if any(c in key for c in ' "),/;<=>?@[\\]{}'):
# NB: apparently, '(' *is* allowed
continue
env['HTTP_X_OBJECT_META_' + key[16:]] = sanitize(env[key])
del env[key]
copy_from_version_id = ''
if 'HTTP_X_AMZ_COPY_SOURCE' in env and env['REQUEST_METHOD'] == 'PUT':
env['HTTP_X_COPY_FROM'], copy_from_version_id = env[
'HTTP_X_AMZ_COPY_SOURCE'].partition('?versionId=')[::2]
del env['HTTP_X_AMZ_COPY_SOURCE']
env['CONTENT_LENGTH'] = '0'
if env.pop('HTTP_X_AMZ_METADATA_DIRECTIVE', None) == 'REPLACE':
env['HTTP_X_FRESH_METADATA'] = 'True'
else:
copy_exclude_headers = ('HTTP_CONTENT_DISPOSITION',
'HTTP_CONTENT_ENCODING',
'HTTP_CONTENT_LANGUAGE',
'CONTENT_TYPE',
'HTTP_EXPIRES',
'HTTP_CACHE_CONTROL',
'HTTP_X_ROBOTS_TAG')
for key in copy_exclude_headers:
env.pop(key, None)
for key in list(env.keys()):
if key.startswith('HTTP_X_OBJECT_META_'):
del env[key]
if self.conf.force_swift_request_proxy_log:
env['swift.proxy_access_log_made'] = False
env['swift.source'] = 'S3'
if method is not None:
env['REQUEST_METHOD'] = method
if obj:
path = '/v1/%s/%s/%s' % (account, container, obj)
elif container:
path = '/v1/%s/%s' % (account, container)
else:
path = '/v1/%s' % (account)
env['PATH_INFO'] = path
params = []
if query is not None:
for key, value in sorted(query.items()):
if value is not None:
params.append('%s=%s' % (key, quote(str(value))))
else:
params.append(key)
if copy_from_version_id and not (query and query.get('version-id')):
params.append('version-id=' + copy_from_version_id)
env['QUERY_STRING'] = '&'.join(params)
return swob.Request.blank(quote(path), environ=env, body=body,
headers=headers)
def _swift_success_codes(self, method, container, obj):
"""
Returns a list of expected success codes from Swift.
"""
if not container:
# Swift account access.
code_map = {
'GET': [
HTTP_OK,
],
}
elif not obj:
# Swift container access.
code_map = {
'HEAD': [
HTTP_NO_CONTENT,
],
'GET': [
HTTP_OK,
HTTP_NO_CONTENT,
],
'PUT': [
HTTP_CREATED,
],
'POST': [
HTTP_NO_CONTENT,
],
'DELETE': [
HTTP_NO_CONTENT,
],
}
else:
# Swift object access.
code_map = {
'HEAD': [
HTTP_OK,
HTTP_PARTIAL_CONTENT,
HTTP_NOT_MODIFIED,
],
'GET': [
HTTP_OK,
HTTP_PARTIAL_CONTENT,
HTTP_NOT_MODIFIED,
],
'PUT': [
HTTP_CREATED,
HTTP_ACCEPTED, # For SLO with heartbeating
],
'POST': [
HTTP_ACCEPTED,
],
'DELETE': [
HTTP_OK,
HTTP_NO_CONTENT,
],
}
return code_map[method]
def _bucket_put_accepted_error(self, container, app):
sw_req = self.to_swift_req('HEAD', container, None)
info = get_container_info(sw_req.environ, app, swift_source='S3')
sysmeta = info.get('sysmeta', {})
try:
acl = json.loads(sysmeta.get('s3api-acl',
sysmeta.get('swift3-acl', '{}')))
owner = acl.get('Owner')
except (ValueError, TypeError, KeyError):
owner = None
if owner is None or owner == self.user_id:
raise BucketAlreadyOwnedByYou(container)
raise BucketAlreadyExists(container)
def _swift_error_codes(self, method, container, obj, env, app):
"""
Returns a dict from expected Swift error codes to the corresponding S3
error responses.
"""
if not container:
# Swift account access.
code_map = {
'GET': {
},
}
elif not obj:
# Swift container access.
code_map = {
'HEAD': {
HTTP_NOT_FOUND: (NoSuchBucket, container),
},
'GET': {
HTTP_NOT_FOUND: (NoSuchBucket, container),
},
'PUT': {
HTTP_ACCEPTED: (self._bucket_put_accepted_error, container,
app),
},
'POST': {
HTTP_NOT_FOUND: (NoSuchBucket, container),
},
'DELETE': {
HTTP_NOT_FOUND: (NoSuchBucket, container),
HTTP_CONFLICT: BucketNotEmpty,
},
}
else:
# Swift object access.
# 404s differ depending upon whether the bucket exists
# Note that base-container-existence checks happen elsewhere for
# multi-part uploads, and get_container_info should be pulling
# from the env cache
def not_found_handler():
if container.endswith(MULTIUPLOAD_SUFFIX) or \
is_success(get_container_info(
env, app, swift_source='S3').get('status')):
return NoSuchKey(obj)
return NoSuchBucket(container)
code_map = {
'HEAD': {
HTTP_NOT_FOUND: not_found_handler,
HTTP_PRECONDITION_FAILED: PreconditionFailed,
},
'GET': {
HTTP_NOT_FOUND: not_found_handler,
HTTP_PRECONDITION_FAILED: PreconditionFailed,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: InvalidRange,
},
'PUT': {
HTTP_NOT_FOUND: (NoSuchBucket, container),
HTTP_UNPROCESSABLE_ENTITY: BadDigest,
HTTP_REQUEST_ENTITY_TOO_LARGE: EntityTooLarge,
HTTP_LENGTH_REQUIRED: MissingContentLength,
HTTP_REQUEST_TIMEOUT: RequestTimeout,
HTTP_PRECONDITION_FAILED: PreconditionFailed,
},
'POST': {
HTTP_NOT_FOUND: not_found_handler,
HTTP_PRECONDITION_FAILED: PreconditionFailed,
},
'DELETE': {
HTTP_NOT_FOUND: (NoSuchKey, obj),
},
}
return code_map[method]
def _get_response(self, app, method, container, obj,
headers=None, body=None, query=None):
"""
Calls the application with this request's environment. Returns a
S3Response object that wraps up the application's result.
"""
method = method or self.environ['REQUEST_METHOD']
if container is None:
container = self.container_name
if obj is None:
obj = self.object_name
sw_req = self.to_swift_req(method, container, obj, headers=headers,
body=body, query=query)
try:
sw_resp = sw_req.get_response(app)
except swob.HTTPException as err:
sw_resp = err
else:
# reuse account
_, self.account, _ = split_path(sw_resp.environ['PATH_INFO'],
2, 3, True)
# Propagate swift.backend_path in environ for middleware
# in pipeline that need Swift PATH_INFO like ceilometermiddleware.
self.environ['s3api.backend_path'] = \
sw_resp.environ['PATH_INFO']
            # Propagate backend headers back into our req headers for logging
for k, v in sw_req.headers.items():
if k.lower().startswith('x-backend-'):
self.headers.setdefault(k, v)
resp = S3Response.from_swift_resp(sw_resp)
status = resp.status_int # pylint: disable-msg=E1101
if not self.user_id:
if 'HTTP_X_USER_NAME' in sw_resp.environ:
# keystone
self.user_id = "%s:%s" % (
sw_resp.environ['HTTP_X_TENANT_NAME'],
sw_resp.environ['HTTP_X_USER_NAME'])
if six.PY2 and not isinstance(self.user_id, bytes):
self.user_id = self.user_id.encode('utf8')
else:
# tempauth
self.user_id = self.access_key
success_codes = self._swift_success_codes(method, container, obj)
error_codes = self._swift_error_codes(method, container, obj,
sw_req.environ, app)
if status in success_codes:
return resp
err_msg = resp.body
if status in error_codes:
err_resp = \
error_codes[sw_resp.status_int] # pylint: disable-msg=E1101
if isinstance(err_resp, tuple):
raise err_resp[0](*err_resp[1:])
elif b'quota' in err_msg:
raise err_resp(err_msg)
else:
raise err_resp()
if status == HTTP_BAD_REQUEST:
raise BadSwiftRequest(err_msg.decode('utf8'))
if status == HTTP_UNAUTHORIZED:
raise SignatureDoesNotMatch(
**self.signature_does_not_match_kwargs())
if status == HTTP_FORBIDDEN:
raise AccessDenied()
if status == HTTP_SERVICE_UNAVAILABLE:
raise ServiceUnavailable()
if status in (HTTP_RATE_LIMITED, HTTP_TOO_MANY_REQUESTS):
raise SlowDown()
raise InternalError('unexpected status code %d' % status)
def get_response(self, app, method=None, container=None, obj=None,
headers=None, body=None, query=None):
"""
get_response is an entry point to be extended for child classes.
        If additional tasks are needed when getting the swift response,
        child classes can override this method.
        swift.common.middleware.s3api.s3request.S3Request needs to just call
        _get_response to get a pure swift response.
"""
if 'HTTP_X_AMZ_ACL' in self.environ:
handle_acl_header(self)
return self._get_response(app, method, container, obj,
headers, body, query)
def get_validated_param(self, param, default, limit=MAX_32BIT_INT):
value = default
if param in self.params:
try:
value = int(self.params[param])
if value < 0:
err_msg = 'Argument %s must be an integer between 0 and' \
' %d' % (param, MAX_32BIT_INT)
raise InvalidArgument(param, self.params[param], err_msg)
if value > MAX_32BIT_INT:
# check the value because int() could build either a long
# instance or a 64bit integer.
raise ValueError()
if limit < value:
value = limit
except ValueError:
err_msg = 'Provided %s not an integer or within ' \
'integer range' % param
raise InvalidArgument(param, self.params[param], err_msg)
return value
def get_container_info(self, app):
"""
get_container_info will return a result dict of get_container_info
from the backend Swift.
:returns: a dictionary of container info from
swift.controllers.base.get_container_info
:raises: NoSuchBucket when the container doesn't exist
:raises: InternalError when the request failed without 404
"""
if not self.is_authenticated:
sw_req = self.to_swift_req('TEST', None, None, body='')
# don't show log message of this request
sw_req.environ['swift.proxy_access_log_made'] = True
sw_resp = sw_req.get_response(app)
if not sw_req.remote_user:
raise SignatureDoesNotMatch(
**self.signature_does_not_match_kwargs())
_, self.account, _ = split_path(sw_resp.environ['PATH_INFO'],
2, 3, True)
sw_req = self.to_swift_req(app, self.container_name, None)
info = get_container_info(sw_req.environ, app, swift_source='S3')
if is_success(info['status']):
return info
elif info['status'] == 404:
raise NoSuchBucket(self.container_name)
else:
raise InternalError(
'unexpected status code %d' % info['status'])
def gen_multipart_manifest_delete_query(self, app, obj=None, version=None):
if not self.conf.allow_multipart_uploads:
return {}
if not obj:
obj = self.object_name
query = {'symlink': 'get'}
if version is not None:
query['version-id'] = version
resp = self.get_response(app, 'HEAD', obj=obj, query=query)
if not resp.is_slo:
return {}
elif resp.sysmeta_headers.get(sysmeta_header('object', 'etag')):
# Even if allow_async_delete is turned off, SLO will just handle
# the delete synchronously, so we don't need to check before
# setting async=on
return {'multipart-manifest': 'delete', 'async': 'on'}
else:
return {'multipart-manifest': 'delete'}
def set_acl_handler(self, handler):
pass
class S3AclRequest(S3Request):
"""
S3Acl request object.
"""
def __init__(self, env, conf, app):
super(S3AclRequest, self).__init__(env, conf, app)
self.authenticate(app)
self.acl_handler = None
@property
def controller(self):
if 'acl' in self.params and not self.is_service_request:
return S3AclController
return super(S3AclRequest, self).controller
def authenticate(self, app):
"""
        The authenticate method runs a pre-authentication request and
        retrieves account information.
        Note that it currently supports only keystone and tempauth
        (no support for third-party authentication middleware).
"""
sw_req = self.to_swift_req('TEST', None, None, body='')
# don't show log message of this request
sw_req.environ['swift.proxy_access_log_made'] = True
sw_resp = sw_req.get_response(app)
if not sw_req.remote_user:
raise SignatureDoesNotMatch(
**self.signature_does_not_match_kwargs())
_, self.account, _ = split_path(sw_resp.environ['PATH_INFO'],
2, 3, True)
if 'HTTP_X_USER_NAME' in sw_resp.environ:
# keystone
self.user_id = "%s:%s" % (sw_resp.environ['HTTP_X_TENANT_NAME'],
sw_resp.environ['HTTP_X_USER_NAME'])
if six.PY2 and not isinstance(self.user_id, bytes):
self.user_id = self.user_id.encode('utf8')
else:
# tempauth
self.user_id = self.access_key
sw_req.environ.get('swift.authorize', lambda req: None)(sw_req)
self.environ['swift_owner'] = sw_req.environ.get('swift_owner', False)
# Need to skip S3 authorization on subsequent requests to prevent
# overwriting the account in PATH_INFO
del self.environ['s3api.auth_details']
def to_swift_req(self, method, container, obj, query=None,
body=None, headers=None):
sw_req = super(S3AclRequest, self).to_swift_req(
method, container, obj, query, body, headers)
if self.account:
sw_req.environ['swift_owner'] = True # needed to set ACL
sw_req.environ['swift.authorize_override'] = True
sw_req.environ['swift.authorize'] = lambda req: None
return sw_req
def get_acl_response(self, app, method=None, container=None, obj=None,
headers=None, body=None, query=None):
"""
Wrapper method of _get_response to add s3 acl information
from response sysmeta headers.
"""
resp = self._get_response(
app, method, container, obj, headers, body, query)
resp.bucket_acl = decode_acl(
'container', resp.sysmeta_headers, self.conf.allow_no_owner)
resp.object_acl = decode_acl(
'object', resp.sysmeta_headers, self.conf.allow_no_owner)
return resp
def get_response(self, app, method=None, container=None, obj=None,
headers=None, body=None, query=None):
"""
        Wrap the get_response call to hook in the ACL handling method.
"""
if not self.acl_handler:
            # acl_handler must always be set before calling get_response
raise Exception('get_response called before set_acl_handler')
resp = self.acl_handler.handle_acl(
app, method, container, obj, headers)
        # possible to skip calling get_acl_response if resp is not
        # None (e.g. HEAD)
if resp:
return resp
return self.get_acl_response(app, method, container, obj,
headers, body, query)
def set_acl_handler(self, acl_handler):
self.acl_handler = acl_handler
class SigV4Request(SigV4Mixin, S3Request):
pass
class SigV4S3AclRequest(SigV4Mixin, S3AclRequest):
pass
| swiftstack/swift | swift/common/middleware/s3api/s3request.py | Python | apache-2.0 | 62,988 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class AccountInvoiceMultipartner(osv.osv):
''' Add more than one reference partner in account invoice
(only in report document, not in journal entry)
'''
_inherit = 'account.invoice'
# on change function:
def onchange_extra_address(self, cr, uid, ids, extra_address, partner_id,
context=None):
        ''' Set the domain on the partner_ids list when the extra address
            type or the partner changes.
        '''
res = {}
if extra_address == 'contact' and partner_id:
res['domain'] = {'partner_ids': [('parent_id', '=', partner_id)]}
else:
res['domain'] = {'partner_ids': []}
res['value'] = {'partner_ids': False}
return res
_columns = {
'extra_address': fields.selection([
('none', 'None'),
('contact', 'Contact'),
('partner', 'Partner'), ],
'Extra address', select=True, readonly=False, required=True),
'partner_ids': fields.many2many(
'res.partner', 'invoice_partner_rel', 'invoice_id', 'partner_id',
'Extra partner'),
}
_defaults = {
'extra_address': lambda *a: 'none',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-accounting | account_invoice_multipartner/multipartner.py | Python | agpl-3.0 | 2,506 | 0.004789 |
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This component is for use with the OpenFlow tutorial.
It acts as a simple hub, but can be modified to act like an L2
learning switch.
It's roughly similar to the one Brandon Heller did for NOX.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
class Tutorial (object):
"""
A Tutorial object is created for each switch that connects.
A Connection object for that switch is passed to the __init__ function.
"""
def __init__ (self, connection):
# Keep track of the connection to the switch so that we can
# send it messages!
self.connection = connection
# This binds our PacketIn event listener
connection.addListeners(self)
# Use this table to keep track of which ethernet address is on
# which switch port (keys are MACs, values are ports).
self.mac_to_port = {}
def resend_packet (self, packet_in, out_port):
"""
Instructs the switch to resend a packet that it had sent to us.
"packet_in" is the ofp_packet_in object the switch had sent to the
controller due to a table-miss.
"""
msg = of.ofp_packet_out()
msg.data = packet_in
# Add an action to send to the specified port
action = of.ofp_action_output(port = out_port)
msg.actions.append(action)
# Send message to switch
self.connection.send(msg)
def act_like_hub (self, packet, packet_in):
"""
Implement hub-like behavior -- send all packets to all ports besides
the input port.
"""
# We want to output to all ports -- we do that using the special
# OFPP_ALL port as the output port. (We could have also used
# OFPP_FLOOD.)
self.resend_packet(packet_in, of.OFPP_ALL)
# Note that if we didn't get a valid buffer_id, a slightly better
# implementation would check that we got the full data before
    # sending it (len(packet_in.data) should be == packet_in.total_len).
def act_like_switch (self, packet, packet_in):
"""
Implement switch-like behavior.
"""
""" # DELETE THIS LINE TO START WORKING ON THIS (AND THE ONE BELOW!) #
    # Here's some pseudocode to start you off implementing a learning
# switch. You'll need to rewrite it as real Python code.
# Learn the port for the source MAC
self.mac_to_port ... <add or update entry>
if the port associated with the destination MAC of the packet is known:
# Send packet out the associated port
self.resend_packet(packet_in, ...)
# Once you have the above working, try pushing a flow entry
# instead of resending the packet (comment out the above and
# uncomment and complete the below.)
log.debug("Installing flow...")
# Maybe the log statement should have source/destination/port?
#msg = of.ofp_flow_mod()
#
## Set fields to match received packet
#msg.match = of.ofp_match.from_packet(packet)
#
#< Set other fields of flow_mod (timeouts? buffer_id?) >
#
#< Add an output action, and send -- similar to resend_packet() >
else:
# Flood the packet out everything but the input port
# This part looks familiar, right?
self.resend_packet(packet_in, of.OFPP_ALL)
""" # DELETE THIS LINE TO START WORKING ON THIS #
def _handle_PacketIn (self, event):
"""
Handles packet in messages from the switch.
"""
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
# Comment out the following line and uncomment the one after
# when starting the exercise.
self.act_like_hub(packet, packet_in)
#self.act_like_switch(packet, packet_in)
def launch ():
"""
Starts the component
"""
def start_switch (event):
log.debug("Controlling %s" % (event.connection,))
Tutorial(event.connection)
core.openflow.addListenerByName("ConnectionUp", start_switch)
| avihad/ARP-Storm | src/arp_open_flow/pox/misc/of_tutorial.py | Python | apache-2.0 | 4,582 | 0.006547 |
import requests
def getHTMLText(url):
try:
r=requests.get(url,timeout=30)
r.raise_for_status()
r.encoding=r.apparent_encoding
return r.text
except:
return "产生异常"
if __name__=="__main__":
url="http://www.baidu.com"
print(getHTMLText(url)) | ccto/python-demos | 06-requests/framework.py | Python | apache-2.0 | 303 | 0.030508 |
import graphlab as gl
import time
def pagerank_update_fn(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
dst['pagerank'] += src['prev_pagerank'] * edge['weight']
return (src, edge, dst)
def sum_weight(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
src['total_weight'] += edge['weight']
return src, edge, dst
def normalize_weight(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
edge['weight'] /= src['total_weight']
return src, edge, dst
def pagerank_triple_apply(input_graph, reset_prob=0.15, threshold=1e-3,
max_iterations=20):
g = gl.SGraph(input_graph.vertices, input_graph.edges)
# compute normalized edge weight
g.vertices['total_weight'] = 0.0
g = g.triple_apply(sum_weight, ['total_weight'])
g = g.triple_apply(normalize_weight, ['weight'])
del g.vertices['total_weight']
# initialize vertex field
g.vertices['prev_pagerank'] = 1.0
it = 0
total_l1_delta = len(g.vertices)
start = time.time()
while(total_l1_delta > threshold and it < max_iterations):
g.vertices['pagerank'] = 0.0
g = g.triple_apply(pagerank_update_fn, ['pagerank'])
g.vertices['pagerank'] = g.vertices['pagerank'] * (1 - reset_prob) \
+ reset_prob
g.vertices['l1_delta'] = (g.vertices['pagerank'] - \
g.vertices['prev_pagerank']).apply(lambda x: abs(x))
total_l1_delta = g.vertices['l1_delta'].sum()
g.vertices['prev_pagerank'] = g.vertices['pagerank']
print 'Iteration %d: total pagerank changed in L1 = %f' % (it,\
total_l1_delta)
it = it + 1
print 'Triple apply pagerank finished in: %f secs' % (time.time() - start)
del g.vertices['prev_pagerank']
return g
# Load graph
g = gl.load_graph('http://snap.stanford.edu/data/email-Enron.txt.gz', 'snap')
g.edges['weight'] = 1.0
# Run triple apply sssp
pagerank_graph = pagerank_triple_apply(g)
print pagerank_graph
| dato-code/how-to | triple_apply_weighted_pagerank.py | Python | cc0-1.0 | 2,161 | 0.006478 |
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Electricity Sensor (http://www.seeedstudio.com/wiki/Grove_-_Electricity_Sensor)
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Electricity Sensor to analog port A0
# SIG,NC,NC,GND
sensor = ARD_A0
jetduino.pinMode(sensor, INPUT_PIN)
# Vcc of the grove interface is normally 5v
grove_vcc = 5
while True:
try:
# Get sensor value
sensor_value = jetduino.analogRead(sensor)
# Calculate amplitude current (mA)
        # Convert to float first so the division is not truncated to 0 under
        # Python 2 integer division
        amplitude_current = float(sensor_value) / 1024 * grove_vcc / 800 * 2000000
# Calculate effective value (mA)
effective_value = amplitude_current / 1.414
# minimum_current = 1 / 1024 * grove_vcc / 800 * 2000000 / 1.414 = 8.6(mA)
# Only for sinusoidal alternating current
print ("sensor_value", sensor_value)
print ("The amplitude of the current is", amplitude_current, "mA")
print ("The effective value of the current is", effective_value, "mA")
time.sleep(1)
except IOError:
print ("Error")
| NeuroRoboticTech/Jetduino | Software/Python/grove_electricity_sensor.py | Python | mit | 2,751 | 0.003999 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import venusian
from BTrees.OOBTree import OOBTree
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from substanced.util import renamer
from substanced.content import content
from dace.objectofcollaboration.principal.role import DACE_ROLES
from dace.objectofcollaboration.principal.util import get_access_keys
from dace.objectofcollaboration.entity import Entity
from dace.descriptors import (
SharedUniqueProperty,
CompositeUniqueProperty,
SharedMultipleProperty,
CompositeMultipleProperty)
from dace.util import getSite, get_obj, find_catalog
from pontus.schema import Schema
from pontus.core import VisualisableElement
from pontus.widget import (
Select2Widget)
from novaideo import _, ACCESS_ACTIONS
from novaideo.content.interface import (
IVersionableEntity,
IDuplicableEntity,
ISearchableEntity,
ICommentable,
IPrivateChannel,
IChannel,
ICorrelableEntity,
IPresentableEntity,
INode,
IEmojiable,
IPerson,
ISignalableEntity,
ISustainable,
IDebatable,
ITokenable)
BATCH_DEFAULT_SIZE = 8
SEARCHABLE_CONTENTS = {}
SUSTAINABLE_CONTENTS = {}
NOVAIDO_ACCES_ACTIONS = {}
ADVERTISING_CONTAINERS = {}
ON_LOAD_VIEWS = {}
class AnonymisationKinds(object):
anonymity = 'anonymity'
pseudonymity = 'pseudonymity'
@classmethod
def get_items(cls):
return {
cls.anonymity: _('Anonymity'),
cls.pseudonymity: _('Pseudonymity')
}
@classmethod
def get_title(cls, item):
items = cls.get_items()
return items.get(item, None)
class Evaluations():
support = 'support'
oppose = 'oppose'
def get_searchable_content(request=None):
if request is None:
request = get_current_request()
return getattr(request, 'searchable_contents', {})
class advertising_banner_config(object):
""" A function, class or method decorator which allows a
developer to create advertising banner registrations.
Advertising banner is a panel. See pyramid_layout.panel_config.
"""
def __init__(self, name='', context=None, renderer=None, attr=None):
self.name = name
self.context = context
self.renderer = renderer
self.attr = attr
def __call__(self, wrapped):
settings = self.__dict__.copy()
def callback(context, name, ob):
config = context.config.with_package(info.module)
config.add_panel(panel=ob, **settings)
ADVERTISING_CONTAINERS[self.name] = {'title': ob.title,
'description': ob.description,
'order': ob.order,
'validator': ob.validator,
'tags': ob.tags
#TODO add validator ob.validator
}
info = venusian.attach(wrapped, callback, category='pyramid_layout')
if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings['attr'] is None:
settings['attr'] = wrapped.__name__
settings['_info'] = info.codeinfo # fbo "action_method"
return wrapped
class access_action(object):
""" Decorator for creationculturelle access actions.
An access action allows to view an object"""
def __init__(self, access_key=None):
self.access_key = access_key
def __call__(self, wrapped):
def callback(scanner, name, ob):
if ob.context in ACCESS_ACTIONS:
ACCESS_ACTIONS[ob.context].append({'action': ob,
'access_key': self.access_key})
else:
ACCESS_ACTIONS[ob.context] = [{'action': ob,
'access_key': self.access_key}]
venusian.attach(wrapped, callback)
return wrapped
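# A hypothetical usage sketch of the decorator above (the action class and its
# context are illustrative; real actions in this code base live elsewhere):
#
#   @access_action(access_key=lambda request: ['always'])
#   class SeeEntity(object):
#       context = ISearchableEntity
#
#       def processsecurity_validation(self, process, context):
#           return 'published' in getattr(context, 'state', [])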
def can_access(user, context, request=None, root=None):
""" Return 'True' if the user can access to the context"""
declared = getattr(getattr(context, '__provides__', None),
'declared', [None])[0]
for data in ACCESS_ACTIONS.get(declared, []):
if data['action'].processsecurity_validation(None, context):
return True
return False
_marker = object()
def serialize_roles(roles, root=None):
result = []
principal_root = getSite()
if principal_root is None:
return []
if root is None:
root = principal_root
root_oid = str(get_oid(root, ''))
principal_root_oid = str(get_oid(principal_root, ''))
for role in roles:
if isinstance(role, tuple):
obj_oid = str(get_oid(role[1], ''))
result.append((role[0]+'_'+obj_oid).lower())
superiors = getattr(DACE_ROLES.get(role[0], _marker),
'all_superiors', [])
result.extend([(r.name+'_'+obj_oid).lower()
for r in superiors])
else:
result.append(role.lower()+'_'+root_oid)
superiors = getattr(DACE_ROLES.get(role, _marker),
'all_superiors', [])
result.extend([(r.name+'_'+root_oid).lower() for r in
superiors])
for superior in superiors:
if superior.name == 'Admin':
result.append('admin_'+principal_root_oid)
break
return list(set(result))
def generate_access_keys(user, root):
return get_access_keys(
user, root=root)
@implementer(ICommentable)
class Commentable(VisualisableElement, Entity):
""" A Commentable entity is an entity that can be comment"""
name = renamer()
comments = CompositeMultipleProperty('comments')
def __init__(self, **kwargs):
super(Commentable, self).__init__(**kwargs)
self.len_comments = 0
def update_len_comments(self):
result = len(self.comments)
result += sum([c.update_len_comments() for c in self.comments])
self.len_comments = result
return self.len_comments
def addtoproperty(self, name, value, moving=None):
super(Commentable, self).addtoproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments += 1
if self is not channel:
self.len_comments += 1
def delfromproperty(self, name, value, moving=None):
super(Commentable, self).delfromproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments -= 1
if self is not channel:
self.len_comments -= 1
@implementer(IDebatable)
class Debatable(VisualisableElement, Entity):
""" A Debatable entity is an entity that can be comment"""
channels = CompositeMultipleProperty('channels', 'subject')
def __init__(self, **kwargs):
super(Debatable, self).__init__(**kwargs)
@property
def channel(self):
channels = getattr(self, 'channels', [])
return channels[0] if channels else None
def get_channel(self, user):
return self.channel
def get_title(self, user=None):
return getattr(self, 'title', '')
def subscribe_to_channel(self, user):
channel = getattr(self, 'channel', None)
if channel and (user not in channel.members):
channel.addtoproperty('members', user)
def add_new_channel(self):
self.addtoproperty('channels', Channel())
@content(
'channel',
icon='icon novaideo-icon icon-idea',
)
@implementer(IChannel)
class Channel(Commentable):
"""Channel class"""
type_title = _('Channel')
icon = 'icon novaideo-icon icon-idea'
templates = {'default': 'novaideo:views/templates/channel_result.pt'}
name = renamer()
members = SharedMultipleProperty('members', 'following_channels')
subject = SharedUniqueProperty('subject', 'channels')
def __init__(self, **kwargs):
super(Channel, self).__init__(**kwargs)
self.set_data(kwargs)
self._comments_at = OOBTree()
def add_comment(self, comment):
self._comments_at[comment.created_at] = get_oid(comment)
def remove_comment(self, comment):
self._comments_at.pop(comment.created_at)
def get_comments_between(self, start, end):
return list(self._comments_at.values(
min=start, max=end))
def get_subject(self, user=None):
subject = self.subject
return subject if subject else getattr(self, '__parent__', None)
def get_title(self, user=None):
title = getattr(self, 'title', '')
if not title:
return getattr(self.get_subject(user), 'title', None)
return title
def is_discuss(self):
return self.subject.__class__.__name__.lower() == 'person'
@implementer(IEmojiable)
class Emojiable(Entity):
def __init__(self, **kwargs):
super(Emojiable, self).__init__(**kwargs)
self.emojis = OOBTree()
self.users_emoji = OOBTree()
def add_emoji(self, emoji, user):
user_oid = get_oid(user)
current_emoji = self.get_user_emoji(user)
if current_emoji:
self.remove_emoji(current_emoji, user)
if emoji:
self.emojis.setdefault(emoji, PersistentList())
self.emojis[emoji].append(user_oid)
self.users_emoji[user_oid] = emoji
def remove_emoji(self, emoji, user):
user_oid = get_oid(user)
if emoji in self.emojis and \
user_oid in self.emojis[emoji]:
self.emojis[emoji].remove(user_oid)
self.users_emoji.pop(user_oid)
def get_user_emoji(self, user):
user_oid = get_oid(user)
return self.users_emoji.get(user_oid, None)
def can_add_reaction(self, user, process):
return False
@content(
'privatechannel',
icon='icon novaideo-icon icon-idea',
)
@implementer(IPrivateChannel)
class PrivateChannel(Channel):
"""Channel class"""
def __init__(self, **kwargs):
super(PrivateChannel, self).__init__(**kwargs)
self.set_data(kwargs)
def get_subject(self, user=None):
subject = None
for member in self.members:
if member is not user:
subject = member
break
return subject if subject else getattr(self, '__parent__', None)
def get_title(self, user=None):
title = getattr(self, 'title', '')
if not title:
return getattr(self.get_subject(user), 'title', None)
return title
@implementer(IVersionableEntity)
class VersionableEntity(Entity):
""" A Versionable entity is an entity that can be versioned"""
version = CompositeUniqueProperty('version', 'nextversion')
nextversion = SharedUniqueProperty('nextversion', 'version')
@property
def current_version(self):
""" Return the current version"""
if self.nextversion is None:
return self
else:
return self.nextversion.current_version
@property
def history(self):
""" Return all versions"""
result = []
if self.version is None:
return [self]
else:
result.append(self)
result.extend(self.version.history)
return result
def destroy(self):
"""Remove branch"""
if self.version:
self.version.destroy()
if self.nextversion:
self.nextversion.delfromproperty('version', self)
@implementer(IDuplicableEntity)
class DuplicableEntity(Entity):
""" A Duplicable entity is an entity that can be duplicated"""
originalentity = SharedUniqueProperty('originalentity', 'duplicates')
duplicates = SharedMultipleProperty('duplicates', 'originalentity')
@colander.deferred
def keywords_choice(node, kw):
root = getSite()
values = [(i, i) for i in sorted(root.keywords)]
create = getattr(root, 'can_add_keywords', True)
return Select2Widget(max_len=5,
values=values,
create=create,
multiple=True)
class SearchableEntitySchema(Schema):
keywords = colander.SchemaNode(
colander.Set(),
widget=keywords_choice,
title=_('Keywords'),
description=_("To add keywords, you need to separate them by commas "
"and then tap the « Enter » key to validate your selection.")
)
@implementer(ISearchableEntity)
class SearchableEntity(VisualisableElement, Entity):
""" A Searchable entity is an entity that can be searched"""
templates = {'default': 'novaideo:templates/views/default_result.pt',
'bloc': 'novaideo:templates/views/default_result.pt'}
def __init__(self, **kwargs):
super(SearchableEntity, self).__init__(**kwargs)
self.keywords = PersistentList()
@property
def is_published(self):
return 'published' in self.state
@property
def is_workable(self):
return self.is_published
@property
def relevant_data(self):
return [getattr(self, 'title', ''),
getattr(self, 'description', ''),
', '.join(getattr(self, 'keywords', []))]
def set_source_data(self, source_data):
if not hasattr(self, 'source_data'):
self.source_data = PersistentDict({})
app_name = source_data.get('app_name')
self.source_data.setdefault(app_name, {})
self.source_data[app_name] = source_data
def get_source_data(self, app_id):
if not hasattr(self, 'source_data'):
return {}
return self.source_data.get(app_id, {})
def is_managed(self, root):
return True
def get_title(self, user=None):
return getattr(self, 'title', '')
def _init_presentation_text(self):
pass
def get_release_date(self):
return getattr(self, 'release_date', self.modified_at)
def presentation_text(self, nb_characters=400):
return getattr(self, 'description', "")[:nb_characters]+'...'
def get_more_contents_criteria(self):
"return specific query, filter values"
return None, {
'metadata_filter': {
'states': ['published'],
'keywords': list(self.keywords)
}
}
@implementer(IPresentableEntity)
class PresentableEntity(Entity):
""" A Presentable entity is an entity that can be presented"""
def __init__(self, **kwargs):
super(PresentableEntity, self).__init__(**kwargs)
self._email_persons_contacted = PersistentList()
@property
def len_contacted(self):
return len(self._email_persons_contacted)
@property
def persons_contacted(self):
""" Return all contacted persons"""
dace_catalog = find_catalog('dace')
novaideo_catalog = find_catalog('novaideo')
identifier_index = novaideo_catalog['identifier']
object_provides_index = dace_catalog['object_provides']
result = []
for email in self._email_persons_contacted:
query = object_provides_index.any([IPerson.__identifier__]) &\
identifier_index.any([email])
users = list(query.execute().all())
user = users[0] if users else None
if user is not None:
result.append(user)
else:
result.append(email.split('@')[0].split('+')[0])
return set(result)
@implementer(ICorrelableEntity)
class CorrelableEntity(Entity):
"""
A Correlable entity is an entity that can be correlated.
    A correlation is an abstract association between a source entity
    and target entities.
"""
source_correlations = SharedMultipleProperty('source_correlations',
'source')
target_correlations = SharedMultipleProperty('target_correlations',
'targets')
@property
def correlations(self):
"""Return all source correlations and target correlations"""
result = [c.target for c in self.source_correlations]
result.extend([c.source for c in self.target_correlations])
return list(set(result))
@property
def all_source_related_contents(self):
lists_targets = [(c.targets, c) for c in self.source_correlations]
return [(target, c) for targets, c in lists_targets
for target in targets]
@property
def all_target_related_contents(self):
return [(c.source, c) for c in self.target_correlations]
@property
def all_related_contents(self):
related_contents = self.all_source_related_contents
related_contents.extend(self.all_target_related_contents)
return related_contents
@property
def contextualized_contents(self):
lists_contents = [(c.targets, c) for c in
self.contextualized_correlations]
lists_contents = [(target, c) for targets, c in lists_contents
for target in targets]
lists_contents.extend([(c.source, c) for c in
self.contextualized_correlations])
return lists_contents
def get_related_contents(self, type_=None, tags=[]):
if type_ is None and not tags:
return self.all_related_contents
return [(content, c) for content, c in self.all_related_contents
if (type_ is None or c.type == type_) and
(not tags or any(t in tags for t in c.tags))]
class ExaminableEntity(Entity):
"""
    An Examinable entity is an entity that can be examined.
"""
opinions_base = {}
@property
def opinion_value(self):
return self.opinions_base.get(
getattr(self, 'opinion', {}).get('opinion', ''), None)
@implementer(INode)
class Node(Entity):
def __init__(self, **kwargs):
super(Node, self).__init__(**kwargs)
self.graph = PersistentDict()
def get_node_id(self):
return str(self.__oid__).replace('-', '_')
def get_node_descriminator(self):
return 'node'
def init_graph(self, calculated=[]):
result = self.get_nodes_data()
self.graph = PersistentDict(result[0])
oid = self.get_node_id()
newcalculated = list(calculated)
newcalculated.append(oid)
for node in self.graph:
if node not in newcalculated:
node_obj = get_obj(self.graph[node]['oid'])
if node_obj:
graph, newcalculated = node_obj.init_graph(
newcalculated)
return self.graph, newcalculated
def get_nodes_data(self, calculated=[]):
oid = self.get_node_id()
newcalculated = list(calculated)
if oid in calculated:
return {}, newcalculated
all_target_contents = [r for r in self.all_target_related_contents
if isinstance(r[0], Node)]
targets = [{'id': t.get_node_id(),
'type': c.type_name,
'oid': getattr(t, '__oid__', 0)}
for (t, c) in all_target_contents]
all_source_contents = [r for r in self.all_source_related_contents
if r[0] not in all_target_contents
and isinstance(r[0], Node)]
targets.extend([{'id': t.get_node_id(),
'type': c.type_name,
'oid': getattr(t, '__oid__', 0)}
for (t, c) in all_source_contents])
result = {oid: {
'oid': self.__oid__,
'title': self.title,
'descriminator': self.get_node_descriminator(),
'targets': targets
}}
all_source_contents.extend(all_target_contents)
newcalculated.append(oid)
for r_content in all_source_contents:
sub_result, newcalculated = r_content[0].get_nodes_data(newcalculated)
result.update(sub_result)
return result, newcalculated
def get_all_sub_nodes(self):
oid = self.get_node_id()
return set([get_obj(self.graph[id_]['oid']) for id_ in self.graph
if id_ != oid])
def get_sub_nodes(self):
oid = self.get_node_id()
return set([get_obj(node['oid']) for
node in self.graph[oid]['targets']])
@implementer(ISignalableEntity)
class SignalableEntity(Entity):
reports = CompositeMultipleProperty('reports')
censoring_reason = CompositeUniqueProperty('censoring_reason')
def __init__(self, **kwargs):
super(SignalableEntity, self).__init__(**kwargs)
self.len_reports = 0
self.init_len_current_reports()
@property
def subject(self):
return self.__parent__
def init_len_current_reports(self):
self.len_current_reports = 0
def addtoproperty(self, name, value, moving=None):
super(SignalableEntity, self).addtoproperty(name, value, moving)
if name == 'reports':
self.len_current_reports = getattr(self, 'len_current_reports', 0)
self.len_reports = getattr(self, 'len_reports', 0)
self.len_current_reports += 1
self.len_reports += 1
@implementer(ISustainable)
class Sustainable(Entity):
"""Question class"""
def __init__(self, **kwargs):
super(Sustainable, self).__init__(**kwargs)
self.set_data(kwargs)
self.votes_positive = OOBTree()
self.votes_negative = OOBTree()
@property
def len_support(self):
return len(self.votes_positive)
@property
def len_opposition(self):
return len(self.votes_negative)
def add_vote(self, user, date, kind='positive'):
oid = get_oid(user)
if kind == 'positive':
self.votes_positive[oid] = date
else:
self.votes_negative[oid] = date
def withdraw_vote(self, user):
oid = get_oid(user)
if oid in self.votes_positive:
self.votes_positive.pop(oid)
elif oid in self.votes_negative:
self.votes_negative.pop(oid)
def has_vote(self, user):
oid = get_oid(user)
return oid in self.votes_positive or \
oid in self.votes_negative
def has_negative_vote(self, user):
oid = get_oid(user)
return oid in self.votes_negative
def has_positive_vote(self, user):
oid = get_oid(user)
return oid in self.votes_positive
@implementer(ITokenable)
class Tokenable(Entity):
"""Question class"""
tokens_opposition = CompositeMultipleProperty('tokens_opposition')
tokens_support = CompositeMultipleProperty('tokens_support')
def __init__(self, **kwargs):
super(Tokenable, self).__init__(**kwargs)
self.set_data(kwargs)
self.allocated_tokens = OOBTree()
self.len_allocated_tokens = PersistentDict({})
def add_token(self, user, evaluation_type):
user_oid = get_oid(user)
if user_oid in self.allocated_tokens:
self.remove_token(user)
self.allocated_tokens[user_oid] = evaluation_type
self.len_allocated_tokens.setdefault(evaluation_type, 0)
self.len_allocated_tokens[evaluation_type] += 1
def remove_token(self, user):
user_oid = get_oid(user)
if user_oid in self.allocated_tokens:
evaluation_type = self.allocated_tokens.pop(user_oid)
self.len_allocated_tokens.setdefault(evaluation_type, 0)
self.len_allocated_tokens[evaluation_type] -= 1
def evaluators(self, evaluation_type=None):
if evaluation_type:
return [get_obj(key) for value, key
in self.allocated_tokens.byValue(evaluation_type)]
return [get_obj(key) for key
in self.allocated_tokens.keys()]
def evaluation(self, user):
user_oid = get_oid(user, None)
return self.allocated_tokens.get(user_oid, None)
def remove_tokens(self, force=False):
evaluators = self.evaluators()
for user in evaluators:
user.remove_token(self)
if force:
self.remove_token(user)
def user_has_token(self, user, root=None):
if hasattr(user, 'has_token'):
return user.has_token(self, root)
return False
def init_support_history(self):
# [(user_oid, date, support_type), ...], support_type = {1:support, 0:oppose, -1:withdraw}
if not hasattr(self, '_support_history'):
setattr(self, '_support_history', PersistentList())
@property
def len_support(self):
return self.len_allocated_tokens.get(Evaluations.support, 0)
@property
def len_opposition(self):
return self.len_allocated_tokens.get(Evaluations.oppose, 0)
| ecreall/nova-ideo | novaideo/core.py | Python | agpl-3.0 | 25,754 | 0.000427 |
"""
Health check endpoint
"""
from flask import Blueprint
HEALTHCHECK = Blueprint('healthcheck', __name__)
@HEALTHCHECK.route('/comments/health_check')
def healthcheck():
"""
Returns 200
"""
return 'OK'
| miljkovicivan/MicroComments | app/healthcheck.py | Python | bsd-3-clause | 222 | 0 |
import hashlib
import json
import os
import unicodedata
import uuid
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str
import commonware
from uuidfield.fields import UUIDField
import mkt
from mkt.site.storage_utils import copy_stored_file, move_stored_file
from mkt.site.decorators import use_master
from mkt.site.helpers import absolutify
from mkt.site.models import ModelBase, OnChangeMixin, UncachedManagerBase
from mkt.site.utils import smart_path, urlparams
log = commonware.log.getLogger('z.files')
# Acceptable extensions.
EXTENSIONS = ('.webapp', '.json', '.zip')
class File(OnChangeMixin, ModelBase):
STATUS_CHOICES = mkt.STATUS_CHOICES.items()
version = models.ForeignKey('versions.Version', related_name='files')
filename = models.CharField(max_length=255, default='')
size = models.PositiveIntegerField(default=0) # In bytes.
hash = models.CharField(max_length=255, default='')
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES,
default=mkt.STATUS_PENDING)
datestatuschanged = models.DateTimeField(null=True, auto_now_add=True)
reviewed = models.DateTimeField(null=True)
# Whether a webapp uses flash or not.
uses_flash = models.BooleanField(default=False, db_index=True)
class Meta(ModelBase.Meta):
db_table = 'files'
def __unicode__(self):
return unicode(self.id)
@property
def has_been_validated(self):
try:
self.validation
except FileValidation.DoesNotExist:
return False
else:
return True
def get_url_path(self, src):
url = os.path.join(reverse('downloads.file', args=[self.id]),
self.filename)
# Firefox's Add-on Manager needs absolute urls.
return absolutify(urlparams(url, src=src))
@classmethod
def from_upload(cls, upload, version, parse_data={}):
upload.path = smart_path(nfd_str(upload.path))
ext = os.path.splitext(upload.path)[1]
f = cls(version=version)
f.filename = f.generate_filename(extension=ext or '.zip')
f.size = storage.size(upload.path) # Size in bytes.
f.status = mkt.STATUS_PENDING
f.hash = f.generate_hash(upload.path)
f.save()
log.debug('New file: %r from %r' % (f, upload))
# Move the uploaded file from the temp location.
copy_stored_file(upload.path, os.path.join(version.path_prefix,
nfd_str(f.filename)))
if upload.validation:
FileValidation.from_json(f, upload.validation)
return f
@property
def addon(self):
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
version = Version.with_deleted.get(pk=self.version_id)
return Webapp.with_deleted.get(pk=version.addon_id)
def generate_hash(self, filename=None):
"""Generate a hash for a file."""
hash = hashlib.sha256()
with open(filename or self.file_path, 'rb') as obj:
for chunk in iter(lambda: obj.read(1024), ''):
hash.update(chunk)
return 'sha256:%s' % hash.hexdigest()
def generate_filename(self, extension=None):
"""
Files are in the format of: {app_slug}-{version}.{extension}
"""
parts = []
addon = self.version.addon
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
extension = extension or '.zip' if addon.is_packaged else '.webapp'
# Apparently we have non-ascii slugs leaking into prod :(
# FIXME.
parts.append(slugify(addon.app_slug) or 'app')
parts.append(self.version.version)
self.filename = '-'.join(parts) + extension
return self.filename
@property
def file_path(self):
if self.status == mkt.STATUS_DISABLED:
return self.guarded_file_path
else:
return self.approved_file_path
@property
def approved_file_path(self):
return os.path.join(settings.ADDONS_PATH, str(self.version.addon_id),
self.filename)
@property
def guarded_file_path(self):
return os.path.join(settings.GUARDED_ADDONS_PATH,
str(self.version.addon_id), self.filename)
@property
def signed_file_path(self):
return os.path.join(settings.SIGNED_APPS_PATH,
str(self.version.addon_id), self._signed())
@property
def signed_reviewer_file_path(self):
return os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
str(self.version.addon_id), self._signed())
def _signed(self):
split = self.filename.rsplit('.', 1)
split.insert(-1, 'signed')
return '.'.join(split)
@property
def extension(self):
return os.path.splitext(self.filename)[-1]
@classmethod
def mv(cls, src, dst, msg):
"""Move a file from src to dst."""
try:
if storage.exists(src):
log.info(msg % (src, dst))
move_stored_file(src, dst)
except UnicodeEncodeError:
log.error('Move Failure: %s %s' % (smart_str(src), smart_str(dst)))
def hide_disabled_file(self):
"""Move a disabled file to the guarded file path."""
if not self.filename:
return
src, dst = self.approved_file_path, self.guarded_file_path
self.mv(src, dst, 'Moving disabled file: %s => %s')
def unhide_disabled_file(self):
if not self.filename:
return
src, dst = self.guarded_file_path, self.approved_file_path
self.mv(src, dst, 'Moving undisabled file: %s => %s')
@use_master
def update_status(sender, instance, **kw):
if not kw.get('raw'):
try:
instance.version.addon.reload()
instance.version.addon.update_status()
if 'delete' in kw:
instance.version.addon.update_version(ignore=instance.version)
else:
instance.version.addon.update_version()
except models.ObjectDoesNotExist:
pass
def update_status_delete(sender, instance, **kw):
kw['delete'] = True
return update_status(sender, instance, **kw)
models.signals.post_save.connect(
update_status, sender=File, dispatch_uid='version_update_status')
models.signals.post_delete.connect(
update_status_delete, sender=File, dispatch_uid='version_update_status')
@receiver(models.signals.post_delete, sender=File,
dispatch_uid='cleanup_file')
def cleanup_file(sender, instance, **kw):
""" On delete of the file object from the database, unlink the file from
the file system """
if kw.get('raw') or not instance.filename:
return
# Use getattr so the paths are accessed inside the try block.
for path in ('file_path', 'guarded_file_path'):
try:
filename = getattr(instance, path, None)
except models.ObjectDoesNotExist:
return
if filename and storage.exists(filename):
log.info('Removing filename: %s for file: %s'
% (filename, instance.pk))
storage.delete(filename)
@File.on_change
def check_file(old_attr, new_attr, instance, sender, **kw):
if kw.get('raw'):
return
old, new = old_attr.get('status'), instance.status
if new == mkt.STATUS_DISABLED and old != mkt.STATUS_DISABLED:
instance.hide_disabled_file()
elif old == mkt.STATUS_DISABLED and new != mkt.STATUS_DISABLED:
instance.unhide_disabled_file()
# Log that the hash has changed.
old, new = old_attr.get('hash'), instance.hash
if old != new:
try:
addon = instance.version.addon.pk
except models.ObjectDoesNotExist:
addon = 'unknown'
log.info('Hash changed for file: %s, addon: %s, from: %s to: %s' %
(instance.pk, addon, old, new))
class FileUpload(ModelBase):
"""Created when a file is uploaded for validation/submission."""
uuid = UUIDField(primary_key=True, auto=True)
path = models.CharField(max_length=255, default='')
name = models.CharField(max_length=255, default='',
help_text="The user's original filename")
hash = models.CharField(max_length=255, default='')
user = models.ForeignKey('users.UserProfile', null=True)
valid = models.BooleanField(default=False)
validation = models.TextField(null=True)
task_error = models.TextField(null=True)
objects = UncachedManagerBase()
class Meta(ModelBase.Meta):
db_table = 'file_uploads'
def __unicode__(self):
return self.uuid
def save(self, *args, **kw):
if self.validation:
try:
if json.loads(self.validation)['errors'] == 0:
self.valid = True
except Exception:
log.error('Invalid validation json: %r' % self)
super(FileUpload, self).save()
def add_file(self, chunks, filename, size):
filename = smart_str(filename)
loc = os.path.join(settings.ADDONS_PATH, 'temp', uuid.uuid4().hex)
base, ext = os.path.splitext(smart_path(filename))
if ext in EXTENSIONS:
loc += ext
log.info('UPLOAD: %r (%s bytes) to %r' % (filename, size, loc))
hash = hashlib.sha256()
        # The buffer might have been read before, so rewind back to the start.
if hasattr(chunks, 'seek'):
chunks.seek(0)
with storage.open(loc, 'wb') as fd:
for chunk in chunks:
hash.update(chunk)
fd.write(chunk)
self.path = loc
self.name = filename
self.hash = 'sha256:%s' % hash.hexdigest()
self.save()
@classmethod
def from_post(cls, chunks, filename, size, **kwargs):
fu = FileUpload(**kwargs)
fu.add_file(chunks, filename, size)
return fu
@property
def processed(self):
return bool(self.valid or self.validation)
class FileValidation(ModelBase):
file = models.OneToOneField(File, related_name='validation')
valid = models.BooleanField(default=False)
errors = models.IntegerField(default=0)
warnings = models.IntegerField(default=0)
notices = models.IntegerField(default=0)
validation = models.TextField()
class Meta:
db_table = 'file_validation'
@classmethod
def from_json(cls, file, validation):
js = json.loads(validation)
new = cls(file=file, validation=validation, errors=js['errors'],
warnings=js['warnings'], notices=js['notices'])
new.valid = new.errors == 0
new.save()
return new
def nfd_str(u):
"""Uses NFD to normalize unicode strings."""
if isinstance(u, unicode):
return unicodedata.normalize('NFD', u).encode('utf-8')
return u
| eviljeff/zamboni | mkt/files/models.py | Python | bsd-3-clause | 11,351 | 0 |
#!/usr/bin/env python
#
# Copyright 2013 CSIR Meraka HLT and Multilingual Speech Technologies (MuST) North-West University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Marelie Davel"
__email__ = "mdavel@csir.co.za"
"""
Display the dictionary pronunciations of the most frequent words occuring in a speech corpus
@param in_trans_list: List of transcription filenames
@param in_dict: Pronunciation dictionary
@param top_n: Number of words to verify
@param out_name: Name of output file for results
"""
import sys, operator, codecs
#------------------------------------------------------------------------------
def display_top_prons(trans_list_name, dict_name, top_n, out_name):
"""Display the dictionary pronunciations of the most frequent words occuring in a speech corpus"""
#Read dictionary
pron_dict = {}
try:
dict_file = codecs.open(dict_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + dict_name
sys.exit(1)
for ln in dict_file:
ln = ln.strip()
parts = ln.split("\t")
if len(parts) != 2:
print "Error: dictionary format error line %s" % ln
word = parts[0]
pron = parts[1]
if pron_dict.has_key(word):
pron_dict[word].append(pron)
else:
pron_dict[word] = []
pron_dict[word].append(pron)
dict_file.close()
#Read and cnt words in transcriptions
counts = {}
try:
list_file = codecs.open(trans_list_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + trans_list_name
sys.exit(1)
for trans_name in list_file:
trans_name = trans_name.strip()
try:
trans_file = codecs.open(trans_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + trans_name
sys.exit(1)
for ln in trans_file:
ln = ln.strip()
parts = ln.split(" ")
for word in parts:
if counts.has_key(word):
counts[word] = counts[word]+1
else:
counts[word] = 1
trans_file.close()
list_file.close()
#Now write top pronunciations to file
try:
out_file = codecs.open(out_name,"w","utf8")
except IOError:
print "Error: Error writing to file " + out_name
sys.exit(1)
top_words = sorted(counts.items(),key=operator.itemgetter(1),reverse=True)
n = 0;
for (w,c) in top_words:
if n < top_n:
if pron_dict.has_key(w):
for var_pron in pron_dict[w]:
out_file.write("%d\t%-20s\t%s\n" % (c,w,var_pron) )
n = n+1
else:
print "Error: unknown word %s" % word
else:
break
out_file.close()
#------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) == 5:
trans_list_name = str(sys.argv[1])
dict_name = str(sys.argv[2])
top_n = int(sys.argv[3])
out_name = str(sys.argv[4])
print "Displaying the %d most frequent words" % top_n
display_top_prons(trans_list_name, dict_name, top_n, out_name)
else:
print "\nDisplay the dictionary pronunciations of the most frequent words in a speech corpus."
print "Usage: display_top_prons.py <in:trans_list> <in:dict> <n> <out:results>"
print " <in:trans_list> list of transcription filenames"
print " <in:dict> pronunciation dictionary"
print " <n> number of words to verify"
print " <out:results> name of output file for results"
#------------------------------------------------------------------------------
| Mphaya/heasy | heasy/utility_scripts/display_top_prons.py | Python | apache-2.0 | 4,406 | 0.00749 |
import xml.etree.ElementTree as ET
from email.utils import parsedate as parsedate_
from time import mktime
import datetime
def parsedate(d):
    return datetime.datetime.fromtimestamp(mktime(parsedate_(d)))
def element_value(el,default):
    if el is None:
        return default
    return ((el.text or "") + (el.tail or "")).strip()
def date_value(dv):
return parsedate(dv) if isinstance(dv,str) else dv
def repeat_value(val):
while True: yield val
class Item:
def __init__(self, title, link, description, pubdate, guid):
self.title = title
self.link = link
self.description = description
self.published = date_value(pubdate)
self.guid = guid
class Feed:
def __init__(self,title,description,link,language,copyright,editor,master,version,items):
self.title = title
self.description = description
self.link = link
self.language = language
self.copyright = copyright
self.editor = editor
self.webmaster = master
self.version = version
self.items = items
@classmethod
def create(cls,data):
tree = ET.fromstring(data)
return {
"0.91": cls.parse_91,
"0.90": cls.parse_90,
"2.0": cls.parse_20
}[tree.get("version","2.0")](tree.find("channel"))
@classmethod
def parse_91(cls,tree):
version = tree.get("version","0.91")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_90(cls,tree):
version = tree.get("version","0.90")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_20(cls,tree):
version = tree.get("version","2.0")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_item(cls,node,version="2.0"):
title = element_value(node.find("title"),"unknown")
link = element_value(node.find("link"),"")
description = element_value(node.find("description"),"unknown")
pubdate = element_value(node.find("pubDate"),"unknown")
guid = element_value(node.find("guid"),"unknown")
return Item(title,link,description,pubdate,guid)
def updates(self,since):
        include = lambda x: x.published >= since
return filter(include,self.items)
sample = """<?xml version="1.0"?>
<rss version="2.0">
<channel>
<title>Liftoff News</title>
<link>http://liftoff.msfc.nasa.gov/</link>
<description>Liftoff to Space Exploration.</description>
<language>en-us</language>
<pubDate>Tue, 10 Jun 2003 04:00:00 GMT</pubDate>
<lastBuildDate>Tue, 10 Jun 2003 09:41:01 GMT</lastBuildDate>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<generator>Weblog Editor 2.0</generator>
<managingEditor>editor@example.com</managingEditor>
<webMaster>webmaster@example.com</webMaster>
<item>
<title>Star City</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-starcity.asp</link>
<description>How do Americans get ready to work with Russians aboard the International Space Station? They take a crash course in culture, language and protocol at Russia's <a href="http://howe.iki.rssi.ru/GCTC/gctc_e.htm">Star City</a>.</description>
<pubDate>Tue, 03 Jun 2003 09:39:21 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/06/03.html#item573</guid>
</item>
<item>
<description>Sky watchers in Europe, Asia, and parts of Alaska and Canada will experience a <a href="http://science.nasa.gov/headlines/y2003/30may_solareclipse.htm">partial eclipse of the Sun</a> on Saturday, May 31st.</description>
<pubDate>Fri, 30 May 2003 11:06:42 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/05/30.html#item572</guid>
</item>
<item>
<title>The Engine That Does More</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-VASIMR.asp</link>
<description>Before man travels to Mars, NASA hopes to design new engines that will let us fly through the Solar System more quickly. The proposed VASIMR engine would do that.</description>
<pubDate>Tue, 27 May 2003 08:37:32 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/05/27.html#item571</guid>
</item>
<item>
<title>Astronauts' Dirty Laundry</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-laundry.asp</link>
<description>Compared to earlier spacecraft, the International Space Station has many luxuries, but laundry facilities are not one of them. Instead, astronauts have other options.</description>
<pubDate>Tue, 20 May 2003 08:56:02 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/05/20.html#item570</guid>
</item>
</channel>
</rss>
"""
feed = Feed.create(sample)
print(feed.title)
print(feed.description)
for item in feed.items:
print(item.title)
print(item.description)
print(item.link)
print(item.published.day,item.published.month,item.published.year)
print() | tyler-elric/misc | rss.py | Python | gpl-3.0 | 6,353 | 0.030222 |
'''Generate truth data from a truth_data_file.
A truth_data_file may be produced by a human being, so sometimes
it should be considered truth data. This file provides
utilities for turning a truth_data_file of a certain format into
truth data the harness understands.
'''
from __future__ import absolute_import
import argparse
import json
from bs4 import BeautifulSoup
import logging
import sys
from dossier.label import Label, LabelStore, CorefValue
import kvlayer
import yakonfig
logger = logging.getLogger(__name__)
def parse_passage(p):
'''Extract a line_data dict from a passage's XML data and context.
'''
line_data = {}
domain = p.parent.parent.parent
topic = p.parent.parent
subtopic = p.parent
line_data['domain_id'] = domain['id'].encode('utf-8')
line_data['domain_name'] = domain['name'].encode('utf-8')
line_data['userid'] = 'dropped'
line_data['username'] = 'dropped'
line_data['topic_id'] = topic['id'].encode('utf-8')
line_data['topic_name'] = topic['name'].encode('utf-8')
line_data['subtopic_id'] = subtopic['id'].encode('utf-8')
line_data['subtopic_name'] = subtopic['name'].encode('utf-8')
line_data['passage_id'] = p['id'].encode('utf-8')
line_data['passage_name'] = p.find('text').text.encode('utf-8')
line_data['docno'] = p.docno.text.encode('utf-8')
line_data['grade'] = p.rating.text.encode('utf-8')
return line_data
def make_full_doc_id(doc_id, offset_start, offset_end):
'''A full doc_id is of the form: doc_id#offset_start,offset_end
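
    Example with illustrative values:

    >>> make_full_doc_id('doc1', '10', '25')
    'doc1#10,25'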
'''
offset_string = ','.join([offset_start, offset_end])
return '#'.join([doc_id, offset_string])
def make_offset_string(offset_start, offset_end):
'''Create an offset string from a pair of offsets.
:param offset_start: str
:param offset_end: str
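
    Example with illustrative values:

    >>> make_offset_string('10', '25')
    '10,25'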
'''
return ','.join([offset_start, offset_end])
def label_from_truth_data_file_line(line_data):
'''Create a label from a *parsed* truth_data_file line.
:param line_data: dict
'''
# document data
doc_id = line_data['docno']
if not doc_id.strip():
logger.warn('dropping invalid truth data line: '
'bad docno: %r: %r'
% (doc_id, line_data))
return None
if len(line_data['passage_name'].strip()) < 1:
logger.warn('dropping empty passage: %r', line_data)
return None
# annotation data
topic_id = line_data['topic_id']
subtopic_id = line_data['subtopic_id']
passage_id = line_data['passage_id']
annotator = line_data['userid']
# value data
value = CorefValue.Positive
try:
rating = int(line_data['grade'])
except ValueError:
logger.warn('replacing bogus grade with zero = %r',
line_data['grade'])
rating = 0
if rating < 0:
value = CorefValue.Negative
rating = 0
# meta data
meta = {'domain_name': line_data['domain_name'],
'domain_id': line_data['domain_id'],
'username': line_data['username'],
'topic_name': line_data['topic_name'],
'topic_id': line_data['topic_id'],
'subtopic_name': line_data['subtopic_name'],
'passage_text': line_data['passage_name']}
label = Label(topic_id, doc_id, annotator, value,
subtopic_id1=subtopic_id, subtopic_id2=passage_id,
rating=rating, meta=meta)
return label
def parse_truth_data(label_store, truth_data_path, batch_size=10000):
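    '''Parse an XML truth_data_file and store its labels.

    Each passage is converted to a :class:`dossier.label.Label` and written
    to ``label_store`` in batches of ``batch_size``.
    '''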
data_file = open(truth_data_path, 'r')
data = BeautifulSoup(data_file, 'xml')
labels_to_put = []
num_labels = 0
for psg in data.find_all('passage'):
line_data = parse_passage(psg)
label = label_from_truth_data_file_line(line_data)
if label is not None:
labels_to_put.append(label)
num_labels += 1
if num_labels % 1000 == 0:
logger.debug('Converted %d labels.' % num_labels)
if len(labels_to_put) >= batch_size:
label_store.put(*labels_to_put)
labels_to_put = []
if len(labels_to_put) > 0:
label_store.put(*labels_to_put)
def main():
parser = argparse.ArgumentParser('test tool for checking that we can load '
'the truth data as distributed by NIST for '
'TREC 2015')
parser.add_argument('truth_data_path', help='path to truth data file')
modules = [yakonfig, kvlayer]
args = yakonfig.parse_args(parser, modules)
logging.basicConfig(level=logging.DEBUG)
kvl = kvlayer.client()
label_store = LabelStore(kvl)
parse_truth_data(label_store, args.truth_data_path)
logger.debug('Done! The truth data was loaded into this kvlayer backend: %r',
json.dumps(yakonfig.get_global_config('kvlayer'), indent=4,
sort_keys=True))
if __name__ == '__main__':
main()
| trec-dd/trec-dd-simulation-harness | trec_dd/harness/truth_data.py | Python | mit | 4,988 | 0.002005 |
"""
Copyright (c) 2016, John Deutscher
Description: Sample Python script for Azure Media Indexer V2
License: MIT (see LICENSE.txt file for details)
Documentation : https://azure.microsoft.com/en-us/documentation/articles/media-services-process-content-with-indexer2/
"""
import os
import json
import amspy
import time
import sys
#import pytz
import urllib
import logging
import datetime
from azure import *
from azure.storage.blob import BlockBlobService
from azure.storage.blob import ContentSettings
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('../../config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
    print ("ERROR: Expecting config.json in examples folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
sto_account_name = configData['sto_accountName']
sto_accountKey = configData['sto_accountKey']
log_name = configData['logName']
log_level = configData['logLevel']
purge_log = configData['purgeLog']
#Initialization...
print ("\n-----------------------= AMS Py =----------------------")
print ("Azure Media Analytics - Indexer v2 Preview Sample")
print ("for details : https://azure.microsoft.com/en-us/documentation/articles/media-services-process-content-with-indexer2/ ")
print ("-------------------------------------------------------\n")
#Remove old log file if requested (default behavior)...
if (os.path.isdir('./log') != True):
os.mkdir('log')
if (purge_log.lower() == "yes"):
if (os.path.isfile(log_name)):
os.remove(log_name)
#Basic Logging...
logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=log_level, filename=log_name)
# Get the access token...
response = amspy.get_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Some global vars...
NAME = "movie"
COUNTER = 0
ENCRYPTION = "1" # 0=None, StorageEncrypted=1, CommonEncryptionProtected=2, EnvelopeEncryptionProtected=4
ENCRYPTION_SCHEME = "StorageEncryption" # StorageEncryption or CommonEncryption.
VIDEO_NAME = "movie.mp4"
VIDEO_PATH = "../assets/movie.mp4"
ASSET_FINAL_NAME = "Python Sample-Indexer-V2"
PROCESSOR_NAME = "Azure Media Indexer 2 Preview"
INDEXER_V2_JSON_PRESET = "indexerv2.json"
# Just a simple wrapper function to print the title of each of our phases to the console...
def print_phase_header(message):
global COUNTER;
print ("\n[" + str("%02d" % int(COUNTER)) + "] >>> " + message)
    COUNTER += 1
# This wrapper function prints our messages to the console with a timestamp...
def print_phase_message(message):
time_stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print (str(time_stamp) + ": " + message)
### get ams redirected url
response = amspy.get_url(access_token)
if (response.status_code == 200):
ams_redirected_rest_endpoint = str(response.url)
else:
print_phase_message("GET Status: " + str(response.status_code) + " - Getting Redirected URL ERROR." + str(response.content))
exit(1)
######################### PHASE 1: UPLOAD #########################
### create an asset
print_phase_header("Creating a Media Asset")
response = amspy.create_media_asset(access_token, NAME)
if (response.status_code == 201):
resjson = response.json()
asset_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Asset Name........................: " + NAME)
print_phase_message("Media Asset Id..........................: " + asset_id)
else:
print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Asset: '" + NAME + "' Creation ERROR." + str(response.content))
### create an assetfile
print_phase_header("Creating a Media Assetfile (for the video file)")
response = amspy.create_media_assetfile(access_token, asset_id, VIDEO_NAME, "false", "false")
if (response.status_code == 201):
resjson = response.json()
video_assetfile_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Assetfile Name....................: " + str(resjson['d']['Name']))
print_phase_message("Media Assetfile Id......................: " + video_assetfile_id)
print_phase_message("Media Assetfile IsPrimary...............: " + str(resjson['d']['IsPrimary']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Media Assetfile: '" + VIDEO_NAME + "' Creation ERROR." + str(response.content))
### create an asset write access policy for uploading
print_phase_header("Creating an Asset Write Access Policy")
duration = "440"
response = amspy.create_asset_accesspolicy(access_token, "NewUploadPolicy", duration, "2")
if (response.status_code == 201):
resjson = response.json()
write_accesspolicy_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Asset Access Policy Id..................: " + write_accesspolicy_id)
print_phase_message("Asset Access Policy Duration/min........: " + str(resjson['d']['DurationInMinutes']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Asset Write Access Policy Creation ERROR." + str(response.content))
### create a sas locator
print_phase_header("Creating a write SAS Locator")
## INFO: If you need to upload your files immediately, you should set your StartTime value to five minutes before the current time.
#This is because there may be clock skew between your client machine and Media Services.
#Also, your StartTime value must be in the following DateTime format: YYYY-MM-DDTHH:mm:ssZ (for example, "2014-05-23T17:53:50Z").
# EDITED: Not providing starttime is the best approach to be able to upload a file immediatly...
#starttime = datetime.datetime.now(pytz.timezone(time_zone)).strftime("%Y-%m-%dT%H:%M:%SZ")
#response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id, starttime)
response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id)
if (response.status_code == 201):
resjson = response.json()
saslocator_id = str(resjson['d']['Id'])
saslocator_baseuri = str(resjson['d']['BaseUri'])
sto_asset_name = os.path.basename(os.path.normpath(saslocator_baseuri))
saslocator_cac = str(resjson['d']['ContentAccessComponent'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("SAS URL Locator StartTime...............: " + str(resjson['d']['StartTime']))
print_phase_message("SAS URL Locator Id......................: " + saslocator_id)
print_phase_message("SAS URL Locator Base URI................: " + saslocator_baseuri)
print_phase_message("SAS URL Locator Content Access Component: " + saslocator_cac)
else:
print_phase_message("POST Status: " + str(response.status_code) + " - SAS URL Locator Creation ERROR." + str(response.content))
### Use the Azure Blob Blob Servic library from the Azure Storage SDK.
block_blob_service = BlockBlobService(account_name=sto_account_name, sas_token=saslocator_cac[1:])
### Define a callback method to show progress of large uploads
def uploadCallback(current, total):
if (current != None):
        print_phase_message('{0:,.1f}/{1:,.1f} MB'.format(current/1024/1024, total/1024/1024))
### Start upload the video file
print_phase_header("Uploading the Video File")
with open(VIDEO_PATH, mode='rb') as file:
video_content = file.read()
video_content_length = len(video_content)
response = block_blob_service.create_blob_from_path(
sto_asset_name,
VIDEO_NAME,
VIDEO_PATH,
max_connections=5,
content_settings=ContentSettings(content_type='video/mp4'),
progress_callback=uploadCallback,
)
if (response == None):
print_phase_message("PUT Status..............................: 201")
print_phase_message("Video File Uploaded.....................: OK")
### update the assetfile metadata after uploading
print_phase_header("Updating the Video Assetfile")
response = amspy.update_media_assetfile(access_token, asset_id, video_assetfile_id, video_content_length, VIDEO_NAME)
if (response.status_code == 204):
print_phase_message("MERGE Status............................: " + str(response.status_code))
print_phase_message("Assetfile Content Length Updated........: " + str(video_content_length))
else:
print_phase_message("MERGE Status............................: " + str(response.status_code) + " - Assetfile: '" + VIDEO_NAME + "' Update ERROR." + str(response.content))
### delete the locator, so that it can't be used again
print_phase_header("Deleting the Locator")
response = amspy.delete_sas_locator(access_token, saslocator_id)
if (response.status_code == 204):
print_phase_message("DELETE Status...........................: " + str(response.status_code))
print_phase_message("SAS URL Locator Deleted.................: " + saslocator_id)
else:
print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - SAS URL Locator: '" + saslocator_id + "' Delete ERROR." + str(response.content))
### delete the asset access policy
print_phase_header("Deleting the Acess Policy")
response = amspy.delete_asset_accesspolicy(access_token, write_accesspolicy_id)
if (response.status_code == 204):
print_phase_message("DELETE Status...........................: " + str(response.status_code))
print_phase_message("Asset Access Policy Deleted.............: " + write_accesspolicy_id)
else:
print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - Asset Access Policy: '" + write_accesspolicy_id + "' Delete ERROR." + str(response.content))
### get the media processor for Indexer v2
print_phase_header("Getting the Media Processor for Indexer v2")
response = amspy.list_media_processor(access_token)
if (response.status_code == 200):
resjson = response.json()
print_phase_message("GET Status..............................: " + str(response.status_code))
for mp in resjson['d']['results']:
if(str(mp['Name']) == PROCESSOR_NAME):
processor_id = str(mp['Id'])
print_phase_message("MEDIA Processor Id......................: " + processor_id)
print_phase_message("MEDIA Processor Name....................: " + PROCESSOR_NAME)
else:
print_phase_message("GET Status: " + str(response.status_code) + " - Media Processors Listing ERROR." + str(response.content))
## create an INdexer V2 job
print_phase_header("Creating a Media Job to index the content")
with open(INDEXER_V2_JSON_PRESET, mode='r') as file:
indexer_preset = file.read()
response = amspy.encode_mezzanine_asset(access_token, processor_id, asset_id, ASSET_FINAL_NAME, indexer_preset)
if (response.status_code == 201):
resjson = response.json()
job_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Job Id............................: " + job_id)
else:
print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Job Creation ERROR." + str(response.content))
### list a media job
print_phase_header("Getting the Media Job Status")
flag = 1
while (flag):
response = amspy.list_media_job(access_token, job_id)
if (response.status_code == 200):
resjson = response.json()
job_state = str(resjson['d']['State'])
if (resjson['d']['EndTime'] != None):
joboutputassets_uri = resjson['d']['OutputMediaAssets']['__deferred']['uri']
flag = 0
print_phase_message("GET Status..............................: " + str(response.status_code))
print_phase_message("Media Job Status........................: " + amspy.translate_job_state(job_state))
else:
print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job: '" + asset_id + "' Listing ERROR." + str(response.content))
time.sleep(5)
## getting the indexed asset id
print_phase_header("Getting the Indexed Media Asset Id")
response = amspy.get_url(access_token, joboutputassets_uri, False)
if (response.status_code == 200):
resjson = response.json()
output_asset_id = resjson['d']['results'][0]['Id']
print_phase_message("GET Status..............................: " + str(response.status_code))
print_phase_message("Indexed output Media Asset Id..................: " + output_asset_id)
else:
print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job Output Asset: '" + job_id + "' Getting ERROR." + str(response.content))
# Get Asset by using the list_media_asset method and the Asset ID
response = amspy.list_media_asset(access_token,output_asset_id)
if (response.status_code == 200):
resjson = response.json()
# Get the container name from the Uri
outputAssetContainer = resjson['d']['Uri'].split('/')[3]
print(outputAssetContainer)
else:
print("Not a 200: " + str(response.status_code))
exit(-1)
### Use the Azure Blob Blob Service library from the Azure Storage SDK to download just the output WebVTT file
block_blob_service = BlockBlobService(account_name=sto_account_name,account_key=sto_accountKey)
generator = block_blob_service.list_blobs(outputAssetContainer)
for blob in generator:
print_phase_message("Output File Name........................: " + blob.name)
if(blob.name.endswith(".vtt")):
blobText = block_blob_service.get_blob_to_text(outputAssetContainer, blob.name)
print_phase_message("\n\n##### WEB VTT ######")
print(blobText.content.encode('utf-8'))
block_blob_service.get_blob_to_path(outputAssetContainer, blob.name, "output/" + blob.name)
| johndeu/amspy | amspy/examples/analytics/indexer_v2/indexer_v2.py | Python | mit | 14,879 | 0.01566 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import partial
from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from karbor.common import constants
from karbor import exception
from karbor.services.protection.client_factory import ClientFactory
from karbor.services.protection import protection_plugin
from karbor.services.protection.protection_plugins.server \
import server_plugin_schemas
from karbor.services.protection.protection_plugins import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
VOLUME_ATTACHMENT_RESOURCE = 'OS::Cinder::VolumeAttachment'
FLOATING_IP_ASSOCIATION = 'OS::Nova::FloatingIPAssociation'
nova_backup_opts = [
cfg.IntOpt(
'poll_interval', default=15,
help='Poll interval for Nova backup status'
),
]
class ProtectOperation(protection_plugin.Operation):
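    """Back up a Nova server by saving the metadata needed to recreate it
    (boot device, attached volumes, networks, flavor, key pair and security
    groups) into the checkpoint bank.
    """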
def on_main(self, checkpoint, resource, context, parameters, **kwargs):
server_id = resource.id
bank_section = checkpoint.get_resource_bank_section(server_id)
nova_client = ClientFactory.create_client("nova", context)
cinder_client = ClientFactory.create_client("cinder", context)
neutron_client = ClientFactory.create_client("neutron", context)
resource_definition = {"resource_id": server_id}
# get dependent resources
server_child_nodes = []
resources = checkpoint.resource_graph
for resource_node in resources:
resource = resource_node.value
if resource.id == server_id:
server_child_nodes = resource_node.child_nodes
LOG.info("Creating server backup, server_id: %s. ", server_id)
try:
bank_section.update_object("status",
constants.RESOURCE_STATUS_PROTECTING)
# get attach_metadata about volume
attach_metadata = {}
for server_child_node in server_child_nodes:
child_resource = server_child_node.value
if child_resource.type == constants.VOLUME_RESOURCE_TYPE:
volume = cinder_client.volumes.get(child_resource.id)
attachments = getattr(volume, "attachments")
for attachment in attachments:
if attachment["server_id"] == server_id:
attachment["bootable"] = getattr(
volume, "bootable")
attach_metadata[child_resource.id] = attachment
resource_definition["attach_metadata"] = attach_metadata
# get metadata about AZ
server = nova_client.servers.get(server_id)
availability_zone = getattr(server, "OS-EXT-AZ:availability_zone")
# get metadata about network, flavor, key_name, security_groups
addresses = getattr(server, "addresses")
networks = []
floating_ips = []
for network_infos in addresses.values():
for network_info in network_infos:
addr = network_info.get("addr")
mac = network_info.get("OS-EXT-IPS-MAC:mac_addr")
network_type = network_info.get("OS-EXT-IPS:type")
if network_type == 'fixed':
port = neutron_client.list_ports(
mac_address=mac)["ports"][0]
if port["network_id"] not in networks:
networks.append(port["network_id"])
elif network_type == "floating":
floating_ips.append(addr)
flavor = getattr(server, "flavor")["id"]
key_name = getattr(server, "key_name", None)
security_groups = getattr(server, "security_groups", None)
# get metadata about boot device
boot_metadata = {}
image_info = getattr(server, "image", None)
if image_info is not None and isinstance(image_info, dict):
boot_metadata["boot_device_type"] = "image"
boot_metadata["boot_image_id"] = image_info['id']
else:
boot_metadata["boot_device_type"] = "volume"
volumes_attached = getattr(
server, "os-extended-volumes:volumes_attached", [])
for volume_attached in volumes_attached:
volume_id = volume_attached["id"]
volume_attach_metadata = attach_metadata.get(
volume_id, None)
if volume_attach_metadata is not None and (
volume_attach_metadata["bootable"] == "true"):
boot_metadata["boot_volume_id"] = volume_id
boot_metadata["boot_attach_metadata"] = (
volume_attach_metadata)
resource_definition["boot_metadata"] = boot_metadata
# save all server's metadata
server_metadata = {"availability_zone": availability_zone,
"networks": networks,
"floating_ips": floating_ips,
"flavor": flavor,
"key_name": key_name,
"security_groups": security_groups,
}
resource_definition["server_metadata"] = server_metadata
LOG.info("Creating server backup, resource_definition: %s.",
resource_definition)
bank_section.update_object("metadata", resource_definition)
# update resource_definition backup_status
bank_section.update_object("status",
constants.RESOURCE_STATUS_AVAILABLE)
LOG.info("Finish backup server, server_id: %s.", server_id)
except Exception as err:
# update resource_definition backup_status
LOG.exception("Create backup failed, server_id: %s.", server_id)
bank_section.update_object("status",
constants.RESOURCE_STATUS_ERROR)
raise exception.CreateResourceFailed(
name="Server Backup",
reason=err,
resource_id=server_id,
resource_type=constants.SERVER_RESOURCE_TYPE)
class DeleteOperation(protection_plugin.Operation):
def on_main(self, checkpoint, resource, context, parameters, **kwargs):
resource_id = resource.id
bank_section = checkpoint.get_resource_bank_section(resource_id)
LOG.info("deleting server backup, server_id: %s.", resource_id)
try:
bank_section.update_object("status",
constants.RESOURCE_STATUS_DELETING)
objects = bank_section.list_objects()
for obj in objects:
if obj == "status":
continue
bank_section.delete_object(obj)
bank_section.update_object("status",
constants.RESOURCE_STATUS_DELETED)
LOG.info("finish delete server, server_id: %s.", resource_id)
except Exception as err:
# update resource_definition backup_status
LOG.error("Delete backup failed, server_id: %s.", resource_id)
bank_section.update_object("status",
constants.RESOURCE_STATUS_ERROR)
raise exception.DeleteResourceFailed(
name="Server Backup",
reason=err,
resource_id=resource_id,
resource_type=constants.SERVER_RESOURCE_TYPE)
class VerifyOperation(protection_plugin.Operation):
def __init__(self):
super(VerifyOperation, self).__init__()
def on_main(self, checkpoint, resource, context, parameters, **kwargs):
original_server_id = resource.id
bank_section = checkpoint.get_resource_bank_section(
original_server_id)
LOG.info('Verifying the server backup, server_id: %s',
original_server_id)
update_method = partial(
utils.update_resource_verify_result,
kwargs.get('verify'), resource.type, original_server_id)
backup_status = bank_section.get_object("status")
if backup_status == constants.RESOURCE_STATUS_AVAILABLE:
update_method(constants.RESOURCE_STATUS_AVAILABLE)
else:
reason = ('The status of server backup status is %s.'
% backup_status)
update_method(backup_status, reason)
raise exception.VerifyResourceFailed(
name="Server backup",
reason=reason,
resource_id=original_server_id,
resource_type=resource.type)
class RestoreOperation(protection_plugin.Operation):
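    """Recreate a backed-up Nova server from its stored metadata, then
    re-attach its volumes and re-associate its floating IPs.
    """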
def __init__(self, poll_interval):
super(RestoreOperation, self).__init__()
self._interval = poll_interval
def on_complete(self, checkpoint, resource, context, parameters, **kwargs):
original_server_id = resource.id
LOG.info("Restoring server backup, server_id: %s.", original_server_id)
update_method = None
try:
resource_definition = checkpoint.get_resource_bank_section(
original_server_id).get_object("metadata")
nova_client = ClientFactory.create_client("nova", context)
new_resources = kwargs.get("new_resources")
# restore server instance
restore_net_id = parameters.get("restore_net_id", None)
restore_flavor_id = parameters.get("restore_flavor_id", None)
if restore_flavor_id:
resource_definition["server_metadata"]['flavor'] = (
restore_flavor_id)
new_server_id = self._restore_server_instance(
nova_client, new_resources, original_server_id,
parameters.get("restore_name", "karbor-restore-server"),
restore_net_id, resource_definition)
update_method = partial(utils.update_resource_restore_result,
kwargs.get('restore'), resource.type,
new_server_id)
update_method(constants.RESOURCE_STATUS_RESTORING)
self._wait_server_to_active(nova_client, new_server_id)
# restore volume attachment
self._restore_volume_attachment(
nova_client, ClientFactory.create_client("cinder", context),
new_resources, new_server_id, resource_definition)
# restore floating ip association
self._restore_floating_association(
nova_client, new_server_id, resource_definition)
new_resources[original_server_id] = new_server_id
update_method(constants.RESOURCE_STATUS_AVAILABLE)
LOG.info("Finish restore server, server_id: %s.",
original_server_id)
except Exception as e:
if update_method:
update_method(constants.RESOURCE_STATUS_ERROR, str(e))
LOG.exception("Restore server backup failed, server_id: %s.",
original_server_id)
raise exception.RestoreResourceFailed(
name="Server Backup",
reason=e,
resource_id=original_server_id,
resource_type=constants.SERVER_RESOURCE_TYPE
)
def _restore_server_instance(self, nova_client, new_resources,
original_id, restore_name, restore_net_id,
resource_definition):
server_metadata = resource_definition["server_metadata"]
properties = {
"availability_zone": server_metadata.get("availability_zone"),
"flavor": server_metadata.get("flavor"),
"name": restore_name,
"image": None
}
# server boot device
boot_metadata = resource_definition["boot_metadata"]
boot_device_type = boot_metadata.get("boot_device_type")
if boot_device_type == "image":
properties["image"] = new_resources.get(
boot_metadata["boot_image_id"])
elif boot_device_type == "volume":
properties["block_device_mapping_v2"] = [{
'uuid': new_resources.get(
boot_metadata["boot_volume_id"]),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}]
else:
reason = "Can not find the boot device of the server."
LOG.error("Restore server backup failed, (server_id:"
"%(server_id)s): %(reason)s.",
{'server_id': original_id,
'reason': reason})
raise Exception(reason)
# server key_name, security_groups, networks
properties["key_name"] = server_metadata.get("key_name", None)
if server_metadata.get("security_groups"):
properties["security_groups"] = [
security_group["name"]
for security_group in server_metadata["security_groups"]
]
if restore_net_id is not None:
properties["nics"] = [{'net-id': restore_net_id}]
elif server_metadata.get("networks"):
properties["nics"] = [
{'net-id': network}
for network in server_metadata["networks"]
]
properties["userdata"] = None
try:
server = nova_client.servers.create(**properties)
except Exception as ex:
LOG.error('Error creating server (server_id:%(server_id)s): '
'%(reason)s',
{'server_id': original_id,
'reason': ex})
raise
return server.id
def _restore_volume_attachment(self, nova_client, cinder_client,
new_resources, new_server_id,
resource_definition):
attach_metadata = resource_definition.get("attach_metadata", {})
for original_id, attach_metadata_item in attach_metadata.items():
if attach_metadata_item.get("bootable", None) == "true":
continue
volume_id = new_resources.get(original_id)
try:
nova_client.volumes.create_server_volume(
server_id=new_server_id,
volume_id=volume_id,
device=attach_metadata_item.get("device", None))
except Exception as ex:
LOG.error("Failed to attach volume %(vol)s to server %(srv)s, "
"reason: %(err)s",
{'vol': volume_id,
'srv': new_server_id,
'err': ex})
raise
self._wait_volume_to_attached(cinder_client, volume_id)
def _restore_floating_association(self, nova_client, new_server_id,
resource_definition):
server_metadata = resource_definition["server_metadata"]
for floating_ip in server_metadata.get("floating_ips", []):
nova_client.servers.add_floating_ip(
nova_client.servers.get(new_server_id), floating_ip)
def _wait_volume_to_attached(self, cinder_client, volume_id):
def _get_volume_status():
try:
return cinder_client.volumes.get(volume_id).status
except Exception as ex:
LOG.error('Fetch volume(%(volume_id)s) status failed, '
'reason: %(reason)s',
{'volume_id': volume_id,
'reason': ex})
return 'ERROR'
is_success = utils.status_poll(
_get_volume_status,
interval=self._interval,
success_statuses={'in-use', },
failure_statuses={'ERROR', },
ignore_statuses={'available', 'attaching'}
)
if not is_success:
raise Exception('Attach the volume to server failed')
def _wait_server_to_active(self, nova_client, server_id):
def _get_server_status():
try:
server = self._fetch_server(nova_client, server_id)
return server.status.split('(')[0] if server else 'BUILD'
except Exception as ex:
LOG.error('Fetch server(%(server_id)s) failed, '
'reason: %(reason)s',
{'server_id': server_id,
'reason': ex})
return 'ERROR'
is_success = utils.status_poll(
_get_server_status,
interval=self._interval,
success_statuses={'ACTIVE', },
failure_statuses={'ERROR', },
ignore_statuses={'BUILD', 'HARD_REBOOT', 'PASSWORD', 'REBOOT',
'RESCUE', 'RESIZE', 'REVERT_RESIZE', 'SHUTOFF',
'SUSPENDED', 'VERIFY_RESIZE'},
)
if not is_success:
raise Exception('The server does not start successfully')
def _fetch_server(self, nova_client, server_id):
server = None
try:
server = nova_client.servers.get(server_id)
except exceptions.OverLimit as exc:
LOG.warning("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s",
{'id': server_id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warning("Received the following exception when "
"fetching server (%(id)s) : %(exception)s",
{'id': server_id,
'exception': exc})
else:
raise
return server
class NovaProtectionPlugin(protection_plugin.ProtectionPlugin):
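    """Protection plugin for Nova server resources (constants.SERVER_RESOURCE_TYPE)."""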
_SUPPORT_RESOURCE_TYPES = [constants.SERVER_RESOURCE_TYPE]
def __init__(self, config=None):
super(NovaProtectionPlugin, self).__init__(config)
self._config.register_opts(nova_backup_opts,
'nova_backup_protection_plugin')
self._poll_interval = (
self._config.nova_backup_protection_plugin.poll_interval)
@classmethod
def get_supported_resources_types(cls):
return cls._SUPPORT_RESOURCE_TYPES
@classmethod
def get_options_schema(cls, resource_type):
return server_plugin_schemas.OPTIONS_SCHEMA
@classmethod
def get_restore_schema(cls, resource_type):
return server_plugin_schemas.RESTORE_SCHEMA
@classmethod
def get_verify_schema(cls, resources_type):
return server_plugin_schemas.VERIFY_SCHEMA
@classmethod
def get_saved_info_schema(cls, resource_type):
return server_plugin_schemas.SAVED_INFO_SCHEMA
@classmethod
def get_saved_info(cls, metadata_store, resource):
pass
def get_protect_operation(self, resource):
return ProtectOperation()
def get_restore_operation(self, resource):
return RestoreOperation(self._poll_interval)
def get_verify_operation(self, resource):
return VerifyOperation()
def get_delete_operation(self, resource):
return DeleteOperation()
| openstack/smaug | karbor/services/protection/protection_plugins/server/nova_protection_plugin.py | Python | apache-2.0 | 20,349 | 0 |
#!/usr/bin/env python3
import sys
import requests
import json
import unittest
import datetime
from util import TestCase
import config
import common
class TestApiProblemGuest(TestCase):
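    """Requests against group 3's problems should be rejected with 403 Permission Denied."""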
url = '%s/api/groups/3/problems/'%(config.base_url)
token = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})['token']
def test_gets(self):
data = {
"token": self.token,
}
res = requests.get(self.url, data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
def test_get_visible(self):
data = {
"token": self.token,
}
res = requests.get("%s%s/"%(self.url,10006), data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
def test_get_invisible(self):
data = {
"token": self.token,
}
res = requests.get("%s%s/"%(self.url,10005), data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
| Tocknicsu/nctuoj | backend/test/api/problem/guest.py | Python | mit | 1,488 | 0.005376 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import hashlib
from django.utils import six
URL_LIST_CACHE = 'powerpages:url_list'
SITEMAP_CONTENT = 'powerpages:sitemap'
def get_cache_name(prefix, name):
"""
Cache name constructor. Uses the same methods as django cache system
Examples:
    *) prefix=profile.cache, name=<request.user.id>
    *) prefix=template.cache.sidebar, name=<request.user.id>
"""
return '{0}.{1}'.format(
prefix, hashlib.md5(six.text_type(name).encode('utf-8')).hexdigest()
)
def template_source(page_pk):
"""Create cache key for page template"""
return 'powerpages:template:{0}'.format(page_pk)
def rendered_source_for_user(page_pk, user_id):
"""Create cache key for rendered page source based on current user"""
return 'powerpages:rendered_source_user:{0}:{1}'.format(page_pk, user_id)
def rendered_source_for_lang(page_pk, lang):
"""Create cache key for rendered page source based on current language"""
return 'powerpages:rendered_source_lang:{0}:{1}'.format(page_pk, lang)
def url_cache(name, *args, **kwargs):
"""
Creates cache key for url of CMS page or standard Django URL
based on hashed serialized name with optional *args and **kwargs
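
    Example (illustrative arguments):
        url_cache('powerpages-page', path='/about/')
    returns 'powerpages:urls.<md5 hexdigest of the serialized call>'.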
"""
serialized_url = json.dumps([name, args, kwargs], sort_keys=True)
return get_cache_name('powerpages:urls', serialized_url)
| Open-E-WEB/django-powerpages | powerpages/cachekeys.py | Python | mit | 1,418 | 0 |
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Tests for cheminformatics toolkit wrappers
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import logging
import os
from tempfile import NamedTemporaryFile
from typing import Dict
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from simtk import unit
from openff.toolkit.tests.test_forcefield import (
create_acetaldehyde,
create_acetate,
create_cyclohexane,
create_ethanol,
create_reversed_ethanol,
)
from openff.toolkit.tests.utils import (
requires_ambertools,
requires_openeye,
requires_rdkit,
)
from openff.toolkit.topology.molecule import Molecule
from openff.toolkit.utils import get_data_file_path
from openff.toolkit.utils.toolkits import (
GLOBAL_TOOLKIT_REGISTRY,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
ChargeMethodUnavailableError,
GAFFAtomTypeWarning,
IncorrectNumConformersError,
IncorrectNumConformersWarning,
InvalidIUPACNameError,
InvalidToolkitError,
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
ToolkitRegistry,
ToolkitUnavailableException,
ToolkitWrapper,
UndefinedStereochemistryError,
)
# =============================================================================================
# FIXTURES
# =============================================================================================
def get_mini_drug_bank(toolkit_class, xfail_mols=None):
"""Read the mini drug bank sdf file with the toolkit and return the molecules"""
# This is a work around a weird error where even though the test is skipped due to a missing toolkit
# we still try and read the file with the toolkit
if toolkit_class.is_available():
toolkit = toolkit_class()
molecules = Molecule.from_file(
get_data_file_path("molecules/MiniDrugBank.sdf"),
"sdf",
toolkit_registry=toolkit,
allow_undefined_stereo=True,
)
else:
molecules = []
if xfail_mols is None:
return molecules
for i, mol in enumerate(molecules):
if mol.name in xfail_mols:
marker = pytest.mark.xfail(reason=xfail_mols[mol.name])
molecules[i] = pytest.param(mol, marks=marker)
return molecules
openeye_inchi_stereochemistry_lost = [
"DrugBank_2799",
"DrugBank_5414",
"DrugBank_5415",
"DrugBank_5418",
"DrugBank_2955",
"DrugBank_2987",
"DrugBank_5555",
"DrugBank_472",
"DrugBank_5737",
"DrugBank_3332",
"DrugBank_3461",
"DrugBank_794",
"DrugBank_3502",
"DrugBank_6026",
"DrugBank_3622",
"DrugBank_977",
"DrugBank_3693",
"DrugBank_3726",
"DrugBank_3739",
"DrugBank_6222",
"DrugBank_6232",
"DrugBank_3844",
"DrugBank_6295",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_3930",
"DrugBank_6329",
"DrugBank_6353",
"DrugBank_6355",
"DrugBank_6401",
"DrugBank_4161",
"DrugBank_4162",
"DrugBank_6509",
"DrugBank_6531",
"DrugBank_1570",
"DrugBank_4249",
"DrugBank_1634",
"DrugBank_1659",
"DrugBank_6647",
"DrugBank_1700",
"DrugBank_1721",
"DrugBank_1742",
"DrugBank_1802",
"DrugBank_6775",
"DrugBank_1849",
"DrugBank_1864",
"DrugBank_6875",
"DrugBank_1897",
"DrugBank_4593",
"DrugBank_1962",
"DrugBank_4662",
"DrugBank_7049",
"DrugBank_4702",
"DrugBank_2095",
"DrugBank_4778",
"DrugBank_2141",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_4865",
"DrugBank_2208",
"DrugBank_2210",
"DrugBank_2276",
"DrugBank_4959",
"DrugBank_4964",
"DrugBank_5043",
"DrugBank_2429",
"DrugBank_5076",
"DrugBank_2465",
"DrugBank_2519",
"DrugBank_2538",
"DrugBank_5158",
"DrugBank_5176",
"DrugBank_2592",
]
openeye_inchi_isomorphic_fails = ["DrugBank_1661", "DrugBank_4346", "DrugBank_2467"]
rdkit_inchi_stereochemistry_lost = [
"DrugBank_5414",
"DrugBank_2955",
"DrugBank_5737",
"DrugBank_3332",
"DrugBank_3461",
"DrugBank_6026",
"DrugBank_3622",
"DrugBank_3726",
"DrugBank_6222",
"DrugBank_3844",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_6329",
"DrugBank_6509",
"DrugBank_6647",
"DrugBank_1897",
"DrugBank_4778",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_2538",
"DrugBank_2592",
"DrugBank_4249",
"DrugBank_5076",
"DrugBank_5418",
"DrugBank_3930",
"DrugBank_1634",
"DrugBank_1962",
"DrugBank_5043",
"DrugBank_2519",
"DrugBank_7124",
"DrugBank_6865",
]
rdkit_inchi_roundtrip_mangled = ["DrugBank_2684"]
openeye_iupac_bad_stereo = [
"DrugBank_977",
"DrugBank_1634",
"DrugBank_1700",
"DrugBank_1962",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_2186",
"DrugBank_2208",
"DrugBank_2519",
"DrugBank_2538",
"DrugBank_2592",
"DrugBank_2651",
"DrugBank_2987",
"DrugBank_3332",
"DrugBank_3502",
"DrugBank_3622",
"DrugBank_3726",
"DrugBank_3844",
"DrugBank_3930",
"DrugBank_4161",
"DrugBank_4162",
"DrugBank_4778",
"DrugBank_4593",
"DrugBank_4959",
"DrugBank_5043",
"DrugBank_5076",
"DrugBank_5176",
"DrugBank_5418",
"DrugBank_5737",
"DrugBank_5902",
"DrugBank_6295",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_6329",
"DrugBank_6355",
"DrugBank_6401",
"DrugBank_6509",
"DrugBank_6531",
"DrugBank_6647",
"DrugBank_390",
"DrugBank_810",
"DrugBank_4316",
"DrugBank_4346",
"DrugBank_7124",
"DrugBank_2799",
"DrugBank_4662",
"DrugBank_4865",
"DrugBank_2465",
]
@pytest.fixture()
def formic_acid_molecule() -> Molecule:
formic_acid = Molecule()
formic_acid.add_atom(8, 0, False) # O1
formic_acid.add_atom(6, 0, False) # C1
formic_acid.add_atom(8, 0, False) # O2
formic_acid.add_atom(1, 0, False) # H1
formic_acid.add_atom(1, 0, False) # H2
formic_acid.add_bond(0, 1, 2, False) # O1 - C1
formic_acid.add_bond(1, 2, 1, False) # C1 - O2
formic_acid.add_bond(1, 3, 1, False) # C1 - H1
formic_acid.add_bond(2, 4, 1, False) # O2 - H2
return formic_acid
@pytest.fixture()
def formic_acid_conformers() -> Dict[str, unit.Quantity]:
return {
"cis": np.array(
[
[-0.95927322, -0.91789997, 0.36333418],
[-0.34727824, 0.12828046, 0.22784603],
[0.82766682, 0.26871252, -0.42284882],
[-0.67153811, 1.10376000, 0.61921501],
[1.15035689, -0.58282924, -0.78766006],
]
)
* unit.angstrom,
"trans": np.array(
[
[-0.95927322, -0.91789997, 0.36333418],
[-0.34727824, 0.12828046, 0.22784603],
[0.82766682, 0.26871252, -0.42284882],
[-0.67153811, 1.10376000, 0.61921501],
[1.14532626, 1.19679034, -0.41266876],
]
)
* unit.angstrom,
}
# =============================================================================================
# TESTS
# =============================================================================================
@requires_openeye
class TestOpenEyeToolkitWrapper:
"""Test the OpenEyeToolkitWrapper"""
# TODO: Make separate smiles_add_H and smiles_explicit_H tests
def test_smiles(self):
"""Test OpenEyeToolkitWrapper to_smiles() and from_smiles()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# This differs from RDKit's SMILES due to different canonicalization schemes
smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
# When creating an OFFMol from SMILES, partial charges should be initialized to None
assert molecule.partial_charges is None
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
assert smiles == smiles2
def test_smiles_missing_stereochemistry(self):
"""Test OpenEyeToolkitWrapper to_smiles() and from_smiles()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
unspec_chiral_smiles = r"C\C(F)=C(/F)CC(C)(Cl)Br"
spec_chiral_smiles = r"C\C(F)=C(/F)C[C@@](C)(Cl)Br"
unspec_db_smiles = r"CC(F)=C(F)C[C@@](C)(Cl)Br"
spec_db_smiles = r"C\C(F)=C(/F)C[C@@](C)(Cl)Br"
for title, smiles, raises_exception in [
("unspec_chiral_smiles", unspec_chiral_smiles, True),
("spec_chiral_smiles", spec_chiral_smiles, False),
("unspec_db_smiles", unspec_db_smiles, True),
("spec_db_smiles", spec_db_smiles, False),
]:
if raises_exception:
with pytest.raises(UndefinedStereochemistryError) as context:
Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
Molecule.from_smiles(
smiles,
toolkit_registry=toolkit_wrapper,
allow_undefined_stereo=True,
)
else:
Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
# TODO: test_smiles_round_trip
def test_smiles_add_H(self):
"""Test OpenEyeToolkitWrapper for adding explicit hydrogens"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# This differs from RDKit's SMILES due to different canonicalization schemes
input_smiles = "CC"
expected_output_smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
assert expected_output_smiles == smiles2
def test_smiles_charged(self):
"""Test OpenEyeToolkitWrapper functions for reading/writing charged SMILES"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# This differs from RDKit's expected output due to different canonicalization schemes
smiles = "[H]C([H])([H])[N+]([H])([H])[H]"
molecule = Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
assert smiles == smiles2
def test_to_from_openeye_core_props_filled(self):
"""Test OpenEyeToolkitWrapper to_openeye() and from_openeye()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# Replacing with a simple molecule with stereochemistry
input_smiles = r"C\C(F)=C(/F)C[C@@](C)(Cl)Br"
expected_output_smiles = (
r"[H]C([H])([H])/C(=C(/C([H])([H])[C@@](C([H])([H])[H])(Cl)Br)\F)/F"
)
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
assert (
molecule.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
# Populate core molecule property fields
molecule.name = "Alice"
partial_charges = unit.Quantity(
np.array(
[
-0.9,
-0.8,
-0.7,
-0.6,
-0.5,
-0.4,
-0.3,
-0.2,
-0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
]
),
unit.elementary_charge,
)
molecule.partial_charges = partial_charges
coords = unit.Quantity(
np.array(
[
["0.0", "1.0", "2.0"],
["3.0", "4.0", "5.0"],
["6.0", "7.0", "8.0"],
["9.0", "10.0", "11.0"],
["12.0", "13.0", "14.0"],
["15.0", "16.0", "17.0"],
["18.0", "19.0", "20.0"],
["21.0", "22.0", "23.0"],
["24.0", "25.0", "26.0"],
["27.0", "28.0", "29.0"],
["30.0", "31.0", "32.0"],
["33.0", "34.0", "35.0"],
["36.0", "37.0", "38.0"],
["39.0", "40.0", "41.0"],
["42.0", "43.0", "44.0"],
["45.0", "46.0", "47.0"],
["48.0", "49.0", "50.0"],
["51.0", "52.0", "53.0"],
]
),
unit.angstrom,
)
molecule.add_conformer(coords)
# Populate core atom property fields
molecule.atoms[2].name = "Bob"
# Ensure one atom has its stereochemistry specified
central_carbon_stereo_specified = False
for atom in molecule.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "S":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
# Populate bond core property fields
fractional_bond_orders = [float(val) for val in range(1, 19)]
for fbo, bond in zip(fractional_bond_orders, molecule.bonds):
bond.fractional_bond_order = fbo
# Do a first conversion to/from oemol
oemol = molecule.to_openeye()
molecule2 = Molecule.from_openeye(oemol)
# Test that properties survived first conversion
# assert molecule.to_dict() == molecule2.to_dict()
assert molecule.name == molecule2.name
# NOTE: This expects the same indexing scheme in the original and new molecule
central_carbon_stereo_specified = False
for atom in molecule2.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "S":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
for atom1, atom2 in zip(molecule.atoms, molecule2.atoms):
assert atom1.to_dict() == atom2.to_dict()
for bond1, bond2 in zip(molecule.bonds, molecule2.bonds):
assert bond1.to_dict() == bond2.to_dict()
assert (molecule.conformers[0] == molecule2.conformers[0]).all()
for pc1, pc2 in zip(molecule._partial_charges, molecule2._partial_charges):
pc1_ul = pc1 / unit.elementary_charge
pc2_ul = pc2 / unit.elementary_charge
assert_almost_equal(pc1_ul, pc2_ul, decimal=6)
assert (
molecule2.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
def test_to_from_openeye_core_props_unset(self):
"""Test OpenEyeToolkitWrapper to_openeye() and from_openeye() when given empty core property fields"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# Using a simple molecule with tetrahedral and bond stereochemistry
input_smiles = r"C\C(F)=C(/F)C[C@](C)(Cl)Br"
expected_output_smiles = (
r"[H]C([H])([H])/C(=C(/C([H])([H])[C@](C([H])([H])[H])(Cl)Br)\F)/F"
)
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
assert (
molecule.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
# Ensure one atom has its stereochemistry specified
central_carbon_stereo_specified = False
for atom in molecule.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "R":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
# Do a first conversion to/from oemol
oemol = molecule.to_openeye()
molecule2 = Molecule.from_openeye(oemol)
# Test that properties survived first conversion
assert molecule.name == molecule2.name
# NOTE: This expects the same indexing scheme in the original and new molecule
central_carbon_stereo_specified = False
for atom in molecule2.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "R":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
for atom1, atom2 in zip(molecule.atoms, molecule2.atoms):
assert atom1.to_dict() == atom2.to_dict()
for bond1, bond2 in zip(molecule.bonds, molecule2.bonds):
assert bond1.to_dict() == bond2.to_dict()
# The molecule was initialized from SMILES, so mol.conformers arrays should be None for both
assert molecule.conformers is None
assert molecule2.conformers is None
# The molecule was initialized from SMILES, so mol.partial_charges arrays should be None for both
assert molecule.partial_charges is None
assert molecule2.partial_charges is None
assert (
molecule2.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
def test_to_from_openeye_none_partial_charges(self):
"""Test to ensure that to_openeye and from_openeye correctly handle None partial charges"""
import math
# Create ethanol, which has partial charges defined with float values
ethanol = create_ethanol()
assert ethanol.partial_charges is not None
# Convert to OEMol, which should populate the partial charges on
# the OEAtoms with the same partial charges
oemol = ethanol.to_openeye()
for oeatom in oemol.GetAtoms():
assert not math.isnan(oeatom.GetPartialCharge())
# Change the first OEAtom's partial charge to nan, and ensure that it comes
# back to OFFMol with only the first atom as nan
for oeatom in oemol.GetAtoms():
oeatom.SetPartialCharge(float("nan"))
break
eth_from_oe = Molecule.from_openeye(oemol)
assert math.isnan(eth_from_oe.partial_charges[0] / unit.elementary_charge)
for pc in eth_from_oe.partial_charges[1:]:
assert not math.isnan(pc / unit.elementary_charge)
# Then, set all the OEMol's partial charges to nan, and ensure that
# from_openeye produces an OFFMol with partial_charges = None
for oeatom in oemol.GetAtoms():
oeatom.SetPartialCharge(float("nan"))
eth_from_oe = Molecule.from_openeye(oemol)
assert eth_from_oe.partial_charges is None
# Send the OFFMol with partial_charges = None back to OEMol, and
# ensure that all its charges are nan
oemol2 = eth_from_oe.to_openeye()
for oeatom in oemol2.GetAtoms():
assert math.isnan(oeatom.GetPartialCharge())
def test_from_openeye_mutable_input(self):
"""
Test ``OpenEyeToolkitWrapper.from_openeye`` does not mutate the input molecule.
"""
from openeye import oechem
oe_molecule = oechem.OEMol()
oechem.OESmilesToMol(oe_molecule, "C")
assert oechem.OEHasImplicitHydrogens(oe_molecule)
Molecule.from_openeye(oe_molecule)
assert oechem.OEHasImplicitHydrogens(oe_molecule)
def test_from_openeye_implicit_hydrogen(self):
"""
Test OpenEyeToolkitWrapper for loading a molecule with implicit
hydrogens (correct behavior is to add them explicitly)
"""
from openeye import oechem
smiles_impl = "C#C"
oemol_impl = oechem.OEMol()
oechem.OESmilesToMol(oemol_impl, smiles_impl)
molecule_from_impl = Molecule.from_openeye(oemol_impl)
assert molecule_from_impl.n_atoms == 4
smiles_expl = "HC#CH"
oemol_expl = oechem.OEMol()
oechem.OESmilesToMol(oemol_expl, smiles_expl)
molecule_from_expl = Molecule.from_openeye(oemol_expl)
assert molecule_from_expl.to_smiles() == molecule_from_impl.to_smiles()
def test_openeye_from_smiles_hydrogens_are_explicit(self):
"""
Test to ensure that OpenEyeToolkitWrapper.from_smiles has the proper behavior with
respect to its hydrogens_are_explicit kwarg
"""
toolkit_wrapper = OpenEyeToolkitWrapper()
smiles_impl = "C#C"
with pytest.raises(
ValueError,
match="but OpenEye Toolkit interpreted SMILES 'C#C' as having implicit hydrogen",
) as excinfo:
offmol = Molecule.from_smiles(
smiles_impl,
toolkit_registry=toolkit_wrapper,
hydrogens_are_explicit=True,
)
offmol = Molecule.from_smiles(
smiles_impl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=False
)
assert offmol.n_atoms == 4
smiles_expl = "HC#CH"
offmol = Molecule.from_smiles(
smiles_expl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=True
)
assert offmol.n_atoms == 4
# It's debatable whether this next function should pass. Strictly speaking, the hydrogens in this SMILES
# _are_ explicit, so allowing "hydrogens_are_explicit=False" through here is allowing a contradiction.
# We might rethink the name of this kwarg.
offmol = Molecule.from_smiles(
smiles_expl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=False
)
assert offmol.n_atoms == 4
@pytest.mark.parametrize(
"smiles, expected_map", [("[Cl:1][H]", {0: 1}), ("[Cl:1][H:2]", {0: 1, 1: 2})]
)
def test_from_openeye_atom_map(self, smiles, expected_map):
"""
        Test that Molecule.from_openeye preserves the atom map encoded in a
        mapped SMILES, including partially mapped molecules
"""
from openeye import oechem
oemol = oechem.OEMol()
oechem.OESmilesToMol(oemol, smiles)
off_molecule = Molecule.from_openeye(oemol)
assert off_molecule.properties["atom_map"] == expected_map
@pytest.mark.parametrize("molecule", get_mini_drug_bank(OpenEyeToolkitWrapper))
def test_to_inchi(self, molecule):
"""Test conversion to standard and non-standard InChI"""
toolkit = OpenEyeToolkitWrapper()
inchi = molecule.to_inchi(toolkit_registry=toolkit)
        non_standard = molecule.to_inchi(fixed_hydrogens=True, toolkit_registry=toolkit)
@pytest.mark.parametrize("molecule", get_mini_drug_bank(OpenEyeToolkitWrapper))
def test_to_inchikey(self, molecule):
"""Test the conversion to standard and non-standard InChIKey"""
toolkit = OpenEyeToolkitWrapper()
inchikey = molecule.to_inchikey(toolkit_registry=toolkit)
        non_standard_key = molecule.to_inchikey(fixed_hydrogens=True, toolkit_registry=toolkit)
def test_from_bad_inchi(self):
"""Test building a molecule from a bad InChI string"""
toolkit = OpenEyeToolkitWrapper()
inchi = "InChI=1S/ksbfksfksfksbfks"
with pytest.raises(RuntimeError):
mol = Molecule.from_inchi(inchi, toolkit_registry=toolkit)
@pytest.mark.parametrize("molecule", get_mini_drug_bank(OpenEyeToolkitWrapper))
def test_non_standard_inchi_round_trip(self, molecule):
"""Test if a molecule can survive an InChi round trip test in some cases the standard InChI
will not enough to ensure information is preserved so we test the non-standard inchi here."""
from openff.toolkit.utils.toolkits import UndefinedStereochemistryError
toolkit = OpenEyeToolkitWrapper()
inchi = molecule.to_inchi(fixed_hydrogens=True, toolkit_registry=toolkit)
# make a copy of the molecule from the inchi string
if molecule.name in openeye_inchi_stereochemistry_lost:
            # some molecules lose stereochemistry, so they are skipped
            # if this fails, the molecule may have been fixed upstream
with pytest.raises(UndefinedStereochemistryError):
mol2 = molecule.from_inchi(inchi, toolkit_registry=toolkit)
else:
mol2 = molecule.from_inchi(inchi, toolkit_registry=toolkit)
# compare the full molecule excluding the properties dictionary
        # turn off bond order matching, since bond orders can shift within aromatic rings
if molecule.name in openeye_inchi_isomorphic_fails:
# Some molecules graphs change during the round trip testing
# we test quite strict isomorphism here
with pytest.raises(AssertionError):
assert molecule.is_isomorphic_with(
mol2, bond_order_matching=False, toolkit_registry=toolkit
)
else:
assert molecule.is_isomorphic_with(
mol2, bond_order_matching=False, toolkit_registry=toolkit
)
@pytest.mark.parametrize(
"molecule",
get_mini_drug_bank(
OpenEyeToolkitWrapper,
xfail_mols={
"DrugBank_2397": 'OpenEye cannot generate a correct IUPAC name and raises a "Warning: Incorrect name:" or simply return "BLAH".',
"DrugBank_2543": 'OpenEye cannot generate a correct IUPAC name and raises a "Warning: Incorrect name:" or simply return "BLAH".',
"DrugBank_2642": 'OpenEye cannot generate a correct IUPAC name and raises a "Warning: Incorrect name:" or simply return "BLAH".',
"DrugBank_1212": "the roundtrip generates molecules with very different IUPAC/SMILES!",
"DrugBank_2210": "the roundtrip generates molecules with very different IUPAC/SMILES!",
"DrugBank_4584": "the roundtrip generates molecules with very different IUPAC/SMILES!",
"DrugBank_390": 'raises warning "Unable to make OFFMol from OEMol: OEMol has unspecified stereochemistry."',
"DrugBank_810": 'raises warning "Unable to make OFFMol from OEMol: OEMol has unspecified stereochemistry."',
"DrugBank_4316": 'raises warning "Unable to make OFFMol from OEMol: OEMol has unspecified stereochemistry."',
"DrugBank_7124": 'raises warning "Unable to make OFFMol from OEMol: OEMol has unspecified stereochemistry."',
"DrugBank_3739": 'raises warning "Failed to parse name:"',
"DrugBank_4346": 'raises warning "Failed to parse name:"',
"DrugBank_5415": 'raises warning "Failed to parse name:"',
"DrugBank_1661": "fails roundtrip test",
"DrugBank_6353": "fails roundtrip test",
"DrugBank_2799": "from_iupac fails to read what to_iupac returns",
"DrugBank_4865": "from_iupac fails to read what to_iupac returns",
"DrugBank_2465": "from_iupac fails to read what to_iupac returns",
},
),
)
def test_iupac_round_trip(self, molecule):
"""Test round-trips with IUPAC names"""
undefined_stereo = molecule.name in openeye_iupac_bad_stereo
iupac = molecule.to_iupac()
if undefined_stereo:
with pytest.raises(UndefinedStereochemistryError):
Molecule.from_iupac(iupac)
molecule_copy = Molecule.from_iupac(
iupac, allow_undefined_stereo=undefined_stereo
)
if not undefined_stereo:
assert molecule.is_isomorphic_with(
molecule_copy, atom_stereochemistry_matching=not undefined_stereo
)
def test_from_iupac_failure(self):
"""Test that invalid IUPAC names are handled properly"""
toolkit = OpenEyeToolkitWrapper()
with pytest.raises(InvalidIUPACNameError):
toolkit.from_iupac(".BETA.-PINENE")
def test_write_multiconformer_pdb(self):
"""
Make sure OpenEye can write multi conformer PDB files.
"""
from io import StringIO
toolkit = OpenEyeToolkitWrapper()
# load up a multiconformer sdf file and condense down the conformers
molecules = Molecule.from_file(
get_data_file_path("molecules/butane_multi.sdf"), toolkit_registry=toolkit
)
butane = molecules.pop(0)
for mol in molecules:
butane.add_conformer(mol.conformers[0])
assert butane.n_conformers == 7
sio = StringIO()
butane.to_file(sio, "pdb", toolkit_registry=toolkit)
        # make sure each conformer is written to the file
pdb = sio.getvalue()
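        # Each conformer is expected to produce exactly one line containing "END" in
        # OpenEye's PDB output, so counting occurrences gives the number of conformers written.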
assert pdb.count("END") == 7
def test_write_pdb_preserving_atom_order(self):
"""
Make sure OpenEye does not rearrange hydrogens when writing PDBs
(reference: https://github.com/openforcefield/openff-toolkit/issues/475).
"""
from io import StringIO
toolkit = OpenEyeToolkitWrapper()
water = Molecule()
water.add_atom(1, 0, False)
water.add_atom(8, 0, False)
water.add_atom(1, 0, False)
water.add_bond(0, 1, 1, False)
water.add_bond(1, 2, 1, False)
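        # Atoms are added in H, O, H order on purpose; the PDB written below should
        # preserve exactly this ordering rather than grouping the hydrogens.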
water.add_conformer(
np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
* unit.angstrom
)
sio = StringIO()
water.to_file(sio, "pdb", toolkit_registry=toolkit)
water_from_pdb = sio.getvalue()
water_from_pdb_split = water_from_pdb.split("\n")
assert water_from_pdb_split[0].split()[2].rstrip() == "H"
assert water_from_pdb_split[1].split()[2].rstrip() == "O"
assert water_from_pdb_split[2].split()[2].rstrip() == "H"
def test_get_sdf_coordinates(self):
"""Test OpenEyeToolkitWrapper for importing a single set of coordinates from a sdf file"""
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/toluene.sdf")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule.conformers) == 1
assert molecule.conformers[0].shape == (15, 3)
def test_load_multiconformer_sdf_as_separate_molecules(self):
"""
Test OpenEyeToolkitWrapper for reading a "multiconformer" SDF, which the OFF
Toolkit should treat as separate molecules
"""
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/methane_multiconformer.sdf")
molecules = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecules) == 2
assert len(molecules[0].conformers) == 1
assert len(molecules[1].conformers) == 1
assert molecules[0].conformers[0].shape == (5, 3)
def test_load_multiconformer_sdf_as_separate_molecules_properties(self):
"""
Test OpenEyeToolkitWrapper for reading a "multiconformer" SDF, which the OFF
Toolkit should treat as separate molecules, and it should load their SD properties
and partial charges separately
"""
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/methane_multiconformer_properties.sdf")
molecules = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecules) == 2
assert len(molecules[0].conformers) == 1
assert len(molecules[1].conformers) == 1
assert molecules[0].conformers[0].shape == (5, 3)
# The first molecule in the SDF has the following properties and charges:
assert molecules[0].properties["test_property_key"] == "test_property_value"
np.testing.assert_allclose(
molecules[0].partial_charges / unit.elementary_charge,
[-0.108680, 0.027170, 0.027170, 0.027170, 0.027170],
)
# The second molecule in the SDF has the following properties and charges:
assert molecules[1].properties["test_property_key"] == "test_property_value2"
assert (
molecules[1].properties["another_test_property_key"]
== "another_test_property_value"
)
np.testing.assert_allclose(
molecules[1].partial_charges / unit.elementary_charge,
[0.027170, 0.027170, 0.027170, 0.027170, -0.108680],
)
def test_file_extension_case(self):
"""
Test round-trips of some file extensions when called directly from the toolkit wrappers,
        including lower- and uppercase file extensions. Note that this test does not check
        accuracy; it only checks that reading and writing complete without raising an exception.
"""
mols_in = OpenEyeToolkitWrapper().from_file(
file_path=get_data_file_path("molecules/ethanol.sdf"), file_format="sdf"
)
assert len(mols_in) > 0
mols_in = OpenEyeToolkitWrapper().from_file(
file_path=get_data_file_path("molecules/ethanol.sdf"), file_format="SDF"
)
assert len(mols_in) > 0
def test_write_sdf_charges(self):
"""Test OpenEyeToolkitWrapper for writing partial charges to a sdf file"""
from io import StringIO
toolkit_wrapper = OpenEyeToolkitWrapper()
ethanol = create_ethanol()
sio = StringIO()
ethanol.to_file(sio, "SDF", toolkit_registry=toolkit_wrapper)
sdf_text = sio.getvalue()
# The output lines of interest here will look like
# > <atom.dprop.PartialCharge>
# -0.400000 -0.300000 -0.200000 -0.100000 0.000010 0.100000 0.200000 0.300000 0.400000
# Parse the SDF text, grabbing the numeric line above
sdf_split = sdf_text.split("\n")
charge_line_found = False
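        # The flag marks that the previous line was the property header, so the very next
        # line read holds the whitespace-separated charges.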
for line in sdf_split:
if charge_line_found:
charges = [float(i) for i in line.split()]
break
if "> <atom.dprop.PartialCharge>" in line:
charge_line_found = True
# Make sure that a charge line was ever found
assert charge_line_found
# Make sure that the charges found were correct
assert_almost_equal(
charges, [-0.4, -0.3, -0.2, -0.1, 0.00001, 0.1, 0.2, 0.3, 0.4]
)
def test_write_sdf_no_charges(self):
"""Test OpenEyeToolkitWrapper for writing an SDF file without charges"""
from io import StringIO
toolkit_wrapper = OpenEyeToolkitWrapper()
ethanol = create_ethanol()
ethanol.partial_charges = None
sio = StringIO()
ethanol.to_file(sio, "SDF", toolkit_registry=toolkit_wrapper)
sdf_text = sio.getvalue()
# In our current configuration, if the OFFMol doesn't have partial charges, we DO NOT want a partial charge
# block to be written. For reference, it's possible to indicate that a partial charge is not known by writing
# out "n/a" (or another placeholder) in the partial charge block atoms without charges.
assert "<atom.dprop.PartialCharge>" not in sdf_text
def test_sdf_properties_roundtrip(self):
"""Test OpenEyeToolkitWrapper for performing a round trip of a molecule with defined partial charges
and entries in the properties dict to and from a sdf file"""
toolkit_wrapper = OpenEyeToolkitWrapper()
ethanol = create_ethanol()
ethanol.properties["test_property"] = "test_value"
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".sdf") as iofile:
ethanol.to_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
np.testing.assert_allclose(
ethanol.partial_charges / unit.elementary_charge,
ethanol2.partial_charges / unit.elementary_charge,
)
assert ethanol2.properties["test_property"] == "test_value"
# Now test with no properties or charges
ethanol = create_ethanol()
ethanol.partial_charges = None
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".sdf") as iofile:
ethanol.to_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
assert ethanol2.partial_charges is None
assert ethanol2.properties == {}
def test_write_multiconformer_mol_as_sdf(self):
"""
Test OpenEyeToolkitWrapper for writing a multiconformer molecule to SDF. The OFF toolkit should only
save the first conformer.
"""
from io import StringIO
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/ethanol.sdf")
ethanol = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
ethanol.partial_charges = (
np.array([-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
* unit.elementary_charge
)
ethanol.properties["test_prop"] = "test_value"
new_conf = ethanol.conformers[0] + (
np.ones(ethanol.conformers[0].shape) * unit.angstrom
)
ethanol.add_conformer(new_conf)
sio = StringIO()
ethanol.to_file(sio, "sdf", toolkit_registry=toolkit_wrapper)
data = sio.getvalue()
# In SD format, each molecule ends with "$$$$"
assert data.count("$$$$") == 1
# A basic SDF for ethanol would be 27 lines, though the properties add three more
assert len(data.split("\n")) == 30
assert "test_prop" in data
assert "<atom.dprop.PartialCharge>" in data
# Ensure the first conformer's first atom's X coordinate is in the file
assert str(ethanol.conformers[0][0][0].value_in_unit(unit.angstrom))[:5] in data
# Ensure the SECOND conformer's first atom's X coordinate is NOT in the file
assert (
str(ethanol.conformers[1][0][0].in_units_of(unit.angstrom))[:5] not in data
)
def test_get_mol2_coordinates(self):
"""Test OpenEyeToolkitWrapper for importing a single set of molecule coordinates"""
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/toluene.mol2")
molecule1 = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule1.conformers) == 1
assert molecule1.conformers[0].shape == (15, 3)
assert_almost_equal(
molecule1.conformers[0][5][1] / unit.angstrom, 22.98, decimal=2
)
# Test loading from file-like object
with open(filename, "r") as infile:
molecule2 = Molecule(
infile, file_format="MOL2", toolkit_registry=toolkit_wrapper
)
assert molecule1.is_isomorphic_with(molecule2)
assert len(molecule2.conformers) == 1
assert molecule2.conformers[0].shape == (15, 3)
assert_almost_equal(
molecule2.conformers[0][5][1] / unit.angstrom, 22.98, decimal=2
)
# Test loading from gzipped mol2
import gzip
with gzip.GzipFile(filename + ".gz", "r") as infile:
molecule3 = Molecule(
infile, file_format="MOL2", toolkit_registry=toolkit_wrapper
)
assert molecule1.is_isomorphic_with(molecule3)
assert len(molecule3.conformers) == 1
assert molecule3.conformers[0].shape == (15, 3)
assert_almost_equal(
molecule3.conformers[0][5][1] / unit.angstrom, 22.98, decimal=2
)
def test_get_mol2_charges(self):
"""Test OpenEyeToolkitWrapper for importing a mol2 file specifying partial charges"""
toolkit_wrapper = OpenEyeToolkitWrapper()
filename = get_data_file_path("molecules/toluene_charged.mol2")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule.conformers) == 1
assert molecule.conformers[0].shape == (15, 3)
target_charges = unit.Quantity(
np.array(
[
-0.1342,
-0.1271,
-0.1271,
-0.1310,
-0.1310,
-0.0765,
-0.0541,
0.1314,
0.1286,
0.1286,
0.1303,
0.1303,
0.0440,
0.0440,
0.0440,
]
),
unit.elementary_charge,
)
for pc1, pc2 in zip(molecule._partial_charges, target_charges):
pc1_ul = pc1 / unit.elementary_charge
pc2_ul = pc2 / unit.elementary_charge
assert_almost_equal(pc1_ul, pc2_ul, decimal=4)
def test_mol2_charges_roundtrip(self):
"""Test OpenEyeToolkitWrapper for performing a round trip of a molecule with partial charge to and from
a mol2 file"""
toolkit_wrapper = OpenEyeToolkitWrapper()
ethanol = create_ethanol()
# we increase the magnitude of the partial charges here, since mol2 is only
# written to 4 digits of precision, and the default middle charge for our test ethanol is 1e-5
ethanol.partial_charges *= 100
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".mol2") as iofile:
ethanol.to_file(
iofile.name, file_format="mol2", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="mol2", toolkit_registry=toolkit_wrapper
)
np.testing.assert_allclose(
ethanol.partial_charges / unit.elementary_charge,
ethanol2.partial_charges / unit.elementary_charge,
)
# Now test with no properties or charges
ethanol = create_ethanol()
ethanol.partial_charges = None
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".mol2") as iofile:
ethanol.to_file(
iofile.name, file_format="mol2", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="mol2", toolkit_registry=toolkit_wrapper
)
assert ethanol2.partial_charges is None
assert ethanol2.properties == {}
def test_get_mol2_gaff_atom_types(self):
"""Test that a warning is raised OpenEyeToolkitWrapper when it detects GAFF atom types in a mol2 file."""
toolkit_wrapper = OpenEyeToolkitWrapper()
mol2_file_path = get_data_file_path("molecules/AlkEthOH_test_filt1_ff.mol2")
with pytest.warns(GAFFAtomTypeWarning, match="SYBYL"):
Molecule.from_file(mol2_file_path, toolkit_registry=toolkit_wrapper)
def test_generate_conformers(self):
"""Test OpenEyeToolkitWrapper generate_conformers()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.generate_conformers()
assert molecule.n_conformers != 0
assert not (molecule.conformers[0] == (0.0 * unit.angstrom)).all()
def test_generate_multiple_conformers(self):
"""Test OpenEyeToolkitWrapper generate_conformers() for generating multiple conformers"""
toolkit_wrapper = OpenEyeToolkitWrapper()
smiles = "CCCCCCCCCN"
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.generate_conformers(
rms_cutoff=1 * unit.angstrom,
n_conformers=100,
toolkit_registry=toolkit_wrapper,
)
assert molecule.n_conformers > 1
assert not (molecule.conformers[0] == (0.0 * unit.angstrom)).all()
# Ensure rms_cutoff kwarg is working
molecule2 = toolkit_wrapper.from_smiles(smiles)
molecule2.generate_conformers(
rms_cutoff=0.1 * unit.angstrom,
n_conformers=100,
toolkit_registry=toolkit_wrapper,
)
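        # A tighter RMS cutoff prunes fewer near-duplicate conformers, so more conformers
        # should survive than with the 1 angstrom cutoff used above.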
assert molecule2.n_conformers > molecule.n_conformers
# Ensure n_conformers kwarg is working
molecule2 = toolkit_wrapper.from_smiles(smiles)
molecule2.generate_conformers(
rms_cutoff=0.1 * unit.angstrom,
n_conformers=10,
toolkit_registry=toolkit_wrapper,
)
assert molecule2.n_conformers == 10
def test_apply_elf_conformer_selection(self):
"""Test applying the ELF10 method."""
toolkit = OpenEyeToolkitWrapper()
molecule = Molecule.from_file(
get_data_file_path(os.path.join("molecules", "z_3_hydroxy_propenal.sdf")),
"SDF",
)
# Test that the simple case of no conformers does not yield an exception.
toolkit.apply_elf_conformer_selection(molecule)
initial_conformers = [
# Add a conformer with an internal H-bond.
np.array(
[
[0.5477, 0.3297, -0.0621],
[-0.1168, -0.7881, 0.2329],
[-1.4803, -0.8771, 0.1667],
[-0.2158, 1.5206, -0.4772],
[-1.4382, 1.5111, -0.5580],
[1.6274, 0.3962, -0.0089],
[0.3388, -1.7170, 0.5467],
[-1.8612, -0.0347, -0.1160],
[0.3747, 2.4222, -0.7115],
]
)
* unit.angstrom,
# Add a conformer without an internal H-bond.
np.array(
[
[0.5477, 0.3297, -0.0621],
[-0.1168, -0.7881, 0.2329],
[-1.4803, -0.8771, 0.1667],
[-0.2158, 1.5206, -0.4772],
[0.3353, 2.5772, -0.7614],
[1.6274, 0.3962, -0.0089],
[0.3388, -1.7170, 0.5467],
[-1.7743, -1.7634, 0.4166],
[-1.3122, 1.4082, -0.5180],
]
)
* unit.angstrom,
]
molecule._conformers = [*initial_conformers]
# Apply ELF10
toolkit.apply_elf_conformer_selection(molecule)
elf10_conformers = molecule.conformers
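        # ELF10 is designed to discard conformers with strong intramolecular electrostatic
        # interactions (such as internal H-bonds), so the surviving conformer should be
        # the one without the internal H-bond.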
assert len(elf10_conformers) == 1
assert np.allclose(
elf10_conformers[0].value_in_unit(unit.angstrom),
initial_conformers[1].value_in_unit(unit.angstrom),
)
def test_assign_partial_charges_am1bcc(self):
"""Test OpenEyeToolkitWrapper assign_partial_charges() with am1bcc"""
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
molecule.assign_partial_charges(
partial_charge_method="am1bcc", toolkit_registry=toolkit_registry
) # , charge_model=charge_model)
charge_sum = 0 * unit.elementary_charge
abs_charge_sum = 0 * unit.elementary_charge
for pc in molecule._partial_charges:
charge_sum += pc
abs_charge_sum += abs(pc)
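        # For a neutral molecule the signed sum should be ~0, while a nonzero sum of
        # absolute values confirms that nontrivial charges were actually assigned.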
assert abs(charge_sum) < 0.005 * unit.elementary_charge
assert abs_charge_sum > 0.25 * unit.elementary_charge
def test_assign_partial_charges_am1bcc_net_charge(self):
"""Test OpenEyeToolkitWrapper assign_partial_charges() on a molecule with a net +1 charge"""
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_acetate()
molecule.assign_partial_charges(
partial_charge_method="am1bcc", toolkit_registry=toolkit_registry
)
charge_sum = 0 * unit.elementary_charge
for pc in molecule._partial_charges:
charge_sum += pc
assert (
-0.999 * unit.elementary_charge
> charge_sum
> -1.001 * unit.elementary_charge
)
def test_assign_partial_charges_am1bcc_wrong_n_confs(self):
"""
Test OpenEyeToolkitWrapper assign_partial_charges() with am1bcc when requesting to use an incorrect number of
conformers. This test is a bit shorter than that for AmberToolsToolkitWrapper because OETK uses the
ELF10 multiconformer method of AM1BCC, which doesn't have a maximum number of conformers.
"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
molecule.generate_conformers(
n_conformers=2,
rms_cutoff=0.1 * unit.angstrom,
toolkit_registry=toolkit_registry,
)
        # Pass strict_n_conformers=True but not use_conformers, so the
        # recommended number of conformers will be generated
molecule.assign_partial_charges(
partial_charge_method="am1bcc",
toolkit_registry=toolkit_registry,
strict_n_conformers=True,
)
@pytest.mark.parametrize(
"partial_charge_method", ["am1bcc", "am1elf10", "am1-mulliken", "gasteiger"]
)
def test_assign_partial_charges_neutral(self, partial_charge_method):
"""Test OpenEyeToolkitWrapper assign_partial_charges()"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-5 < charge_sum.value_in_unit(unit.elementary_charge) < 1.0e-5
@pytest.mark.parametrize("partial_charge_method", ["am1bcc", "am1-mulliken"])
def test_assign_partial_charges_conformer_dependence(self, partial_charge_method):
"""Test OpenEyeToolkitWrapper assign_partial_charges()'s use_conformers kwarg
to ensure charges are really conformer dependent. Skip Gasteiger because it isn't
conformer dependent."""
import copy
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=1)
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
)
pcs1 = copy.deepcopy(molecule.partial_charges)
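        # Nudge a few coordinates of the stored conformer so that the second charge
        # assignment sees a slightly different geometry.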
molecule._conformers[0][0][0] += 0.2 * unit.angstrom
molecule._conformers[0][1][1] -= 0.2 * unit.angstrom
molecule._conformers[0][2][1] += 0.2 * unit.angstrom
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
)
for pc1, pc2 in zip(pcs1, molecule.partial_charges):
assert abs(pc1 - pc2) > 1.0e-5 * unit.elementary_charge
@pytest.mark.parametrize(
"partial_charge_method", ["am1bcc", "am1elf10", "am1-mulliken", "gasteiger"]
)
def test_assign_partial_charges_net_charge(self, partial_charge_method):
"""
Test OpenEyeToolkitWrapper assign_partial_charges() on a molecule with net charge.
"""
from openff.toolkit.tests.test_forcefield import create_acetate
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_acetate()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-5 < charge_sum.value_in_unit(unit.elementary_charge) + 1.0 < 1.0e-5
def test_assign_partial_charges_bad_charge_method(self):
"""Test OpenEyeToolkitWrapper assign_partial_charges() for a nonexistent charge method"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
# Molecule.assign_partial_charges calls the ToolkitRegistry with raise_exception_types = [],
# which means it will only ever return ValueError
with pytest.raises(
ValueError, match="is not available from OpenEyeToolkitWrapper"
) as excinfo:
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="NotARealChargeMethod",
)
# ToolkitWrappers raise a specific exception class, so we test that here
with pytest.raises(
ChargeMethodUnavailableError,
match="is not available from OpenEyeToolkitWrapper",
) as excinfo:
OETKW = OpenEyeToolkitWrapper()
OETKW.assign_partial_charges(
molecule=molecule, partial_charge_method="NotARealChargeMethod"
)
@pytest.mark.parametrize(
"partial_charge_method,expected_n_confs",
[("am1bcc", 1), ("am1-mulliken", 1), ("gasteiger", 0)],
)
def test_assign_partial_charges_wrong_n_confs(
self, partial_charge_method, expected_n_confs
):
"""
Test OpenEyeToolkitWrapper assign_partial_charges() when requesting to use an incorrect number of
conformers
"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[OpenEyeToolkitWrapper])
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=2, rms_cutoff=0.01 * unit.angstrom)
# Try passing in the incorrect number of confs, but without specifying strict_n_conformers,
# which should produce a warning
with pytest.warns(
IncorrectNumConformersWarning,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=False,
)
# Try again, with strict_n_confs as true, but not including use_confs, so the
# recommended number of confs will be generated
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
strict_n_conformers=True,
)
# Test calling the ToolkitWrapper _indirectly_, though a ToolkitRegistry,
# which should aggregate any exceptions and bundle all of the messages
# in a failed task together in a single ValueError.
with pytest.raises(
ValueError,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
# Test calling the ToolkitWrapper _directly_, passing in the incorrect number of
# confs, and specify strict_n_conformers, which should produce an IncorrectNumConformersError
with pytest.raises(
IncorrectNumConformersError,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
OETKW = OpenEyeToolkitWrapper()
OETKW.assign_partial_charges(
molecule=molecule,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
def test_assign_partial_charges_failure(self):
"""Test OpenEyeToolkitWrapper assign_partial_charges() on a molecule it cannot assign charges to"""
toolkit_wrapper = OpenEyeToolkitWrapper()
smiles = "[Li+1]"
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.generate_conformers(toolkit_registry=toolkit_wrapper)
# For now, I'm just testing AM1-BCC (will test more when the SMIRNOFF spec for other charges is finalized)
with pytest.raises(Exception) as excinfo:
molecule.assign_partial_charges(
partial_charge_method="am1-bcc", toolkit_registry=toolkit_wrapper
)
assert "Unable to assign charges" in str(excinfo)
assert "OE Error: " in str(excinfo)
def test_assign_partial_charges_trans_cooh_am1bcc(self):
"""Test OpenEyeToolkitWrapper for computing partial charges for problematic molecules, as exemplified by
Issue 346 (https://github.com/openforcefield/openff-toolkit/issues/346)"""
lysine = Molecule.from_smiles("C(CC[NH3+])C[C@@H](C(=O)O)N")
toolkit_wrapper = OpenEyeToolkitWrapper()
lysine.generate_conformers(toolkit_registry=toolkit_wrapper)
lysine.assign_partial_charges(
partial_charge_method="am1bcc", toolkit_registry=toolkit_wrapper
)
@pytest.mark.parametrize(
"bond_order_model",
["am1-wiberg", "am1-wiberg-elf10", "pm3-wiberg", "pm3-wiberg-elf10"],
)
@pytest.mark.parametrize(
"smiles",
[
"[H]C([H])([H])C([H])([H])[H]",
"[H]C([H])([H])[N+]([H])([H])[H]",
r"C\C(F)=C(/F)C[C@@](C)(Cl)Br",
],
)
def test_assign_fractional_bond_orders(self, bond_order_model, smiles):
"""Test OpenEyeToolkitWrapper assign_fractional_bond_orders()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper, bond_order_model=bond_order_model
)
# TODO: Add test for equivalent Wiberg orders for equivalent bonds
# Sanity check single bonds.
assert all(
0.75 < bond.fractional_bond_order < 1.25
for bond in molecule.bonds
if bond.bond_order == 1
)
# Sanity check double bonds.
assert all(
1.75 < bond.fractional_bond_order < 2.25
for bond in molecule.bonds
if bond.bond_order == 2
)
def test_assign_fractional_bond_orders_multi_conf(
self, formic_acid_molecule, formic_acid_conformers
):
"""Test that the OpenEyeToolkitWrapper assign_fractional_bond_orders()
function correctly averages over all conformers."""
toolkit_wrapper = OpenEyeToolkitWrapper()
# Compute the WBO from a single conformer.
formic_acid_molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model="am1-wiberg",
use_conformers=[formic_acid_conformers["cis"]],
)
cis_bond_orders = [
bond.fractional_bond_order for bond in formic_acid_molecule.bonds
]
formic_acid_molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model="am1-wiberg",
use_conformers=[formic_acid_conformers["trans"]],
)
trans_bond_orders = [
bond.fractional_bond_order for bond in formic_acid_molecule.bonds
]
# Use the method to average the conformers.
formic_acid_molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model="am1-wiberg",
use_conformers=[
formic_acid_conformers["cis"],
formic_acid_conformers["trans"],
],
)
avg_bond_orders = [
bond.fractional_bond_order for bond in formic_acid_molecule.bonds
]
# The average should be distinct from the WBO from either conformer.
assert not np.allclose(cis_bond_orders, avg_bond_orders)
assert not np.allclose(trans_bond_orders, avg_bond_orders)
assert np.allclose(
np.mean([trans_bond_orders, cis_bond_orders], axis=0), avg_bond_orders
)
def test_assign_fractional_bond_orders_conformer_dependence(self):
"""
Test that OpenEyeToolkitWrapper assign_fractional_bond_orders() provides different results when using
different conformers
"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# Get the WBOs using one conformer
molecule = create_ethanol()
molecule.generate_conformers(toolkit_registry=toolkit_wrapper)
molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
use_conformers=molecule.conformers,
bond_order_model="am1-wiberg",
)
# Do the same again, but change the conformer to yield a different result
molecule_diff_coords = create_ethanol()
molecule_diff_coords.generate_conformers(toolkit_registry=toolkit_wrapper)
molecule_diff_coords._conformers[0][0][0] = (
molecule_diff_coords._conformers[0][0][0] + 1.0 * unit.angstrom
)
molecule_diff_coords._conformers[0][1][0] = (
molecule_diff_coords._conformers[0][1][0] - 1.0 * unit.angstrom
)
molecule_diff_coords._conformers[0][2][0] = (
molecule_diff_coords._conformers[0][2][0] + 1.0 * unit.angstrom
)
molecule_diff_coords.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
use_conformers=molecule_diff_coords.conformers,
bond_order_model="am1-wiberg",
)
for bond1, bond2 in zip(molecule.bonds, molecule_diff_coords.bonds):
assert abs(bond1.fractional_bond_order - bond2.fractional_bond_order) > 1e-3
@pytest.mark.parametrize(
"bond_order_model",
["am1-wiberg", "am1-wiberg-elf10", "pm3-wiberg", "pm3-wiberg-elf10"],
)
def test_assign_fractional_bond_orders_neutral_charge_mol(self, bond_order_model):
"""Test OpenEyeToolkitWrapper assign_fractional_bond_orders() for neutral and
charged molecule"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# Reading neutral molecule from file
filename = get_data_file_path("molecules/CID20742535_neutral.sdf")
molecule1 = Molecule.from_file(filename)
# Reading negative molecule from file
filename = get_data_file_path("molecules/CID20742535_anion.sdf")
molecule2 = Molecule.from_file(filename)
# Checking that only one additional bond is present in the neutral molecule
assert len(molecule1.bonds) == len(molecule2.bonds) + 1
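        # The neutral form presumably carries an O-H bond that the deprotonated anion
        # lacks, which accounts for the single extra bond.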
molecule1.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model=bond_order_model,
use_conformers=molecule1.conformers,
)
for i in molecule1.bonds:
if i.is_aromatic:
# Checking aromatic bonds
assert 1.05 < i.fractional_bond_order < 1.65
elif i.atom1.atomic_number == 1 or i.atom2.atomic_number == 1:
# Checking bond order of C-H or O-H bonds are around 1
assert 0.85 < i.fractional_bond_order < 1.05
elif i.atom1.atomic_number == 8 or i.atom2.atomic_number == 8:
# Checking C-O single bond
wbo_C_O_neutral = i.fractional_bond_order
assert 1.0 < wbo_C_O_neutral < 1.5
else:
# Should be C-C single bond
assert (i.atom1_index == 4 and i.atom2_index == 6) or (
i.atom1_index == 6 and i.atom2_index == 4
)
wbo_C_C_neutral = i.fractional_bond_order
assert 1.0 < wbo_C_C_neutral < 1.3
molecule2.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model=bond_order_model,
use_conformers=molecule2.conformers,
)
for i in molecule2.bonds:
if i.is_aromatic:
# Checking aromatic bonds
assert 1.05 < i.fractional_bond_order < 1.65
elif i.atom1.atomic_number == 1 or i.atom2.atomic_number == 1:
# Checking bond order of C-H or O-H bonds are around 1
assert 0.85 < i.fractional_bond_order < 1.05
elif i.atom1.atomic_number == 8 or i.atom2.atomic_number == 8:
# Checking C-O single bond
wbo_C_O_anion = i.fractional_bond_order
assert 1.3 < wbo_C_O_anion < 1.8
else:
# Should be C-C single bond
assert (i.atom1_index == 4 and i.atom2_index == 6) or (
i.atom1_index == 6 and i.atom2_index == 4
)
wbo_C_C_anion = i.fractional_bond_order
assert 1.0 < wbo_C_C_anion < 1.3
# Wiberg bond order of C-C single bond is higher in the anion
assert wbo_C_C_anion > wbo_C_C_neutral
# Wiberg bond order of C-O bond is higher in the anion
assert wbo_C_O_anion > wbo_C_O_neutral
def test_assign_fractional_bond_orders_invalid_method(self):
"""
Test that OpenEyeToolkitWrapper assign_fractional_bond_orders() raises the
correct error if an invalid charge model is provided
"""
toolkit_wrapper = OpenEyeToolkitWrapper()
molecule = toolkit_wrapper.from_smiles("C")
expected_error = (
"Bond order model 'not a real bond order model' is not supported by "
"OpenEyeToolkitWrapper. Supported models are "
"\['am1-wiberg', 'am1-wiberg-elf10', 'pm3-wiberg', 'pm3-wiberg-elf10'\]"
)
with pytest.raises(ValueError, match=expected_error):
molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
bond_order_model="not a real bond order model",
)
@pytest.mark.slow
def test_substructure_search_on_large_molecule(self):
"""Test OpenEyeToolkitWrapper substructure search when a large number hits are found"""
tk = OpenEyeToolkitWrapper()
smiles = "C" * 600
molecule = tk.from_smiles(smiles)
query = "[C:1]~[C:2]"
ret = molecule.chemical_environment_matches(query, toolkit_registry=tk)
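        # A 600-carbon chain has 599 C~C bonds, and each bond matches the tagged query
        # in both atom orders, giving 2 * 599 = 1198 hits of two atoms each.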
assert len(ret) == 1198
assert len(ret[0]) == 2
def test_find_rotatable_bonds(self):
"""Test finding rotatable bonds while ignoring some groups"""
# test a simple molecule
ethanol = create_ethanol()
bonds = ethanol.find_rotatable_bonds()
assert len(bonds) == 2
for bond in bonds:
assert ethanol.atoms[bond.atom1_index].atomic_number != 1
assert ethanol.atoms[bond.atom2_index].atomic_number != 1
# now ignore the C-O bond, forwards
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#6:1]-[#8:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 6
# now ignore the O-C bond, backwards
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#8:1]-[#6:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 6
# now ignore the C-C bond
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#6:1]-[#6:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 8
# ignore a list of searches, forward
bonds = ethanol.find_rotatable_bonds(
ignore_functional_groups=["[#6:1]-[#8:2]", "[#6:1]-[#6:2]"]
)
assert bonds == []
# ignore a list of searches, backwards
bonds = ethanol.find_rotatable_bonds(
ignore_functional_groups=["[#6:1]-[#6:2]", "[#8:1]-[#6:2]"]
)
assert bonds == []
# test molecules that should have no rotatable bonds
cyclohexane = create_cyclohexane()
bonds = cyclohexane.find_rotatable_bonds()
assert bonds == []
methane = Molecule.from_smiles("C")
bonds = methane.find_rotatable_bonds()
assert bonds == []
ethene = Molecule.from_smiles("C=C")
bonds = ethene.find_rotatable_bonds()
assert bonds == []
terminal_forwards = "[*]~[*:1]-[X2H1,X3H2,X4H3:2]-[#1]"
terminal_backwards = "[#1]-[X2H1,X3H2,X4H3:1]-[*:2]~[*]"
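        # These SMARTS match rotors whose outer tagged atom is a terminal -OH, -NH2, or
        # -CH3-like group bearing only hydrogens, written once in each bond direction.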
# test removing terminal rotors
toluene = Molecule.from_file(get_data_file_path("molecules/toluene.sdf"))
bonds = toluene.find_rotatable_bonds()
assert len(bonds) == 1
assert toluene.atoms[bonds[0].atom1_index].atomic_number == 6
assert toluene.atoms[bonds[0].atom2_index].atomic_number == 6
# find terminal bonds forward
bonds = toluene.find_rotatable_bonds(ignore_functional_groups=terminal_forwards)
assert bonds == []
# find terminal bonds backwards
bonds = toluene.find_rotatable_bonds(
ignore_functional_groups=terminal_backwards
)
assert bonds == []
# TODO: Check partial charge invariants (total charge, charge equivalence)
# TODO: Add test for aromaticity
# TODO: Add test and molecule functionality for isotopes
@requires_rdkit
class TestRDKitToolkitWrapper:
"""Test the RDKitToolkitWrapper"""
def test_smiles(self):
"""Test RDKitToolkitWrapper to_smiles() and from_smiles()"""
toolkit_wrapper = RDKitToolkitWrapper()
# This differs from OE's expected output due to different canonicalization schemes
smiles = "[H][C]([H])([H])[C]([H])([H])[H]"
molecule = Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
# When making a molecule from SMILES, partial charges should be initialized to None
assert molecule.partial_charges is None
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
# print(smiles, smiles2)
assert smiles == smiles2
@pytest.mark.parametrize(
"smiles,exception_regex",
[
(r"C\C(F)=C(/F)CC(C)(Cl)Br", "Undefined chiral centers"),
(r"C\C(F)=C(/F)C[C@@](C)(Cl)Br", None),
(r"CC(F)=C(F)C[C@@](C)(Cl)Br", "Bonds with undefined stereochemistry"),
],
)
def test_smiles_missing_stereochemistry(self, smiles, exception_regex):
"""Test RDKitToolkitWrapper to_smiles() and from_smiles() when given ambiguous stereochemistry"""
toolkit_wrapper = RDKitToolkitWrapper()
if exception_regex is not None:
with pytest.raises(UndefinedStereochemistryError, match=exception_regex):
Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
Molecule.from_smiles(
smiles, toolkit_registry=toolkit_wrapper, allow_undefined_stereo=True
)
else:
Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
# TODO: test_smiles_round_trip
def test_smiles_add_H(self):
"""Test RDKitToolkitWrapper to_smiles() and from_smiles()"""
toolkit_wrapper = RDKitToolkitWrapper()
input_smiles = "CC"
# This differs from OE's expected output due to different canonicalization schemes
expected_output_smiles = "[H][C]([H])([H])[C]([H])([H])[H]"
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
assert smiles2 == expected_output_smiles
def test_rdkit_from_smiles_hydrogens_are_explicit(self):
"""
Test to ensure that RDKitToolkitWrapper.from_smiles has the proper behavior with
respect to its hydrogens_are_explicit kwarg
"""
toolkit_wrapper = RDKitToolkitWrapper()
smiles_impl = "C#C"
with pytest.raises(
ValueError,
match="but RDKit toolkit interpreted SMILES 'C#C' as having implicit hydrogen",
) as excinfo:
offmol = Molecule.from_smiles(
smiles_impl,
toolkit_registry=toolkit_wrapper,
hydrogens_are_explicit=True,
)
offmol = Molecule.from_smiles(
smiles_impl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=False
)
assert offmol.n_atoms == 4
smiles_expl = "[H][C]#[C][H]"
offmol = Molecule.from_smiles(
smiles_expl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=True
)
assert offmol.n_atoms == 4
# It's debatable whether this next function should pass. Strictly speaking, the hydrogens in this SMILES
# _are_ explicit, so allowing "hydrogens_are_explicit=False" through here is allowing a contradiction.
# We might rethink the name of this kwarg.
offmol = Molecule.from_smiles(
smiles_expl, toolkit_registry=toolkit_wrapper, hydrogens_are_explicit=False
)
assert offmol.n_atoms == 4
@pytest.mark.parametrize("molecule", get_mini_drug_bank(RDKitToolkitWrapper))
def test_to_inchi(self, molecule):
"""Test conversion to standard and non-standard InChI"""
toolkit = RDKitToolkitWrapper()
inchi = molecule.to_inchi(toolkit_registry=toolkit)
non_standard = molecule.to_inchi(fixed_hydrogens=True, toolkit_registry=toolkit)
@pytest.mark.parametrize("molecule", get_mini_drug_bank(RDKitToolkitWrapper))
def test_to_inchikey(self, molecule):
"""Test the conversion to standard and non-standard InChIKey"""
toolkit = RDKitToolkitWrapper()
inchikey = molecule.to_inchikey(toolkit_registry=toolkit)
non_standard_key = molecule.to_inchikey(
fixed_hydrogens=True, toolkit_registry=toolkit
)
def test_from_bad_inchi(self):
"""Test building a molecule from a bad InChI string"""
toolkit = RDKitToolkitWrapper()
inchi = "InChI=1S/ksbfksfksfksbfks"
with pytest.raises(RuntimeError):
mol = Molecule.from_inchi(inchi, toolkit_registry=toolkit)
inchi_data = [
{
"molecule": create_ethanol(),
"standard_inchi": "InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
"fixed_hydrogen_inchi": "InChI=1/C2H6O/c1-2-3/h3H,2H2,1H3",
},
{
"molecule": create_reversed_ethanol(),
"standard_inchi": "InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
"fixed_hydrogen_inchi": "InChI=1/C2H6O/c1-2-3/h3H,2H2,1H3",
},
{
"molecule": create_acetaldehyde(),
"standard_inchi": "InChI=1S/C2H4O/c1-2-3/h2H,1H3",
"fixed_hydrogen_inchi": "InChI=1/C2H4O/c1-2-3/h2H,1H3",
},
{
"molecule": create_cyclohexane(),
"standard_inchi": "InChI=1S/C6H12/c1-2-4-6-5-3-1/h1-6H2",
"fixed_hydrogen_inchi": "InChI=1/C6H12/c1-2-4-6-5-3-1/h1-6H2",
},
]
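    # Note: for these simple molecules the fixed-hydrogen ("non-standard") InChI differs from
    # the standard one only in its "InChI=1/" (vs "InChI=1S/") prefix; molecules with mobile
    # hydrogens can additionally gain a fixed-H layer.
    # e.g. (sketch): Molecule.from_inchi("InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3") rebuilds ethanol.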
@pytest.mark.parametrize("data", inchi_data)
def test_from_inchi(self, data):
"""Test building a molecule from standard and non-standard InChI strings."""
toolkit = RDKitToolkitWrapper()
ref_mol = data["molecule"]
# make a molecule from inchi
inchi_mol = Molecule.from_inchi(
data["standard_inchi"], toolkit_registry=toolkit
)
assert inchi_mol.to_inchi(toolkit_registry=toolkit) == data["standard_inchi"]
def compare_mols(ref_mol, inchi_mol):
assert ref_mol.n_atoms == inchi_mol.n_atoms
assert ref_mol.n_bonds == inchi_mol.n_bonds
assert ref_mol.n_angles == inchi_mol.n_angles
assert ref_mol.n_propers == inchi_mol.n_propers
assert ref_mol.is_isomorphic_with(inchi_mol) is True
compare_mols(ref_mol, inchi_mol)
# now make the molecule from the non-standard inchi and compare
nonstandard_inchi_mol = Molecule.from_inchi(data["fixed_hydrogen_inchi"])
assert (
nonstandard_inchi_mol.to_inchi(
fixed_hydrogens=True, toolkit_registry=toolkit
)
== data["fixed_hydrogen_inchi"]
)
compare_mols(ref_mol, nonstandard_inchi_mol)
@pytest.mark.parametrize("molecule", get_mini_drug_bank(RDKitToolkitWrapper))
def test_non_standard_inchi_round_trip(self, molecule):
"""Test if a molecule can survive an InChi round trip test in some cases the standard InChI
will not be enough to ensure information is preserved so we test the non-standard inchi here."""
from openff.toolkit.utils.toolkits import UndefinedStereochemistryError
toolkit = RDKitToolkitWrapper()
inchi = molecule.to_inchi(fixed_hydrogens=True, toolkit_registry=toolkit)
# make a copy of the molecule from the inchi string
if molecule.name in rdkit_inchi_stereochemistry_lost:
            # some molecules lose stereochemistry, so they are skipped
            # if this fails, the molecule may have been fixed upstream
with pytest.raises(UndefinedStereochemistryError):
mol2 = molecule.from_inchi(inchi, toolkit_registry=toolkit)
else:
print(molecule.name)
mol2 = molecule.from_inchi(inchi, toolkit_registry=toolkit)
# Some molecules are mangled by being round-tripped to/from InChI
if molecule.name in rdkit_inchi_roundtrip_mangled:
with pytest.raises(AssertionError):
mol2.to_rdkit()
return
# compare the full molecule excluding the properties dictionary
        # turn off bond order matching, since bond orders can shift within aromatic rings
assert molecule.is_isomorphic_with(
mol2, bond_order_matching=False, toolkit_registry=toolkit
)
def test_smiles_charged(self):
"""Test RDKitWrapper functions for reading/writing charged SMILES"""
toolkit_wrapper = RDKitToolkitWrapper()
# This differs from OE's expected output due to different canonicalization schemes
smiles = "[H][C]([H])([H])[N+]([H])([H])[H]"
molecule = Molecule.from_smiles(smiles, toolkit_registry=toolkit_wrapper)
smiles2 = molecule.to_smiles(toolkit_registry=toolkit_wrapper)
assert smiles == smiles2
def test_to_from_rdkit_core_props_filled(self):
"""Test RDKitToolkitWrapper to_rdkit() and from_rdkit() when given populated core property fields"""
toolkit_wrapper = RDKitToolkitWrapper()
        # Using a simple molecule with tetrahedral and bond stereochemistry
input_smiles = r"C\C(F)=C(/F)C[C@@](C)(Cl)Br"
expected_output_smiles = r"[H][C]([H])([H])/[C]([F])=[C](\[F])[C]([H])([H])[C@@]([Cl])([Br])[C]([H])([H])[H]"
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
assert (
molecule.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
# Populate core molecule property fields
molecule.name = "Alice"
partial_charges = unit.Quantity(
np.array(
[
-0.9,
-0.8,
-0.7,
-0.6,
-0.5,
-0.4,
-0.3,
-0.2,
-0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
]
),
unit.elementary_charge,
)
molecule.partial_charges = partial_charges
coords = unit.Quantity(
np.array(
[
["0.0", "1.0", "2.0"],
["3.0", "4.0", "5.0"],
["6.0", "7.0", "8.0"],
["9.0", "10.0", "11.0"],
["12.0", "13.0", "14.0"],
["15.0", "16.0", "17.0"],
["18.0", "19.0", "20.0"],
["21.0", "22.0", "23.0"],
["24.0", "25.0", "26.0"],
["27.0", "28.0", "29.0"],
["30.0", "31.0", "32.0"],
["33.0", "34.0", "35.0"],
["36.0", "37.0", "38.0"],
["39.0", "40.0", "41.0"],
["42.0", "43.0", "44.0"],
["45.0", "46.0", "47.0"],
["48.0", "49.0", "50.0"],
["51.0", "52.0", "53.0"],
]
),
unit.angstrom,
)
molecule.add_conformer(coords)
# Populate core atom property fields
molecule.atoms[2].name = "Bob"
# Ensure one atom has its stereochemistry specified
central_carbon_stereo_specified = False
for atom in molecule.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "S":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
# Populate bond core property fields
fractional_bond_orders = [float(val) for val in range(18)]
for fbo, bond in zip(fractional_bond_orders, molecule.bonds):
bond.fractional_bond_order = fbo
        # Do a first conversion to/from rdmol
rdmol = molecule.to_rdkit()
molecule2 = Molecule.from_rdkit(rdmol)
# Test that properties survived first conversion
# assert molecule.to_dict() == molecule2.to_dict()
assert molecule.name == molecule2.name
# NOTE: This expects the same indexing scheme in the original and new molecule
central_carbon_stereo_specified = False
for atom in molecule2.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "S":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
for atom1, atom2 in zip(molecule.atoms, molecule2.atoms):
assert atom1.to_dict() == atom2.to_dict()
for bond1, bond2 in zip(molecule.bonds, molecule2.bonds):
assert bond1.to_dict() == bond2.to_dict()
assert (molecule.conformers[0] == molecule2.conformers[0]).all()
for pc1, pc2 in zip(molecule._partial_charges, molecule2._partial_charges):
pc1_ul = pc1 / unit.elementary_charge
pc2_ul = pc2 / unit.elementary_charge
assert_almost_equal(pc1_ul, pc2_ul, decimal=6)
assert (
molecule2.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
# TODO: This should be its own test
def test_to_from_rdkit_core_props_unset(self):
"""Test RDKitToolkitWrapper to_rdkit() and from_rdkit() when given empty core property fields"""
toolkit_wrapper = RDKitToolkitWrapper()
        # Using a simple molecule with tetrahedral and bond stereochemistry
input_smiles = r"C\C(F)=C(/F)C[C@](C)(Cl)Br"
expected_output_smiles = r"[H][C]([H])([H])/[C]([F])=[C](\[F])[C]([H])([H])[C@]([Cl])([Br])[C]([H])([H])[H]"
molecule = Molecule.from_smiles(input_smiles, toolkit_registry=toolkit_wrapper)
assert (
molecule.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
# Ensure one atom has its stereochemistry specified
central_carbon_stereo_specified = False
for atom in molecule.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "R":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
# Do a first conversion to/from rdmol
rdmol = molecule.to_rdkit()
molecule2 = Molecule.from_rdkit(rdmol)
# Test that properties survived first conversion
assert molecule.name == molecule2.name
# NOTE: This expects the same indexing scheme in the original and new molecule
central_carbon_stereo_specified = False
for atom in molecule2.atoms:
if (atom.atomic_number == 6) and atom.stereochemistry == "R":
central_carbon_stereo_specified = True
assert central_carbon_stereo_specified
for atom1, atom2 in zip(molecule.atoms, molecule2.atoms):
assert atom1.to_dict() == atom2.to_dict()
for bond1, bond2 in zip(molecule.bonds, molecule2.bonds):
assert bond1.to_dict() == bond2.to_dict()
# The molecule was initialized from SMILES, so mol.conformers arrays should be None for both
assert molecule.conformers is None
assert molecule2.conformers is None
# The molecule was initialized from SMILES, so mol.partial_charges arrays should be None for both
assert molecule.partial_charges is None
assert molecule2.partial_charges is None
assert (
molecule2.to_smiles(toolkit_registry=toolkit_wrapper)
== expected_output_smiles
)
def test_from_rdkit_implicit_hydrogens(self):
"""
Test that hydrogens are inferred from hydrogen-less RDKit molecules,
unless the option is turned off.
"""
from rdkit import Chem
rdmol = Chem.MolFromSmiles("CC")
offmol = Molecule.from_rdkit(rdmol)
assert any([a.atomic_number == 1 for a in offmol.atoms])
offmol_no_h = Molecule.from_rdkit(rdmol, hydrogens_are_explicit=True)
assert not any([a.atomic_number == 1 for a in offmol_no_h.atoms])
@pytest.mark.parametrize(
"smiles, expected_map", [("[Cl:1][Cl]", {0: 1}), ("[Cl:1][Cl:2]", {0: 1, 1: 2})]
)
def test_from_rdkit_atom_map(self, smiles, expected_map):
"""
        Test that Molecule.from_rdkit preserves the atom map encoded in a
        mapped SMILES, including partially mapped molecules
"""
from rdkit import Chem
off_molecule = Molecule.from_rdkit(Chem.MolFromSmiles(smiles))
assert off_molecule.properties["atom_map"] == expected_map
def test_file_extension_case(self):
"""
Test round-trips of some file extensions when called directly from the toolkit wrappers,
        including lower- and uppercase file extensions. Note that this test does not check
        accuracy; it only checks that reading and writing complete without raising an exception.
"""
mols_in = RDKitToolkitWrapper().from_file(
file_path=get_data_file_path("molecules/ethanol.sdf"), file_format="sdf"
)
assert len(mols_in) > 0
mols_in = RDKitToolkitWrapper().from_file(
file_path=get_data_file_path("molecules/ethanol.sdf"), file_format="SDF"
)
assert len(mols_in) > 0
def test_get_sdf_coordinates(self):
"""Test RDKitToolkitWrapper for importing a single set of coordinates from a sdf file"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/toluene.sdf")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule.conformers) == 1
assert molecule.conformers[0].shape == (15, 3)
assert_almost_equal(
molecule.conformers[0][5][1] / unit.angstrom, 2.0104, decimal=4
)
def test_read_sdf_charges(self):
"""Test RDKitToolkitWrapper for importing a charges from a sdf file"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/ethanol_partial_charges.sdf")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert molecule.partial_charges is not None
assert molecule.partial_charges[0] == -0.4 * unit.elementary_charge
assert molecule.partial_charges[-1] == 0.4 * unit.elementary_charge
def test_write_sdf_charges(self):
"""Test RDKitToolkitWrapper for writing partial charges to a sdf file"""
from io import StringIO
toolkit_wrapper = RDKitToolkitWrapper()
ethanol = create_ethanol()
sio = StringIO()
ethanol.to_file(sio, "SDF", toolkit_registry=toolkit_wrapper)
sdf_text = sio.getvalue()
# The output lines of interest here will look like
# > <atom.dprop.PartialCharge> (1)
# -0.40000000000000002 -0.29999999999999999 -0.20000000000000001 -0.10000000000000001 0.01 0.10000000000000001 0.20000000000000001 0.29999999999999999 0.40000000000000002
# Parse the SDF text, grabbing the numeric line above
sdf_split = sdf_text.split("\n")
charge_line_found = False
for line in sdf_split:
if charge_line_found:
charges = [float(i) for i in line.split()]
break
if "> <atom.dprop.PartialCharge>" in line:
charge_line_found = True
# Make sure that a charge line was ever found
assert charge_line_found
# Make sure that the charges found were correct
assert_almost_equal(
charges, [-0.4, -0.3, -0.2, -0.1, 0.00001, 0.1, 0.2, 0.3, 0.4]
)
def test_sdf_properties_roundtrip(self):
"""Test RDKitToolkitWrapper for performing a round trip of a molecule with defined partial charges
        and entries in the properties dict to and from an SDF file"""
toolkit_wrapper = RDKitToolkitWrapper()
ethanol = create_ethanol()
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".sdf") as iofile:
ethanol.to_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
assert (ethanol.partial_charges == ethanol2.partial_charges).all()
# Now test with no properties or charges
ethanol = create_ethanol()
ethanol.partial_charges = None
# Write ethanol to a temporary file, and then immediately read it.
with NamedTemporaryFile(suffix=".sdf") as iofile:
ethanol.to_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
ethanol2 = Molecule.from_file(
iofile.name, file_format="SDF", toolkit_registry=toolkit_wrapper
)
assert ethanol2.partial_charges is None
assert ethanol2.properties == {}
def test_write_sdf_no_charges(self):
"""Test RDKitToolkitWrapper for writing an SDF file with no charges"""
from io import StringIO
toolkit_wrapper = RDKitToolkitWrapper()
ethanol = create_ethanol()
ethanol.partial_charges = None
sio = StringIO()
ethanol.to_file(sio, "SDF", toolkit_registry=toolkit_wrapper)
sdf_text = sio.getvalue()
# In our current configuration, if the OFFMol doesn't have partial charges, we DO NOT want a partial charge
# block to be written. For reference, it's possible to indicate that a partial charge is not known by writing
# out "n/a" (or another placeholder) in the partial charge block atoms without charges.
assert "> <atom.dprop.PartialCharge>" not in sdf_text
def test_read_ethene_sdf(self):
"""
Test that RDKitToolkitWrapper can load an ethene molecule without complaining about bond stereo.
See https://github.com/openforcefield/openff-toolkit/issues/785
"""
ethene_file_path = get_data_file_path("molecules/ethene_rdkit.sdf")
toolkit_wrapper = RDKitToolkitWrapper()
toolkit_wrapper.from_file(ethene_file_path, file_format="sdf")
def test_load_multiconformer_sdf_as_separate_molecules(self):
"""
Test RDKitToolkitWrapper for reading a "multiconformer" SDF, which the OFF
Toolkit should treat as separate molecules
"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/methane_multiconformer.sdf")
molecules = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecules) == 2
assert len(molecules[0].conformers) == 1
assert len(molecules[1].conformers) == 1
assert molecules[0].conformers[0].shape == (5, 3)
def test_load_multiconformer_sdf_as_separate_molecules_properties(self):
"""
Test RDKitToolkitWrapper for reading a "multiconformer" SDF, which the OFF
Toolkit should treat as separate molecules
"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/methane_multiconformer_properties.sdf")
molecules = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecules) == 2
assert len(molecules[0].conformers) == 1
assert len(molecules[1].conformers) == 1
assert molecules[0].conformers[0].shape == (5, 3)
# The first molecule in the SDF has the following properties and charges:
assert molecules[0].properties["test_property_key"] == "test_property_value"
np.testing.assert_allclose(
molecules[0].partial_charges / unit.elementary_charge,
[-0.108680, 0.027170, 0.027170, 0.027170, 0.027170],
)
# The second molecule in the SDF has the following properties and charges:
assert molecules[1].properties["test_property_key"] == "test_property_value2"
assert (
molecules[1].properties["another_test_property_key"]
== "another_test_property_value"
)
np.testing.assert_allclose(
molecules[1].partial_charges / unit.elementary_charge,
[0.027170, 0.027170, 0.027170, 0.027170, -0.108680],
)
def test_write_multiconformer_mol_as_sdf(self):
"""
Test RDKitToolkitWrapper for writing a multiconformer molecule to SDF. The OFF toolkit should only
save the first conformer
"""
from io import StringIO
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/ethanol.sdf")
ethanol = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
ethanol.partial_charges = (
np.array([-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
* unit.elementary_charge
)
ethanol.properties["test_prop"] = "test_value"
new_conf = ethanol.conformers[0] + (
np.ones(ethanol.conformers[0].shape) * unit.angstrom
)
ethanol.add_conformer(new_conf)
sio = StringIO()
ethanol.to_file(sio, "sdf", toolkit_registry=toolkit_wrapper)
data = sio.getvalue()
# In SD format, each molecule ends with "$$$$"
assert data.count("$$$$") == 1
# A basic SDF for ethanol would be 27 lines, though the properties add three more
assert len(data.split("\n")) == 30
assert "test_prop" in data
assert "<atom.dprop.PartialCharge>" in data
# Ensure the first conformer's first atom's X coordinate is in the file
assert str(ethanol.conformers[0][0][0].value_in_unit(unit.angstrom))[:5] in data
# Ensure the SECOND conformer's first atom's X coordinate is NOT in the file
assert (
str(ethanol.conformers[1][0][0].in_units_of(unit.angstrom))[:5] not in data
)
def test_write_multiconformer_pdb(self):
"""
Make sure RDKit can write multi conformer PDB files.
"""
from io import StringIO
toolkit = RDKitToolkitWrapper()
# load up a multiconformer pdb file and condense down the conformers
molecules = Molecule.from_file(
get_data_file_path("molecules/butane_multi.sdf"), toolkit_registry=toolkit
)
butane = molecules.pop(0)
for mol in molecules:
butane.add_conformer(mol.conformers[0])
assert butane.n_conformers == 7
sio = StringIO()
butane.to_file(sio, "pdb", toolkit_registry=toolkit)
        # we need to make sure each conformer is written to the file
pdb = sio.getvalue()
for i in range(1, 8):
assert f"MODEL {i}" in pdb
# Unskip this when we implement PDB-reading support for RDKitToolkitWrapper
@pytest.mark.skip
def test_get_pdb_coordinates(self):
"""Test RDKitToolkitWrapper for importing a single set of coordinates from a pdb file"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/toluene.pdb")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule.conformers) == 1
assert molecule.conformers[0].shape == (15, 3)
# Unskip this when we implement PDB-reading support for RDKitToolkitWrapper
@pytest.mark.skip
def test_load_aromatic_pdb(self):
"""Test OpenEyeToolkitWrapper for importing molecule conformers"""
toolkit_wrapper = RDKitToolkitWrapper()
filename = get_data_file_path("molecules/toluene.pdb")
molecule = Molecule.from_file(filename, toolkit_registry=toolkit_wrapper)
assert len(molecule.conformers) == 1
assert molecule.conformers[0].shape == (15, 3)
def test_generate_conformers(self):
"""Test RDKitToolkitWrapper generate_conformers()"""
toolkit_wrapper = RDKitToolkitWrapper()
smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.generate_conformers()
# TODO: Make this test more robust
def test_generate_multiple_conformers(self):
"""Test RDKitToolkitWrapper generate_conformers() for generating multiple conformers"""
toolkit_wrapper = RDKitToolkitWrapper()
smiles = "CCCCCCCCCN"
molecule = toolkit_wrapper.from_smiles(smiles)
molecule.generate_conformers(
rms_cutoff=1 * unit.angstrom,
n_conformers=100,
toolkit_registry=toolkit_wrapper,
)
assert molecule.n_conformers > 1
assert not (molecule.conformers[0] == (0.0 * unit.angstrom)).all()
# Ensure rms_cutoff kwarg is working
molecule2 = toolkit_wrapper.from_smiles(smiles)
molecule2.generate_conformers(
rms_cutoff=0.1 * unit.angstrom,
n_conformers=100,
toolkit_registry=toolkit_wrapper,
)
assert molecule2.n_conformers > molecule.n_conformers
# Ensure n_conformers kwarg is working
molecule2 = toolkit_wrapper.from_smiles(smiles)
molecule2.generate_conformers(
rms_cutoff=0.1 * unit.angstrom,
n_conformers=10,
toolkit_registry=toolkit_wrapper,
)
assert molecule2.n_conformers == 10
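    # Note: MMFF94 partial charges are built from formal charges plus bond-charge
    # increments on the molecular graph, so, unlike the AM1-based methods tested for
    # AmberTools below, they should not depend on which conformer is used.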
@pytest.mark.parametrize("partial_charge_method", ["mmff94"])
def test_assign_partial_charges_neutral(self, partial_charge_method):
"""Test RDKitToolkitWrapper assign_partial_charges()"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper])
# TODO: create_ethanol should be replaced by a function scope fixture.
molecule = create_ethanol()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-5 < charge_sum.value_in_unit(unit.elementary_charge) < 1.0e-5
@pytest.mark.parametrize("partial_charge_method", ["mmff94"])
def test_assign_partial_charges_net_charge(self, partial_charge_method):
"""
Test RDKitToolkitWrapper assign_partial_charges() on a molecule with net charge.
"""
from openff.toolkit.tests.test_forcefield import create_acetate
toolkit_registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper])
# TODO: create_acetate should be replaced by a function scope fixture.
molecule = create_acetate()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-5 < charge_sum.value_in_unit(unit.elementary_charge) + 1.0 < 1.0e-5
def test_assign_partial_charges_bad_charge_method(self):
"""Test RDKitToolkitWrapper assign_partial_charges() for a nonexistent charge method"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper])
molecule = create_ethanol()
# Molecule.assign_partial_charges calls the ToolkitRegistry with raise_exception_types = [],
# which means it will only ever return ValueError
with pytest.raises(
ValueError, match="is not available from RDKitToolkitWrapper"
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="NotARealChargeMethod",
)
# ToolkitWrappers raise a specific exception class, so we test that here
with pytest.raises(
ChargeMethodUnavailableError,
match="is not available from RDKitToolkitWrapper",
):
RDTKW = RDKitToolkitWrapper()
RDTKW.assign_partial_charges(
molecule=molecule, partial_charge_method="NotARealChargeMethod"
)
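    # The ELF helpers below prune "problematic" conformers before energy ranking; in
    # practice this mainly means trans carboxylic-acid (COOH) conformers, which are
    # discarded so that they do not skew the conformers chosen for charge generation.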
def test_elf_is_problematic_conformer_acid(
self, formic_acid_molecule, formic_acid_conformers
):
problematic, reason = RDKitToolkitWrapper._elf_is_problematic_conformer(
formic_acid_molecule, formic_acid_conformers["cis"]
)
assert not problematic
assert reason is None
problematic, reason = RDKitToolkitWrapper._elf_is_problematic_conformer(
formic_acid_molecule, formic_acid_conformers["trans"]
)
assert problematic
assert reason is not None
def test_elf_prune_problematic_conformers_acid(
self, formic_acid_molecule, formic_acid_conformers
):
formic_acid_molecule._conformers = [*formic_acid_conformers.values()]
pruned_conformers = RDKitToolkitWrapper._elf_prune_problematic_conformers(
formic_acid_molecule
)
assert len(pruned_conformers) == 1
assert np.allclose(
formic_acid_conformers["cis"].value_in_unit(unit.angstrom),
pruned_conformers[0].value_in_unit(unit.angstrom),
)
def test_elf_compute_electrostatic_energy(self, formic_acid_molecule: Molecule):
"""Test the computation of the ELF electrostatic energy function."""
# Set some partial charges and a dummy conformer with values which make
# computing the expected energy by hand easier.
formic_acid_molecule.partial_charges = (
np.ones(formic_acid_molecule.n_atoms) * 1.0 * unit.elementary_charge
)
formic_acid_molecule.partial_charges[0] *= 2.0
formic_acid_molecule.partial_charges[4] *= 3.0
conformer = np.array(
[
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
]
)
# Compute the conformers electrostatic energy.
computed_energy = RDKitToolkitWrapper._elf_compute_electrostatic_energy(
formic_acid_molecule, conformer * unit.angstrom
)
# q_O1 * q_H2 / d_O1,H2 + q_H1 * q_H2 / d_H1,H2
expected_energy = 2.0 * 3.0 / np.sqrt(2.0) + 1.0 * 3.0 / 2.0
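        # (These are the only two 1-4 pairs in formic acid - the carbonyl O with the
        # hydroxyl H, and the C-H hydrogen with the hydroxyl H - which suggests that only
        # interactions between atoms separated by three or more bonds contribute.)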
assert np.isclose(computed_energy, expected_energy)
def test_elf_compute_rms_matrix(self, formic_acid_molecule: Molecule):
"""Test the computation of the ELF conformer RMS matrix."""
formic_acid_molecule.add_conformer(np.random.random((5, 3)) * unit.angstrom)
formic_acid_molecule.add_conformer(np.random.random((5, 3)) * unit.angstrom)
rms_matrix = RDKitToolkitWrapper._elf_compute_rms_matrix(formic_acid_molecule)
assert rms_matrix.shape == (2, 2)
assert np.isclose(rms_matrix[0, 0], 0.0)
assert np.isclose(rms_matrix[1, 1], 0.0)
assert np.isclose(rms_matrix[0, 1], rms_matrix[1, 0])
assert not np.isclose(rms_matrix[0, 1], 0.0)
def test_elf_compute_rms_matrix_symmetry(self):
"""Test the computation of the ELF conformer RMS matrix for matrices which
contain symmetry."""
# Create a molecule which can have two different automorphs.
n_methyl_aniline: Molecule = Molecule.from_smiles("CNc1ccccc1")
n_methyl_aniline.generate_conformers(n_conformers=1)
        # Add a second conformer with the benzene ring flipped 180 degrees
original_conformer = n_methyl_aniline.conformers[0].value_in_unit(unit.angstrom)
ring_atoms = RDKitToolkitWrapper().find_smarts_matches(
n_methyl_aniline,
"[#6]-[#7](-[#6]1:[#6:1](-[#1:2]):[#6:3](-[#1:4]):[#6]:[#6:6](-[#1:5]):[#6:8](-[#1:7])1)",
)[0]
flipped_conformer = np.copy(original_conformer)
for i in range(8):
flipped_conformer[ring_atoms[i], :] = original_conformer[ring_atoms[7 - i]]
n_methyl_aniline.add_conformer(flipped_conformer * unit.angstrom)
# Compute the RMS matrix.
rms_matrix = RDKitToolkitWrapper._elf_compute_rms_matrix(n_methyl_aniline)
assert rms_matrix.shape == (2, 2)
assert np.allclose(rms_matrix, 0.0, atol=1e-7)
@pytest.mark.parametrize(
"expected_conformer_map, rms_tolerance",
[({0: 0, 1: 2}, 0.001 * unit.angstrom), ({0: 0}, 100.0 * unit.angstrom)],
)
def test_elf_select_diverse_conformers(
self,
formic_acid_molecule: Molecule,
expected_conformer_map: Dict[int, int],
rms_tolerance: unit.Quantity,
):
"""Test the greedy selection of 'diverse' ELF conformers."""
formic_acid_molecule.add_conformer(np.random.random((5, 3)) * unit.angstrom)
formic_acid_molecule.add_conformer(formic_acid_molecule.conformers[0] * 1.1)
formic_acid_molecule.add_conformer(formic_acid_molecule.conformers[0] * 1.2)
conformers = RDKitToolkitWrapper._elf_select_diverse_conformers(
formic_acid_molecule, formic_acid_molecule.conformers, 2, rms_tolerance
)
assert len(conformers) == len(expected_conformer_map)
for elf_index, original_index in expected_conformer_map.items():
assert np.allclose(
conformers[elf_index].value_in_unit(unit.angstrom),
formic_acid_molecule.conformers[original_index].value_in_unit(
unit.angstrom
),
)
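    # Rough outline of the ELF10 selection exercised below, pieced together from the
    # helpers tested above: prune problematic conformers (e.g. trans-COOH), rank the
    # remainder by intramolecular electrostatic energy, keep a low-energy subset, then
    # greedily pick up to 10 mutually diverse conformers using the RMS matrix.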
def test_apply_elf_conformer_selection(self):
"""Test applying the ELF10 method."""
toolkit = RDKitToolkitWrapper()
molecule = Molecule.from_file(
get_data_file_path(os.path.join("molecules", "z_3_hydroxy_propenal.sdf")),
"SDF",
)
# Test that the simple case of no conformers does not yield an exception.
toolkit.apply_elf_conformer_selection(molecule)
initial_conformers = [
# Add a conformer with an internal H-bond.
np.array(
[
[0.5477, 0.3297, -0.0621],
[-0.1168, -0.7881, 0.2329],
[-1.4803, -0.8771, 0.1667],
[-0.2158, 1.5206, -0.4772],
[-1.4382, 1.5111, -0.5580],
[1.6274, 0.3962, -0.0089],
[0.3388, -1.7170, 0.5467],
[-1.8612, -0.0347, -0.1160],
[0.3747, 2.4222, -0.7115],
]
)
* unit.angstrom,
# Add a conformer without an internal H-bond.
np.array(
[
[0.5477, 0.3297, -0.0621],
[-0.1168, -0.7881, 0.2329],
[-1.4803, -0.8771, 0.1667],
[-0.2158, 1.5206, -0.4772],
[0.3353, 2.5772, -0.7614],
[1.6274, 0.3962, -0.0089],
[0.3388, -1.7170, 0.5467],
[-1.7743, -1.7634, 0.4166],
[-1.3122, 1.4082, -0.5180],
]
)
* unit.angstrom,
]
molecule._conformers = [*initial_conformers]
# Apply ELF10
toolkit.apply_elf_conformer_selection(molecule)
elf10_conformers = molecule.conformers
assert len(elf10_conformers) == 1
assert np.allclose(
elf10_conformers[0].value_in_unit(unit.angstrom),
initial_conformers[1].value_in_unit(unit.angstrom),
)
def test_apply_elf_conformer_selection_acid(
self, formic_acid_molecule, formic_acid_conformers, caplog
):
"""Test applying the ELF10 method."""
toolkit = RDKitToolkitWrapper()
# Add the conformers to the molecule and apply ELF.
formic_acid_molecule._conformers = [
formic_acid_conformers["trans"],
formic_acid_conformers["cis"],
]
        # Only the CIS conformer should remain after pruning, and a warning should be
        # raised to explain why the other conformer was discarded.
with caplog.at_level(logging.WARNING):
toolkit.apply_elf_conformer_selection(formic_acid_molecule)
assert formic_acid_molecule.n_conformers == 1
assert "Discarding conformer 0" in caplog.text
assert "Molecules which contain COOH functional groups in a" in caplog.text
assert np.allclose(
formic_acid_molecule.conformers[0].value_in_unit(unit.angstrom),
formic_acid_conformers["cis"].value_in_unit(unit.angstrom),
)
# Check that an exception is raised if no conformers remain after removing the
# trans conformer.
formic_acid_molecule._conformers = [formic_acid_conformers["trans"]]
with pytest.raises(ValueError) as error_info:
toolkit.apply_elf_conformer_selection(formic_acid_molecule)
assert (
"There were no conformers to select from after discarding conformers"
in str(error_info.value)
)
def test_find_rotatable_bonds(self):
"""Test finding rotatable bonds while ignoring some groups"""
# test a simple molecule
ethanol = create_ethanol()
bonds = ethanol.find_rotatable_bonds()
assert len(bonds) == 2
for bond in bonds:
assert ethanol.atoms[bond.atom1_index].atomic_number != 1
assert ethanol.atoms[bond.atom2_index].atomic_number != 1
# now ignore the C-O bond, forwards
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#6:1]-[#8:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 6
# now ignore the O-C bond, backwards
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#8:1]-[#6:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 6
# now ignore the C-C bond
bonds = ethanol.find_rotatable_bonds(ignore_functional_groups="[#6:1]-[#6:2]")
assert len(bonds) == 1
assert ethanol.atoms[bonds[0].atom1_index].atomic_number == 6
assert ethanol.atoms[bonds[0].atom2_index].atomic_number == 8
# ignore a list of searches, forward
bonds = ethanol.find_rotatable_bonds(
ignore_functional_groups=["[#6:1]-[#8:2]", "[#6:1]-[#6:2]"]
)
assert bonds == []
# ignore a list of searches, backwards
bonds = ethanol.find_rotatable_bonds(
ignore_functional_groups=["[#6:1]-[#6:2]", "[#8:1]-[#6:2]"]
)
assert bonds == []
# test molecules that should have no rotatable bonds
cyclohexane = create_cyclohexane()
bonds = cyclohexane.find_rotatable_bonds()
assert bonds == []
methane = Molecule.from_smiles("C")
bonds = methane.find_rotatable_bonds()
assert bonds == []
ethene = Molecule.from_smiles("C=C")
bonds = ethene.find_rotatable_bonds()
assert bonds == []
terminal_forwards = "[*]~[*:1]-[X2H1,X3H2,X4H3:2]-[#1]"
terminal_backwards = "[#1]-[X2H1,X3H2,X4H3:1]-[*:2]~[*]"
# test removing terminal rotors
toluene = Molecule.from_file(get_data_file_path("molecules/toluene.sdf"))
bonds = toluene.find_rotatable_bonds()
assert len(bonds) == 1
assert toluene.atoms[bonds[0].atom1_index].atomic_number == 6
assert toluene.atoms[bonds[0].atom2_index].atomic_number == 6
# find terminal bonds forward
bonds = toluene.find_rotatable_bonds(ignore_functional_groups=terminal_forwards)
assert bonds == []
# find terminal bonds backwards
bonds = toluene.find_rotatable_bonds(
ignore_functional_groups=terminal_backwards
)
assert bonds == []
def test_to_rdkit_losing_aromaticity_(self):
# test the example given in issue #513
# <https://github.com/openforcefield/openff-toolkit/issues/513>
smiles = "[H]c1c(c(c(c(c1OC2=C(C(=C(N3C2=C(C(=C3[H])C#N)[H])[H])F)[H])OC([H])([H])C([H])([H])N4C(=C(C(=O)N(C4=O)[H])[H])[H])[H])F)[H]"
mol = Molecule.from_smiles(smiles)
rdmol = mol.to_rdkit()
# now make sure the aromaticity matches for each atom
for (offatom, rdatom) in zip(mol.atoms, rdmol.GetAtoms()):
assert offatom.is_aromatic is rdatom.GetIsAromatic()
@pytest.mark.slow
def test_substructure_search_on_large_molecule(self):
"""Test RDKitToolkitWrapper substructure search when a large number hits are found"""
tk = RDKitToolkitWrapper()
smiles = "C" * 3000
molecule = tk.from_smiles(smiles)
query = "[C:1]~[C:2]"
ret = molecule.chemical_environment_matches(query, toolkit_registry=tk)
assert len(ret) == 5998
assert len(ret[0]) == 2
    # TODO: Add test for higher bond orders
# TODO: Add test for aromaticity
# TODO: Add test and molecule functionality for isotopes
# TODO: Add read tests for MOL/SDF, SMI
    # TODO: Add read tests for multi-SMI files
# TODO: Add read tests for both files and file-like objects
# TODO: Add read/write tests for gzipped files
# TODO: Add write tests for all formats
@requires_ambertools
@requires_rdkit
class TestAmberToolsToolkitWrapper:
"""Test the AmberToolsToolkitWrapper"""
def test_assign_partial_charges_am1bcc(self):
"""Test AmberToolsToolkitWrapper assign_partial_charges() with am1bcc"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
molecule.assign_partial_charges(
partial_charge_method="am1bcc", toolkit_registry=toolkit_registry
)
charge_sum = 0 * unit.elementary_charge
abs_charge_sum = 0 * unit.elementary_charge
for pc in molecule._partial_charges:
charge_sum += pc
abs_charge_sum += abs(pc)
assert abs(charge_sum) < 0.001 * unit.elementary_charge
assert abs_charge_sum > 0.25 * unit.elementary_charge
def test_assign_partial_charges_am1bcc_net_charge(self):
"""Test AmberToolsToolkitWrapper assign_partial_charges() on a molecule with a net -1 charge"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_acetate()
molecule.assign_partial_charges(
partial_charge_method="am1bcc", toolkit_registry=toolkit_registry
)
charge_sum = 0 * unit.elementary_charge
for pc in molecule._partial_charges:
charge_sum += pc
assert (
-0.99 * unit.elementary_charge > charge_sum > -1.01 * unit.elementary_charge
)
def test_assign_partial_charges_am1bcc_wrong_n_confs(self):
"""
Test AmberToolsToolkitWrapper assign_partial_charges() with am1bcc when requesting to use an incorrect number of
conformers
"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=2, rms_cutoff=0.01 * unit.angstrom)
# Try passing in the incorrect number of confs, but without specifying strict_n_conformers,
# which should produce a warning
with pytest.warns(
IncorrectNumConformersWarning,
match="has 2 conformers, but charge method 'am1bcc' expects exactly 1.",
):
molecule.assign_partial_charges(
partial_charge_method="am1bcc",
toolkit_registry=toolkit_registry,
use_conformers=molecule.conformers,
strict_n_conformers=False,
)
# Try again, with strict_n_confs as true, but not including use_confs, so the
# recommended number of confs will be generated
molecule.assign_partial_charges(
partial_charge_method="am1bcc",
toolkit_registry=toolkit_registry,
strict_n_conformers=True,
)
        # Test calling the ToolkitWrapper _indirectly_, through the Molecule API,
# which should raise the first error encountered
with pytest.raises(
ValueError,
match=f"has 2 conformers, but charge method 'am1bcc' "
f"expects exactly 1.",
):
molecule.assign_partial_charges(
partial_charge_method="am1bcc",
toolkit_registry=toolkit_registry,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
        # Test calling the ToolkitWrapper _indirectly_, through a ToolkitRegistry,
# specifying raise_exception_types=[]
# which should aggregate any exceptions and bundle all of the messages
# in a failed task together in a single ValueError.
with pytest.raises(
ValueError,
match=f"has 2 conformers, but charge method 'am1bcc' "
f"expects exactly 1.",
):
toolkit_registry.call(
"assign_partial_charges",
partial_charge_method="am1bcc",
molecule=molecule,
use_conformers=molecule.conformers,
strict_n_conformers=True,
raise_exception_types=[],
)
# Test calling the ToolkitWrapper _directly_, passing in the incorrect number of
# confs, and specify strict_n_conformers, which should produce an IncorrectNumConformersError
with pytest.raises(
IncorrectNumConformersError,
match=f"has 2 conformers, but charge method 'am1bcc' "
f"expects exactly 1.",
):
ATTKW = AmberToolsToolkitWrapper()
ATTKW.assign_partial_charges(
partial_charge_method="am1bcc",
molecule=molecule,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
@pytest.mark.parametrize(
"partial_charge_method", ["am1bcc", "am1-mulliken", "gasteiger"]
)
def test_assign_partial_charges_neutral(self, partial_charge_method):
"""Test AmberToolsToolkitWrapper assign_partial_charges()"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-5 < charge_sum.value_in_unit(unit.elementary_charge) < 1.0e-5
@pytest.mark.xfail(strict=False)
@pytest.mark.parametrize("partial_charge_method", ["am1bcc", "am1-mulliken"])
def test_assign_partial_charges_conformer_dependence(self, partial_charge_method):
"""Test AmberToolsToolkitWrapper assign_partial_charges()'s use_conformers kwarg
to ensure charges are really conformer dependent. Skip Gasteiger because it isn't
conformer dependent."""
import copy
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=1)
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
)
pcs1 = copy.deepcopy(molecule.partial_charges)
# This test case needs a pretty extreme coordinate change since ambertools only
# stores partial charges to 1e-3
molecule._conformers[0][0][0] += 3.0 * unit.angstrom
molecule._conformers[0][1][1] += 3.0 * unit.angstrom
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
)
for pc1, pc2 in zip(pcs1, molecule.partial_charges):
assert abs(pc1 - pc2) > 1.0e-3 * unit.elementary_charge
@pytest.mark.parametrize(
"partial_charge_method", ["am1bcc", "am1-mulliken", "gasteiger"]
)
def test_assign_partial_charges_net_charge(self, partial_charge_method):
"""
Test AmberToolsToolkitWrapper assign_partial_charges().
"""
from openff.toolkit.tests.test_forcefield import create_acetate
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_acetate()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.01 < charge_sum.value_in_unit(unit.elementary_charge) < -0.99
def test_assign_partial_charges_bad_charge_method(self):
"""Test AmberToolsToolkitWrapper assign_partial_charges() for a nonexistent charge method"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
# For now, ToolkitRegistries lose track of what exception type
# was thrown inside them, so we just check for a ValueError here
with pytest.raises(
ValueError, match="is not available from AmberToolsToolkitWrapper"
) as excinfo:
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="NotARealChargeMethod",
)
# ToolkitWrappers raise a specific exception class, so we test that here
with pytest.raises(
ChargeMethodUnavailableError,
match="is not available from AmberToolsToolkitWrapper",
) as excinfo:
ATTKW = AmberToolsToolkitWrapper()
ATTKW.assign_partial_charges(
molecule=molecule, partial_charge_method="NotARealChargeMethod"
)
@pytest.mark.parametrize(
"partial_charge_method,expected_n_confs",
[("am1bcc", 1), ("am1-mulliken", 1), ("gasteiger", 0)],
)
def test_assign_partial_charges_wrong_n_confs(
self, partial_charge_method, expected_n_confs
):
"""
Test AmberToolsToolkitWrapper assign_partial_charges() when requesting to use an incorrect number of
conformers
"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=2, rms_cutoff=0.01 * unit.angstrom)
# Try passing in the incorrect number of confs, but without specifying strict_n_conformers,
# which should produce a warning
with pytest.warns(
IncorrectNumConformersWarning,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=False,
)
# Try again, with strict_n_confs as true, but not including use_confs, so the
# recommended number of confs will be generated
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
strict_n_conformers=True,
)
        # Test calling the ToolkitWrapper _indirectly_, through the Molecule API
# which should aggregate any exceptions and bundle all of the messages
# in a failed task together in a single ValueError.
with pytest.raises(
ValueError,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
# Test calling the ToolkitWrapper _directly_, passing in the incorrect number of
# confs, and specify strict_n_conformers, which should produce an IncorrectNumConformersError
with pytest.raises(
IncorrectNumConformersError,
match=f"has 2 conformers, but charge method '{partial_charge_method}' "
f"expects exactly {expected_n_confs}.",
):
ATTKW = AmberToolsToolkitWrapper()
ATTKW.assign_partial_charges(
molecule=molecule,
partial_charge_method=partial_charge_method,
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
@pytest.mark.parametrize("bond_order_model", ["am1-wiberg"])
@pytest.mark.parametrize(
"smiles",
[
"[H]C([H])([H])C([H])([H])[H]",
"[H]C([H])([H])[N+]([H])([H])[H]",
r"C\C(F)=C(/F)C[C@@](C)(Cl)Br",
],
)
def test_assign_fractional_bond_orders(self, bond_order_model, smiles):
"""Test AmbetToolsToolkitWrapper assign_fractional_bond_orders()"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = toolkit_registry.call("from_smiles", smiles)
molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_registry, bond_order_model=bond_order_model
)
# TODO: Add test for equivalent Wiberg orders for equivalent bonds
# Sanity check single bonds.
assert all(
0.75 < bond.fractional_bond_order < 1.25
for bond in molecule.bonds
if bond.bond_order == 1
)
# Sanity check double bonds.
assert all(
1.75 < bond.fractional_bond_order < 2.25
for bond in molecule.bonds
if bond.bond_order == 2
)
def test_assign_fractional_bond_orders_conformer_dependence(self):
"""
        Test that AmberToolsToolkitWrapper assign_fractional_bond_orders() provides different results when using
different conformers
"""
toolkit_wrapper = ToolkitRegistry(
[RDKitToolkitWrapper, AmberToolsToolkitWrapper]
)
# Get the WBOs using one conformer
molecule = create_ethanol()
molecule.generate_conformers(toolkit_registry=toolkit_wrapper)
molecule.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
use_conformers=molecule.conformers,
bond_order_model="am1-wiberg",
)
# Do the same again, but change the conformer to yield a different result
molecule_diff_coords = create_ethanol()
molecule_diff_coords.generate_conformers(toolkit_registry=toolkit_wrapper)
molecule_diff_coords._conformers[0][0][0] = (
molecule_diff_coords._conformers[0][0][0] + 1.0 * unit.angstrom
)
molecule_diff_coords._conformers[0][1][0] = (
molecule_diff_coords._conformers[0][1][0] - 1.0 * unit.angstrom
)
molecule_diff_coords._conformers[0][2][0] = (
molecule_diff_coords._conformers[0][2][0] + 1.0 * unit.angstrom
)
molecule_diff_coords.assign_fractional_bond_orders(
toolkit_registry=toolkit_wrapper,
use_conformers=molecule_diff_coords.conformers,
bond_order_model="am1-wiberg",
)
for bond1, bond2 in zip(molecule.bonds, molecule_diff_coords.bonds):
assert abs(bond1.fractional_bond_order - bond2.fractional_bond_order) > 1e-3
@pytest.mark.parametrize("bond_order_model", ["am1-wiberg"])
def test_assign_fractional_bond_orders_neutral_charge_mol(self, bond_order_model):
"""Test AmberToolsToolkitWrapper assign_fractional_bond_orders() for neutral and charged molecule.
Also tests using existing conformers"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
# Reading neutral molecule from file
filename = get_data_file_path("molecules/CID20742535_neutral.sdf")
molecule1 = Molecule.from_file(filename)
# Reading negative molecule from file
filename = get_data_file_path("molecules/CID20742535_anion.sdf")
molecule2 = Molecule.from_file(filename)
# Checking that only one additional bond is present in the neutral molecule
assert len(molecule1.bonds) == len(molecule2.bonds) + 1
molecule1.assign_fractional_bond_orders(
toolkit_registry=toolkit_registry,
bond_order_model=bond_order_model,
use_conformers=molecule1.conformers,
)
for i in molecule1.bonds:
if i.is_aromatic:
# Checking aromatic bonds
assert 1.05 < i.fractional_bond_order < 1.65
elif i.atom1.atomic_number == 1 or i.atom2.atomic_number == 1:
                # Checking that bond orders of C-H or O-H bonds are around 1
assert 0.85 < i.fractional_bond_order < 1.05
elif i.atom1.atomic_number == 8 or i.atom2.atomic_number == 8:
# Checking C-O single bond
wbo_C_O_neutral = i.fractional_bond_order
assert 1.0 < wbo_C_O_neutral < 1.5
else:
# Should be C-C single bond
assert (i.atom1_index == 4 and i.atom2_index == 6) or (
i.atom1_index == 6 and i.atom2_index == 4
)
wbo_C_C_neutral = i.fractional_bond_order
assert 1.0 < wbo_C_C_neutral < 1.3
molecule2.assign_fractional_bond_orders(
toolkit_registry=toolkit_registry,
bond_order_model=bond_order_model,
use_conformers=molecule2.conformers,
)
for i in molecule2.bonds:
if i.is_aromatic:
# Checking aromatic bonds
assert 1.05 < i.fractional_bond_order < 1.65
elif i.atom1.atomic_number == 1 or i.atom2.atomic_number == 1:
                # Checking that bond orders of C-H or O-H bonds are around 1
assert 0.85 < i.fractional_bond_order < 1.05
elif i.atom1.atomic_number == 8 or i.atom2.atomic_number == 8:
# Checking C-O single bond
wbo_C_O_anion = i.fractional_bond_order
assert 1.3 < wbo_C_O_anion < 1.8
else:
# Should be C-C single bond
assert (i.atom1_index == 4 and i.atom2_index == 6) or (
i.atom1_index == 6 and i.atom2_index == 4
)
wbo_C_C_anion = i.fractional_bond_order
assert 1.0 < wbo_C_C_anion < 1.3
# Wiberg bond order of C-C single bond is higher in the anion
assert wbo_C_C_anion > wbo_C_C_neutral
# Wiberg bond order of C-O bond is higher in the anion
assert wbo_C_O_anion > wbo_C_O_neutral
def test_assign_fractional_bond_orders_invalid_method(self):
"""
Test that AmberToolsToolkitWrapper.assign_fractional_bond_orders() raises the
correct error if an invalid charge model is provided
"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
molecule = toolkit_registry.call("from_smiles", "C")
expected_error = (
"Bond order model 'not a real charge model' is not supported by "
"AmberToolsToolkitWrapper. Supported models are ([[]'am1-wiberg'[]])"
)
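        # (The "[[]" and "[]]" character classes match literal brackets, since the
        # expected_error string is interpreted as a regular expression by pytest.raises.)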
with pytest.raises(ValueError, match=expected_error):
molecule.assign_fractional_bond_orders(
toolkit_registry=AmberToolsToolkitWrapper(),
bond_order_model="not a real charge model",
)
@requires_openeye
def test_assign_fractional_bond_orders_openeye_installed(self):
"""Test that assign_fractional_bond_orders produces the same result
        with and without the OpenEye toolkit registered"""
mol = Molecule.from_smiles("CCO")
AmberToolsToolkitWrapper().assign_fractional_bond_orders(mol)
with_oe = [b.fractional_bond_order for b in mol.bonds]
GLOBAL_TOOLKIT_REGISTRY.deregister_toolkit(OpenEyeToolkitWrapper)
AmberToolsToolkitWrapper().assign_fractional_bond_orders(mol)
without_oe = [b.fractional_bond_order for b in mol.bonds]
GLOBAL_TOOLKIT_REGISTRY.register_toolkit(OpenEyeToolkitWrapper)
assert with_oe == without_oe
class TestBuiltInToolkitWrapper:
"""Test the BuiltInToolkitWrapper"""
@pytest.mark.parametrize("partial_charge_method", ["zeros", "formal_charge"])
def test_assign_partial_charges_neutral(self, partial_charge_method):
"""Test BuiltInToolkitWrapper assign_partial_charges()"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[BuiltInToolkitWrapper])
molecule = create_ethanol()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-6 < charge_sum.value_in_unit(unit.elementary_charge) < 1.0e-6
@pytest.mark.parametrize("partial_charge_method", ["formal_charge"])
def test_assign_partial_charges_net_charge(self, partial_charge_method):
"""
Test BuiltInToolkitWrapper assign_partial_charges(). Only formal_charge is tested, since zeros will not
sum up to the proper number
"""
from openff.toolkit.tests.test_forcefield import create_acetate
toolkit_registry = ToolkitRegistry(toolkit_precedence=[BuiltInToolkitWrapper])
molecule = create_acetate()
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method=partial_charge_method,
)
charge_sum = 0.0 * unit.elementary_charge
for pc in molecule.partial_charges:
charge_sum += pc
assert -1.0e-6 < charge_sum.value_in_unit(unit.elementary_charge) + 1.0 < 1.0e-6
def test_assign_partial_charges_bad_charge_method(self):
"""Test BuiltInToolkitWrapper assign_partial_charges() for a nonexistent charge method"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[BuiltInToolkitWrapper])
molecule = create_ethanol()
# For now, the Molecule API passes raise_exception_types=[] to ToolkitRegistry.call,
# which loses track of what exception type
# was thrown inside them, so we just check for a ValueError here
with pytest.raises(
ValueError, match="is not supported by the Built-in toolkit"
) as excinfo:
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="NotARealChargeMethod",
)
# ToolkitWrappers raise a specific exception class, so we test that here
with pytest.raises(
ChargeMethodUnavailableError,
match="is not supported by the Built-in toolkit",
) as excinfo:
BITKW = BuiltInToolkitWrapper()
BITKW.assign_partial_charges(
molecule=molecule, partial_charge_method="NotARealChargeMethod"
)
def test_assign_partial_charges_wrong_n_confs(self):
"""
Test BuiltInToolkitWrapper assign_partial_charges() when requesting to use an incorrect number of
conformers
"""
from openff.toolkit.tests.test_forcefield import create_ethanol
toolkit_registry = ToolkitRegistry(toolkit_precedence=[BuiltInToolkitWrapper])
molecule = create_ethanol()
molecule.generate_conformers(n_conformers=1)
with pytest.warns(
IncorrectNumConformersWarning,
match="has 1 conformers, but charge method 'zeros' expects exactly 0.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="zeros",
use_conformers=molecule.conformers,
strict_n_conformers=False,
)
# Specify strict_n_conformers=True, but not use_conformers, so a recommended number of
# conformers will be generated internally
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="zeros",
strict_n_conformers=True,
)
# For now, the Molecule API passes raise_exception_types=[] to ToolkitRegistry.call,
# which loses track of what exception type
# was thrown inside them, so we just check for a ValueError here
with pytest.raises(
ValueError,
match=f"has 1 conformers, but charge method 'zeros' " f"expects exactly 0.",
):
molecule.assign_partial_charges(
toolkit_registry=toolkit_registry,
partial_charge_method="zeros",
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
# Test calling the ToolkitWrapper _directly_, passing in the incorrect number of
# confs, and specify strict_n_conformers, which should produce an IncorrectNumConformersError
with pytest.raises(
IncorrectNumConformersError,
match=f"has 1 conformers, but charge method 'zeros' " f"expects exactly 0.",
):
BITKW = BuiltInToolkitWrapper()
BITKW.assign_partial_charges(
molecule=molecule,
partial_charge_method="zeros",
use_conformers=molecule.conformers,
strict_n_conformers=True,
)
class TestToolkitWrapper:
"""Test the ToolkitWrapper class"""
def test_check_n_conformers(self):
"""Ensure that _check_n_conformers is working properly"""
tkw = ToolkitWrapper()
mol = create_ethanol()
## Test molecule with no conformers
# Check with no min or max should pass
tkw._check_n_conformers(mol, "nocharge")
# Check with min=1 should warn
with pytest.warns(
IncorrectNumConformersWarning,
match="has 0 conformers, but charge method 'nocharge' expects at least 1",
):
tkw._check_n_conformers(mol, "nocharge", min_confs=1)
# Check with min=1 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 0 conformers, but charge method 'nocharge' expects at least 1",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=1, strict_n_conformers=True
)
# Check with min=1, max=1 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 0 conformers, but charge method 'nocharge' expects exactly 1",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=1, max_confs=1, strict_n_conformers=True
)
# Check with min=1, max=2 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 0 conformers, but charge method 'nocharge' expects between 1 and 2",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=1, max_confs=2, strict_n_conformers=True
)
# Check with max=1 should pass
tkw._check_n_conformers(mol, "nocharge", max_confs=1, strict_n_conformers=True)
## Test molecule with conformers
# Add some conformers
mol.generate_conformers(n_conformers=1)
for _ in range(9):
mol.add_conformer(mol.conformers[0])
# Check with no min or max should pass
tkw._check_n_conformers(mol, "nocharge")
## min_confs checks
# Check with min=1 should be fine
tkw._check_n_conformers(mol, "nocharge", min_confs=1)
# Check with min=10 should be fine
tkw._check_n_conformers(mol, "nocharge", min_confs=10)
# Check with min=11 should warn
with pytest.warns(
IncorrectNumConformersWarning,
match="has 10 conformers, but charge method 'nocharge' expects at least 11",
):
tkw._check_n_conformers(mol, "nocharge", min_confs=11)
# Check with min=11 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 10 conformers, but charge method 'nocharge' expects at least 11",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=11, strict_n_conformers=True
)
## max_confs checks
# Check with max=1 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 10 conformers, but charge method 'nocharge' expects at most 1",
):
tkw._check_n_conformers(
mol, "nocharge", max_confs=1, strict_n_conformers=True
)
# Check with max=10 and strict_n_conformers should be OK
tkw._check_n_conformers(mol, "nocharge", max_confs=10, strict_n_conformers=True)
# Check with max=11 and strict_n_conformers should be OK
tkw._check_n_conformers(mol, "nocharge", max_confs=11, strict_n_conformers=True)
## min_confs and max_confs checks
# Check with max=10 and min=10 and strict_n_conformers should be OK
tkw._check_n_conformers(
mol, "nocharge", min_confs=10, max_confs=10, strict_n_conformers=True
)
# Check with max=10 and min=9 and strict_n_conformers should be OK
tkw._check_n_conformers(
mol, "nocharge", min_confs=9, max_confs=10, strict_n_conformers=True
)
# Check with max=11 and min=10 and strict_n_conformers should be OK
tkw._check_n_conformers(
mol, "nocharge", min_confs=10, max_confs=11, strict_n_conformers=True
)
# Check with max=11 and min=9 and strict_n_conformers should be OK
tkw._check_n_conformers(
mol, "nocharge", min_confs=9, max_confs=11, strict_n_conformers=True
)
# Check with min=9 and max=9 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 10 conformers, but charge method 'nocharge' expects exactly 9",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=9, max_confs=9, strict_n_conformers=True
)
# Check with min=1 and max=9 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 10 conformers, but charge method 'nocharge' expects between 1 and 9",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=1, max_confs=9, strict_n_conformers=True
)
# Check with min=11 and max=12 and strict_n_conformers should raise an error
with pytest.raises(
IncorrectNumConformersError,
match="has 10 conformers, but charge method 'nocharge' expects between 11 and 12",
):
tkw._check_n_conformers(
mol, "nocharge", min_confs=11, max_confs=12, strict_n_conformers=True
)
class TestToolkitRegistry:
"""Test the ToolkitRegistry class"""
def test_register_empty_toolkit(self):
"""Ensure the default ToolkitRegistry init returns an empty registry"""
empty_registry = ToolkitRegistry()
assert empty_registry.registered_toolkits == []
assert empty_registry.registered_toolkit_versions == {}
@requires_openeye
@requires_rdkit
def test_register_imported_toolkit_wrappers(self):
"""Test that imported toolkits are registered, and in the expected order"""
# Ensure a specified order is respected
default_registry = ToolkitRegistry(
toolkit_precedence=[
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
],
_register_imported_toolkit_wrappers=True,
)
assert len(default_registry.registered_toolkits) == 4
expected_toolkits = [
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
]
for found, expected in zip(
default_registry.registered_toolkits, expected_toolkits
):
assert isinstance(found, expected)
# Test forcing a non-default order
non_default_registry = ToolkitRegistry(
toolkit_precedence=[BuiltInToolkitWrapper, RDKitToolkitWrapper],
_register_imported_toolkit_wrappers=True,
)
assert len(non_default_registry.registered_toolkits) == 2
expected_toolkits = [BuiltInToolkitWrapper, RDKitToolkitWrapper]
for found, expected in zip(
non_default_registry.registered_toolkits, expected_toolkits
):
assert isinstance(found, expected)
@requires_rdkit
def test_add_bad_toolkit(self):
registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper])
with pytest.raises(InvalidToolkitError):
registry.add_toolkit("rdkit as a string")
@requires_rdkit
@pytest.mark.skipif(
OpenEyeToolkitWrapper.is_available(),
reason="Skipping while OpenEye is available",
)
def test_register_unavailable_toolkit(self):
registry = ToolkitRegistry(toolkit_precedence=[RDKitToolkitWrapper])
with pytest.raises(ToolkitUnavailableException):
registry.register_toolkit(
toolkit_wrapper=OpenEyeToolkitWrapper, exception_if_unavailable=True
)
@pytest.mark.skipif(
RDKitToolkitWrapper.is_available(),
reason="Skipping while The RDKit is available",
)
def test_requires_toolkit_exception(self):
"""Test that ToolkitUnavailableException, not LicenseError, is raised
when RDKitToolkitWrapper is unavailable"""
registry = ToolkitRegistry()
with pytest.raises(ToolkitUnavailableException):
registry.register_toolkit(
toolkit_wrapper=RDKitToolkitWrapper, exception_if_unavailable=True
)
@requires_openeye
def test_register_openeye(self):
"""Test creation of toolkit registry with OpenEye toolkit"""
# Test registration of OpenEyeToolkitWrapper
toolkit_precedence = [OpenEyeToolkitWrapper]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
assert set(type(c) for c in registry.registered_toolkits) == set(
[OpenEyeToolkitWrapper]
)
# Test ToolkitRegistry.resolve()
assert (
registry.resolve("to_smiles") == registry.registered_toolkits[0].to_smiles
)
# Test ToolkitRegistry.call()
smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = registry.call("from_smiles", smiles)
smiles2 = registry.call("to_smiles", molecule)
assert smiles == smiles2
@requires_rdkit
def test_register_rdkit(self):
"""Test creation of toolkit registry with RDKit toolkit"""
# Test registration of RDKitToolkitWrapper
toolkit_precedence = [RDKitToolkitWrapper]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
assert set([type(c) for c in registry.registered_toolkits]) == set(
[RDKitToolkitWrapper]
)
# Test ToolkitRegistry.resolve()
assert (
registry.resolve("to_smiles") == registry.registered_toolkits[0].to_smiles
)
# Test ToolkitRegistry.call()
smiles = "[H][C]([H])([H])[C]([H])([H])[H]"
molecule = registry.call("from_smiles", smiles)
smiles2 = registry.call("to_smiles", molecule)
assert smiles == smiles2
@requires_ambertools
def test_register_ambertools(self):
"""Test creation of toolkit registry with AmberToolsToolkitWrapper"""
# Test registration of AmberToolsToolkitWrapper
toolkit_precedence = [AmberToolsToolkitWrapper]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
assert set([type(c) for c in registry.registered_toolkits]) == set(
[AmberToolsToolkitWrapper]
)
# Test ToolkitRegistry.resolve()
registry.resolve("assign_partial_charges")
assert (
registry.resolve("assign_partial_charges")
== registry.registered_toolkits[0].assign_partial_charges
)
# Test ToolkitRegistry.call()
molecule = RDKitToolkitWrapper().from_file(
file_path=get_data_file_path("molecules/ethanol.sdf"), file_format="SDF"
)[0]
registry.call("assign_partial_charges", molecule)
charges_from_registry = molecule.partial_charges
AmberToolsToolkitWrapper().assign_partial_charges(molecule)
charges_from_toolkit = molecule.partial_charges
assert np.allclose(charges_from_registry, charges_from_toolkit)
@requires_ambertools
def test_register_rdkit_and_ambertools(self):
"""Test creation of toolkit registry with RDKitToolkitWrapper and
AmberToolsToolkitWrapper and test ToolkitRegistry.resolve()"""
toolkit_precedence = [RDKitToolkitWrapper, AmberToolsToolkitWrapper]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
assert set([type(c) for c in registry.registered_toolkits]) == set(
[RDKitToolkitWrapper, AmberToolsToolkitWrapper]
)
# Resolve to a method that is supported by AmberToolsToolkitWrapper
# but _not_ RDKitToolkitWrapper. Note that this may change as more
        # functionality is added to the toolkit wrappers
assert (
registry.resolve("assign_fractional_bond_orders")
== registry.registered_toolkits[1].assign_fractional_bond_orders
)
# Resolve a method supported by both to the highest-priority wrapper
assert (
registry.resolve("from_smiles")
== registry.registered_toolkits[0].from_smiles
)
# Test ToolkitRegistry.call() for each toolkit
smiles = "[H][C]([H])([H])[C]([H])([H])[H]"
molecule = registry.call("from_smiles", smiles)
smiles2 = registry.call("to_smiles", molecule)
# Round-tripping SMILES is not 100% reliable, so just ensure it returned something
assert isinstance(smiles2, str)
# This method is available in AmberToolsToolkitWrapper, but not RDKitToolkitWrapper
registry.call("assign_partial_charges", molecule)
@requires_ambertools
def test_deregister_toolkit(self):
"""Test removing an instantiated toolkit from the registry"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
assert any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
toolkit_registry.deregister_toolkit(toolkit_registry._toolkits[-1])
assert any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert not any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
toolkit_registry.deregister_toolkit(toolkit_registry._toolkits[-1])
assert not any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert not any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
@requires_ambertools
def test_deregister_toolkit_by_class(self):
"""Test removing a toolkit from the registry by matching class types"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper, RDKitToolkitWrapper]
)
assert any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
toolkit_registry.deregister_toolkit(RDKitToolkitWrapper)
assert any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert not any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
toolkit_registry.deregister_toolkit(AmberToolsToolkitWrapper)
assert not any(
[
isinstance(tk, AmberToolsToolkitWrapper)
for tk in toolkit_registry._toolkits
]
)
assert not any(
[isinstance(tk, RDKitToolkitWrapper) for tk in toolkit_registry._toolkits]
)
@requires_ambertools
def test_deregister_toolkit_bad_inputs(self):
"""Test bad inputs to deregister_toolkit"""
toolkit_registry = ToolkitRegistry(
toolkit_precedence=[AmberToolsToolkitWrapper]
)
with pytest.raises(InvalidToolkitError):
toolkit_registry.deregister_toolkit("rdkit as a string")
# Attempt to deregister a toolkit that is not registered
with pytest.raises(ToolkitUnavailableException):
toolkit_registry.deregister_toolkit(RDKitToolkitWrapper)
def deregister_from_global_registry(self):
        # TODO: Update this, or move into a separate TestClass, pending GLOBAL_TOOLKIT_REGISTRY rework
# See issue #493
        # Whatever the first toolkit is, de-register it and verify it's de-registered
# Keep a copy of the original registry since this is a "global" variable accessible to other modules
from copy import deepcopy
global_registry_copy = deepcopy(GLOBAL_TOOLKIT_REGISTRY)
first_toolkit = type(GLOBAL_TOOLKIT_REGISTRY.registered_toolkits[0])
num_toolkits = len(GLOBAL_TOOLKIT_REGISTRY.registered_toolkits)
GLOBAL_TOOLKIT_REGISTRY.deregister_toolkit(first_toolkit)
assert first_toolkit not in [
type(tk) for tk in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits
]
assert len(GLOBAL_TOOLKIT_REGISTRY.registered_toolkits) == num_toolkits - 1
GLOBAL_TOOLKIT_REGISTRY = deepcopy(global_registry_copy)
def test_register_builtintoolkit(self):
"""Test creation of toolkit registry with Built-in toolkit"""
# Test registration of BuiltInToolkitWrapper
toolkit_precedence = [BuiltInToolkitWrapper]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
# registry.register_toolkit(BuiltInToolkitWrapper)
assert set([type(c) for c in registry.registered_toolkits]) == set(
[BuiltInToolkitWrapper]
)
# Test ToolkitRegistry.resolve()
assert (
registry.resolve("assign_partial_charges")
== registry.registered_toolkits[0].assign_partial_charges
)
@requires_rdkit
@requires_openeye
def test_toolkit_versions(self):
"""Test behavior of ToolkitRegistry.registered_toolkit_versions"""
toolkit_precedence = [
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
]
all_toolkits = ToolkitRegistry(toolkit_precedence=toolkit_precedence)
versions = all_toolkits.registered_toolkit_versions
import openeye
import rdkit
assert versions["OpenEye Toolkit"] == openeye.__version__
assert versions["The RDKit"] == rdkit.__version__
assert versions["AmberTools"].startswith(
"2"
) # TODO: Safer way of checking AmberTools version
assert versions["Built-in Toolkit"] is None
toolkit_precedence = [
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
]
no_openeye = ToolkitRegistry(toolkit_precedence=toolkit_precedence)
assert "OpenEye Toolkit" not in no_openeye.registered_toolkit_versions.keys()
@requires_ambertools
def test_call_raise_first_error(self):
"""Test to ensure proper behavior of raise_first_error kwarg to ToolkitRegistry.call"""
toolkit_precedence = [
BuiltInToolkitWrapper,
RDKitToolkitWrapper,
AmberToolsToolkitWrapper,
]
registry = ToolkitRegistry(
toolkit_precedence=toolkit_precedence,
)
mol = registry.call("from_smiles", "C")
# Specify that the ToolkitRegistry should raise the first ChargeMethodUnavailableError it encounters
with pytest.raises(
ChargeMethodUnavailableError,
match='"notarealchargemethod"" is not supported by the Built-in toolkit.',
):
registry.call(
"assign_partial_charges",
molecule=mol,
partial_charge_method="NotARealChargeMethod",
raise_exception_types=[ChargeMethodUnavailableError],
)
# Specify that the ToolkitRegistry should collect all the errors it encounters and
# ensure it raises a single ValueError when no ToolkitWrappers succeed
with pytest.raises(
ValueError,
match="partial_charge_method 'notarealchargemethod' is not available from AmberToolsToolkitWrapper",
):
registry.call(
"assign_partial_charges",
molecule=mol,
partial_charge_method="NotARealChargeMethod",
raise_exception_types=[],
)
| open-forcefield-group/openforcefield | openff/toolkit/tests/test_toolkits.py | Python | mit | 163,747 | 0.002449 |
#! /usr/bin/python3
import sys, os, time, tempfile
import pytest
import util_test
from util_test import CURR_DIR
from fixtures.vectors import UNITTEST_VECTOR
from fixtures.params import DEFAULT_PARAMS as DP
from lib import (config, util, api)
import counterpartyd
def setup_module():
counterpartyd.set_options(database_file=tempfile.gettempdir() + '/fixtures.unittest.db', testnet=True, **util_test.COUNTERPARTYD_OPTIONS)
util_test.restore_database(config.DATABASE, CURR_DIR + '/fixtures/scenarios/unittest_fixture.sql')
config.FIRST_MULTISIG_BLOCK_TESTNET = 1
# start RPC server
api_server = api.APIServer()
api_server.daemon = True
api_server.start()
for attempt in range(5000): # wait until server is ready.
if api_server.is_ready:
break
elif attempt == 4999:
raise Exception("Timeout: RPC server not ready after 5s")
else:
time.sleep(0.001)
def teardown_module(function):
util_test.remove_database_files(config.DATABASE)
@pytest.fixture
def counterpartyd_db(request):
db = util.connect_to_db()
cursor = db.cursor()
cursor.execute('''BEGIN''')
request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
return db
def test_vector(tx_name, method, inputs, outputs, error, records, counterpartyd_db):
if method == 'parse':
util_test.insert_transaction(inputs[0], counterpartyd_db)
inputs += (inputs[0]['data'][4:],) # message arg
util_test.check_ouputs(tx_name, method, inputs, outputs, error, records, counterpartyd_db)
| Ziftr/counterpartyd | test/unit_test.py | Python | mit | 1,568 | 0.007015 |
import uuid
import os
import shutil
import urlparse
import re
import hashlib
from lxml import html
from PIL import Image, ImageFile
from django.conf import settings
import views
ImageFile.MAXBLOCKS = 10000000
def match_or_none(string, rx):
"""
Tries to match a regular expression and returns an integer if it can.
Otherwise, returns None.
@param string: String to match against
@type string: basestring
@param rx: compiled regular expression
@return: number or None
@rtype: int/long or None
"""
if string is None:
return None
match = rx.search(string)
if match:
return int(match.groups()[0])
return None
width_rx = re.compile(r'width\s*:\s*(\d+)(px)?')
height_rx = re.compile(r'height\s*:\s*(\d+)(px)?')
def get_dimensions(img):
"""
Attempts to get the dimensions of an image from the img tag.
It first tries to grab it from the css styles and then falls back
to looking at the attributes.
@param img: Image tag.
@type img: etree._Element
@return: width and height of the image
@rtype: (int or None, int or None)
"""
styles = img.attrib.get('style')
width = match_or_none(styles, width_rx) or img.attrib.get('width')
if isinstance(width, basestring):
width = int(width)
height = match_or_none(styles, height_rx) or img.attrib.get('height')
if isinstance(height, basestring):
        height = int(height)
return width, height
def get_local_path(url):
"""
Converts a url to a local path
@param url: Url to convert
@type url: basestring
@return: Local path of the url
@rtype: basestring
"""
url = urlparse.unquote(url)
local_path = settings.STATIC_ROOT + os.path.normpath(url[len(settings.STATIC_URL):])
return local_path
# `buffer` is needed since hashlib apparently isn't unicode safe
hexhash = lambda s: hashlib.md5(buffer(s)).hexdigest()
def new_rendered_path(orig_path, width, height, ext=None):
"""
Builds a new rendered path based on the original path, width, and height.
It takes a hash of the original path to prevent users from accidentally
(or purposely) overwritting other's rendered thumbnails.
This isn't perfect: we are assuming that the original file's conents never
changes, which is the django default. We could make this function more
robust by hashing the file everytime we save but that has the obvious
disadvantage of having to hash the file everytime. YMMV.
@param orig_path: Path to the original image.
@type orig_path: "/path/to/file"
@param width: Desired width of the rendered image.
@type width: int or None
@param height: Desired height of the rendered image.
@type height: int or None
@param ext: Desired extension of the new image. If None, uses
the original extension.
@type ext: basestring or None
@return: Absolute path to where the rendered image should live.
@rtype: "/path/to/rendered/image"
"""
dirname = os.path.dirname(orig_path)
rendered_path = os.path.join(dirname, 'rendered')
if not os.path.exists(rendered_path):
os.mkdir(rendered_path)
hash_path = hexhash(orig_path)
if ext is None:
ext = os.path.splitext(os.path.basename(orig_path))[1]
if ext and ext[0] != u'.':
ext = u'.' + ext
name = '%s_%sx%s' % (hash_path, width, height)
return os.path.join(rendered_path, name) + ext
def is_rendered(path, width, height):
"""
Checks whether or not an image has been rendered to the given path
with the given dimensions
@param path: path to check
@type path: u"/path/to/image"
@param width: Desired width
@type width: int
@param height: Desired height
@type height: int
@return: Whether or not the image is correct
@rtype: bool
"""
if os.path.exists(path):
old_width, old_height = Image.open(path).size
return old_width == width and old_height == height
return False
def transcode_to_jpeg(image, path, width, height):
"""
Transcodes an image to JPEG.
@param image: Opened image to transcode to jpeg.
@type image: PIL.Image
@param path: Path to the opened image.
@type path: u"/path/to/image"
@param width: Desired width of the transcoded image.
@type width: int
@param height: Desired height of the transcoded image.
@type height: int
@return: Path to the new transcoded image.
@rtype: "/path/to/image"
"""
i_width, i_height = image.size
new_width = i_width if width is None else width
new_height = i_height if height is None else height
new_path = new_rendered_path(path, width, height, ext='jpg')
if is_rendered(new_path, new_width, new_height):
return new_path
new_image = image.resize((new_width, new_height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def re_render(path, width, height):
"""
Given an original image, width, and height, creates a thumbnailed image
of the exact dimensions given. We skip animated gifs because PIL can't
resize those automatically whereas browsers can contort them easily. We
also don't stretch images at all and return the original in that case.
@param path: Path to the original image
@type path: "/path/to/image"
@param width: Desired width
@type width: int or None
@param height: Desired height
@type height: int or None
@return: Path to the 'rendered' image.
@rtype: "/path/to/image"
"""
try:
image = Image.open(path)
except IOError:
# Probably doesn't exist or isn't an image
return path
# We have to call image.load first due to a PIL 1.1.7 bug
image.load()
if image.format == 'PNG' and getattr(settings, 'CKEDITOR_PNG_TO_JPEG', False):
pixels = reduce(lambda a,b: a*b, image.size)
# check that our entire alpha channel is set to full opaque
if image.mode == 'RGB' or image.split()[-1].histogram()[-1] == pixels:
return transcode_to_jpeg(image, path, width, height)
if image.size <= (width, height):
return path
if width is None and height is None:
return path
# We can't resize animated gifs
if image.format == 'GIF':
try:
image.seek(1)
return path
except EOFError:
# Static GIFs should throw an EOF on seek
pass
new_path = new_rendered_path(path, width, height)
if is_rendered(new_path, width, height):
return new_path
# Re-render the image, optimizing for filesize
new_image = image.resize((width, height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def get_html_tree(content):
return html.fragment_fromstring(content, create_parent='div')
def render_html_tree(tree):
return html.tostring(tree)[5:-6]
def resize_images(post_content):
"""
Goes through all images, resizing those that we know to be local to the
correct image size.
@param post_content: Raw html of the content to search for images with.
@type post_content: basestring containg HTML fragments
@return: Modified contents.
@rtype: basestring
"""
# Get tree
tree = get_html_tree(post_content)
# Get images
imgs = tree.xpath('//img[starts-with(@src, "%s")]' % settings.STATIC_URL)
for img in imgs:
orig_url = img.attrib['src']
orig_path = get_local_path(orig_url)
width, height = get_dimensions(img)
rendered_path = re_render(orig_path, width, height)
# If we haven't changed the image, move along.
if rendered_path == orig_path:
continue
# Flip to the rendered
img.attrib['data-original'] = orig_url
img.attrib['src'] = views.get_media_url(rendered_path)
    # Strip off the wrapping div tag
return render_html_tree(tree)
def swap_in_originals(content):
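    """Restore original image URLs from the data-original attributes set by resize_images()."""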
if 'data-original' not in content:
return content
tree = get_html_tree(content)
for img in tree.xpath('//img[@data-original]'):
img.attrib['src'] = img.attrib['data-original']
del img.attrib['data-original']
return render_html_tree(tree)
| ZG-Tennis/django-ckeditor | ckeditor/utils.py | Python | bsd-3-clause | 8,404 | 0.004522 |
from distutils.core import setup
setup(name='pycycle',
version='3.9.9',
packages=[
'pycycle',
'pycycle/thermo',
'pycycle/thermo/cea',
'pycycle/thermo/cea/thermo_data',
'pycycle/elements',
'pycycle/maps',
'pycycle/thermo/tabular'
],
install_requires=[
'openmdao>=3.5.0',
],
)
| JustinSGray/pyCycle | setup.py | Python | apache-2.0 | 386 | 0.005181 |
default_app_config = 'projectlight.apps.ProjectlightConfig'
| cuedpc/edpcmentoring | edpcmentoring/projectlight/__init__.py | Python | mit | 60 | 0 |
########################################################################
#
# File Name: TextElement.py
#
#
"""
Implementation of the XSLT Spec text stylesheet element.
WWW: http://4suite.com/4XSLT e-mail: support@4suite.com
Copyright (c) 1999-2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import EMPTY_NAMESPACE
import xml.dom.ext
import xml.dom.Element
from xml.xpath import CoreFunctions
from xml.xslt import XsltElement, XsltException, Error
from xml.dom import Node
class TextElement(XsltElement):
legalAttrs = ('disable-output-escaping',)
def __init__(self, doc, uri=xml.xslt.XSL_NAMESPACE, localName='text', prefix='xsl', baseUri=''):
XsltElement.__init__(self, doc, uri, localName, prefix, baseUri)
return
def setup(self):
self.__dict__['_disable_output_escaping'] = self.getAttributeNS(EMPTY_NAMESPACE, 'disable-output-escaping') == 'yes'
self.__dict__['_nss'] = xml.dom.ext.GetAllNs(self)
for child in self.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
raise XsltException(Error.ILLEGAL_TEXT_CHILD)
self.normalize()
return
def instantiate(self, context, processor):
if not self.firstChild:
return (context,)
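        # Temporarily switch to this element's in-scope namespaces; they are restored after writing.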
if context.processorNss != self._nss:
origState = context.copyNamespaces()
context.setNamespaces(self._nss)
else:
origState = None
value = self.firstChild and self.firstChild.data or ''
if self._disable_output_escaping:
processor.writers[-1].text(value, escapeOutput=0)
else:
processor.writers[-1].text(value)
origState and context.setNamespaces(origState)
return (context,)
def __getinitargs__(self):
return (None, self.namespaceURI, self.localName, self.prefix,
self.baseUri)
def __getstate__(self):
base_state = XsltElement.__getstate__(self)
new_state = (base_state, self._nss, self._disable_output_escaping)
return new_state
def __setstate__(self, state):
XsltElement.__setstate__(self, state[0])
self._nss = state[1]
self._disable_output_escaping = state[2]
return
| Pikecillo/genna | external/PyXML-0.8.4/xml/xslt/TextElement.py | Python | gpl-2.0 | 2,357 | 0.003818 |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import dbus
import dbus.glib
import hildon
import osso
# Replace this with your own gettext() functionality
import gpodder
_ = gpodder.gettext
class FremantleRotation(object):
"""thp's screen rotation for Maemo 5
Simply instantiate an object of this class and let it auto-rotate
your StackableWindows depending on the device orientation.
If you need to relayout a window, connect to its "configure-event"
signal and measure the ratio of width/height and relayout for that.
You can set the mode for rotation to AUTOMATIC (default), NEVER or
ALWAYS with the set_mode() method.
"""
AUTOMATIC, NEVER, ALWAYS = range(3)
# Human-readable captions for the above constants
MODE_CAPTIONS = (_('Automatic'), _('Landscape'), _('Portrait'))
# Privately-used constants
_PORTRAIT, _LANDSCAPE = ('portrait', 'landscape')
_ENABLE_ACCEL = 'req_accelerometer_enable'
_DISABLE_ACCEL = 'req_accelerometer_disable'
# Defined in mce/dbus-names.h
_MCE_SERVICE = 'com.nokia.mce'
_MCE_REQUEST_PATH = '/com/nokia/mce/request'
_MCE_REQUEST_IF = 'com.nokia.mce.request'
# sysfs device name for the keyboard slider switch
KBD_SLIDER = '/sys/devices/platform/gpio-switch/slide/state'
_KBD_OPEN = 'open'
_KBD_CLOSED = 'closed'
def __init__(self, app_name, main_window=None, version='1.0', mode=0):
"""Create a new rotation manager
app_name ... The name of your application (for osso.Context)
main_window ... The root window (optional, hildon.StackableWindow)
version ... The version of your application (optional, string)
mode ... Initial mode for this manager (default: AUTOMATIC)
"""
self._orientation = None
self._main_window = main_window
self._stack = hildon.WindowStack.get_default()
self._mode = -1
self._last_dbus_orientation = None
self._keyboard_state = self._get_keyboard_state()
app_id = '-'.join((app_name, self.__class__.__name__))
self._osso_context = osso.Context(app_id, version, False)
program = hildon.Program.get_instance()
program.connect('notify::is-topmost', self._on_topmost_changed)
system_bus = dbus.Bus.get_system()
system_bus.add_signal_receiver(self._on_orientation_signal, \
signal_name='sig_device_orientation_ind', \
dbus_interface='com.nokia.mce.signal', \
path='/com/nokia/mce/signal')
system_bus.add_signal_receiver(self._on_keyboard_signal, \
signal_name='Condition', \
dbus_interface='org.freedesktop.Hal.Device', \
path='/org/freedesktop/Hal/devices/platform_slide')
self.set_mode(mode)
def get_mode(self):
"""Get the currently-set rotation mode
This will return one of three values: AUTOMATIC, ALWAYS or NEVER.
"""
return self._mode
def set_mode(self, new_mode):
"""Set the rotation mode
You can set the rotation mode to AUTOMATIC (use hardware rotation
info), ALWAYS (force portrait) and NEVER (force landscape).
"""
if new_mode not in (self.AUTOMATIC, self.ALWAYS, self.NEVER):
raise ValueError('Unknown rotation mode')
if self._mode != new_mode:
if self._mode == self.AUTOMATIC:
# Remember the current "automatic" orientation for later
self._last_dbus_orientation = self._orientation
# Tell MCE that we don't need the accelerometer anymore
self._send_mce_request(self._DISABLE_ACCEL)
if new_mode == self.NEVER:
self._orientation_changed(self._LANDSCAPE)
elif new_mode == self.ALWAYS and \
self._keyboard_state != self._KBD_OPEN:
self._orientation_changed(self._PORTRAIT)
elif new_mode == self.AUTOMATIC:
# Restore the last-known "automatic" orientation
self._orientation_changed(self._last_dbus_orientation)
# Tell MCE that we need the accelerometer again
self._send_mce_request(self._ENABLE_ACCEL)
self._mode = new_mode
def _send_mce_request(self, request):
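        # Forward the request (e.g. accelerometer enable/disable) to the MCE daemon over the system D-Bus.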
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, \
self._MCE_REQUEST_PATH, \
self._MCE_REQUEST_IF, \
request, \
use_system_bus=True)
def _on_topmost_changed(self, program, property_spec):
# XXX: This seems to never get called on Fremantle(?)
if self._mode == self.AUTOMATIC:
if program.get_is_topmost():
self._send_mce_request(self._ENABLE_ACCEL)
else:
self._send_mce_request(self._DISABLE_ACCEL)
def _get_main_window(self):
if self._main_window:
# If we have gotten the main window as parameter, return it and
# don't try "harder" to find another window using the stack
return self._main_window
else:
# The main window is at the "bottom" of the window stack, and as
# the list we get with get_windows() is sorted "topmost first", we
# simply take the last item of the list to get our main window
windows = self._stack.get_windows()
if windows:
return windows[-1]
else:
return None
def _orientation_changed(self, orientation):
if self._orientation == orientation:
# Ignore repeated requests
return
flags = hildon.PORTRAIT_MODE_SUPPORT
if orientation == self._PORTRAIT:
flags |= hildon.PORTRAIT_MODE_REQUEST
window = self._get_main_window()
if window is not None:
hildon.hildon_gtk_window_set_portrait_flags(window, flags)
self._orientation = orientation
def _get_keyboard_state(self):
return open(self.KBD_SLIDER).read().strip()
def _keyboard_state_changed(self):
state = self._get_keyboard_state()
if state == self._KBD_OPEN:
self._orientation_changed(self._LANDSCAPE)
elif state == self._KBD_CLOSED:
if self._mode == self.AUTOMATIC:
self._orientation_changed(self._last_dbus_orientation)
elif self._mode == self.ALWAYS:
self._orientation_changed(self._PORTRAIT)
self._keyboard_state = state
def _on_keyboard_signal(self, condition, button_name):
if condition == 'ButtonPressed' and button_name == 'cover':
self._keyboard_state_changed()
def _on_orientation_signal(self, orientation, stand, face, x, y, z):
if orientation in (self._PORTRAIT, self._LANDSCAPE):
if self._mode == self.AUTOMATIC and \
self._keyboard_state != self._KBD_OPEN:
# Automatically set the rotation based on hardware orientation
self._orientation_changed(orientation)
# Save the current orientation for "automatic" mode later on
self._last_dbus_orientation = orientation
| timabell/gpodder | src/gpodder/gtkui/frmntl/portrait.py | Python | gpl-3.0 | 8,012 | 0.002122 |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixin classes to be shared between C and C++ compilers.
Without this we'll end up with awful diamond inheritance problems. The goal
of this is to have mixins, which are classes that are designed *not* to be
standalone; they only work through inheritance.
"""
import collections
import functools
import glob
import itertools
import os
import re
import subprocess
import typing as T
from pathlib import Path
from ... import arglist
from ... import mesonlib
from ... import mlog
from ...linkers import GnuLikeDynamicLinkerMixin, SolarisDynamicLinker, CompCertDynamicLinker
from ...mesonlib import LibType
from ...coredata import OptionKey
from .. import compilers
from ..compilers import CompileCheckMode
from .visualstudio import VisualStudioLikeCompiler
if T.TYPE_CHECKING:
from ...dependencies import Dependency
from ...environment import Environment
from ...compilers.compilers import Compiler
from ...programs import ExternalProgram
else:
    # This is a bit clever: for mypy we pretend that these mixins descend from
    # Compiler, so we get all of the methods and attributes defined for us, but
    # for runtime we make them descend from object (which all classes normally
    # do). This gives us DRYer type checking, with no runtime impact
Compiler = object
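# Matches linker arguments that name libraries: versioned shared objects
# (".so" with up to three numeric suffixes), "-l"/"-Wl,-l" flags, and static
# archives (".a"). Used below to decide where --start-group/--end-group go.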
GROUP_FLAGS = re.compile(r'''\.so (?:\.[0-9]+)? (?:\.[0-9]+)? (?:\.[0-9]+)?$ |
^(?:-Wl,)?-l |
\.a$''', re.X)
class CLikeCompilerArgs(arglist.CompilerArgs):
prepend_prefixes = ('-I', '-L')
dedup2_prefixes = ('-I', '-isystem', '-L', '-D', '-U')
# NOTE: not thorough. A list of potential corner cases can be found in
# https://github.com/mesonbuild/meson/pull/4593#pullrequestreview-182016038
dedup1_prefixes = ('-l', '-Wl,-l', '-Wl,--export-dynamic')
dedup1_suffixes = ('.lib', '.dll', '.so', '.dylib', '.a')
dedup1_args = ('-c', '-S', '-E', '-pipe', '-pthread')
def to_native(self, copy: bool = False) -> T.List[str]:
# This seems to be allowed, but could never work?
assert isinstance(self.compiler, compilers.Compiler), 'How did you get here'
# Check if we need to add --start/end-group for circular dependencies
# between static libraries, and for recursively searching for symbols
# needed by static libraries that are provided by object files or
# shared libraries.
self.flush_pre_post()
if copy:
new = self.copy()
else:
new = self
# This covers all ld.bfd, ld.gold, ld.gold, and xild on Linux, which
# all act like (or are) gnu ld
# TODO: this could probably be added to the DynamicLinker instead
if isinstance(self.compiler.linker, (GnuLikeDynamicLinkerMixin, SolarisDynamicLinker, CompCertDynamicLinker)):
group_start = -1
group_end = -1
for i, each in enumerate(new):
if not GROUP_FLAGS.search(each):
continue
group_end = i
if group_start < 0:
# First occurrence of a library
group_start = i
if group_start >= 0:
# Last occurrence of a library
new.insert(group_end + 1, '-Wl,--end-group')
new.insert(group_start, '-Wl,--start-group')
# Remove system/default include paths added with -isystem
default_dirs = self.compiler.get_default_include_dirs()
if default_dirs:
bad_idx_list = [] # type: T.List[int]
for i, each in enumerate(new):
if not each.startswith('-isystem'):
continue
# Remove the -isystem and the path if the path is a default path
if (each == '-isystem' and
i < (len(new) - 1) and
new[i + 1] in default_dirs):
bad_idx_list += [i, i + 1]
elif each.startswith('-isystem=') and each[9:] in default_dirs:
bad_idx_list += [i]
elif each[8:] in default_dirs:
bad_idx_list += [i]
for i in reversed(bad_idx_list):
new.pop(i)
return self.compiler.unix_args_to_native(new._container)
def __repr__(self) -> str:
self.flush_pre_post()
return f'CLikeCompilerArgs({self.compiler!r}, {self._container!r})'
class CLikeCompiler(Compiler):
"""Shared bits for the C and CPP Compilers."""
if T.TYPE_CHECKING:
warn_args = {} # type: T.Dict[str, T.List[str]]
# TODO: Replace this manual cache with functools.lru_cache
find_library_cache = {} # type: T.Dict[T.Tuple[T.Tuple[str, ...], str, T.Tuple[str, ...], str, LibType], T.Optional[T.List[str]]]
find_framework_cache = {} # type: T.Dict[T.Tuple[T.Tuple[str, ...], str, T.Tuple[str, ...], bool], T.Optional[T.List[str]]]
internal_libs = arglist.UNIXY_COMPILER_INTERNAL_LIBS
def __init__(self, exe_wrapper: T.Optional['ExternalProgram'] = None):
# If a child ObjC or CPP class has already set it, don't set it ourselves
self.can_compile_suffixes.add('h')
# If the exe wrapper was not found, pretend it wasn't set so that the
# sanity check is skipped and compiler checks use fallbacks.
if not exe_wrapper or not exe_wrapper.found() or not exe_wrapper.get_command():
self.exe_wrapper = None
else:
self.exe_wrapper = exe_wrapper
def compiler_args(self, args: T.Optional[T.Iterable[str]] = None) -> CLikeCompilerArgs:
# This is correct, mypy just doesn't understand co-operative inheritance
return CLikeCompilerArgs(self, args)
def needs_static_linker(self) -> bool:
return True # When compiling static libraries, so yes.
def get_always_args(self) -> T.List[str]:
'''
Args that are always-on for all C compilers other than MSVC
'''
return self.get_largefile_args()
def get_no_stdinc_args(self) -> T.List[str]:
return ['-nostdinc']
def get_no_stdlib_link_args(self) -> T.List[str]:
return ['-nostdlib']
def get_warn_args(self, level: str) -> T.List[str]:
# TODO: this should be an enum
return self.warn_args[level]
def get_no_warn_args(self) -> T.List[str]:
# Almost every compiler uses this for disabling warnings
return ['-w']
def get_depfile_suffix(self) -> str:
return 'd'
def get_exelist(self) -> T.List[str]:
return self.exelist.copy()
def get_preprocess_only_args(self) -> T.List[str]:
return ['-E', '-P']
def get_compile_only_args(self) -> T.List[str]:
return ['-c']
def get_no_optimization_args(self) -> T.List[str]:
return ['-O0']
def get_output_args(self, target: str) -> T.List[str]:
return ['-o', target]
def get_werror_args(self) -> T.List[str]:
return ['-Werror']
def get_include_args(self, path: str, is_system: bool) -> T.List[str]:
if path == '':
path = '.'
if is_system:
return ['-isystem', path]
return ['-I' + path]
def get_compiler_dirs(self, env: 'Environment', name: str) -> T.List[str]:
'''
Get dirs from the compiler, either `libraries:` or `programs:`
'''
return []
@functools.lru_cache()
def _get_library_dirs(self, env: 'Environment',
elf_class: T.Optional[int] = None) -> T.List[str]:
# TODO: replace elf_class with enum
dirs = self.get_compiler_dirs(env, 'libraries')
if elf_class is None or elf_class == 0:
return dirs
# if we do have an elf class for 32-bit or 64-bit, we want to check that
# the directory in question contains libraries of the appropriate class. Since
# system directories aren't mixed, we only need to check one file for each
# directory and go by that. If we can't check the file for some reason, assume
# the compiler knows what it's doing, and accept the directory anyway.
retval = []
for d in dirs:
files = [f for f in os.listdir(d) if f.endswith('.so') and os.path.isfile(os.path.join(d, f))]
# if no files, accept directory and move on
if not files:
retval.append(d)
continue
for f in files:
file_to_check = os.path.join(d, f)
try:
with open(file_to_check, 'rb') as fd:
header = fd.read(5)
# if file is not an ELF file, it's weird, but accept dir
# if it is elf, and the class matches, accept dir
if header[1:4] != b'ELF' or int(header[4]) == elf_class:
retval.append(d)
# at this point, it's an ELF file which doesn't match the
# appropriate elf_class, so skip this one
# stop scanning after the first successful read
break
except OSError:
# Skip the file if we can't read it
pass
return retval
def get_library_dirs(self, env: 'Environment',
elf_class: T.Optional[int] = None) -> T.List[str]:
"""Wrap the lru_cache so that we return a new copy and don't allow
mutation of the cached value.
"""
return self._get_library_dirs(env, elf_class).copy()
@functools.lru_cache()
def _get_program_dirs(self, env: 'Environment') -> T.List[str]:
'''
Programs used by the compiler. Also where toolchain DLLs such as
libstdc++-6.dll are found with MinGW.
'''
return self.get_compiler_dirs(env, 'programs')
def get_program_dirs(self, env: 'Environment') -> T.List[str]:
return self._get_program_dirs(env).copy()
def get_pic_args(self) -> T.List[str]:
return ['-fPIC']
def get_pch_use_args(self, pch_dir: str, header: str) -> T.List[str]:
return ['-include', os.path.basename(header)]
def get_pch_name(self, header_name: str) -> str:
return os.path.basename(header_name) + '.' + self.get_pch_suffix()
def get_linker_search_args(self, dirname: str) -> T.List[str]:
return self.linker.get_search_args(dirname)
def get_default_include_dirs(self) -> T.List[str]:
return []
def gen_export_dynamic_link_args(self, env: 'Environment') -> T.List[str]:
return self.linker.export_dynamic_args(env)
def gen_import_library_args(self, implibname: str) -> T.List[str]:
return self.linker.import_library_args(implibname)
def _sanity_check_impl(self, work_dir: str, environment: 'Environment',
sname: str, code: str) -> None:
mlog.debug('Sanity testing ' + self.get_display_language() + ' compiler:', ' '.join(self.exelist))
mlog.debug('Is cross compiler: %s.' % str(self.is_cross))
source_name = os.path.join(work_dir, sname)
binname = sname.rsplit('.', 1)[0]
mode = CompileCheckMode.LINK
if self.is_cross:
binname += '_cross'
if self.exe_wrapper is None:
# Linking cross built apps is painful. You can't really
# tell if you should use -nostdlib or not and for example
# on OSX the compiler binary is the same but you need
# a ton of compiler flags to differentiate between
# arm and x86_64. So just compile.
mode = CompileCheckMode.COMPILE
cargs, largs = self._get_basic_compiler_args(environment, mode)
extra_flags = cargs + self.linker_to_compiler_args(largs)
# Is a valid executable output for all toolchains and platforms
binname += '.exe'
# Write binary check source
binary_name = os.path.join(work_dir, binname)
with open(source_name, 'w') as ofile:
ofile.write(code)
# Compile sanity check
# NOTE: extra_flags must be added at the end. On MSVC, it might contain a '/link' argument
# after which all further arguments will be passed directly to the linker
cmdlist = self.exelist + [sname] + self.get_output_args(binname) + extra_flags
pc, stdo, stde = mesonlib.Popen_safe(cmdlist, cwd=work_dir)
mlog.debug('Sanity check compiler command line:', ' '.join(cmdlist))
mlog.debug('Sanity check compile stdout:')
mlog.debug(stdo)
mlog.debug('-----\nSanity check compile stderr:')
mlog.debug(stde)
mlog.debug('-----')
if pc.returncode != 0:
raise mesonlib.EnvironmentException(f'Compiler {self.name_string()} can not compile programs.')
# Run sanity check
if self.is_cross:
if self.exe_wrapper is None:
# Can't check if the binaries run so we have to assume they do
return
cmdlist = self.exe_wrapper.get_command() + [binary_name]
else:
cmdlist = [binary_name]
mlog.debug('Running test binary command: ' + ' '.join(cmdlist))
try:
pe = subprocess.Popen(cmdlist)
except Exception as e:
raise mesonlib.EnvironmentException('Could not invoke sanity test executable: %s.' % str(e))
pe.wait()
if pe.returncode != 0:
raise mesonlib.EnvironmentException(f'Executables created by {self.language} compiler {self.name_string()} are not runnable.')
def sanity_check(self, work_dir: str, environment: 'Environment') -> None:
code = 'int main(void) { int class=0; return class; }\n'
return self._sanity_check_impl(work_dir, environment, 'sanitycheckc.c', code)
def check_header(self, hname: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
fargs = {'prefix': prefix, 'header': hname}
code = '''{prefix}
#include <{header}>'''
return self.compiles(code.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_header(self, hname: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None,
disable_cache: bool = False) -> T.Tuple[bool, bool]:
fargs = {'prefix': prefix, 'header': hname}
code = '''{prefix}
#ifdef __has_include
#if !__has_include("{header}")
#error "Header '{header}' could not be found"
#endif
#else
#include <{header}>
#endif'''
return self.compiles(code.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies, mode='preprocess', disable_cache=disable_cache)
def has_header_symbol(self, hname: str, symbol: str, prefix: str,
env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}
t = '''{prefix}
#include <{header}>
int main(void) {{
/* If it's not defined as a macro, try to use as a symbol */
#ifndef {symbol}
{symbol};
#endif
return 0;
}}'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def _get_basic_compiler_args(self, env: 'Environment', mode: CompileCheckMode) -> T.Tuple[T.List[str], T.List[str]]:
cargs = [] # type: T.List[str]
largs = [] # type: T.List[str]
if mode is CompileCheckMode.LINK:
# Sometimes we need to manually select the CRT to use with MSVC.
# One example is when trying to do a compiler check that involves
# linking with static libraries since MSVC won't select a CRT for
# us in that case and will error out asking us to pick one.
try:
crt_val = env.coredata.options[OptionKey('b_vscrt')].value
buildtype = env.coredata.options[OptionKey('buildtype')].value
cargs += self.get_crt_compile_args(crt_val, buildtype)
except (KeyError, AttributeError):
pass
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS and CPPFLAGS from the env
sys_args = env.coredata.get_external_args(self.for_machine, self.language)
if isinstance(sys_args, str):
sys_args = [sys_args]
# Apparently it is a thing to inject linker flags both
# via CFLAGS _and_ LDFLAGS, even though the former are
# also used during linking. These flags can break
# argument checks. Thanks, Autotools.
cleaned_sys_args = self.remove_linkerlike_args(sys_args)
cargs += cleaned_sys_args
if mode is CompileCheckMode.LINK:
ld_value = env.lookup_binary_entry(self.for_machine, self.language + '_ld')
if ld_value is not None:
largs += self.use_linker_args(ld_value[0])
# Add LDFLAGS from the env
sys_ld_args = env.coredata.get_external_link_args(self.for_machine, self.language)
# CFLAGS and CXXFLAGS go to both linking and compiling, but we want them
# to only appear on the command line once. Remove dupes.
largs += [x for x in sys_ld_args if x not in sys_args]
cargs += self.get_compiler_args_for_mode(mode)
return cargs, largs
def build_wrapper_args(self, env: 'Environment',
extra_args: T.Union[None, arglist.CompilerArgs, T.List[str]],
dependencies: T.Optional[T.List['Dependency']],
mode: CompileCheckMode = CompileCheckMode.COMPILE) -> arglist.CompilerArgs:
        # TODO: the caller should handle the listifying of these arguments
if extra_args is None:
extra_args = []
else:
# TODO: we want to do this in the caller
extra_args = mesonlib.listify(extra_args)
extra_args = mesonlib.listify([e(mode.value) if callable(e) else e for e in extra_args])
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, collections.abc.Iterable):
            # TODO: we want to ensure the front end does the listifying here
dependencies = [dependencies] # type: ignore
# Collect compiler arguments
cargs = self.compiler_args() # type: arglist.CompilerArgs
largs = [] # type: T.List[str]
for d in dependencies:
# Add compile flags needed by dependencies
cargs += d.get_compile_args()
if mode is CompileCheckMode.LINK:
# Add link flags needed to find dependencies
largs += d.get_link_args()
ca, la = self._get_basic_compiler_args(env, mode)
cargs += ca
largs += la
cargs += self.get_compiler_check_args(mode)
# on MSVC compiler and linker flags must be separated by the "/link" argument
# at this point, the '/link' argument may already be part of extra_args, otherwise, it is added here
if self.linker_to_compiler_args([]) == ['/link'] and largs != [] and not ('/link' in extra_args):
extra_args += ['/link']
args = cargs + extra_args + largs
return args
def run(self, code: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> compilers.RunResult:
need_exe_wrapper = env.need_exe_wrapper(self.for_machine)
if need_exe_wrapper and self.exe_wrapper is None:
raise compilers.CrossNoRunException('Can not run test applications in this cross environment.')
with self._build_wrapper(code, env, extra_args, dependencies, mode='link', want_output=True) as p:
if p.returncode != 0:
mlog.debug('Could not compile test file %s: %d\n' % (
p.input_name,
p.returncode))
return compilers.RunResult(False)
if need_exe_wrapper:
cmdlist = self.exe_wrapper.get_command() + [p.output_name]
else:
cmdlist = [p.output_name]
try:
pe, so, se = mesonlib.Popen_safe(cmdlist)
except Exception as e:
mlog.debug(f'Could not run: {cmdlist} (error: {e})\n')
return compilers.RunResult(False)
mlog.debug('Program stdout:\n')
mlog.debug(so)
mlog.debug('Program stderr:\n')
mlog.debug(se)
return compilers.RunResult(True, pe.returncode, so, se)
def _compile_int(self, expression: str, prefix: str, env: 'Environment',
extra_args: T.Optional[T.List[str]],
dependencies: T.Optional[T.List['Dependency']]) -> bool:
fargs = {'prefix': prefix, 'expression': expression}
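        # The check is a compile-time assertion: if the expression is false the
        # array size below evaluates to -1 and compilation fails; if true, to 1.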
t = '''#include <stdio.h>
{prefix}
int main(void) {{ static int a[1-2*!({expression})]; a[0]=0; return 0; }}'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)[0]
def cross_compute_int(self, expression: str, low: T.Optional[int], high: T.Optional[int],
guess: T.Optional[int], prefix: str, env: 'Environment',
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
# Try user's guess first
if isinstance(guess, int):
if self._compile_int('%s == %d' % (expression, guess), prefix, env, extra_args, dependencies):
return guess
# If no bounds are given, compute them in the limit of int32
maxint = 0x7fffffff
minint = -0x80000000
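        # Grow the candidate bound exponentially until it brackets the value;
        # the binary search further down then narrows it to the exact result.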
if not isinstance(low, int) or not isinstance(high, int):
if self._compile_int('%s >= 0' % (expression), prefix, env, extra_args, dependencies):
low = cur = 0
while self._compile_int('%s > %d' % (expression, cur), prefix, env, extra_args, dependencies):
low = cur + 1
if low > maxint:
raise mesonlib.EnvironmentException('Cross-compile check overflowed')
cur = cur * 2 + 1
if cur > maxint:
cur = maxint
high = cur
else:
high = cur = -1
while self._compile_int('%s < %d' % (expression, cur), prefix, env, extra_args, dependencies):
high = cur - 1
if high < minint:
raise mesonlib.EnvironmentException('Cross-compile check overflowed')
cur = cur * 2
if cur < minint:
cur = minint
low = cur
else:
# Sanity check limits given by user
if high < low:
raise mesonlib.EnvironmentException('high limit smaller than low limit')
condition = '%s <= %d && %s >= %d' % (expression, high, expression, low)
if not self._compile_int(condition, prefix, env, extra_args, dependencies):
raise mesonlib.EnvironmentException('Value out of given range')
# Binary search
while low != high:
cur = low + int((high - low) / 2)
if self._compile_int('%s <= %d' % (expression, cur), prefix, env, extra_args, dependencies):
high = cur
else:
low = cur + 1
return low
def compute_int(self, expression: str, low: T.Optional[int], high: T.Optional[int],
guess: T.Optional[int], prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_compute_int(expression, low, high, guess, prefix, env, extra_args, dependencies)
fargs = {'prefix': prefix, 'expression': expression}
t = '''#include<stdio.h>
{prefix}
int main(void) {{
printf("%ld\\n", (long)({expression}));
return 0;
}};'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
return -1
if res.returncode != 0:
raise mesonlib.EnvironmentException('Could not run compute_int test binary.')
return int(res.stdout)
def cross_sizeof(self, typename: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
{prefix}
int main(void) {{
{type} something;
return 0;
}}'''
if not self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)[0]:
return -1
return self.cross_compute_int('sizeof(%s)' % typename, None, None, None, prefix, env, extra_args, dependencies)
def sizeof(self, typename: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
if self.is_cross:
return self.cross_sizeof(typename, prefix, env, extra_args=extra_args,
dependencies=dependencies)
t = '''#include<stdio.h>
{prefix}
int main(void) {{
printf("%ld\\n", (long)(sizeof({type})));
return 0;
}};'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
return -1
if res.returncode != 0:
raise mesonlib.EnvironmentException('Could not run sizeof test binary.')
return int(res.stdout)
def cross_alignment(self, typename: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
{prefix}
int main(void) {{
{type} something;
return 0;
}}'''
if not self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)[0]:
return -1
t = '''#include <stddef.h>
{prefix}
struct tmp {{
char c;
{type} target;
}};'''
return self.cross_compute_int('offsetof(struct tmp, target)', None, None, None, t.format(**fargs), env, extra_args, dependencies)
def alignment(self, typename: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> int:
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_alignment(typename, prefix, env, extra_args=extra_args,
dependencies=dependencies)
fargs = {'prefix': prefix, 'type': typename}
t = '''#include <stdio.h>
#include <stddef.h>
{prefix}
struct tmp {{
char c;
{type} target;
}};
int main(void) {{
printf("%d", (int)offsetof(struct tmp, target));
return 0;
}}'''
res = self.run(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if not res.compiled:
raise mesonlib.EnvironmentException('Could not compile alignment test.')
if res.returncode != 0:
raise mesonlib.EnvironmentException('Could not run alignment test binary.')
align = int(res.stdout)
if align == 0:
raise mesonlib.EnvironmentException('Could not determine alignment of %s. Sorry. You might want to file a bug.' % typename)
return align
def get_define(self, dname: str, prefix: str, env: 'Environment',
extra_args: T.Optional[T.List[str]],
dependencies: T.Optional[T.List['Dependency']],
disable_cache: bool = False) -> T.Tuple[str, bool]:
delim = '"MESON_GET_DEFINE_DELIMITER"'
fargs = {'prefix': prefix, 'define': dname, 'delim': delim}
code = '''
{prefix}
#ifndef {define}
# define {define}
#endif
{delim}\n{define}'''
args = self.build_wrapper_args(env, extra_args, dependencies,
mode=CompileCheckMode.PREPROCESS).to_native()
func = functools.partial(self.cached_compile, code.format(**fargs), env.coredata, extra_args=args, mode='preprocess')
if disable_cache:
func = functools.partial(self.compile, code.format(**fargs), extra_args=args, mode='preprocess', temp_dir=env.scratch_dir)
with func() as p:
cached = p.cached
if p.returncode != 0:
raise mesonlib.EnvironmentException(f'Could not get define {dname!r}')
# Get the preprocessed value after the delimiter,
# minus the extra newline at the end and
# merge string literals.
return self._concatenate_string_literals(p.stdout.split(delim + '\n')[-1][:-1]), cached
def get_return_value(self, fname: str, rtype: str, prefix: str,
env: 'Environment', extra_args: T.Optional[T.List[str]],
dependencies: T.Optional[T.List['Dependency']]) -> T.Union[str, int]:
# TODO: rtype should be an enum.
# TODO: maybe we can use overload to tell mypy when this will return int vs str?
if rtype == 'string':
fmt = '%s'
cast = '(char*)'
elif rtype == 'int':
fmt = '%lli'
cast = '(long long int)'
else:
raise AssertionError(f'BUG: Unknown return type {rtype!r}')
fargs = {'prefix': prefix, 'f': fname, 'cast': cast, 'fmt': fmt}
code = '''{prefix}
#include <stdio.h>
int main(void) {{
printf ("{fmt}", {cast} {f}());
return 0;
}}'''.format(**fargs)
res = self.run(code, env, extra_args=extra_args, dependencies=dependencies)
if not res.compiled:
m = 'Could not get return value of {}()'
raise mesonlib.EnvironmentException(m.format(fname))
if rtype == 'string':
return res.stdout
elif rtype == 'int':
try:
return int(res.stdout.strip())
except ValueError:
m = 'Return value of {}() is not an int'
raise mesonlib.EnvironmentException(m.format(fname))
assert False, 'Unreachable'
@staticmethod
def _no_prototype_templ() -> T.Tuple[str, str]:
"""
Try to find the function without a prototype from a header by defining
our own dummy prototype and trying to link with the C library (and
whatever else the compiler links in by default). This is very similar
to the check performed by Autoconf for AC_CHECK_FUNCS.
"""
# Define the symbol to something else since it is defined by the
# includes or defines listed by the user or by the compiler. This may
# include, for instance _GNU_SOURCE which must be defined before
# limits.h, which includes features.h
# Then, undef the symbol to get rid of it completely.
head = '''
#define {func} meson_disable_define_of_{func}
{prefix}
#include <limits.h>
#undef {func}
'''
# Override any GCC internal prototype and declare our own definition for
# the symbol. Use char because that's unlikely to be an actual return
# value for a function which ensures that we override the definition.
head += '''
#ifdef __cplusplus
extern "C"
#endif
char {func} (void);
'''
# The actual function call
main = '''
int main(void) {{
return {func} ();
}}'''
return head, main
@staticmethod
def _have_prototype_templ() -> T.Tuple[str, str]:
"""
Returns a head-er and main() call that uses the headers listed by the
user for the function prototype while checking if a function exists.
"""
# Add the 'prefix', aka defines, includes, etc that the user provides
# This may include, for instance _GNU_SOURCE which must be defined
# before limits.h, which includes features.h
head = '{prefix}\n#include <limits.h>\n'
# We don't know what the function takes or returns, so return it as an int.
# Just taking the address or comparing it to void is not enough because
# compilers are smart enough to optimize it away. The resulting binary
# is not run so we don't care what the return value is.
main = '''\nint main(void) {{
void *a = (void*) &{func};
long long b = (long long) a;
return (int) b;
}}'''
return head, main
def has_function(self, funcname: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
"""Determine if a function exists.
First, this function looks for the symbol in the default libraries
provided by the compiler (stdlib + a few others usually). If that
fails, it checks if any of the headers specified in the prefix provide
an implementation of the function, and if that fails, it checks if it's
implemented as a compiler-builtin.
"""
if extra_args is None:
extra_args = []
# Short-circuit if the check is already provided by the cross-info file
varname = 'has function ' + funcname
varname = varname.replace(' ', '_')
if self.is_cross:
val = env.properties.host.get(varname, None)
if val is not None:
if isinstance(val, bool):
return val, False
raise mesonlib.EnvironmentException(f'Cross variable {varname} is not a boolean.')
# TODO: we really need a protocol for this,
#
# class StrProto(typing.Protocol):
# def __str__(self) -> str: ...
fargs = {'prefix': prefix, 'func': funcname} # type: T.Dict[str, T.Union[str, bool, int]]
# glibc defines functions that are not available on Linux as stubs that
# fail with ENOSYS (such as e.g. lchmod). In this case we want to fail
# instead of detecting the stub as a valid symbol.
# We already included limits.h earlier to ensure that these are defined
# for stub functions.
stubs_fail = '''
#if defined __stub_{func} || defined __stub___{func}
fail fail fail this function is not going to work
#endif
'''
# If we have any includes in the prefix supplied by the user, assume
# that the user wants us to use the symbol prototype defined in those
# includes. If not, then try to do the Autoconf-style check with
# a dummy prototype definition of our own.
# This is needed when the linker determines symbol availability from an
# SDK based on the prototype in the header provided by the SDK.
# Ignoring this prototype would result in the symbol always being
# marked as available.
if '#include' in prefix:
head, main = self._have_prototype_templ()
else:
head, main = self._no_prototype_templ()
templ = head + stubs_fail + main
res, cached = self.links(templ.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
if res:
return True, cached
# MSVC does not have compiler __builtin_-s.
if self.get_id() in {'msvc', 'intel-cl'}:
return False, False
# Detect function as a built-in
#
# Some functions like alloca() are defined as compiler built-ins which
# are inlined by the compiler and you can't take their address, so we
# need to look for them differently. On nice compilers like clang, we
# can just directly use the __has_builtin() macro.
fargs['no_includes'] = '#include' not in prefix
is_builtin = funcname.startswith('__builtin_')
fargs['is_builtin'] = is_builtin
fargs['__builtin_'] = '' if is_builtin else '__builtin_'
t = '''{prefix}
int main(void) {{
/* With some toolchains (MSYS2/mingw for example) the compiler
* provides various builtins which are not really implemented and
* fall back to the stdlib where they aren't provided and fail at
* build/link time. In case the user provides a header, including
* the header didn't lead to the function being defined, and the
* function we are checking isn't a builtin itself we assume the
* builtin is not functional and we just error out. */
#if !{no_includes:d} && !defined({func}) && !{is_builtin:d}
#error "No definition for {__builtin_}{func} found in the prefix"
#endif
#ifdef __has_builtin
#if !__has_builtin({__builtin_}{func})
#error "{__builtin_}{func} not found"
#endif
#elif ! defined({func})
{__builtin_}{func};
#endif
return 0;
}}'''
return self.links(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_members(self, typename: str, membernames: T.List[str],
prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'type': typename, 'name': 'foo'}
# Create code that accesses all members
members = ''
for member in membernames:
members += '{}.{};\n'.format(fargs['name'], member)
fargs['members'] = members
t = '''{prefix}
void bar(void) {{
{type} {name};
{members}
}};'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def has_type(self, typename: str, prefix: str, env: 'Environment', extra_args: T.List[str],
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
fargs = {'prefix': prefix, 'type': typename}
t = '''{prefix}
void bar(void) {{
sizeof({type});
}};'''
return self.compiles(t.format(**fargs), env, extra_args=extra_args,
dependencies=dependencies)
def symbols_have_underscore_prefix(self, env: 'Environment') -> bool:
'''
Check if the compiler prefixes an underscore to global C symbols
'''
symbol_name = b'meson_uscore_prefix'
code = '''#ifdef __cplusplus
extern "C" {
#endif
void ''' + symbol_name.decode() + ''' (void) {}
#ifdef __cplusplus
}
#endif
'''
args = self.get_compiler_check_args(CompileCheckMode.COMPILE)
n = 'symbols_have_underscore_prefix'
with self._build_wrapper(code, env, extra_args=args, mode='compile', want_output=True, temp_dir=env.scratch_dir) as p:
if p.returncode != 0:
m = 'BUG: Unable to compile {!r} check: {}'
raise RuntimeError(m.format(n, p.stdout))
if not os.path.isfile(p.output_name):
m = 'BUG: Can\'t find compiled test code for {!r} check'
raise RuntimeError(m.format(n))
with open(p.output_name, 'rb') as o:
for line in o:
# Check if the underscore form of the symbol is somewhere
# in the output file.
if b'_' + symbol_name in line:
mlog.debug("Symbols have underscore prefix: YES")
return True
# Else, check if the non-underscored form is present
elif symbol_name in line:
mlog.debug("Symbols have underscore prefix: NO")
return False
raise RuntimeError(f'BUG: {n!r} check failed unexpectedly')
def _get_patterns(self, env: 'Environment', prefixes: T.List[str], suffixes: T.List[str], shared: bool = False) -> T.List[str]:
patterns = [] # type: T.List[str]
for p in prefixes:
for s in suffixes:
patterns.append(p + '{}.' + s)
if shared and env.machines[self.for_machine].is_openbsd():
# Shared libraries on OpenBSD can be named libfoo.so.X.Y:
# https://www.openbsd.org/faq/ports/specialtopics.html#SharedLibs
#
# This globbing is probably the best matching we can do since regex
# is expensive. It's wrong in many edge cases, but it will match
# correctly-named libraries and hopefully no one on OpenBSD names
# their files libfoo.so.9a.7b.1.0
for p in prefixes:
patterns.append(p + '{}.so.[0-9]*.[0-9]*')
return patterns
def get_library_naming(self, env: 'Environment', libtype: LibType, strict: bool = False) -> T.Tuple[str, ...]:
'''
Get library prefixes and suffixes for the target platform ordered by
priority
'''
stlibext = ['a']
# We've always allowed libname to be both `foo` and `libfoo`, and now
# people depend on it. Also, some people use prebuilt `foo.so` instead
# of `libfoo.so` for unknown reasons, and may also want to create
# `foo.so` by setting name_prefix to ''
if strict and not isinstance(self, VisualStudioLikeCompiler): # lib prefix is not usually used with msvc
prefixes = ['lib']
else:
prefixes = ['lib', '']
# Library suffixes and prefixes
if env.machines[self.for_machine].is_darwin():
shlibext = ['dylib', 'so']
elif env.machines[self.for_machine].is_windows():
# FIXME: .lib files can be import or static so we should read the
# file, figure out which one it is, and reject the wrong kind.
if isinstance(self, VisualStudioLikeCompiler):
shlibext = ['lib']
else:
shlibext = ['dll.a', 'lib', 'dll']
# Yep, static libraries can also be foo.lib
stlibext += ['lib']
elif env.machines[self.for_machine].is_cygwin():
shlibext = ['dll', 'dll.a']
prefixes = ['cyg'] + prefixes
else:
# Linux/BSDs
shlibext = ['so']
# Search priority
if libtype is LibType.PREFER_SHARED:
patterns = self._get_patterns(env, prefixes, shlibext, True)
patterns.extend([x for x in self._get_patterns(env, prefixes, stlibext, False) if x not in patterns])
elif libtype is LibType.PREFER_STATIC:
patterns = self._get_patterns(env, prefixes, stlibext, False)
patterns.extend([x for x in self._get_patterns(env, prefixes, shlibext, True) if x not in patterns])
elif libtype is LibType.SHARED:
patterns = self._get_patterns(env, prefixes, shlibext, True)
else:
assert libtype is LibType.STATIC
patterns = self._get_patterns(env, prefixes, stlibext, False)
return tuple(patterns)
@staticmethod
def _sort_shlibs_openbsd(libs: T.List[str]) -> T.List[str]:
filtered = [] # type: T.List[str]
for lib in libs:
# Validate file as a shared library of type libfoo.so.X.Y
ret = lib.rsplit('.so.', maxsplit=1)
if len(ret) != 2:
continue
try:
float(ret[1])
except ValueError:
continue
filtered.append(lib)
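        # Sort by the numeric suffix, newest (highest) version first.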
float_cmp = lambda x: float(x.rsplit('.so.', maxsplit=1)[1])
return sorted(filtered, key=float_cmp, reverse=True)
@classmethod
def _get_trials_from_pattern(cls, pattern: str, directory: str, libname: str) -> T.List[Path]:
f = Path(directory) / pattern.format(libname)
# Globbing for OpenBSD
if '*' in pattern:
# NOTE: globbing matches directories and broken symlinks
# so we have to do an isfile test on it later
return [Path(x) for x in cls._sort_shlibs_openbsd(glob.glob(str(f)))]
return [f]
@staticmethod
def _get_file_from_list(env: 'Environment', paths: T.List[Path]) -> Path:
'''
We just check whether the library exists. We can't do a link check
because the library might have unresolved symbols that require other
libraries. On macOS we check if the library matches our target
architecture.
'''
# If not building on macOS for Darwin, do a simple file check
if not env.machines.host.is_darwin() or not env.machines.build.is_darwin():
for p in paths:
if p.is_file():
return p
# Run `lipo` and check if the library supports the arch we want
for p in paths:
if not p.is_file():
continue
archs = mesonlib.darwin_get_object_archs(str(p))
if archs and env.machines.host.cpu_family in archs:
return p
else:
mlog.debug('Rejected {}, supports {} but need {}'
.format(p, archs, env.machines.host.cpu_family))
return None
@functools.lru_cache()
def output_is_64bit(self, env: 'Environment') -> bool:
'''
returns true if the output produced is 64-bit, false if 32-bit
'''
return self.sizeof('void *', '', env) == 8
def _find_library_real(self, libname: str, env: 'Environment', extra_dirs: T.List[str], code: str, libtype: LibType) -> T.Optional[T.List[str]]:
# First try if we can just add the library as -l.
# Gcc + co seem to prefer builtin lib dirs to -L dirs.
# Only try to find std libs if no extra dirs specified.
# The built-in search procedure will always favour .so and then always
# search for .a. This is only allowed if libtype is LibType.PREFER_SHARED
if ((not extra_dirs and libtype is LibType.PREFER_SHARED) or
libname in self.internal_libs):
cargs = ['-l' + libname]
largs = self.get_linker_always_args() + self.get_allow_undefined_link_args()
extra_args = cargs + self.linker_to_compiler_args(largs)
if self.links(code, env, extra_args=extra_args, disable_cache=True)[0]:
return cargs
# Don't do a manual search for internal libs
if libname in self.internal_libs:
return None
# Not found or we want to use a specific libtype? Try to find the
# library file itself.
patterns = self.get_library_naming(env, libtype)
# try to detect if we are 64-bit or 32-bit. If we can't
# detect, we will just skip path validity checks done in
# get_library_dirs() call
try:
if self.output_is_64bit(env):
elf_class = 2
else:
elf_class = 1
except (mesonlib.MesonException, KeyError): # TODO evaluate if catching KeyError is wanted here
elf_class = 0
# Search in the specified dirs, and then in the system libraries
for d in itertools.chain(extra_dirs, self.get_library_dirs(env, elf_class)):
for p in patterns:
trials = self._get_trials_from_pattern(p, d, libname)
if not trials:
continue
trial = self._get_file_from_list(env, trials)
if not trial:
continue
return [trial.as_posix()]
return None
def _find_library_impl(self, libname: str, env: 'Environment', extra_dirs: T.List[str],
code: str, libtype: LibType) -> T.Optional[T.List[str]]:
# These libraries are either built-in or invalid
if libname in self.ignore_libs:
return []
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
key = (tuple(self.exelist), libname, tuple(extra_dirs), code, libtype)
if key not in self.find_library_cache:
value = self._find_library_real(libname, env, extra_dirs, code, libtype)
self.find_library_cache[key] = value
else:
value = self.find_library_cache[key]
if value is None:
return None
return value.copy()
def find_library(self, libname: str, env: 'Environment', extra_dirs: T.List[str],
libtype: LibType = LibType.PREFER_SHARED) -> T.Optional[T.List[str]]:
code = 'int main(void) { return 0; }\n'
return self._find_library_impl(libname, env, extra_dirs, code, libtype)
def find_framework_paths(self, env: 'Environment') -> T.List[str]:
'''
These are usually /Library/Frameworks and /System/Library/Frameworks,
unless you select a particular macOS SDK with the -isysroot flag.
You can also add to this by setting -F in CFLAGS.
'''
# TODO: this really needs to be *AppleClang*, not just any clang.
if self.id != 'clang':
raise mesonlib.MesonException('Cannot find framework path with non-clang compiler')
# Construct the compiler command-line
commands = self.get_exelist() + ['-v', '-E', '-']
commands += self.get_always_args()
# Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
commands += env.coredata.get_external_args(self.for_machine, self.language)
mlog.debug('Finding framework path by running: ', ' '.join(commands), '\n')
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
_, _, stde = mesonlib.Popen_safe(commands, env=os_env, stdin=subprocess.PIPE)
        paths = []  # type: T.List[str]
for line in stde.split('\n'):
if '(framework directory)' not in line:
continue
# line is of the form:
# ` /path/to/framework (framework directory)`
paths.append(line[:-21].strip())
return paths
def _find_framework_real(self, name: str, env: 'Environment', extra_dirs: T.List[str], allow_system: bool) -> T.Optional[T.List[str]]:
code = 'int main(void) { return 0; }'
link_args = []
for d in extra_dirs:
link_args += ['-F' + d]
# We can pass -Z to disable searching in the system frameworks, but
# then we must also pass -L/usr/lib to pick up libSystem.dylib
extra_args = [] if allow_system else ['-Z', '-L/usr/lib']
link_args += ['-framework', name]
if self.links(code, env, extra_args=(extra_args + link_args), disable_cache=True)[0]:
return link_args
return None
def _find_framework_impl(self, name: str, env: 'Environment', extra_dirs: T.List[str],
allow_system: bool) -> T.Optional[T.List[str]]:
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
key = (tuple(self.exelist), name, tuple(extra_dirs), allow_system)
if key in self.find_framework_cache:
value = self.find_framework_cache[key]
else:
value = self._find_framework_real(name, env, extra_dirs, allow_system)
self.find_framework_cache[key] = value
if value is None:
return None
return value.copy()
def find_framework(self, name: str, env: 'Environment', extra_dirs: T.List[str],
allow_system: bool = True) -> T.Optional[T.List[str]]:
'''
Finds the framework with the specified name, and returns link args for
the same or returns None when the framework is not found.
'''
# TODO: maybe this belongs in clang? also, should probably check for macOS?
if self.id != 'clang':
raise mesonlib.MesonException('Cannot find frameworks with non-clang compiler')
return self._find_framework_impl(name, env, extra_dirs, allow_system)
def get_crt_compile_args(self, crt_val: str, buildtype: str) -> T.List[str]:
# TODO: does this belong here or in GnuLike or maybe PosixLike?
return []
def get_crt_link_args(self, crt_val: str, buildtype: str) -> T.List[str]:
# TODO: does this belong here or in GnuLike or maybe PosixLike?
return []
def thread_flags(self, env: 'Environment') -> T.List[str]:
# TODO: does this belong here or in GnuLike or maybe PosixLike?
host_m = env.machines[self.for_machine]
if host_m.is_haiku() or host_m.is_darwin():
return []
return ['-pthread']
def linker_to_compiler_args(self, args: T.List[str]) -> T.List[str]:
return args.copy()
def has_arguments(self, args: T.List[str], env: 'Environment', code: str,
mode: str) -> T.Tuple[bool, bool]:
return self.compiles(code, env, extra_args=args, mode=mode)
def _has_multi_arguments(self, args: T.List[str], env: 'Environment', code: str) -> T.Tuple[bool, bool]:
new_args = [] # type: T.List[str]
for arg in args:
# some compilers, e.g. GCC, don't warn for unsupported warning-disable
# flags, so when we are testing a flag like "-Wno-forgotten-towel", also
# check the equivalent enable flag too "-Wforgotten-towel"
if arg.startswith('-Wno-'):
new_args.append('-W' + arg[5:])
if arg.startswith('-Wl,'):
mlog.warning('{} looks like a linker argument, '
'but has_argument and other similar methods only '
'support checking compiler arguments. Using them '
                             'to check linker arguments is never supported, '
                             'and the results are likely to be wrong regardless of '
                             'the compiler you are using. Use has_link_argument or '
                             'a similar method instead.'
.format(arg))
new_args.append(arg)
return self.has_arguments(new_args, env, code, mode='compile')
def has_multi_arguments(self, args: T.List[str], env: 'Environment') -> T.Tuple[bool, bool]:
return self._has_multi_arguments(args, env, 'extern int i;\nint i;\n')
def _has_multi_link_arguments(self, args: T.List[str], env: 'Environment', code: str) -> T.Tuple[bool, bool]:
# First time we check for link flags we need to first check if we have
# --fatal-warnings, otherwise some linker checks could give some
# false positive.
args = self.linker.fatal_warnings() + args
args = self.linker_to_compiler_args(args)
return self.has_arguments(args, env, code, mode='link')
def has_multi_link_arguments(self, args: T.List[str], env: 'Environment') -> T.Tuple[bool, bool]:
return self._has_multi_link_arguments(args, env, 'int main(void) { return 0; }\n')
@staticmethod
def _concatenate_string_literals(s: str) -> str:
pattern = re.compile(r'(?P<pre>.*([^\\]")|^")(?P<str1>([^\\"]|\\.)*)"\s+"(?P<str2>([^\\"]|\\.)*)(?P<post>".*)')
ret = s
m = pattern.match(ret)
while m:
ret = ''.join(m.group('pre', 'str1', 'str2', 'post'))
m = pattern.match(ret)
return ret
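    # For example, _concatenate_string_literals('printf("foo" "bar")') returns
    # 'printf("foobar")' by repeatedly merging adjacent string literals.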
def get_has_func_attribute_extra_args(self, name: str) -> T.List[str]:
# Most compilers (such as GCC and Clang) only warn about unknown or
# ignored attributes, so force an error. Overridden in GCC and Clang
# mixins.
return ['-Werror']
def has_func_attribute(self, name: str, env: 'Environment') -> T.Tuple[bool, bool]:
# Just assume that if we're not on windows that dllimport and dllexport
# don't work
m = env.machines[self.for_machine]
if not (m.is_windows() or m.is_cygwin()):
if name in ['dllimport', 'dllexport']:
return False, False
return self.compiles(self.attribute_check_func(name), env,
extra_args=self.get_has_func_attribute_extra_args(name))
def get_disable_assert_args(self) -> T.List[str]:
return ['-DNDEBUG']
| QuLogic/meson | mesonbuild/compilers/mixins/clike.py | Python | apache-2.0 | 59,114 | 0.002436 |
## for learning purpose only
from collections import deque
##The making of a hash Table
def hash_string(keyword,buckets):
'''
    Take a keyword (string) and a number of buckets, and return a number
    representing the bucket for that keyword.
'''
return sum(map(ord, keyword))%buckets
##Testing Hash string distribution using hash str function
def test_hash_func(fn, keys, bucSize):
results = [0] * bucSize
keys_used = set()
for key in keys:
if key and key not in keys_used:
results[fn(key, bucSize)] += 1
keys_used.add(key)
return results
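# For example, test_hash_func(hash_string, ['a', 'b', 'c'], 3) returns [1, 1, 1]:
# ord('c') % 3 == 0, ord('a') % 3 == 1 and ord('b') % 3 == 2, so each key lands
# in its own bucket.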
## Implementing a HashTable
## create buckets
create_table = lambda size: [[] for _ in xrange(size)]
##finding buckets
def hashtable_get_bucket(htable,keyword):
return htable[hash_string(keyword, len(htable))]
##adding to buckets
def hashtable_add(htable,key,value):
# your code here
pos = hash_string(key,len(htable))
#O(k/bsize)
for each in htable[pos]:
if each[0] == key: break
else:
htable[pos].append([key, value])
return htable
##look up value of a key
def hashtable_lookup(htable,key):
pos = hash_string(key,len(htable))
for each in htable[pos]:
if each[0] == key: return each[1]
return None
##Update a key if present else add it
def hashtable_update(htable,key,value):
bucket = hashtable_get_bucket(htable,key)
for entry in bucket:
if entry[0] == key:
entry[1] = value
break
else:
hashtable_add(htable,key,value)
return htable
class hashmap(object):
def __init__(self, bsize=0):
self.bsize = bsize or 3
self.table = create_table(self.bsize)
self.keyCount = 0
def __str__(self):
return "%s" %self.table
def __repr__(self):
return "{}".format(self.__str__())
def __len__(self): return len(self.table)
def _getBucket(self, key):
return hashtable_get_bucket(self.table, key)
def _expandTable(self):
self.bsize *= 2
newtable = create_table(self.bsize)
#print "new table %s" %newtable
q = deque(maxlen=self.bsize)
q.appendleft(self.table)
#O(nlogn)
while q:
tbl = q.pop()
ln = len(tbl)
if ln > 1:
q.appendleft(tbl[:ln//2])
q.appendleft(tbl[ln//2:])
else:
#print "_expandTable else tbl is {}".format(tbl)
for each_buck in tbl:
for each_key_list in each_buck:
if each_key_list:
#print "each key list is {}".format(each_key_list)
#print "_expandTable adding key {} val {}".format(each_key_list[0], each_key_list[1])
hashtable_add(newtable, each_key_list[0], each_key_list[1])
assert len(self.table) < len(newtable)
del self.table
self.table = newtable
return self.table
def _addKey(self, key, value):
if self.keyCount+1 > self.bsize:
self._expandTable()
bucket = self._getBucket(key)
for entry in bucket:
if entry[0] == key:
entry[1] = value
break
else:
hashtable_add(self.table, key,value)
self.keyCount += 1
def _getVal(self, key):
return hashtable_lookup(self.table, key)
def __getitem__(self, key):
return self._getVal(key)
def __setitem__(self, key, value):
self._addKey(key, value)
##Delete a key if present else ignore
def _hashtable_delete(self, key):
bucket = hashtable_get_bucket(self.table, key)
for entry in bucket:
if entry[0]==key:
bucket.remove(entry)
self.keyCount -= 1
def remove(self, key):
self._hashtable_delete(key)
if __name__ == "__main__":
table = [[['Francis', 13], ['Ellis', 11]], [], [['Bill', 17],
['Zoe', 14]], [['Coach', 4]], [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]]
assert hashtable_get_bucket(table, "Zoe") == [['Bill', 17], ['Zoe', 14]]
assert hashtable_get_bucket(table, "Brick") == []
assert hashtable_get_bucket(table, "Lilith") == [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]
table = [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
[['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
hashtable_update(table, 'Bill', 42)
hashtable_update(table, 'Rochelle', 94)
hashtable_update(table, 'Zed', 68)
assert table == [[['Ellis', 11], ['Francis', 13]], [['Zed', 68]], \
[['Bill', 42], ['Zoe', 14]], [['Coach', 4]], [['Louis', 29], \
['Nick', 2], ['Rochelle', 94]]]
#d for dict
d = hashmap(4)
d['fdfds'] = 32423324
d['fdfsfdsds'] = 32423324
d['fdfsfdsdssdfsd'] = 32423324
d['fdffsd'] = 32423324
d['ffsd'] = 32423324
d.remove('ffsd')
t = [[], [], [], [['fdfsfdsdssdfsd', 32423324]], [], [['fdffsd', 32423324]], [], [['fdfds', 32423324], ['fdfsfdsds', 32423324]]]
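    # a couple of extra sanity checks on lookup and removal
    assert d['fdfds'] == 32423324
    assert d['ffsd'] is None  # removed above, so the lookup falls through to None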
assert repr(d) == repr(t) | codecakes/random_games | implement_hash_map.py | Python | mit | 5,289 | 0.016449 |
class amicable():
    def d(self, n):
        """Return the sum of the proper divisors of n."""
        if n == 1:
            return 0
        sum_of_factors = 0
        for i in range(1, int(n**0.5)+1):
            if n % i == 0:
                sum_of_factors += i
                partner = n // i
                # add the paired divisor, avoiding double-counting the square
                # root of n and excluding n itself
                if partner != i and partner != n:
                    sum_of_factors += partner
        return sum_of_factors
    def __call__(self, n):
        sum_of_amicable = 0
        for i in range(1, n):
            di = self.d(i)
            # i and d(i) form an amicable pair when d(d(i)) == i and d(i) != i
            if self.d(di) == i and di != i:
                sum_of_amicable += i
        return sum_of_amicable
def main():
euler_21 = amicable()
n=10000
print(euler_21(n))
if __name__ == "__main__":
main()
| higee/project_euler | 21-30/21.py | Python | mit | 860 | 0.010465 |
# introduction
print("Bitte füllen Sie das folgende Formular aus")
# get name
name = input("Vorname: ")
lastname = input("Nachname: ")
fullname = name + ' ' + lastname
# get birth info
birth_year = int(input("Geburtsjahr: "))
birth_place = input("Geburtsort: ")
# calculate age
age = 2016 - birth_year
print("\n")
# print generated info
print("Hallo", fullname + ",")
print("Sie sind", age, "Jahre alt und wurden in", birth_place, "geboren.")
print("Vielen Dank für Ihre Teilnahme an der Umfrage.")
| Informatik-AG-KGN-2016/Dokumente | 2016-11-14/input.py | Python | gpl-3.0 | 503 | 0 |
import cherrypy
from cherrypy.lib import httpauth
def check_auth(users, encrypt=None, realm=None):
"""If an authorization header contains credentials, return True, else False."""
request = cherrypy.serving.request
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
raise cherrypy.HTTPError(400, 'Bad Request')
if not encrypt:
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
if hasattr(users, '__call__'):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(realm, users, encrypt=None, debug=False):
"""If auth fails, raise 401 with a basic authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
encrypt: callable used to encrypt the password returned from the user-agent.
if None it defaults to a md5 encryption.
"""
if check_auth(users, encrypt):
if debug:
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
def digest_auth(realm, users, debug=False):
"""If auth fails, raise 401 with a digest authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
"""
if check_auth(users, realm=realm):
if debug:
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
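# Illustrative usage (not part of the original module; the tool names assume the
# standard CherryPy registration of these functions as tools, and the credentials
# below are made up):
#   userpassdict = {'user': '5f4dcc3b5aa765d61d8327deb882cf99'}  # md5('password')
#   conf = {'/': {'tools.basic_auth.on': True,
#                 'tools.basic_auth.realm': 'localhost',
#                 'tools.basic_auth.users': userpassdict}}
#   cherrypy.quickstart(Root(), '/', config=conf)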
| TRex22/Sick-Beard | cherrypy/lib/auth.py | Python | gpl-3.0 | 3,288 | 0.007603 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "concert.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| fotcorn/liveinconcert | manage.py | Python | mit | 250 | 0 |
import os
import json
import pyodbc
import psycopg2
import psycopg2.extras
from psycopg2.pool import ThreadedConnectionPool
import datetime
from concurrent.futures import ThreadPoolExecutor, wait
import multiprocessing
import sys
import hashlib
from utils import *
THREADNUM = 16
class IntWriter:
def __init__(self, target):
self.inttype = target['type'] #database, file, etc.
self.intconnstr = target['connstr'] #connection string: Server,Port,db/filename
self.mdmconnstr = 'Driver={ODBC Driver 13 for SQL Server}; Server=localhost; Database=MDM_PROD; UID=int_etl; PWD=ugpassword;'
self.mdmquery = 'SELECT [ID],[UID] FROM [MDM_PROD].[MODEL].[OBJECTS] where SystemID = ? and deletiondate is null'
self.goldenquery = 'SELECT [XID] as [ID],[UniqueObjectID] as [GoldenID] FROM [MDM_PROD].[MODEL].[mv_xref] where SystemID = ? and [UniqueObjectID] is not null'
self.mdmssys = target['ssys'] #source system code for UID lookup in MDM
self.intencoding = target['encoding'] #append method description (api, rest, query, etc.)
self.intname = target['name'] #name of table or procedure or whatever else
self.lookupcolumns = target['lookupcolumns']
self.pool = None
self.conn = None
self.curr = None
self.wcounter = 0
self.stream = []
self.intheader = target['header']
self.lookup_table = dict()
self.golden_table = dict()
self.ods_to_dwh_table = set()
self.cache_dict = dict()
self.convtime = datetime.timedelta()
self.connect()
self.active = True
self.executor = ThreadPoolExecutor(max_workers=THREADNUM)
self.futures = []
def golden_tracker(self):
cursor = pyodbc.connect(self.mdmconnstr).execute(self.goldenquery, (self.mdmssys,))
for row in cursor:
self.golden_table[row[0]] = row[1]
        logging.info('%d golden IDs are mapped to datasource. Memory used: %d', len(self.golden_table), sys.getsizeof(self.golden_table))
def ods_to_dwh_tracker(self):
cursor = pyodbc.connect(self.intconnstr).execute('select odsid from ' + self.intname)
self.ods_to_dwh_table.update([row[0] for row in cursor])
        logging.info('%d records already in Staging area. Memory used: %d', len(self.ods_to_dwh_table), sys.getsizeof(self.ods_to_dwh_table))
def change_tracker(self, dtype):
query = "select ddochash, dcontenthash from public.v_fact where dtype = %s"
db = psycopg2.connect(self.intconnstr)
cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query, (dtype,))
for row in cursor.fetchall():
self.cache_dict[row['ddochash'].tobytes()] = row['dcontenthash']
def connect(self):
t = datetime.datetime.today()
# TODO: move to a separate function to make program independant of MDM system
cursor = pyodbc.connect(self.mdmconnstr).execute(self.mdmquery, (self.mdmssys,))
columns = [column[0] for column in cursor.description]
for row in cursor.fetchall():
self.lookup_table[row[1]] = row[0]
# print(self.lookup_table)
self.golden_tracker()
if self.inttype == 'odsf1':
self.pool = ThreadedConnectionPool(1, THREADNUM + 1, self.intconnstr)
if self.inttype == 'staf1':
self.ods_to_dwh_tracker()
if self.intname == 'KS2': # TODO: add proper lookup of possible systems or some other logic when to look for changes (may be target system)
self.change_tracker(self.intname)
logging.info('Cache initialization took ' + str(datetime.datetime.today() - t))
return
def clear(self):
self.stream.clear()
return
def written(self):
print(self.wcounter)
return self.wcounter
def __len__(self):
return len(self.stream)
def append(self, data):
st = datetime.datetime.now()
BATCH_SIZE = 1
if self.inttype == 'apij1':
BATCH_SIZE = 1000
objectkeys = ['ExtractionDate','Migration','ActionID','SystemID','EntityID','UID','ParentUID','Verified','UXID','ValidFrom','ValidTo']
obj = {}
if 'PeriodObjects' in data:
obj['ExtractionDate'] = data['ExtractionDate']
obj['Migration'] = data['Migration']
obj['ActionID'] = data['ActionID']
obj['SystemID'] = data['SystemID']
obj['EntityID'] = data['EntityID']
obj['UID'] = data['UID']
obj['ParentUID'] = data['ParentUID']
obj['Verified'] = data['Verified']
obj['UXID'] = data['UXID']
obj['PeriodObjects'] = data['PeriodObjects']
else:
obj['PeriodObjects'] = []
obj['PeriodObjects'].append({'Attributes': []})
if 'ValidFrom' in data:
obj['PeriodObjects'][0]['ValidFrom'] = data['ValidFrom']
if 'ValidTo' in data:
obj['PeriodObjects'][0]['ValidTo'] = data['ValidTo']
for key in data.keys():
if key not in objectkeys:
if data[key] in self.lookup_table:
data[key] = self.lookup_table[data[key]]
obj['PeriodObjects'][0]['Attributes'].append({'Name': key, 'Value': str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')})
else:
obj[key] = str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')
obj['ActionID'] = 3 # Force-set action as "integration"
elif self.inttype == 'odsf1':
objectkeys = ['DataType','SystemID','ActionID','ExtractionDate','DocumentUID','Ln','inttimestamp']
obj = dict()
obj['dtimestamp'] = data['inttimestamp']
obj['dextractiondate'] = data['ExtractionDate']
obj['dtype'] = data['DataType']
obj['dsystem'] = data['SystemID']
obj['ddocuid'] = data['DocumentUID']
obj['ddocln'] = data['Ln']
obj['ddochash'] = hashlib.md5((str(obj['ddocuid']) + str(obj['ddocln'])).encode('utf-8')).digest()
# filter elements where GUID lookup failed --- NO IMPORT before GUIDs are in MDM
errs = [(k,v) for (k,v) in data.items() if k in self.lookupcolumns and v not in self.lookup_table and v != '00000000-0000-0000-0000-000000000000']
if len(errs) > 0:
logging.warning('Failed to convert GUID for %s', str(errs))
self.convtime += datetime.datetime.now() - st
return 0
obj['dcontent'] = json.dumps({k:self.lookup_table[v] if v in self.lookup_table else v.replace('00000000-0000-0000-0000-000000000000', '#NULL')
for (k,v) in data.items() if k not in objectkeys}, sort_keys=True)
obj['dcontenthash'] = hashlib.md5(obj['dcontent'].encode('utf-8')).digest()
obj['delta'] = False
if obj['ddochash'] in self.cache_dict:
# This line has been already posted so we need to check if the last available record is actual
# flag line as delta
obj['delta'] = True
if self.cache_dict[obj['ddochash']].tobytes() == obj['dcontenthash']:
# Can update some field here with a timestamp to guaranteee that data is actual
self.convtime += datetime.datetime.now() - st
return 0
# Earlier version exists so we have to create a new record for this version
elif self.inttype == 'staf1':
obj = data.copy()
if obj['odsid'] in self.ods_to_dwh_table:
self.convtime += datetime.datetime.now() - st
return 0
# TODO: this list of fields should be another field in sources table
golden_entities = ['ProjectUID', 'ConstrObjectUID']
for key in golden_entities:
if obj[key] not in self.golden_table:
logging.warning('Failed to find golden ID for record %s %s', str(obj[key]), str(key))
self.convtime += datetime.datetime.now() - st
return 0
obj[key] = self.golden_table[obj[key]]
# treat records which dont need to have golden values - pass nulls to fit into sql requirements
for key in obj:
if obj[key] == '#NULL':
obj[key] = None
self.convtime += datetime.datetime.now() - st
self.stream.append(obj)
if len(self.stream) == BATCH_SIZE:
self.futures.append(self.executor.submit(self.commitp, {'ContextRef': '', 'Objects': self.stream.copy()}))
self.clear()
return 1
def close(self):
if len(self.stream) > 0:
self.futures.append(self.executor.submit(self.commitp, {'ContextRef': '', 'Objects': self.stream.copy()}))
self.clear()
wait(self.futures)
self.wcounter = sum([f.result() for f in self.futures])
self.executor.shutdown(wait=True)
if self.inttype == 'odsf1':
safeexecute_pgsql(self.pool, 'refresh materialized view mv_fact_lastv', None, self.intconnstr)
self.pool.closeall()
print(self.convtime)
self.active = False
def commitp(self, params=None):
t = datetime.datetime.today()
count = 0
if self.inttype == 'apij1':
if params:
w = params
db = pyodbc.connect(self.intconnstr)
cursor = db.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL SNAPSHOT')
cursor.commit()
query = 'DECLARE @ret int' \
' EXEC @ret = ' + self.intname + ' ?, NULL' \
' SELECT @ret'
try:
count = cursor.execute(query, [str(json.dumps(w)),]).fetchone()[0]
cursor.commit()
except:
logging.error("Unexpected SQL server error, rolling back:", sys.exc_info())
logging.error("With object:", w)
cursor.rollback()
elif self.inttype == 'odsf1':
if params and 'Objects' in params:
w = params['Objects']
conn = self.pool.getconn()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
for obj in w:
query = 'INSERT INTO public.fact(dtype, dsystem, ddocuid, ddocln, ddochash, dcontenthash, dcontent, dtimestamp, dextractiondate, delta)' \
' VALUES (%(dtype)s, %(dsystem)s, %(ddocuid)s, %(ddocln)s, %(ddochash)s, %(dcontenthash)s, %(dcontent)s, %(dtimestamp)s, ' \
'%(dextractiondate)s, %(delta)s)'
try:
cur.execute(query, dict(obj))
conn.commit()
count += 1
except:
logging.error("Unexpected PostgreSQL server error, rolling back:", sys.exc_info())
logging.error("With object:", obj)
conn.rollback()
self.pool.putconn(conn)
elif self.inttype == 'staf1':
# TODO: CHECK
if params:
w = params['Objects']
db = pyodbc.connect(self.intconnstr)
cursor = db.cursor()
query = 'INSERT INTO ' + self.intname + '(' + ','.join(self.intheader) + ') VALUES(' + ','.join(['?' for _ in self.intheader]) + ')'
for obj in w:
try:
cursor.execute(query, tuple([obj[key] for key in self.intheader]))
cursor.commit()
count += 1
except:
logging.error("Unexpected SQL server error, rolling back:", sys.exc_info())
logging.error("With query:", query)
logging.error("With object:", obj)
cursor.rollback()
print('Commit took ' + str(datetime.datetime.today() - t))
return count
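# Illustrative sketch (not part of the original module): a hypothetical target
# dict shaped the way __init__ expects; every value below is made up.
#   target = {'type': 'odsf1',
#             'connstr': 'dbname=ods user=etl password=... host=localhost',
#             'ssys': 42,
#             'encoding': 'query',
#             'name': 'KS2',
#             'lookupcolumns': ['ProjectUID', 'ConstrObjectUID'],
#             'header': []}
#   writer = IntWriter(target)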
| HeavenlySword/INTINT | intint/intwriter.py | Python | mit | 12,384 | 0.006137 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9554")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9554")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| yardsalecoin/yardsalecoin | contrib/bitrpc/bitrpc.py | Python | mit | 7,836 | 0.038157 |
from __future__ import unicode_literals
import logging
# Logging configuration
log = logging.getLogger(__name__) # noqa
log.addHandler(logging.NullHandler()) # noqa
from netmiko.ssh_dispatcher import ConnectHandler
from netmiko.ssh_dispatcher import ssh_dispatcher
from netmiko.ssh_dispatcher import redispatch
from netmiko.ssh_dispatcher import platforms
from netmiko.ssh_dispatcher import FileTransfer
from netmiko.scp_handler import SCPConn
from netmiko.cisco.cisco_ios import InLineTransfer
from netmiko.ssh_exception import NetMikoTimeoutException
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko.ssh_autodetect import SSHDetect
from netmiko.base_connection import BaseConnection
# Alternate naming
NetmikoTimeoutError = NetMikoTimeoutException
NetmikoAuthError = NetMikoAuthenticationException
__version__ = '2.0.1'
__all__ = ('ConnectHandler', 'ssh_dispatcher', 'platforms', 'SCPConn', 'FileTransfer',
'NetMikoTimeoutException', 'NetMikoAuthenticationException',
'NetmikoTimeoutError', 'NetmikoAuthError', 'InLineTransfer', 'redispatch',
'SSHDetect', 'BaseConnection')
# Cisco cntl-shift-six sequence
CNTL_SHIFT_6 = chr(30)
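# Illustrative usage (not part of this module; host and credentials are made up):
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type='cisco_ios', host='192.0.2.1',
#                         username='admin', password='secret')
#   print(conn.send_command('show ip interface brief'))
#   conn.disconnect()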
| fooelisa/netmiko | netmiko/__init__.py | Python | mit | 1,198 | 0.003339 |
from src.greedy_algorithms import find_ample_city
from src.greedy_algorithms import find_majority_element
from src.greedy_algorithms import GasCity
from src.greedy_algorithms import three_sum
from src.greedy_algorithms import trapped_water
from src.greedy_algorithms import two_sum
class TestTwoSum(object):
"""
Testing two sum method
"""
def test_book_example(self):
in_arr = [11, 2, 5, 7, 3]
assert two_sum(14, in_arr)
assert two_sum(13, in_arr)
assert two_sum(16, in_arr)
assert not two_sum(17, in_arr)
assert not two_sum(21, in_arr)
assert not two_sum(11, in_arr)
class TestThreeSum(object):
"""
Question 18.5
"""
def test_book_example(self):
in_arr = [11, 2, 5, 7, 3]
assert three_sum(21, in_arr)
assert not three_sum(22, in_arr)
class TestFindMajorityElement(object):
"""
Question 18.6
"""
def test_book_example(self):
in_arr = [
'b', 'a',
'c', 'a',
'a', 'b',
'a', 'a',
'c', 'a',
]
assert 'a' == find_majority_element(in_arr)
def test_int_example(self):
in_arr = [
3, 3, 4,
2, 4, 4,
2, 4, 4,
]
assert 4 == find_majority_element(in_arr)
class TestFindAmpleCity(object):
"""
Question 18.7
"""
def test_book_example(self):
cities = [
GasCity(id='A', gas=50, to_next=900),
GasCity(id='B', gas=20, to_next=600),
GasCity(id='C', gas=5, to_next=200),
GasCity(id='D', gas=30, to_next=400),
GasCity(id='E', gas=25, to_next=600),
GasCity(id='F', gas=10, to_next=200),
GasCity(id='G', gas=10, to_next=100),
]
assert 'D' == find_ample_city(cities)
class TestMaxTrappedWater(object):
"""
Question 18.8
"""
def test_book_example(self):
heights = [
1, 2, 1,
3, 4, 4,
5, 6, 2,
1, 3, 1,
3, 2, 1,
2, 4, 1,
]
assert 48 == trapped_water(heights)
| jakubtuchol/epi | test/test_greedy_algorithms.py | Python | mit | 2,161 | 0 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
# Generate autosuspend rules for devices that have been tested to work properly
# with autosuspend by the Chromium OS team. Based on
# https://chromium.googlesource.com/chromiumos/platform2/+/master/power_manager/udev/gen_autosuspend_rules.py
import chromiumos.gen_autosuspend_rules
print('# pci:v<00VENDOR>d<00DEVICE> (8 uppercase hexadecimal digits twice)')
for entry in chromiumos.gen_autosuspend_rules.PCI_IDS:
vendor, device = entry.split(':')
vendor = int(vendor, 16)
device = int(device, 16)
print('pci:v{:08X}d{:08X}*'.format(vendor, device))
print('# usb:v<VEND>p<PROD> (4 uppercase hexadecimal digits twice)')
for entry in chromiumos.gen_autosuspend_rules.USB_IDS:
vendor, product = entry.split(':')
vendor = int(vendor, 16)
product = int(product, 16)
print('usb:v{:04X}p{:04X}*'.format(vendor, product))
print(' ID_AUTOSUSPEND=1')
| filbranden/systemd | tools/make-autosuspend-rules.py | Python | gpl-2.0 | 938 | 0 |
import rdflib
from rdflib import Namespace
from ontology import Ontology
class DOC(Ontology, ):
def __init__(self):
Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2000/10/swap/pim/doc#'))
prefix = 'doc'
doc = DOC()
ontology = doc
| h4ck3rm1k3/gcc-ontology | lib/ontologies/org/w3/_2000/_10/swap/pim/doc.py | Python | gpl-3.0 | 265 | 0.011321 |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .local_process import LocalProcess
from .remote_process import StubProcess
from .vagrant_process import VagrantProcess
class WorkerProcess(object):
"""Return a right processer based in the scheme
"""
_processers = {'tcp': StubProcess, 'vagrant': VagrantProcess}
def __init__(self, interpreter):
self._interpreter = interpreter
def take(self):
scheme = self._interpreter.scheme
return self._processers.get(scheme, LocalProcess)(self._interpreter)
| danalec/dotfiles | sublime/.config/sublime-text-3/Packages/Anaconda/anaconda_lib/workers/process.py | Python | mit | 636 | 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Operation')
class TestOperation(unittest.TestCase):
pass
| Zlash65/erpnext | erpnext/manufacturing/doctype/operation/test_operation.py | Python | gpl-3.0 | 258 | 0.007752 |
# -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from unittest.case import TestCase
from gopythongo.utils.debversion import debian_substr_compare, split_version_parts, DebianVersion, \
InvalidDebianVersionString
class DebianVersionTests(TestCase):
def test_debian_substr_compare(self) -> None:
self.assertEqual(debian_substr_compare("", "a"), -1)
self.assertEqual(debian_substr_compare("09", "10"), -1)
self.assertEqual(debian_substr_compare("~~", "~"), -1)
self.assertEqual(debian_substr_compare("~~", "~~a"), -1)
self.assertEqual(debian_substr_compare("~~", "~~"), 0)
self.assertEqual(debian_substr_compare("~", ""), -1)
self.assertEqual(debian_substr_compare("30", "30"), 0)
def test_debian_version_compare(self) -> None:
self.assertTrue(DebianVersion.fromstring("2:1.0") < DebianVersion.fromstring("3:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0~1") < DebianVersion.fromstring("3:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0~bpo1") < DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0dev") > DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("1.0dev") > DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("1.0-1") > DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("1.0-2") > DebianVersion.fromstring("1.0-1"))
self.assertTrue(DebianVersion.fromstring("1.0") == DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("0:1.0") == DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("3:1.0") > DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("1.1") > DebianVersion.fromstring("1.0"))
def test_split_version_parts(self) -> None:
self.assertListEqual(split_version_parts("a67bhgs89"), ["a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("33a67bhgs89"), ["33", "a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("~33a67bhgs89"), ["~", "33", "a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("33~a67bhgs89"), ["33", "~a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("1"), ["1"])
self.assertListEqual(split_version_parts(""), [""])
def test_serialization(self) -> None:
v = DebianVersion.fromstring("2:1.0~bpo1")
self.assertEqual(v, v.fromstring(v.tostring()))
def test_sorting_compatibility_aptpkg(self) -> None:
version_strings = ["~~a", "~", "~~", "a1", "1.0", "1.0-1", "1.0~bpo1", "1.0-1~bpo1"]
# sorted using python-apt's apt_pkg.version_compare
aptpkg_sorting = ['~~', '~~a', '~', '1.0~bpo1', '1.0', '1.0-1~bpo1', '1.0-1', 'a1']
l = []
for x in version_strings:
l.append(DebianVersion.fromstring(x))
l.sort()
self.assertListEqual(aptpkg_sorting, [str(x) for x in l])
def test_validation(self) -> None:
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "1.0:0")
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "ö:1.0")
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "1.Ö")
| gopythongo/gopythongo | src/py/gopythongo/tests/debversion.py | Python | mpl-2.0 | 3,559 | 0.006185 |
import random
import numpy
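# Coupon-collector style simulation: draw uniformly from range(large_set_size)
# until both the small subset {0..small_set_size-1} and the full set have been
# seen, over stat_num independent trials. The three averages printed at the end
# are: size of the large set when the small one completes, draws needed to
# complete the large set, and draws needed to complete the small set.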
large_set_size = 1000
small_set_size = 20
print_diff = 100
stat_num = 1000
stats = [[], [], []]
for _ in range(stat_num):
lset = set()
sset = set()
stop = False
i = 0
large_set_count = None
small_set_count = None
large_set_size_when_small_set_complete = None
while not stop:
# if i % print_diff == 0:
# print(len(lset), len(sset))
r = random.randrange(large_set_size)
if r < small_set_size:
sset.add(r)
lset.add(r)
if len(sset) == small_set_size and small_set_count is None:
small_set_count = i
large_set_size_when_small_set_complete = len(lset)
if len(lset) == large_set_size:
large_set_count = i
stop = True
i += 1
stats[0].append(large_set_size_when_small_set_complete)
stats[1].append(large_set_count)
stats[2].append(small_set_count)
print(numpy.average(stats[0]))
print(numpy.average(stats[1]))
print(numpy.average(stats[2]))
| veltzer/riddling | instances/cover_subset/solution.py | Python | gpl-3.0 | 1,036 | 0 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from scrapy.loader import ItemLoader
from w3lib.html import remove_tags
class SpiderYItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
img = Field()
name = Field()
def filter_price(value):
if value.isdigit():
return value
class Example(scrapy.Item):
name = scrapy.Field(
input_processor=MapCompose(unicode.title),
output_processor=Join(),
)
price = scrapy.Field(
input_processor=MapCompose(remove_tags, filter_price),
output_processor=TakeFirst(),
)
# eg: ItemLoader(item=Product()).add_value('price', [u'€', u'<span>1000</span>'])
# length_out = MapCompose(parse_length, unit='cm') or loader = ItemLoader(product, unit='cm')
# or ItemLoader(product).context['unit'] = 'cm' to change loader_context
# def parse_length(text, loader_context):
# unit = loader_context.get('unit', 'm')
# parsed_length = 'some parsing'
# return parsed_length
| bernieyangmh/spider | spider_y/items.py | Python | gpl-3.0 | 1,245 | 0.00241 |
__author__ = 'kdsouza'
import traits.api as tr
import traitsui.api as trui
from traitsui.editors import *
class VEdit(tr.HasTraits):
"""
Container class for value, editor type, and editor specifications.
"""
value = tr.Any
editor = trui.EditorFactory
kwargs = tr.Dict(key_trait=tr.Str, value_trait=tr.Any)
# item_kwargs = tr.Dict(key_trait=tr.Str, value_trait=tr.Any) # default hide label, custom style?
def __init__(self, value, editor, kwargs=dict()):
super(VEdit, self).__init__()
self.value, self.editor, self.kwargs = value, editor, kwargs
    def __eq__(self, other):
        return isinstance(other, VEdit) and (self.value == other.value) and (self.editor == other.editor)
| kdz/test | VEdit.py | Python | mit | 735 | 0.005442 |
"""
(Future home of) Tests for program enrollment writing Python API.
Currently, we do not directly unit test the functions in api/writing.py.
This is okay for now because they are all used in
`rest_api.v1.views` and is thus tested through `rest_api.v1.tests.test_views`.
Eventually it would be good to directly test the Python API function and just use
mocks in the view tests.
This file serves as a placeholder and reminder to do that the next time there
is development on the program_enrollments writing API.
"""
from __future__ import absolute_import, unicode_literals
| ESOedX/edx-platform | lms/djangoapps/program_enrollments/api/tests/test_writing.py | Python | agpl-3.0 | 574 | 0.001742 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_dropshipping
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| rosenvladimirov/odoo | anglo_saxon_dropshipping/__init__.py | Python | agpl-3.0 | 1,071 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.traceable_stack."""
from tensorflow.python.framework import test_util
from tensorflow.python.framework import traceable_stack
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect as inspect
_LOCAL_OBJECT = lambda x: x
_THIS_FILENAME = inspect.getsourcefile(_LOCAL_OBJECT)
class TraceableObjectTest(test_util.TensorFlowTestCase):
def testSetFilenameAndLineFromCallerUsesCallersStack(self):
t_obj = traceable_stack.TraceableObject(17)
# Do not separate placeholder from the set_filename_and_line_from_caller()
# call one line below it as it is used to calculate the latter's line
# number.
placeholder = lambda x: x
result = t_obj.set_filename_and_line_from_caller()
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(_THIS_FILENAME, t_obj.filename)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerRespectsOffset(self):
def call_set_filename_and_line_from_caller(t_obj):
# We expect to retrieve the line number from _our_ caller.
return t_obj.set_filename_and_line_from_caller(offset=1)
t_obj = traceable_stack.TraceableObject(None)
# Do not separate placeholder from the
# call_set_filename_and_line_from_caller() call one line below it as it is
# used to calculate the latter's line number.
placeholder = lambda x: x
result = call_set_filename_and_line_from_caller(t_obj)
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerHandlesRidiculousOffset(self):
t_obj = traceable_stack.TraceableObject('The quick brown fox.')
# This line shouldn't die.
result = t_obj.set_filename_and_line_from_caller(offset=300)
# We expect a heuristic to be used because we are not currently 300 frames
# down on the stack. The filename and lineno of the outermost frame are not
# predictable -- in some environments the filename is this test file, but in
# other environments it is not (e.g. due to a test runner calling this
# file). Therefore we only test that the called function knows it applied a
# heuristic for the ridiculous stack offset.
self.assertEqual(t_obj.HEURISTIC_USED, result)
class TraceableStackTest(test_util.TensorFlowTestCase):
def testPushPeekPopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
t_stack.push_obj('hope')
expected_lifo_peek = ['hope', 42.0]
self.assertEqual(expected_lifo_peek, list(t_stack.peek_objs()))
self.assertEqual('hope', t_stack.pop_obj())
self.assertEqual(42.0, t_stack.pop_obj())
def testPushPeekTopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
t_stack.push_obj('hope')
self.assertEqual('hope', t_stack.peek_top_obj())
def testPushPopPreserveLifoOrdering(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(0)
t_stack.push_obj(1)
t_stack.push_obj(2)
t_stack.push_obj(3)
obj_3 = t_stack.pop_obj()
obj_2 = t_stack.pop_obj()
obj_1 = t_stack.pop_obj()
obj_0 = t_stack.pop_obj()
self.assertEqual(3, obj_3)
self.assertEqual(2, obj_2)
self.assertEqual(1, obj_1)
self.assertEqual(0, obj_0)
def testPushObjSetsFilenameAndLineInfoForCaller(self):
t_stack = traceable_stack.TraceableStack()
# We expect that the line number recorded for the 1-object will come from
# the call to t_stack.push_obj(1). Do not separate the next two lines!
placeholder_1 = lambda x: x
t_stack.push_obj(1)
# We expect that the line number recorded for the 2-object will come from
# the call to call_push_obj() and _not_ the call to t_stack.push_obj().
def call_push_obj(obj):
t_stack.push_obj(obj, offset=1)
# Do not separate the next two lines!
placeholder_2 = lambda x: x
call_push_obj(2)
expected_lineno_1 = inspect.getsourcelines(placeholder_1)[1] + 1
expected_lineno_2 = inspect.getsourcelines(placeholder_2)[1] + 1
t_obj_2, t_obj_1 = t_stack.peek_traceable_objs()
self.assertEqual(expected_lineno_2, t_obj_2.lineno)
self.assertEqual(expected_lineno_1, t_obj_1.lineno)
if __name__ == '__main__':
googletest.main()
| tensorflow/tensorflow | tensorflow/python/framework/traceable_stack_test.py | Python | apache-2.0 | 5,145 | 0.003693 |
import numpy
from GenOpt import GeneticOptimizer
##Booths Function but with an additional slack variable to show the constraint feature.
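## Booth's function has its minimum of 0 at x = 1, y = 3; the third column acts
## as a slack variable so that each candidate satisfies x + y + slack == 10.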
def BoothsFnc(x):
return (x[:, 0] + 2*x[:, 1] - 7)**2 + (2*x[:, 0] + x[:, 1] - 5)**2
InitialSolutions = [numpy.array([numpy.random.uniform(), numpy.random.uniform(), numpy.random.uniform()]) for i in range(1000)]
InitialSolutions = numpy.vstack([10*z/sum(z) for z in InitialSolutions])
G2 = GeneticOptimizer(InitialSolutions = InitialSolutions,
Objective = BoothsFnc,
ub = [10,10,10],
lb = [0,0,0],
Sense = 'min',
MutationType = 'Sum Constraint',
Crossover = 'Weighted Average')
R = G2.GA() | kevinlioi/GenOpt | examples/constrained_optimization.py | Python | mit | 762 | 0.045932 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from home.models import HomeImage
def index(request):
info = {}
    # chained order_by() calls replace each other, so pass both fields at once
    info['homeImages'] = HomeImage.objects.all().order_by('order', 'title')
return render_to_response('home/index.html', { 'info': info }, context_instance=RequestContext(request))
| UTAlan/PhotosByMallie | mallie/home/views.py | Python | gpl-2.0 | 361 | 0.019391 |
#-*- coding: utf-8 -*-
__all__ = ["XML_BibleParser", "reference"]
import re
import xml.etree.ElementTree as ET
from BibleParser.abstract import parser as abstract_parser, reference as abstract_reference
from BibleParser.error import *
from BibleParser.Numbers import Number
class parser(abstract_parser):
"""
Une implémentation de "BibleParser" manipulant un fichier XML organisé de
la manière suivante:
/bible : la racine
/b : une suite de livres
/c : une liste de chapitres
/v : une liste de versets
Chacun des noeuds "b", "c" ou "v" est identifié par un attribut "n".
"""
def __init__(self, xml_content):
"""
        Parse the content of the XML file containing the bible and store the
        root under the "bible" attribute.
"""
        # TODO: call the parent constructor?
abstract_parser.__init__(self)
if not isinstance(xml_content, str):
raise ValueError("expected the content of an XML file")
self.bible = ET.fromstring(xml_content)
        # Build a map of parent links between every element of the XML tree
self._parent_map = dict((c, p) for p in self.bible.iter() for c in p)
def get_element_parent(self, element):
"""
        An addition to the ElementTree interface: allows selecting the parent
        of any node.
"""
return self._parent_map[element]
def get_greatest_element_index(self, root, element):
"""
        Return the greatest index (the "n" attribute) among the sub-elements
        of the root node /root/.
        The elements are of type /element/.
"""
greatest = None
for child in root.iterfind("./{}".format(element)):
greatest = child
if greatest is None:
return greatest
return int(greatest.attrib["n"])
def get_book_size(self, book_element):
"""
        Return the size (number of chapters) of the book passed as a DOM node.
"""
return self.get_greatest_element_index(
book_element,
"c"
)
def get_chapter_size(self, chapter_element):
"""
        Return the size (number of verses) of the chapter passed as a DOM node.
"""
return self.get_greatest_element_index(
chapter_element,
"v"
)
def _parse_verse(self, book_element, chapter_element, verse_element):
"""
        Check that a verse (given as its node) satisfies the requirements of
        the keyword search.
        If it does, the matches are optionally highlighted.
        Return a pair consisting of a "reference" object and the verse text.
"""
if verse_element.text is None:
return
text = verse_element.text
        # strip any markers of altered verse numbering
text = self._regex_match_alter_verse.sub("", text)
        # keyword-matching gate: skip verses that do not match
if not self._verse_match_rules(text):
return
        # highlighting
if self._highlight_prefix is not None:
text = self._prefix_matches(text)
return (
reference(
self,
None,
book_element.attrib["n"],
int(chapter_element.attrib["n"]),
None,
int(verse_element.attrib["n"]),
None
),
text
)
def get_book_element(self, book_name):
"""
Retourne le noeud du livre dont le nom est passé en argument.
"""
book_element = self.bible.find('./b[@n="{}"]'.format(book_name))
if book_element is None:
raise InvalidBookName(book_name)
return book_element
def get_chapter_element(self, book_element, chapter_index):
"""
Retourne le noeud du chapitre dont le numéro est passé en argument.
Le livre doit-être donné en premier argument en tant que noeud DOM.
"""
chapter_element = book_element.find('./c[@n="{}"]'.format(chapter_index))
if chapter_element is None:
raise InvalidChapterIndex(
book_element.attrib["n"],
chapter_index
)
return chapter_element
def get_verse_element(self, chapter_element, verse_index):
"""
Retourne le noeud du verset dont le numéro est passé en argument.
Le chapitre doit-être donné en premier argument en tant que noeud DOM.
"""
verse_element = chapter_element.find('./v[@n="{}"]'.format(verse_index))
if verse_element is None:
raise InvalidVerseIndex(
self.get_element_parent(chapter_element).attrib["n"],
chapter_element.attrib["n"],
verse_index
)
return verse_element
def _build_chapter_range(self, book_element, ref_obj):
"""
Construit un intervalle dense d'indices de chapitres à partir d'une
référence.
Le livre doit-être donné en premier argument en tant que noeud DOM.
"""
        # Select every chapter
if ref_obj.chapter_low == -1:
chapter_range = range(
1,
self.get_greatest_element_index(book_element, "c")+1
)
        # Select a range of chapters
elif ref_obj.chapter_high != -1:
chapter_range = range(
ref_obj.chapter_low,
ref_obj.chapter_high+1
)
        # Select a single chapter
else:
chapter_range = (ref_obj.chapter_low,)
return chapter_range
def _build_verse_range(self, chapter_element, ref_obj):
"""
Construit un intervalle dense d'indices de versets à partir d'une
référence.
Le chapitre doit-être donné en premier argument en tant que noeud DOM.
"""
        # Select every verse of the chapter
if ref_obj.verse_low == -1:
verse_range = range(
1,
self.get_greatest_element_index(chapter_element, "v")+1
)
        # Select a range of verses
elif ref_obj.verse_high != -1:
verse_range = range(
ref_obj.verse_low,
ref_obj.verse_high+1
)
        # Select a single verse
else:
verse_range = (ref_obj.verse_low,)
return verse_range
def add_reference(self, ref_str):
"""
Ajoute une référence en l'état.
L'entrée est une chaîne, ce qui est stocké est une instance de la classe
"reference".
"""
ref_obj = reference(self, ref_str)
self.references[str(ref_obj)] = ref_obj
def add_contextual_reference(self,
ref_str,
left_lookahead,
right_lookahead):
"""
Ajoute une référence simple en l'élargissant afin d'en faire ressortir
le contexte.
"""
# TODO ne pas déborder au delà d'un chapitre dans le contexte pour les Psaumes
# TODO il faut permettre un choix entre plusieurs types de débordement (coupe exacte, au dernier point, au chapitre, au livre)
ref_obj = reference(self, ref_str)
for new_bible_reference in ref_obj.get_overflowing_references(
left_lookahead,
right_lookahead
):
            # add the new reference
self.references[str(new_bible_reference)] = new_bible_reference
def __iter__(self):
"""
Recherche dans la bible à partir de références et les retournes une
à une sous la forme d'objets de type "reference".
"""
# Parcours toute la bible en cas d'absence de référence
if not self.references:
for book_element in self.bible.iterfind("./b"):
for chapter_element in book_element.iterfind("./c"):
for verse_element in chapter_element.iterfind("./v"):
res = self._parse_verse(
book_element,
chapter_element,
verse_element
)
if res is not None:
yield res
        # Walk only the specific references
        else:
            for ref_str in self.references:
                ref_obj = self.references[ref_str]
                # fetch the book node
book_element = self.get_book_element(ref_obj.book)
                # build the range of chapters to walk
chapter_range = self._build_chapter_range(
book_element,
ref_obj
)
for chapter_index in chapter_range:
                    # fetch the chapter node
chapter_element = self.get_chapter_element(
book_element,
chapter_index
)
                    # build the range of verses to walk
verse_range = self._build_verse_range(
chapter_element,
ref_obj
)
for verse_index in verse_range:
                        # access the verse node
verse_element = self.get_verse_element(
chapter_element,
verse_index
)
res = self._parse_verse(
book_element,
chapter_element,
verse_element
)
if res is not None:
yield res
class reference(abstract_reference):
"""
Une référence biblique connectée à un parseur XML.
Ceci permet d'accéder à des fonctionnalités plus poussée:
° récupérer les élements DOM associés à la référence (voir _get_xml_*)
° récupérer la taille d'un chapitre (le chapitre courant ou le
précédent, ou encore un autre)
° générer des références à partir d'un débordement à droite ou à gauche
"""
# une instance de la classe "XMLBibleParser"
xml_bible_parser = None
book_xml_element = None
chapter_xml_element = None
verse_xml_element = None
_book_size = None
_chapter_size = None
def __init__(self,
parser,
input,
book=None,
chapter_low=None,
chapter_high=None,
verse_low=None,
verse_high=None):
        # TODO: call the parent constructor?
abstract_reference.__init__(
self,
input,
book,
chapter_low,
chapter_high,
verse_low,
verse_high
)
self.xml_bible_parser = parser
def _get_xml_book_element(self):
"""
Récupère, si ce n'est déjà fait, le noeud associé au livre _courant_.
Retourne ce noeud.
"""
if self.book_xml_element is None:
self.book_xml_element = self.xml_bible_parser.get_book_element(self.book)
return self.book_xml_element
def _get_xml_chapter_element(self):
"""
Récupère, si ce n'est déjà fait, le noeud associé au chapitre _courant_.
Ignore le cas où la référence comporte un intervalle de chapitres
(choisi la borne basse de l'intervalle).
Retourne ce noeud.
"""
if self.chapter_xml_element is None:
self.chapter_xml_element = self.xml_bible_parser.get_chapter_element(
self._get_xml_book_element(),
self.chapter_low
)
return self.chapter_xml_element
def _get_xml_verse_element(self):
"""
Récupère, si ce n'est déjà fait, le noeud associé au verset _courant_.
Ignore le cas où la référence comporte un intervalle de versets
(choisi la borne basse de l'intervalle).
Retourne ce noeud.
"""
if self.verse_xml_element is None:
self.verse_xml_element = self.xml_bible_parser.get_verse_element(
self._get_xml_book_element(),
self._get_xml_chapter_element(),
self.verse_low
)
return self.verse_xml_element
def _get_chapter_size(self):
"""
Retourne la taille du chapitre _courant_.
"""
if self._chapter_size is None:
self._chapter_size = self.xml_bible_parser.get_chapter_size(
self._get_xml_chapter_element()
)
return self._chapter_size
def _get_book_size(self):
"""
Retourne la taille du livre _courant_ (en nombre de chapitres).
"""
if self._book_size is None:
self._book_size = self.xml_bible_parser.get_book_size(
self._get_xml_book_element()
)
return self._book_size
def _get_overflowing_references(self,
verse_index,
chapter_index,
left_lookahead,
right_lookahead,
chapter_element=None):
"""
Obtient de manière récursive des références en débordant à droite et à
gauche aussi loin que nécessaire.
Est un itérateur.
"""
if chapter_element is None:
            # Assume the chapter to find is given by "chapter_index"
chapter_element = self.xml_bible_parser.get_chapter_element(
self._get_xml_book_element(),
chapter_index
)
        # Select to the left
new_verse_low = verse_index - left_lookahead
if new_verse_low < 1:
            # we need to look into the previous chapter
if chapter_index > 1:
prev_chapter_element = self.xml_bible_parser.get_chapter_element(
self._get_xml_book_element(),
chapter_index - 1
)
prev_chapt_size = self.xml_bible_parser.get_chapter_size(prev_chapter_element)
                # recurse "to the left", instantiating a new reference
for r in self._get_overflowing_references(
                        # the anchor becomes the last verse of the previous
                        # chapter
prev_chapt_size,
chapter_index - 1,
                        # the left overflow becomes the result of the
                        # subtraction above
-new_verse_low,
0,
                        # pass the previous chapter node directly to avoid an
                        # extra walk through the DOM
prev_chapter_element
):
yield r
            # the leftmost verse of interest is clamped to the first verse of
            # the _current_ chapter
new_verse_low = 1
        # Select to the right
new_verse_high = verse_index + right_lookahead
to_yield = []
        # get the size of the chapter
chapter_size = self._get_chapter_size()
if new_verse_high > chapter_size:
            # we need to look into the next chapter
if chapter_index < self._get_book_size():
                # recurse "to the right"
for r in self._get_overflowing_references(
                        # the anchor becomes the first verse of the next chapter
1,
chapter_index + 1,
0,
new_verse_high - chapter_size - 1
):
                    # references produced by a right overflow will be yielded
                    # later
to_yield.append(r)
            # the rightmost verse of interest is clamped to the last verse of
            # the _current_ chapter
new_verse_high = chapter_size
        # At any given iteration, always yield a reference pointing to the
        # same book and the same chapter
yield reference(
self.xml_bible_parser,
None,
self.book,
chapter_index,
-1,
new_verse_low,
new_verse_high
)
        # Yield the right-hand references _after_ the _current_ reference
for r in to_yield:
yield r
def get_overflowing_references(self,
left_lookahead,
right_lookahead):
"""
Obtient de manière récursive des références en débordant à droite et à
gauche aussi loin que nécessaire.
"""
if left_lookahead < 1 or right_lookahead < 1:
raise ValueError("need lookahead quantities greater than 1")
collection = []
for r in self._get_overflowing_references(
self.verse_low,
self.chapter_low,
left_lookahead,
right_lookahead
):
collection.append(r)
return collection
| oelson/concordance | lib/BibleParser/xml.py | Python | gpl-2.0 | 17,962 | 0.001689 |
__all__ = ['get_config_vars', 'get_path']
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
def get_path(name):
if name not in ('platlib', 'purelib'):
raise ValueError("Name must be purelib or platlib")
return get_python_lib(name=='platlib')
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory(object):
""""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp()
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only possible failures; ignore them
pass
self.name = None
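# Hypothetical usage sketch: either implementation exposes the same minimal
# context-manager API, e.g.
#   with TemporaryDirectory() as tmp_dir:
#       ...  # work under tmp_dir; cleanup on exit is best-effort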
| OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/setuptools/py31compat.py | Python | apache-2.0 | 1,184 | 0.005068 |
# -*- coding: utf-8 -*-
# 226. Invert Binary Tree
#
# Invert a binary tree.
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
#
# to
#
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Trivia:
# This problem was inspired by this original tweet by Max Howell:
#
# Google: 90% of our engineers use the software you wrote (Homebrew),
# but you can’t invert a binary tree on a whiteboard so fuck off.
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# http://www.jianshu.com/p/85abb0a5f83e
# Swap the left and right subtrees of every node; the children of those
# subtrees need to be swapped in turn, which naturally suggests recursion.
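# Complexity: the recursive swap visits each node exactly once, so it runs in
# O(n) time and needs O(h) stack space for a tree of height h.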
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
# http://www.tangjikai.com/algorithms/leetcode-226-invert-binary-tree
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
tmp = root.left
root.left = root.right
root.right = tmp
self.invertTree(root.left)
self.invertTree(root.right)
return root
def invertTree2(self, root):
if root:
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
| gengwg/leetcode | 226_invert_binary_tree.py | Python | apache-2.0 | 1,673 | 0.001267 |
import libtcodpy as libtcod
import consts
def find_closest_target(caster, entities, range):
closest_target = None
closest_dist = range + 1
for obj in entities:
if obj.fighter and obj != caster:
dist = caster.distance_to(obj)
if dist < closest_dist:
closest_target = obj
closest_dist = dist
return closest_target
def random_choice(chances):
dice = libtcod.random_get_int(0, 1, sum(chances))
running_sum = 0
choice = 0
for c in chances:
running_sum += c
if dice <= running_sum:
return choice
choice += 1
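# For example, random_choice([70, 20, 10]) returns index 0 roughly 70% of the
# time, index 1 roughly 20% and index 2 roughly 10% of the time.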
def random_choice_dict(chances_dict):
chances = list(chances_dict.values())
strings = list(chances_dict.keys())
return strings[random_choice(chances)]
def from_dungeon_level(table, dungeon_level):
for (value, level) in reversed(table):
if dungeon_level >= level:
return value
return 0
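# The table is a list of (value, minimum_dungeon_level) pairs ordered by level;
# for example from_dungeon_level([(10, 1), (15, 4), (25, 6)], 5) returns 15,
# since level 5 has reached the (15, 4) entry but not the (25, 6) one.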
def build_leveled_item_list(level):
item_chances = {}
item_chances[consts.ITEM_HEALTHPOTION_NAME] = consts.ITEM_HEALTHPOTION_SPAWNRATE
item_chances[consts.ITEM_SCROLLLIGHTNING_NAME] = from_dungeon_level(consts.ITEM_SCROLLLIGHTNING_SPAWNRATE, level)
item_chances[consts.ITEM_SCROLLCONFUSE_NAME] = from_dungeon_level(consts.ITEM_SCROLLCONFUSE_SPAWNRATE, level)
item_chances[consts.ITEM_SWORDCOPPER_NAME] = from_dungeon_level(consts.ITEM_SWORDCOPPER_SPAWNRATE, level)
item_chances[consts.ITEM_BUCKLERCOPPER_NAME] = from_dungeon_level(consts.ITEM_BUCKLERCOPPER_SPAWNRATE, level)
return item_chances
def build_leveled_mob_list(level):
mob_chances = {}
mob_chances[consts.MOB_KOBOLD_NAME] = consts.MOB_KOBOLD_SPAWNRATE
mob_chances[consts.MOB_SKELETON_NAME] = consts.MOB_SKELETON_SPAWNRATE
mob_chances[consts.MOB_ORC_NAME] = from_dungeon_level(consts.MOB_ORC_SPAWNRATE, level)
return mob_chances
def get_equipped_in_slot(inventory, slot_to_check):
for obj in inventory:
if obj.equipment and obj.equipment.slot == slot_to_check \
and obj.equipment.is_equipped:
return obj.equipment
return None
def get_all_equipped(inventory):
equipped_list = []
for item in inventory:
if item.equipment and item.equipment.is_equipped:
equipped_list.append(item.equipment)
return equipped_list | MykeMcG/SummerRoguelike | src/utils.py | Python | gpl-3.0 | 2,363 | 0.003809 |