| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_7/ar_/test_artificial_1024_RelativeDifference_PolyTrend_7__20.py | Python | bsd-3-clause | 274 | 0.083942 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import pytest
from django import forms
from django.http import QueryDict
from test_project.one_to_one.models import Restaurant, Waiter
from url_filter.backends.django import DjangoFilterBackend
from url_filter.filters import Filter
from url_filter.filtersets.base import FilterSet, StrictMode
from url_filter.utils import FilterSpec
class TestFilterSet(object):
def test_init(self):
fs = FilterSet(
data='some data',
queryset='queryset',
context={'context': 'here'},
strict_mode=StrictMode.fail,
)
assert fs.data == 'some data'
assert fs.queryset == 'queryset'
assert fs.context == {'context': 'here'}
assert fs.strict_mode == StrictMode.fail
def test_get_filters(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
filters = TestFilterSet().get_filters()
assert isinstance(filters, dict)
assert list(filters.keys()) == ['foo']
assert isinstance(filters['foo'], Filter)
assert filters['foo'].parent is None
def test_filters(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
fs = TestFilterSet()
filters = fs.filters
assert isinstance(filters, dict)
assert list(filters.keys()) == ['foo']
assert isinstance(filters['foo'], Filter)
assert filters['foo'].parent is fs
assert filters['foo'].name == 'foo'
def test_default_filter_no_default(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
assert TestFilterSet().default_filter is None
def test_default_filter(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField(), is_default=True)
bar = Filter(form_field=forms.CharField())
default = TestFilterSet().default_filter
assert isinstance(default, Filter)
assert default.name == 'foo'
def test_validate_key(self):
assert FilterSet().validate_key('foo') is None
assert FilterSet().validate_key('foo__bar') is None
assert FilterSet().validate_key('foo__bar!') is None
with pytest.raises(forms.ValidationError):
FilterSet().validate_key('f!oo')
def test_get_filter_backend(self):
backend = FilterSet().get_filter_backend()
assert isinstance(backend, DjangoFilterBackend)
def test_filter_no_queryset(self):
fs = FilterSet()
with pytest.raises(AssertionError):
fs.filter()
def test_filter_data_not_querydict(self):
fs = FilterSet(queryset=[])
with pytest.raises(AssertionError):
fs.filter()
def test_get_specs(self):
class BarFilterSet(FilterSet):
other = Filter(source='stuff',
form_field=forms.CharField(),
default_lookup='contains')
thing = Filter(form_field=forms.IntegerField(min_value=0, max_value=15))
class FooFilterSet(FilterSet):
field = Filter(form_field=forms.CharField())
bar = BarFilterSet()
def _test(data, expected, **kwargs):
fs = FooFilterSet(
data=QueryDict(data),
queryset=[],
**kwargs
)
assert set(fs.get_specs()) == set(expected)
_test('field=earth&bar__other=mars', [
FilterSpec(['field'], 'exact', 'earth', False),
FilterSpec(['bar', 'stuff'], 'contains', 'mars', False),
])
_test('field!=earth&bar__other=mars', [
FilterSpec(['field'], 'exact', 'earth', True),
FilterSpec(['bar', 'stuff'], 'contains', 'mars', False),
])
_test('field__in=earth,pluto&bar__other__icontains!=mars', [
FilterSpec(['field'], 'in', ['earth', 'pluto'], False),
FilterSpec(['bar', 'stuff'], 'icontains', 'mars', True),
])
_test('fields__in=earth,pluto&bar__other__icontains!=mars', [
FilterSpec(['bar', 'stuff'], 'icontains', 'mars', True),
])
_test('field__in=earth,pluto&bar__ot!her__icontains!=mars', [
FilterSpec(['field'], 'in', ['earth', 'pluto'], False),
])
_test('bar__thing=5', [
FilterSpec(['bar', 'thing'], 'exact', 5, False),
])
_test('bar__thing__in=5,10,15', [
FilterSpec(['bar', 'thing'], 'in', [5, 10, 15], False),
])
_test('bar__thing__range=5,10', [
FilterSpec(['bar', 'thing'], 'range', [5, 10], False),
])
_test('bar=5', [])
_test('bar__thing__range=5,10,15', [])
_test('bar__thing=100', [])
_test('bar__thing__in=100,5', [])
with pytest.raises(forms.ValidationError):
_test('bar__thing__in=100,5', [], strict_mode=StrictMode.fail)
def test_filter_one_to_one(self, one_to_one):
class PlaceFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
name = Filter(form_field=forms.CharField(max_length=50))
address = Filter(form_field=forms.CharField(max_length=80))
class RestaurantFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
place = PlaceFilterSet()
serves_hot_dogs = Filter(form_field=forms.BooleanField(required=False))
serves_pizza = Filter(form_field=forms.BooleanField(required=False))
class WaiterFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
restaurant = RestaurantFilterSet()
name = Filter(form_field=forms.CharField(max_length=50))
def _test(fs, data, qs, expected, count):
_fs = fs(
data=QueryDict(data),
queryset=qs,
)
filtered = _fs.filter()
assert filtered.count() == count
assert set(filtered) == set(expected)
_test(
RestaurantFilterSet,
'place__name__startswith=Demon',
Restaurant.objects.all(),
Restaurant.objects.filter(place__name__startswith='Demon'),
1
)
_test(
RestaurantFilterSet,
'place__address__contains!=Ashland',
Restaurant.objects.all(),
Restaurant.objects.exclude(place__address__contains='Ashland'),
1
)
_test(
WaiterFilterSet,
'restaurant__place__pk=1',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place=1),
2
)
_test(
WaiterFilterSet,
'restaurant__place=1',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place=1),
2
)
_test(
WaiterFilterSet,
'restaurant__place__name__startswith=Demon',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place__name__startswith="Demon"),
2
)
_test(
WaiterFilterSet,
('restaurant__place__name__startswith=Demon'
'&name__icontains!=jon'),
Waiter.objects.all(),
(Waiter.objects
.filter(restaurant__place__name__startswith="Demon")
.exclude(name__icontains='jon')),
1
)
| AlexandreProenca/django-url-filter | tests/filtersets/test_base.py | Python | mit | 7,675 | 0.000782 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Mapping
from mo_logs import Log
from mo_dots import set_default, wrap, split_field, join_field, concat_field
from mo_math import Math
from pyLibrary.queries.domains import is_keyword
from pyLibrary.queries.expressions import Expression
from pyLibrary.queries.namespace import convert_list, Namespace
from pyLibrary.queries.query import QueryOp
from mo_times.dates import Date
class Typed(Namespace):
"""
    NOTE: USING THE ".$value" SUFFIX IS DEPRECATED: CURRENT VERSIONS OF ES ARE STRONGLY TYPED, LEAVING NO
CASE WHERE A GENERAL "value" IS USEFUL. WE WOULD LIKE TO MOVE TO ".$number", ".$string", ETC. FOR
EACH TYPE, LIKE WE DO WITH DATABASES
"""
def __init__(self):
self.converter_map = {
"and": self._convert_many,
"or": self._convert_many,
"not": self.convert,
"missing": self.convert,
"exists": self.convert
}
def convert(self, expr):
"""
ADD THE ".$value" SUFFIX TO ALL VARIABLES
"""
if isinstance(expr, Expression):
vars_ = expr.vars()
rename = {v: concat_field(v, "$value") for v in vars_}
return expr.map(rename)
if expr is True or expr == None or expr is False:
return expr
elif Math.is_number(expr):
return expr
elif expr == ".":
return "."
elif is_keyword(expr):
#TODO: LOOKUP SCHEMA AND ADD ALL COLUMNS WITH THIS PREFIX
return expr + ".$value"
elif isinstance(expr, basestring):
Log.error("{{name|quote}} is not a valid variable name", name=expr)
elif isinstance(expr, Date):
return expr
elif isinstance(expr, QueryOp):
return self._convert_query(expr)
elif isinstance(expr, Mapping):
if expr["from"]:
return self._convert_query(expr)
elif len(expr) >= 2:
#ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
return wrap({name: self.convert(value) for name, value in expr.items()})
else:
# ASSUME SINGLE-CLAUSE EXPRESSION
k, v = expr.items()[0]
return self.converter_map.get(k, self._convert_bop)(k, v)
elif isinstance(expr, (list, set, tuple)):
return wrap([self.convert(value) for value in expr])
def _convert_query(self, query):
output = QueryOp("from", None)
output.select = self._convert_clause(query.select)
output.where = self.convert(query.where)
output.frum = self._convert_from(query.frum)
output.edges = self._convert_clause(query.edges)
output.groupby = self._convert_clause(query.groupby)
output.window = convert_list(self._convert_window, query.window)
output.having = convert_list(self._convert_having, query.having)
output.sort = self._convert_clause(query.sort)
output.limit = query.limit
output.format = query.format
return output
def _convert_clause(self, clause):
"""
        JSON QUERY EXPRESSIONS HAVE MANY CLAUSES WITH SIMILAR COLUMN DECLARATIONS
"""
if clause == None:
return None
elif isinstance(clause, Mapping):
return set_default({"value": self.convert(clause["value"])}, clause)
else:
return [set_default({"value": self.convert(c.value)}, c) for c in clause]
def _convert_from(self, frum):
return frum
def _convert_having(self, having):
raise NotImplementedError()
def _convert_window(self, window):
raise NotImplementedError()
def _convert_many(self, k, v):
return {k: map(self.convert, v)}
def _convert_bop(self, op, term):
if isinstance(term, list):
return {op: map(self.convert, term)}
return {op: {var: val for var, val in term.items()}}
| klahnakoski/esReplicate | pyLibrary/queries/namespace/typed.py | Python | mpl-2.0 | 4,356 | 0.002525 |
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
    # This is a bandaid helper function until we get a full
# KBase python auth client released
auth = _base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
status = ret.status_code
if status >= 200 and status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination for user %s' % (user_id))
else:
raise Exception(ret.text)
return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if _os.path.exists(file):
try:
with open(file) as authrc:
rawdata = _json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=_os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in ('user_id', 'token',
'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class ModelComparison(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in _os.environ:
self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_context:
arg_hash['context'] = json_rpc_context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
json_header = None
if _CT in ret.headers:
json_header = ret.headers[_CT]
if _CT in ret.headers and ret.headers[_CT] == _AJ:
err = _json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
ret.encoding = 'utf-8'
resp = _json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def compare_models(self, params, json_rpc_context = None):
if json_rpc_context and type(json_rpc_context) is not dict:
raise ValueError('Method compare_models: argument json_rpc_context is not type dict as required.')
resp = self._call('ModelComparison.compare_models',
[params], json_rpc_context)
return resp[0]
| mdejongh/ModelComparison | lib/ModelComparison/ModelComparisonClient.py | Python | mit | 7,099 | 0.001127 |
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from base import PulpServerTests
from pulp.server.db import connection
from pulp.server.db.migrate.models import MigrationModule
from pulp.plugins.types.database import TYPE_COLLECTION_PREFIX
ID = '_id'
LAST_UPDATED = '_last_updated'
MIGRATION = 'pulp.server.db.migrations.0005_unit_last_updated'
def test_collections(n=3):
names = []
for suffix in range(0, n):
name = TYPE_COLLECTION_PREFIX + str(suffix)
names.append(name)
return names
def test_units(n=10):
units = []
for unit_id in range(0, n):
unit = {ID: unit_id}
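        # Give every even-numbered unit a pre-existing _last_updated value so the
        # migration test below can verify that existing timestamps are preserved
        # while missing ones are filled in.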
if unit_id % 2 == 0:
unit[LAST_UPDATED] = 1
units.append(unit)
return units
TEST_COLLECTIONS = test_collections()
TEST_UNITS = test_units()
class TestMigration_0005(PulpServerTests):
def setUp(self):
self.clean()
super(TestMigration_0005, self).setUp()
for collection in [connection.get_collection(n, True) for n in TEST_COLLECTIONS]:
for unit in TEST_UNITS:
collection.save(unit, safe=True)
def tearDown(self):
super(TestMigration_0005, self).tearDown()
self.clean()
def clean(self):
database = connection.get_database()
for name in [n for n in database.collection_names() if n in TEST_COLLECTIONS]:
database.drop_collection(name)
def test(self):
# migrate
module = MigrationModule(MIGRATION)._module
module.migrate()
# validation
for collection in [connection.get_collection(n) for n in TEST_COLLECTIONS]:
for unit in collection.find({}):
self.assertTrue(LAST_UPDATED in unit)
unit_id = unit[ID]
last_updated = unit[LAST_UPDATED]
if unit_id % 2 == 0:
self.assertEqual(last_updated, 1)
else:
self.assertTrue(isinstance(last_updated, float))
| beav/pulp | server/test/unit/test_migration_0005.py | Python | gpl-2.0 | 2,511 | 0.001195 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_attention."""
import numpy as np
import tensorflow as tf
from multiple_user_representations.models import parametric_attention
class ParametricAttentionTest(tf.test.TestCase):
def test_parametric_attention_model_with_single_representation(self):
model = parametric_attention.SimpleParametricAttention(
output_dimension=2,
input_embedding_dimension=2,
vocab_size=10,
num_representations=1,
max_sequence_size=20)
input_batch = tf.convert_to_tensor(
np.random.randint(low=0, high=10, size=(10, 20)))
output = model(input_batch)
self.assertIsInstance(model, tf.keras.Model)
self.assertSequenceEqual(output.numpy().shape, [10, 1, 2])
def test_parametric_attention_model_with_multiple_representations(self):
model = parametric_attention.SimpleParametricAttention(
output_dimension=2,
input_embedding_dimension=2,
vocab_size=10,
num_representations=3,
max_sequence_size=20)
input_batch = tf.convert_to_tensor(
np.random.randint(low=0, high=10, size=(10, 20)))
output = model(input_batch)
self.assertIsInstance(model, tf.keras.Model)
self.assertSequenceEqual(output.numpy().shape, [10, 3, 2])
if __name__ == '__main__':
tf.test.main()
| google-research/google-research | multiple_user_representations/models/parametric_attention_test.py | Python | apache-2.0 | 1,915 | 0.001567 |
from maze_builder.sewer import MadLibs, Choice
from maze_builder.lost_text.text_decorations import fix_sentence
negative_status_sentence = MadLibs({
'{NEGATIVE_STATUS INTENSIFIED SENTENCE FIXED}': 1,
'{NEGATIVE_STATUS SENTENCE FIXED}': 1,
},
FIXED=fix_sentence,
SENTENCE={
'{}.': 30,
'{}!': 10,
'{} again?': 10,
'{} now.': 2,
'{} here.': 5,
'{} here!': 2,
'{}. Now I know what that means.': 1,
'{}: and not for the first time.': 1,
'{} -- and not for the first time.': 1,
'{}? Yes, always.': 1,
'I feel {}.': 10,
'I feel so {}.': 5,
'I feel so {}!': 5,
'I\'m {}.': 10,
'I\'m so {}.': 5,
'Will I always be so {}?': 1,
'Why am I so {}?': 1,
'No one knows how {} I am.': 1,
'Has anyone ever been so {} before?': 1,
'Has anyone ever been so {}?': 1,
'Has anyone ever felt so {}?': 1,
'I never want to feel this {} again.': 1,
'I hope I\'ll never be so {} again.': 1,
'I can\'t stand being so {}.': 1,
'I\'ve never been so {}.': 1,
'I\'ve never been so {} before.': 1,
'Before this trip, I\'d never been so {}.': 1,
'At home, no one is ever so {}.': 1,
'So {} a person can be.': 1,
'So {} a person can feel.': 1,
'We weren\'t meant to feel so {}.': 1,
'I never knew what it was like to be so {}.': 1,
'No one has ever been so {}.': 1,
'I could write a book about being so {}.': 1,
'Even in my dreams, I\'m {}.': 1,
'I\'m as {} as I\'ve ever been.': 1,
'Why does God allow us to be so {}?': 1,
'Would I have come this way, if I\'d known I\'d be so {}?': 1,
},
INTENSIFIED={
'awfully {}': 1,
'amazingly {}': 1,
'cursedly {}': 1,
'critically {}': 1,
'deathly {}': 1,
'meagerly {}': 0.2,
'super-{}': 1,
'devastatingly {}': 1,
'terribly {}': 1,
'dreadfully {}': 1,
'wickedly {}': 1,
'disgracefully {}': 1,
'completely {}': 1,
'reprehensibly {}': 1,
'unforgivably {}': 1,
'unpleasantly {}': 1,
'wretchedly {}': 1,
},
NEGATIVE_STATUS={ # Used as '{}' or 'I'm so {}...'
'hungry': 2,
'cold': 2,
'tired': 5,
'exhausted': 1,
'defeated': 1,
'worn out': 1,
'ravenous': 1,
'faint': 1,
'empty': 1,
'hollow': 1,
'insatiable': 1,
'famished': 1,
'unsatisfied': 1,
'beat': 1,
'annoyed': 1,
'bored': 2,
'distressed': 1,
'drained': 1,
'exasperated': 1,
'fatigued': 1,
'sleepy': 1,
'collapsing': 1,
'jaded': 1,
'overtaxed': 1,
'spent': 1,
'wasted': 1,
'worn': 1,
'burned out': 1,
'done for': 1,
'lost': 20,
'desolate': 1,
'lonesome': 1,
'alone': 1,
'spiritless': 1,
'sick and tired': 1,
'sick': 1,
'unenthusiastic': 1,
'unenergetic': 1,
'adrift': 1,
'disoriented': 5,
'astray': 1,
'off-course': 5,
'perplexed': 2,
'bewildered': 2,
'confused': 5,
'contrite': 1,
'unsettled': 1,
'puzzled': 5,
'ailing': 1,
'ill': 1,
'debilitated': 1,
'frail': 1,
'impaired': 1,
'nauseated': 2,
'bedridden': 1,
'not so hot': 1,
'under the weather': 1,
'run down': 1,
'unhealthy': 1,
'unwell': 1,
'weak': 1,
'laid-up': 1,
'rotten': 1,
'anemic': 1,
'feeble': 1,
'confused': 10,
'fragile': 1,
'hesitant': 2,
'powerless': 1,
'uncertain': 5,
'shaky': 1,
'sickly': 1,
'sluggish': 1,
'slow': 1,
'unsteady': 1,
'weakened': 1,
'wobbly': 1,
'puny': 1,
'out of gas': 1,
'irresolute': 1,
'spent': 1,
'infirm': 1,
'chilled': 1,
'frozen': 1,
'frigid': 1,
'raw': 1,
'numbed': 1,
'benumbed': 1,
'thirsty': 1,
'parched': 1,
'injured': 5,
'afraid': 5,
'terrified': 1,
'anxious': 1,
'apprehensive': 1,
'frightened': 1,
'nervous': 1,
'scared': 1,
'cowardly': 1,
'daunted': 1,
'discouraged': 1,
'disheartened': 1,
'dismayed': 1,
'distressed': 1,
'horrified': 1,
'panic-stricken': 1,
'petrified': 1,
'scared stiff': 1,
'scared to death': 1,
'terror-stricken': 1,
'humbled': 1,
'dead': 1,
'naked': 1,
'wild': 1,
'uncivilized': 1,
'scorched': 1,
'withered': 1,
'sunburned': 1,
'windburned': 1,
'frostbitten': 1,
'dehydrated': 1,
'shriveled': 1,
'dried up': 1,
'dried out': 1,
'smelly': 1,
'stinky': 1,
'noxious': 1,
'putrid': 1,
'revolting': 1,
'grody': 1,
'gross': 1,
'icky': 1,
}
)
| kcsaff/maze-builder | maze_builder/lost_text/negative_status.py | Python | mit | 5,360 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner multiple phones',
'version': '0.008',
'category': 'Customizations',
'sequence': 16,
'complexity': 'normal',
'description': '''== Partner multiple phones module ==\n\n
    This module adds a tab to manage multiple phones for a partner.\n
    The phones are searchable from the tree list view, as in the standard module.\n
    This module doesn't break phone functionality because it keeps the phone char field in the partner form.\n
''',
'author': 'ThinkOpen Solutions Brasil',
'website': 'http://www.tkobr.com',
'images': ['images/oerp61.jpeg',
],
'depends': [
'tko_contacts',
],
'data': [
'security/ir.model.access.csv',
'views/tko_partner_phones_view.xml',
'views/res_partner_view.xml',
],
'init': [],
'demo': [],
'update': [],
'test': [], # YAML files with tests
'installable': True,
'application': False,
# If it's True, the modules will be auto-installed when all dependencies
# are installed
'auto_install': False,
'certificate': '',
}
| thinkopensolutions/tkobr-addons | tko_partner_multiple_phones/__manifest__.py | Python | agpl-3.0 | 2,154 | 0.000464 |
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
import colorsys
import PySide2
from PySide2.QtCore import Qt
from PySide2.QtGui import QColor
class QColorGetTest(unittest.TestCase):
def setUp(self):
self.color = QColor(20, 40, 60, 80)
def testGetRgb(self):
self.assertEqual(self.color.getRgb(), (20, 40, 60, 80))
def testGetHslF(self):
hls = colorsys.rgb_to_hls(20.0/255, 40.0/255, 60.0/255)
hsla = hls[0], hls[2], hls[1], self.color.alphaF()
for x, y in zip(self.color.getHslF(), hsla): # Due to rounding problems
self.assertTrue(x - y < 1/100000.0)
def testGetHsv(self):
hsv = colorsys.rgb_to_hsv(20.0/255, 40.0/255, 60.0/255)
hsva = int(hsv[0]*360.0), int(hsv[1]*255), int(hsv[2]*256), self.color.alpha()
self.assertEqual(self.color.getHsv(), hsva)
def testGetCmyk(self): # not supported by colorsys
self.assertEqual(self.color.getCmyk(), (170, 85, 0, 195, 80))
def testGetCmykF(self): # not supported by colorsys
for x, y in zip(self.color.getCmykF(), (170/255.0, 85/255.0, 0, 195/255.0, 80/255.0)):
self.assertTrue(x - y < 1/10000.0)
class QColorQRgbConstructor(unittest.TestCase):
'''QColor(QRgb) constructor'''
# Affected by bug #170 - QColor(QVariant) coming before QColor(uint)
# in overload sorting
def testBasic(self):
'''QColor(QRgb)'''
color = QColor(255, 0, 0)
#QRgb format #AARRGGBB
rgb = 0x00FF0000
self.assertEqual(QColor(rgb), color)
class QColorEqualGlobalColor(unittest.TestCase):
def testEqualGlobalColor(self):
'''QColor == Qt::GlobalColor'''
self.assertEqual(QColor(255, 0, 0), Qt.red)
class QColorCopy(unittest.TestCase):
def testDeepCopy(self):
'''QColor deepcopy'''
from copy import deepcopy
original = QColor(0, 0, 255)
copy = deepcopy([original])[0]
self.assertTrue(original is not copy)
self.assertEqual(original, copy)
del original
self.assertEqual(copy, QColor(0, 0, 255))
def testEmptyCopy(self):
from copy import deepcopy
original = QColor()
copy = deepcopy([original])[0]
self.assertTrue(original is not copy)
self.assertEqual(original, copy)
del original
self.assertEqual(copy, QColor())
class QColorRepr(unittest.TestCase):
def testReprFunction(self):
c = QColor(100, 120, 200)
c2 = eval(c.__repr__())
self.assertEqual(c, c2)
def testStrFunction(self):
c = QColor('red')
c2 = eval(c.__str__())
self.assertEqual(c, c2)
if __name__ == '__main__':
unittest.main()
| qtproject/pyside-pyside | tests/QtGui/qcolor_test.py | Python | lgpl-2.1 | 3,949 | 0.006584 |
from os import walk
files = []
for (dirpath, dirnames, filenames) in walk('./data/'):
files.extend(filenames)
break
data = []
OFratio = None
for file in files:
t = []
with open('./data/' + file) as f:
for i, line in enumerate(f):
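            # Only a fixed set of line numbers in each data file carries the
            # values of interest; matching lines are split and collected in
            # order, then unpacked positionally below (O/F ratio, pressures,
            # temperatures, molecular weight, gamma, Mach number, heat capacities).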
if i in [15, 24, 25, 29, 31, 34, 39]:
t.append(line.split())
OFratio = t[0][2]
Pc = t[1][1]
Tc = t[2][1]
Te = t[2][4]
Pe = t[1][4]
MW = t[5][4]
gamma = t[4][4]
Mach = t[6][5]
Cpc = t[3][3]
Cpe = t[3][6]
data.append([Pc, Tc, Te, Pe, MW, gamma, Mach, Cpc, Cpe])
if len(data) < 15:
print('[WRN] Less than 15 keys!')
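# Assemble the parsed values into a MixtureRatioData/PressureData text block
# (presumably the config-node format read by ProcEngines) and append it to
# ./data/results.txt.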
block = ''.join(['MixtureRatioData\n{\n OFratio =', OFratio,
'\n PressureData\n {\n',
''.join([' key = {}, {}, {}, {}, {}, {}, {}, {}, {}\n'.format(*line) for line in data]),
' }\n}'])
with open('./data/results.txt', 'a') as f:
f.write(block)
| ferram4/ProcEngines | GameData/ProcEngines/PropellantMixtures/config_parser.py | Python | mit | 928 | 0.003233 |
# -*- coding: utf-8 -*-
# Style_Check:Python_Fragment (meaning no pyflakes check)
#
# GNAT build configuration file
import sys
import os
import time
import re
sys.path.append('.')
import ada_pygments
import latex_elements
# Some configuration values for the various documentation handled by
# this conf.py
DOCS = {
'gnat_rm': {
'title': u'GNAT Reference Manual'},
'gnat_ugn': {
'title': u'GNAT User\'s Guide for Native Platforms'}}
# Then retrieve the source directory
root_source_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
gnatvsn_spec = os.path.join(root_source_dir, '..', 'gnatvsn.ads')
basever = os.path.join(root_source_dir, '..', '..', 'BASE-VER')
texi_fsf = True # Set to False when FSF doc is switched to sphinx by default
with open(gnatvsn_spec, 'rb') as fd:
gnatvsn_content = fd.read()
def get_copyright():
return u'2008-%s, Free Software Foundation' % time.strftime('%Y')
def get_gnat_version():
m = re.search(r'Gnat_Static_Version_String : ' +
r'constant String := "([^\(\)]+)\(.*\)?";',
gnatvsn_content)
if m:
return m.group(1).strip()
else:
if texi_fsf and os.path.exists(basever):
return ''
try:
with open(basever, 'rb') as fd:
return fd.read()
except:
pass
print 'cannot find GNAT version in gnatvsn.ads or in ' + basever
sys.exit(1)
def get_gnat_build_type():
m = re.search(r'Build_Type : constant Gnat_Build_Type := (.+);',
gnatvsn_content)
if m:
return {'Gnatpro': 'PRO',
'FSF': 'FSF',
'GPL': 'GPL'}[m.group(1).strip()]
else:
print 'cannot compute GNAT build type'
sys.exit(1)
# First retrieve the name of the documentation we are building
doc_name = os.environ.get('DOC_NAME', None)
if doc_name is None:
print 'DOC_NAME environment variable should be set'
sys.exit(1)
if doc_name not in DOCS:
print '%s is not a valid documentation name' % doc_name
sys.exit(1)
# Exclude sources that are not part of the current documentation
exclude_patterns = []
for d in os.listdir(root_source_dir):
if d not in ('share', doc_name, doc_name + '.rst'):
exclude_patterns.append(d)
print 'ignoring %s' % d
if doc_name == 'gnat_rm':
exclude_patterns.append('share/gnat_project_manager.rst')
print 'ignoring share/gnat_project_manager.rst'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = doc_name
# General information about the project.
project = DOCS[doc_name]['title']
copyright = get_copyright()
version = get_gnat_version()
release = get_gnat_version()
pygments_style = 'sphinx'
tags.add(get_gnat_build_type())
html_theme = 'sphinxdoc'
if os.path.isfile('adacore_transparent.png'):
html_logo = 'adacore_transparent.png'
if os.path.isfile('favicon.ico'):
html_favicon = 'favicon.ico'
html_static_path = ['_static']
latex_additional_files = ['gnat.sty']
copyright_macros = {
'date': time.strftime("%b %d, %Y"),
'edition': 'GNAT %s Edition' % 'Pro' if get_gnat_build_type() == 'PRO'
else 'GPL',
'name': u'GNU Ada',
'tool': u'GNAT',
'version': version}
latex_elements = {
'preamble': '\\usepackage{gnat}\n' +
latex_elements.TOC_DEPTH +
latex_elements.PAGE_BLANK +
latex_elements.TOC_CMD +
latex_elements.LATEX_HYPHEN +
latex_elements.doc_settings(DOCS[doc_name]['title'],
get_gnat_version()),
'tableofcontents': latex_elements.TOC % copyright_macros}
latex_documents = [
(master_doc, '%s.tex' % doc_name, project, u'AdaCore', 'manual')]
texinfo_documents = [
(master_doc, doc_name, project,
u'AdaCore', doc_name, doc_name, '')]
def setup(app):
app.add_lexer('ada', ada_pygments.AdaLexer())
app.add_lexer('gpr', ada_pygments.GNATProjectLexer())
| Gurgel100/gcc | gcc/ada/doc/share/conf.py | Python | gpl-2.0 | 3,966 | 0.000756 |
from django.db import models
from fluxbb import FLUXBB_PREFIX
class SearchWord(models.Model):
"""
FluxBB Search Word
Fields on this model match exactly with those defined by fluxbb, see the
[fluxbb dbstructure](http://fluxbb.org/docs/v1.5/dbstructure#users).
"""
id = models.AutoField(primary_key=True)
word = models.CharField(max_length=20, default="")
class Meta:
app_label = 'fluxbb'
db_table = FLUXBB_PREFIX + 'search_words'
| kalhartt/django-fluxbb | fluxbb/models/search_word.py | Python | gpl-2.0 | 481 | 0 |
#!/usr/bin/python
import smbus
import time
'''
retrieve data from wii ir camera.
x = 0-1023
y = 0-720
size = 1-15?
top right of scene = [0,0]
'''
def getBlob(n,list): # return x,y,size for blob n (0-3) from list
if len(list)<13:
return []
x = list[1+(n*3)]
y = list[2+(n*3)]
s = list[3+(n*3)]
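    # Each blob is packed into 3 bytes: the low 8 bits of x, the low 8 bits of y,
    # and a third byte whose bits 4-5 and 6-7 supply the top two bits of x and y
    # respectively, with bits 0-3 holding the blob size.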
x += (s&0x30)<<4
y += (s&0xC0)<<2
s = s&0x0F
return [x,y,s]
wiiAddr = 0x58
i2c = smbus.SMBus(1)
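# The register writes below appear to be the standard Wiimote IR camera
# initialisation sequence (wake-up, sensitivity and output-mode settings).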
i2c.write_byte_data(wiiAddr, 0x30,0x01)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x30,0x08)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x06,0x90)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x08,0xC0)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x1A,0x40)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x33,0x33)
time.sleep(0.05)
while 1:
data = i2c.read_i2c_block_data(wiiAddr, 0x36, 16)
print len(data), "\t", getBlob(0,data), "\t", getBlob(1,data), "\t", getBlob(2,data), "\t", getBlob(3,data)
time.sleep(0.5)
| roving99/robot_pi | 0mq/wii_ir_test.py | Python | gpl-2.0 | 966 | 0.030021 |
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import collections
from cStringIO import StringIO
from bzrlib import (
debug,
errors,
)
from bzrlib.trace import mutter
class MessageHandler(object):
"""Base class for handling messages received via the smart protocol.
As parts of a message are received, the corresponding PART_received method
will be called.
"""
def __init__(self):
self.headers = None
def headers_received(self, headers):
"""Called when message headers are received.
This default implementation just stores them in self.headers.
"""
self.headers = headers
def byte_part_received(self, byte):
"""Called when a 'byte' part is received.
Note that a 'byte' part is a message part consisting of exactly one
byte.
"""
        raise NotImplementedError(self.byte_part_received)
def bytes_part_received(self, bytes):
"""Called when a 'bytes' part is received.
A 'bytes' message part can contain any number of bytes. It should not
be confused with a 'byte' part, which is always a single byte.
"""
        raise NotImplementedError(self.bytes_part_received)
def structure_part_received(self, structure):
"""Called when a 'structure' part is received.
:param structure: some structured data, which will be some combination
of list, dict, int, and str objects.
"""
        raise NotImplementedError(self.structure_part_received)
def protocol_error(self, exception):
"""Called when there is a protocol decoding error.
The default implementation just re-raises the exception.
"""
raise
def end_received(self):
"""Called when the end of the message is received."""
# No-op by default.
pass
class ConventionalRequestHandler(MessageHandler):
"""A message handler for "conventional" requests.
"Conventional" is used in the sense described in
doc/developers/network-protocol.txt: a simple message with arguments and an
optional body.
Possible states:
* args: expecting args
* body: expecting body (terminated by receiving a post-body status)
* error: expecting post-body error
* end: expecting end of message
* nothing: finished
"""
def __init__(self, request_handler, responder):
MessageHandler.__init__(self)
self.request_handler = request_handler
self.responder = responder
self.expecting = 'args'
self._should_finish_body = False
self._response_sent = False
def protocol_error(self, exception):
if self.responder.response_sent:
# We can only send one response to a request, no matter how many
# errors happen while processing it.
return
self.responder.send_error(exception)
def byte_part_received(self, byte):
if self.expecting == 'body':
if byte == 'S':
# Success. Nothing more to come except the end of message.
self.expecting = 'end'
elif byte == 'E':
# Error. Expect an error structure.
self.expecting = 'error'
else:
raise errors.SmartProtocolError(
'Non-success status byte in request body: %r' % (byte,))
else:
raise errors.SmartProtocolError(
'Unexpected message part: byte(%r)' % (byte,))
def structure_part_received(self, structure):
if self.expecting == 'args':
self._args_received(structure)
elif self.expecting == 'error':
self._error_received(structure)
else:
raise errors.SmartProtocolError(
'Unexpected message part: structure(%r)' % (structure,))
def _args_received(self, args):
self.expecting = 'body'
self.request_handler.args_received(args)
if self.request_handler.finished_reading:
self._response_sent = True
self.responder.send_response(self.request_handler.response)
self.expecting = 'end'
def _error_received(self, error_args):
self.expecting = 'end'
self.request_handler.post_body_error_received(error_args)
def bytes_part_received(self, bytes):
if self.expecting == 'body':
self._should_finish_body = True
self.request_handler.accept_body(bytes)
else:
raise errors.SmartProtocolError(
'Unexpected message part: bytes(%r)' % (bytes,))
def end_received(self):
if self.expecting not in ['body', 'end']:
raise errors.SmartProtocolError(
'End of message received prematurely (while expecting %s)'
% (self.expecting,))
self.expecting = 'nothing'
self.request_handler.end_received()
if not self.request_handler.finished_reading:
raise errors.SmartProtocolError(
"Complete conventional request was received, but request "
"handler has not finished reading.")
if not self._response_sent:
self.responder.send_response(self.request_handler.response)
class ResponseHandler(object):
"""Abstract base class for an object that handles a smart response."""
def read_response_tuple(self, expect_body=False):
"""Reads and returns the response tuple for the current request.
:keyword expect_body: a boolean indicating if a body is expected in the
response. Some protocol versions needs this information to know
when a response is finished. If False, read_body_bytes should
*not* be called afterwards. Defaults to False.
:returns: tuple of response arguments.
"""
raise NotImplementedError(self.read_response_tuple)
def read_body_bytes(self, count=-1):
"""Read and return some bytes from the body.
:param count: if specified, read up to this many bytes. By default,
reads the entire body.
:returns: str of bytes from the response body.
"""
raise NotImplementedError(self.read_body_bytes)
def read_streamed_body(self):
"""Returns an iterable that reads and returns a series of body chunks.
"""
raise NotImplementedError(self.read_streamed_body)
def cancel_read_body(self):
"""Stop expecting a body for this response.
If expect_body was passed to read_response_tuple, this cancels that
expectation (and thus finishes reading the response, allowing a new
request to be issued). This is useful if a response turns out to be an
error rather than a normal result with a body.
"""
raise NotImplementedError(self.cancel_read_body)
class ConventionalResponseHandler(MessageHandler, ResponseHandler):
def __init__(self):
MessageHandler.__init__(self)
self.status = None
self.args = None
self._bytes_parts = collections.deque()
self._body_started = False
self._body_stream_status = None
self._body = None
self._body_error_args = None
self.finished_reading = False
def setProtoAndMediumRequest(self, protocol_decoder, medium_request):
self._protocol_decoder = protocol_decoder
self._medium_request = medium_request
def byte_part_received(self, byte):
if byte not in ['E', 'S']:
raise errors.SmartProtocolError(
'Unknown response status: %r' % (byte,))
if self._body_started:
if self._body_stream_status is not None:
raise errors.SmartProtocolError(
'Unexpected byte part received: %r' % (byte,))
self._body_stream_status = byte
else:
if self.status is not None:
raise errors.SmartProtocolError(
'Unexpected byte part received: %r' % (byte,))
self.status = byte
def bytes_part_received(self, bytes):
self._body_started = True
self._bytes_parts.append(bytes)
def structure_part_received(self, structure):
if type(structure) is not tuple:
raise errors.SmartProtocolError(
'Args structure is not a sequence: %r' % (structure,))
if not self._body_started:
if self.args is not None:
raise errors.SmartProtocolError(
'Unexpected structure received: %r (already got %r)'
% (structure, self.args))
self.args = structure
else:
if self._body_stream_status != 'E':
raise errors.SmartProtocolError(
'Unexpected structure received after body: %r'
% (structure,))
self._body_error_args = structure
def _wait_for_response_args(self):
while self.args is None and not self.finished_reading:
self._read_more()
def _wait_for_response_end(self):
while not self.finished_reading:
self._read_more()
def _read_more(self):
next_read_size = self._protocol_decoder.next_read_size()
if next_read_size == 0:
# a complete request has been read.
self.finished_reading = True
self._medium_request.finished_reading()
return
bytes = self._medium_request.read_bytes(next_read_size)
if bytes == '':
# end of file encountered reading from server
if 'hpss' in debug.debug_flags:
mutter(
'decoder state: buf[:10]=%r, state_accept=%s',
self._protocol_decoder._get_in_buffer()[:10],
self._protocol_decoder.state_accept.__name__)
raise errors.ConnectionReset(
"Unexpected end of message. "
"Please check connectivity and permissions, and report a bug "
"if problems persist.")
self._protocol_decoder.accept_bytes(bytes)
def protocol_error(self, exception):
# Whatever the error is, we're done with this request.
self.finished_reading = True
self._medium_request.finished_reading()
raise
def read_response_tuple(self, expect_body=False):
"""Read a response tuple from the wire."""
self._wait_for_response_args()
if not expect_body:
self._wait_for_response_end()
if 'hpss' in debug.debug_flags:
mutter(' result: %r', self.args)
if self.status == 'E':
self._wait_for_response_end()
_raise_smart_server_error(self.args)
return tuple(self.args)
def read_body_bytes(self, count=-1):
"""Read bytes from the body, decoding into a byte stream.
We read all bytes at once to ensure we've checked the trailer for
errors, and then feed the buffer back as read_body_bytes is called.
Like the builtin file.read in Python, a count of -1 (the default) means
read the entire body.
"""
# TODO: we don't necessarily need to buffer the full request if count
# != -1. (2008/04/30, Andrew Bennetts)
if self._body is None:
self._wait_for_response_end()
body_bytes = ''.join(self._bytes_parts)
if 'hpss' in debug.debug_flags:
mutter(' %d body bytes read', len(body_bytes))
self._body = StringIO(body_bytes)
self._bytes_parts = None
return self._body.read(count)
def read_streamed_body(self):
while not self.finished_reading:
while self._bytes_parts:
bytes_part = self._bytes_parts.popleft()
if 'hpssdetail' in debug.debug_flags:
mutter(' %d byte part read', len(bytes_part))
yield bytes_part
self._read_more()
if self._body_stream_status == 'E':
_raise_smart_server_error(self._body_error_args)
def cancel_read_body(self):
self._wait_for_response_end()
def _raise_smart_server_error(error_tuple):
"""Raise exception based on tuple received from smart server
Specific error translation is handled by bzrlib.remote._translate_error
"""
if error_tuple[0] == 'UnknownMethod':
raise errors.UnknownSmartMethod(error_tuple[1])
raise errors.ErrorFromSmartServer(error_tuple)
| Distrotech/bzr | bzrlib/smart/message.py | Python | gpl-2.0 | 13,276 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReferenceDataSetsOperations:
"""ReferenceDataSetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.timeseriesinsights.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_or_update(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
parameters: "_models.ReferenceDataSetCreateOrUpdateParameters",
**kwargs
) -> "_models.ReferenceDataSetResource":
"""Create or update a reference data set in the specified environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: Name of the reference data set.
:type reference_data_set_name: str
:param parameters: Parameters for creating a reference data set.
:type parameters: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetCreateOrUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetResource, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ReferenceDataSetCreateOrUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}'} # type: ignore
async def get(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
**kwargs
) -> "_models.ReferenceDataSetResource":
"""Gets the reference data set with the specified name in the specified environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: The name of the Time Series Insights reference data set
associated with the specified environment.
:type reference_data_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetResource, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}'} # type: ignore
async def update(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
reference_data_set_update_parameters: "_models.ReferenceDataSetUpdateParameters",
**kwargs
) -> "_models.ReferenceDataSetResource":
"""Updates the reference data set with the specified name in the specified subscription, resource
group, and environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: The name of the Time Series Insights reference data set
associated with the specified environment.
:type reference_data_set_name: str
:param reference_data_set_update_parameters: Request object that contains the updated
information for the reference data set.
:type reference_data_set_update_parameters: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetResource, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(reference_data_set_update_parameters, 'ReferenceDataSetUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
**kwargs
) -> None:
"""Deletes the reference data set with the specified name in the specified subscription, resource
group, and environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: The name of the Time Series Insights reference data set
associated with the specified environment.
:type reference_data_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}'} # type: ignore
async def list_by_environment(
self,
resource_group_name: str,
environment_name: str,
**kwargs
) -> "_models.ReferenceDataSetListResponse":
"""Lists all the available reference data sets associated with the subscription and within the
specified resource group and environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetListResponse, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetListResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
accept = "application/json"
# Construct URL
url = self.list_by_environment.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReferenceDataSetListResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_environment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets'} # type: ignore
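# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch for the async operations defined in
# this file; it is not part of the generated SDK module.  It assumes the
# caller already holds a constructed management client whose
# `reference_data_sets` attribute is an instance of this operations class --
# that attribute name is an assumption, while the method names and parameters
# (get, update, delete, list_by_environment) come from the code above.
# ---------------------------------------------------------------------------
async def _example_reference_data_set_usage(client, resource_group, environment_name, data_set_name):
    # Fetch a single reference data set by name.
    data_set = await client.reference_data_sets.get(
        resource_group, environment_name, data_set_name)
    # List every reference data set in the environment.
    listing = await client.reference_data_sets.list_by_environment(
        resource_group, environment_name)
    # Delete the data set (the service answers 200 or 204; the method returns None).
    await client.reference_data_sets.delete(
        resource_group, environment_name, data_set_name)
    return data_set, listing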
| Azure/azure-sdk-for-python | sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/aio/operations/_reference_data_sets_operations.py | Python | mit | 19,516 | 0.00497 |
from setuptools import setup, find_packages
import os
description = "Make sure all source files have your standard licensing stub "\
"at the top."
long_description = ""
setup(name='plicense',
version='0.2.1',
description=description,
long_description=long_description,
classifiers=[],
keywords='license',
author='Dustin Oprea',
author_email='myselfasunder@gmail.com',
url='https://github.com/dsoprea/LicensePrepend',
license='GPL 2',
packages=find_packages(exclude=[]),
include_package_data=True,
zip_safe=True,
install_requires=[
'nose'
],
scripts=['scripts/plicense'],
)
| dsoprea/LicensePrepend | setup.py | Python | gpl-2.0 | 692 | 0.001445 |
# coding: utf-8
# pylint: disable = invalid-name, C0111, C0301
# pylint: disable = R0912, R0913, R0914, W0105, W0201, W0212
"""Wrapper c_api of LightGBM"""
from __future__ import absolute_import
import ctypes
import os
from tempfile import NamedTemporaryFile
import numpy as np
import scipy.sparse
from .compat import (DataFrame, Series, integer_types, json,
json_default_with_numpy, numeric_types, range_,
string_type)
from .libpath import find_lib_path
def _load_lib():
"""Load LightGBM Library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
raise Exception("cannot find LightGBM library")
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
return lib
_LIB = _load_lib()
class LightGBMError(Exception):
"""Error throwed by LightGBM"""
pass
def _safe_call(ret):
"""Check the return value of C API call
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError())
def is_numeric(obj):
"""Check is a number or not, include numpy number etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check is 1d numpy array"""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_1d_list(data):
"""Check is 1d list"""
return isinstance(data, list) and \
(not data or isinstance(data[0], numeric_types))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""convert to 1d numpy array"""
if is_numpy_1d_array(data):
if data.dtype == dtype:
return data
else:
return data.astype(dtype=dtype, copy=False)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, Series):
return data.values.astype(dtype)
else:
raise TypeError("Wrong type({}) for {}, should be list or numpy array".format(type(data).__name__, name))
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.fromiter(cptr, dtype=np.int32, count=length)
else:
raise RuntimeError('Expected int pointer')
def c_str(string):
"""Convert a python string to cstring."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a python array to c array."""
return (ctype * len(values))(*values)
def param_dict_to_str(data):
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
pairs.append(str(key) + '=' + ','.join(map(str, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
else:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs)
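# Editor's note: an illustrative sketch added by the editor (not part of the
# library) showing how parameter dictionaries are flattened into the
# space-separated "key=value" string handed to the LightGBM C API.
def _example_param_dict_to_str():
    params = {'num_leaves': 31, 'metric': ['auc', 'binary_logloss']}
    # Produces e.g. "num_leaves=31 metric=auc,binary_logloss"
    # (dictionary ordering decides which pair comes first).
    return param_dict_to_str(params)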
class _temp_file(object):
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
"""marco definition of data type in c_api of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matric is row major in python"""
C_API_IS_ROW_MAJOR = 1
"""marco definition of prediction type in c_api of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
"""data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
def c_float_array(data):
"""get pointer of float numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError("Expected np.float32 or np.float64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
def c_int_array(data):
"""get pointer of int numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
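# Editor's note: an illustrative sketch added by the editor (not part of the
# library) of how the two helpers above expose numpy buffers to ctypes without
# copying supported dtypes, returning both the raw pointer and a type tag for
# the C API.
def _example_c_array_helpers():
    values = np.array([0.5, 1.5, 2.5], dtype=np.float32)
    ptr_values, type_values = c_float_array(values)
    assert type_values == C_API_DTYPE_FLOAT32
    indices = np.array([0, 2, 4], dtype=np.int32)
    ptr_indices, type_indices = c_int_array(indices)
    assert type_indices == C_API_DTYPE_INT32
    return ptr_values, ptr_indices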
PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'float16': 'float',
'float32': 'float', 'float64': 'float', 'bool': 'int'}
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, DataFrame):
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = data.select_dtypes(include=['category']).columns
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is pandas Index object
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes)
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto':
categorical_feature = list(cat_cols)
else:
categorical_feature = list(categorical_feature) + list(cat_cols)
if feature_name == 'auto':
feature_name = list(data.columns)
data_dtypes = data.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
bad_fields = [data.columns[i] for i, dtype in
enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]
msg = """DataFrame.dtypes for data must be int, float or bool. Did not expect the data types in fields """
raise ValueError(msg + ', '.join(bad_fields))
data = data.values.astype('float')
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
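# Editor's note: an illustrative sketch added by the editor (not part of the
# library); it assumes pandas is installed.  Categorical columns are replaced
# by their integer codes, and the category lists are remembered in
# pandas_categorical so that validation data can be encoded the same way.
def _example_data_from_pandas():
    import pandas as pd
    df = pd.DataFrame({'colour': pd.Categorical(['red', 'blue', 'red']),
                       'size': [1.0, 2.0, 3.0]})
    data, feature_name, categorical_feature, pandas_categorical = \
        _data_from_pandas(df, 'auto', 'auto', None)
    # data is now a plain float ndarray; the 'colour' column holds category codes.
    return data, feature_name, categorical_feature, pandas_categorical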
def _label_from_pandas(label):
if isinstance(label, DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
label_dtypes = label.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = label.values.astype('float')
return label
def _save_pandas_categorical(file_name, pandas_categorical):
with open(file_name, 'a') as f:
f.write('\npandas_categorical:' + json.dumps(pandas_categorical, default=json_default_with_numpy))
def _load_pandas_categorical(file_name):
with open(file_name, 'r') as f:
last_line = f.readlines()[-1]
if last_line.startswith('pandas_categorical:'):
return json.loads(last_line[len('pandas_categorical:'):])
return None
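# Editor's note: an illustrative sketch added by the editor (not part of the
# library): the two helpers above append a "pandas_categorical:" JSON line to
# a saved model file and read it back from the file's last line on load.
def _example_pandas_categorical_roundtrip(model_path):
    _save_pandas_categorical(model_path, [['red', 'blue']])
    return _load_pandas_categorical(model_path)  # [['red', 'blue']]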
class _InnerPredictor(object):
"""
A _InnerPredictor of LightGBM.
    Only used for prediction; typically used for continued training.
    Note: can be converted from a Booster, but cannot be converted back to a Booster.
"""
def __init__(self, model_file=None, booster_handle=None):
"""Initialize the _InnerPredictor. Not expose to user
Parameters
----------
model_file : string
Path to the model file.
booster_handle : Handle of Booster
use handle to init
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_num_iterations)))
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = None
else:
raise TypeError('Need Model file or Booster handle to create a predictor')
def __del__(self):
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
def predict(self, data, num_iteration=-1,
raw_score=False, pred_leaf=False, data_has_header=False,
is_reshape=True):
"""
Predict logic
Parameters
----------
data : string/numpy array/scipy.sparse
Data source for prediction
When data type is string, it represents the path of txt file
num_iteration : int
Used iteration for prediction
raw_score : bool
True for predict raw score
pred_leaf : bool
True for predict leaf index
data_has_header : bool
Used for txt data, True if txt data has header
is_reshape : bool
Reshape to (nrow, ncol) if true
Returns
-------
Prediction result
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
int_data_has_header = 1 if data_has_header else 0
if num_iteration > self.num_total_iteration:
num_iteration = self.num_total_iteration
if isinstance(data, string_type):
with _temp_file() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, num_iteration,
predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, num_iteration,
predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, num_iteration,
predict_type)
elif isinstance(data, DataFrame):
preds, nrow = self.__pred_for_np2d(data.values, num_iteration,
predict_type)
else:
try:
csr = scipy.sparse.csr_matrix(data)
preds, nrow = self.__pred_for_csr(csr, num_iteration,
predict_type)
except:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
if pred_leaf:
preds = preds.astype(np.int32)
if is_reshape and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
% (preds.size, nrow))
return preds
def __get_num_preds(self, num_iteration, nrow, predict_type):
"""
Get size of prediction result
"""
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, num_iteration, predict_type):
"""
Predict for a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
def __pred_for_csr(self, csr, num_iteration, predict_type):
"""
Predict for a csr data
"""
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
ptr_data, type_ptr_data = c_float_array(csr.data)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def __pred_for_csc(self, csc, num_iteration, predict_type):
"""
Predict for a csc data
"""
nrow = csc.shape[0]
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr)
ptr_data, type_ptr_data = c_float_array(csc.data)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
class Dataset(object):
"""Dataset in LightGBM."""
def __init__(self, data, label=None, max_bin=255, reference=None,
weight=None, group=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""
Parameters
----------
data : string/numpy array/scipy.sparse
Data source of Dataset.
When data type is string, it represents the path of txt file
label : list or numpy 1-D array, optional
Label of the data
max_bin : int, required
Max number of discrete bin for features
reference : Other Dataset, optional
        If this is a validation dataset, the training data should be used as reference
weight : list or numpy 1-D array , optional
Weight for each instance.
group : list or numpy 1-D array , optional
Group/query size for dataset
silent : boolean, optional
Whether print messages during construction
feature_name : list of str, or 'auto'
Feature names
If 'auto' and data is pandas DataFrame, use data columns name
categorical_feature : list of str or int, or 'auto'
Categorical features,
type int represents index,
type str represents feature names (need to specify feature_name as well)
If 'auto' and data is pandas DataFrame, use pandas categorical columns
params: dict, optional
Other parameters
free_raw_data: Bool
            True if the raw data should be freed after constructing the inner dataset
"""
self.handle = None
self.data = data
self.label = label
self.max_bin = max_bin
self.reference = reference
self.weight = weight
self.group = group
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = params
self.free_raw_data = free_raw_data
self.used_indices = None
self._predictor = None
self.pandas_categorical = None
def __del__(self):
self._free_handle()
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
def _lazy_init(self, data, label=None, max_bin=255, reference=None,
weight=None, group=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical)
label = _label_from_pandas(label)
self.data_has_header = False
"""process for args"""
params = {} if params is None else params
self.max_bin = max_bin
self.predictor = predictor
params["max_bin"] = max_bin
if silent:
params["verbose"] = 0
elif "verbose" not in params:
params["verbose"] = 1
"""get categorical features"""
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, string_type) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, integer_types):
categorical_indices.add(name)
else:
raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
.format(type(name).__name__, name))
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
"""process for reference dataset"""
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
"""start construct data"""
if isinstance(data, string_type):
"""check data has header or not"""
if str(params.get("has_header", "")).lower() == "true" \
or str(params.get("header", "")).lower() == "true":
self.data_has_header = True
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
# load init score
if isinstance(self.predictor, _InnerPredictor):
init_score = self.predictor.predict(data,
raw_score=True,
data_has_header=self.data_has_header,
is_reshape=False)
if self.predictor.num_class > 1:
# need re group init score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
num_data = self.num_data()
for i in range_(num_data):
for j in range_(self.predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * self.predictor.num_class + j]
init_score = new_init_score
self.set_init_score(init_score)
elif self.predictor is not None:
raise TypeError('wrong predictor type {}'.format(type(self.predictor).__name__))
# set feature names
self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""
Initialize data from a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def __init_from_csr(self, csr, params_str, ref_dataset):
"""
Initialize data from a CSR matrix.
"""
if len(csr.indices) != len(csr.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
ptr_data, type_ptr_data = c_float_array(csr.data)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def __init_from_csc(self, csc, params_str, ref_dataset):
"""
Initialize data from a csc matrix.
"""
if len(csc.indices) != len(csc.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr)
ptr_data, type_ptr_data = c_float_array(csc.data)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def construct(self):
"""Lazy init"""
if self.handle is None:
if self.reference is not None:
if self.used_indices is None:
"""create valid"""
self._lazy_init(self.data, label=self.label, max_bin=self.max_bin, reference=self.reference,
weight=self.weight, group=self.group, predictor=self._predictor,
silent=self.silent, params=self.params)
else:
"""construct subset"""
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if self.get_label() is None:
raise ValueError("Label should not be None.")
else:
"""create train"""
self._lazy_init(self.data, label=self.label, max_bin=self.max_bin,
weight=self.weight, group=self.group, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
silent=False, params=None):
"""
        Create validation data aligned with the current dataset
Parameters
----------
data : string/numpy array/scipy.sparse
Data source of Dataset.
When data type is string, it represents the path of txt file
label : list or numpy 1-D array, optional
Label of the training data.
weight : list or numpy 1-D array , optional
Weight for each instance.
group : list or numpy 1-D array , optional
Group/query size for dataset
silent : boolean, optional
Whether print messages during construction
params: dict, optional
Other parameters
"""
ret = Dataset(data, label=label, max_bin=self.max_bin, reference=self,
weight=weight, group=group, silent=silent, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
def subset(self, used_indices, params=None):
"""
Get subset of current dataset
Parameters
----------
used_indices : list of int
Used indices of this subset
params : dict
Other parameters
"""
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = used_indices
return ret
def save_binary(self, filename):
"""
Save Dataset to binary file
Parameters
----------
filename : string
Name of the output file.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
def _update_params(self, params):
if not self.params:
self.params = params
else:
self.params.update(params)
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name: str
The field name of the information
data: numpy array or list or None
            The array of data to be set
"""
if self.handle is None:
raise Exception("Cannot set %s before construct dataset" % field_name)
if data is None:
"""set to None"""
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
elif data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
else:
raise TypeError("Excepted np.float32/64 or np.int32, meet type({})".format(data.dtype))
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name: str
The field name of the information
Returns
-------
info : array
A numpy array of information of the data
"""
if self.handle is None:
raise Exception("Cannot get %s before construct dataset" % field_name)
tmp_out_len = ctypes.c_int()
out_type = ctypes.c_int()
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""
Set categorical features
Parameters
----------
categorical_feature : list of int or str
Name/index of categorical features
"""
if self.categorical_feature == categorical_feature:
return
if self.data is not None:
self.categorical_feature = categorical_feature
self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""
        Set predictor for continued training; it is not recommended for users to call this function directly.
Please set init_model in engine.train or engine.cv
"""
if predictor is self._predictor:
return
if self.data is not None:
self._predictor = predictor
self._free_handle()
else:
raise LightGBMError("Cannot set predictor after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def set_reference(self, reference):
"""
Set reference dataset
Parameters
----------
reference : Dataset
            Will use reference as template to construct the current dataset
"""
self.set_categorical_feature(reference.categorical_feature)
self.set_feature_name(reference.feature_name)
self._set_predictor(reference._predictor)
if self.reference is reference:
return
if self.data is not None:
self.reference = reference
self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""
Set feature name
Parameters
----------
feature_name : list of str
Feature names
"""
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError("Length of feature_name({}) and num_feature({}) don't match".format(len(feature_name), self.num_feature()))
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
def set_label(self, label):
"""
Set label of Dataset
Parameters
----------
label: numpy array or list or None
The label information to be set into Dataset
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(label, name='label')
self.set_field('label', label)
def set_weight(self, weight):
"""
Set weight of each instance.
Parameters
----------
weight : numpy array or list or None
Weight for each data point
"""
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
def set_init_score(self, init_score):
"""
Set init score of booster to start from.
Parameters
----------
init_score: numpy array or list or None
Init score for booster
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
def set_group(self, group):
"""
Set group size of Dataset (used for ranking).
Parameters
----------
group : numpy array or list or None
Group size of each group
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
def get_label(self):
"""
Get the label of the Dataset.
Returns
-------
label : array
"""
if self.label is None and self.handle is not None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""
Get the weight of the Dataset.
Returns
-------
weight : array
"""
if self.weight is None and self.handle is not None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""
Get the initial score of the Dataset.
Returns
-------
init_score : array
"""
if self.init_score is None and self.handle is not None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_group(self):
"""
        Get the group size of the Dataset.
        Returns
        -------
        group : array
"""
if self.group is None and self.handle is not None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM is boundaries data, need to convert to group size
new_group = []
for i in range_(len(self.group) - 1):
new_group.append(self.group[i + 1] - self.group[i])
self.group = new_group
return self.group
def num_data(self):
"""
Get the number of rows in the Dataset.
Returns
-------
number of rows : int
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""
Get the number of columns (features) in the Dataset.
Returns
-------
number of columns : int
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
class Booster(object):
""""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, silent=False):
"""
Initialize the Booster.
Parameters
----------
params : dict
Parameters for boosters.
train_set : Dataset
Training dataset
model_file : string
Path to the model file.
silent : boolean, optional
Whether print messages during construction
"""
self.handle = ctypes.c_void_p()
self.__need_reload_eval_info = True
self.__train_data_name = "training"
self.__attr = {}
self.best_iteration = -1
params = {} if params is None else params
if silent:
params["verbose"] = 0
elif "verbose" not in params:
params["verbose"] = 1
if train_set is not None:
"""Training task"""
if not isinstance(train_set, Dataset):
raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__))
params_str = param_dict_to_str(params)
"""construct booster object"""
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.construct().handle,
c_str(params_str),
ctypes.byref(self.handle)))
"""save reference to data"""
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
"""buffer for inner predict"""
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
elif model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_file)
elif 'model_str' in params:
self.__load_model_from_string(params['model_str'])
else:
raise TypeError('Need at least one training dataset or model file to create booster instance')
def __del__(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.__save_model_to_string()
booster = Booster({'model_str': model_str})
booster.pandas_categorical = self.pandas_categorical
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.__save_model_to_string()
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def set_train_data_name(self, name):
self.__train_data_name = name
def add_valid(self, data, name):
"""
        Add a validation dataset
Parameters
----------
data : Dataset
Validation data
name : String
Name of validation data
"""
if not isinstance(data, Dataset):
raise TypeError('valid data should be Dataset instance, met {}'.format(type(data).__name__))
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
def reset_parameter(self, params):
"""
Reset parameters for booster
Parameters
----------
params : dict
New parameters for boosters
"""
if 'metric' in params:
self.__need_reload_eval_info = True
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
def update(self, train_set=None, fobj=None):
"""
Update for one iteration
        Note: for multi-class tasks, the score is grouped by class_id first, then by row_id.
        If you want the score of the i-th row in the j-th class, access it as score[j * num_data + i],
        and grad and hess should be grouped in the same way.
Parameters
----------
        train_set : Dataset or None
            Training data; None means use the last training data
fobj : function
Customized objective function.
Returns
-------
is_finished, bool
"""
"""need reset training data"""
if train_set is not None and train_set is not self.train_set:
if not isinstance(train_set, Dataset):
raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__))
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
is_finished = ctypes.c_int(0)
if fobj is None:
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
else:
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
def __boost(self, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
        Note: for multi-class tasks, the score is grouped by class_id first, then by row_id.
        If you want the score of the i-th row in the j-th class, access it as score[j * num_data + i],
        and grad and hess should be grouped in the same way.
Parameters
----------
grad : 1d numpy or 1d list
The first order of gradient.
hess : 1d numpy or 1d list
The second order of gradient.
Returns
-------
is_finished, bool
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
if len(grad) != len(hess):
raise ValueError("Lengths of gradient({}) and hessian({}) don't match".format(len(grad), len(hess)))
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""
Rollback one iteration
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
def current_iteration(self):
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def eval(self, data, name, feval=None):
"""
Evaluate for data
Parameters
----------
data : Dataset object
        name : string
Name of data
feval : function
Custom evaluation function.
Returns
-------
result: list
Evaluation result list.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range_(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
"""need to push new valid data"""
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""
Evaluate for training data
Parameters
----------
feval : function
Custom evaluation function.
Returns
-------
        result: list
Evaluation result list.
"""
return self.__inner_eval(self.__train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""
Evaluate for validation data
Parameters
----------
feval : function
Custom evaluation function.
Returns
-------
        result: list
Evaluation result list.
"""
return [item for i in range_(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=-1):
"""
Save model of booster to file
Parameters
----------
filename : str
Filename to save
num_iteration: int
            Number of iterations to save. < 0 means save the best iteration (if it exists)
"""
if num_iteration <= 0:
num_iteration = self.best_iteration
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(num_iteration),
c_str(filename)))
_save_pandas_categorical(filename, self.pandas_categorical)
def __load_model_from_string(self, model_str):
"""[Private] Load model from string"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
def __save_model_to_string(self, num_iteration=-1):
"""[Private] Save model to string"""
if num_iteration <= 0:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
'''if buffer length is not long enough, re-allocate a buffer'''
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
return string_buffer.value.decode()
def dump_model(self, num_iteration=-1):
"""
Dump model to json format
Parameters
----------
num_iteration: int
            Number of iterations to dump. < 0 means dump the best iteration (if it exists)
Returns
-------
Json format of model
"""
if num_iteration <= 0:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
'''if buffer length is not long enough, reallocate a buffer'''
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
return json.loads(string_buffer.value.decode())
def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, data_has_header=False, is_reshape=True):
"""
Predict logic
Parameters
----------
data : string/numpy array/scipy.sparse
Data source for prediction
When data type is string, it represents the path of txt file
num_iteration : int
            Used iteration for prediction; < 0 means predict with the best iteration (if it exists)
raw_score : bool
True for predict raw score
pred_leaf : bool
True for predict leaf index
data_has_header : bool
            Used for txt data, True if txt data has header
is_reshape : bool
Reshape to (nrow, ncol) if true
Returns
-------
Prediction result
"""
predictor = self._to_predictor()
if num_iteration <= 0:
num_iteration = self.best_iteration
return predictor.predict(data, num_iteration, raw_score, pred_leaf, data_has_header, is_reshape)
def _to_predictor(self):
"""Convert to predictor
"""
predictor = _InnerPredictor(booster_handle=self.handle)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def feature_importance(self, importance_type='split'):
"""
Feature importances
Parameters
----------
importance_type : str, default "split"
How the importance is calculated: "split" or "gain"
"split" is the number of times a feature is used in a model
"gain" is the total gain of splits which use the feature
Returns
-------
Array of feature importances
"""
if importance_type not in ["split", "gain"]:
raise KeyError("importance_type must be split or gain")
dump_model = self.dump_model()
ret = [0] * (dump_model["max_feature_idx"] + 1)
def dfs(root):
if "split_feature" in root:
if importance_type == 'split':
ret[root["split_feature"]] += 1
elif importance_type == 'gain':
ret[root["split_feature"]] += root["split_gain"]
dfs(root["left_child"])
dfs(root["right_child"])
for tree in dump_model["tree_info"]:
dfs(tree["tree_structure"])
return np.array(ret)
def __inner_eval(self, data_name, data_idx, feval=None):
"""
        Evaluate training or validation data
"""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.array([0.0 for _ in range_(self.__num_inner_eval)], dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range_(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i]))
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
feval_ret = feval(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""
Predict for training and validation dataset
"""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = \
np.array([0.0 for _ in range_(n_preds)], dtype=np.float64, copy=False)
"""avoid to predict many time in one iteration"""
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError("Wrong length of predict results for data %d" % (data_idx))
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""
Get inner evaluation count and names
"""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
"""Get num of inner evals"""
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
"""Get name of evals"""
tmp_out_len = ctypes.c_int(0)
string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.byref(tmp_out_len),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
self.__name_inner_eval = \
[string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = \
[name.startswith(('auc', 'ndcg')) for name in self.__name_inner_eval]
def attr(self, key):
"""
Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
            The attribute value of the key, or None if the attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""
Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, string_type):
raise ValueError("Set attr only accepts strings")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
| cbecker/LightGBM | python-package/lightgbm/basic.py | Python | mit | 66,093 | 0.001407 |
"""Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format).
Input may be compressed.
Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
An exception is raised if the file is not of a recognized type.
Returned filename is either the input filename or a temporary filename;
in the latter case the caller must ensure that it is removed.
Other temporary files used are removed by the function.
"""
import os
import tempfile
import pipes
import sndhdr
__all__ = ["error", "toaiff"]
table = {}
t = pipes.Template()
t.append('sox -t au - -t aiff -r 8000 -', '--')
table['au'] = t
# XXX The following is actually sub-optimal.
# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
# XXX We must force the output sampling rate else the SGI won't play
# XXX files sampled at 5.5k or 7.333k; however this means that files
# XXX sampled at 11k are unnecessarily expanded.
# XXX Similar comments apply to some other file types.
t = pipes.Template()
t.append('sox -t hcom - -t aiff -r 22050 -', '--')
table['hcom'] = t
t = pipes.Template()
t.append('sox -t voc - -t aiff -r 11025 -', '--')
table['voc'] = t
t = pipes.Template()
t.append('sox -t wav - -t aiff -', '--')
table['wav'] = t
t = pipes.Template()
t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
table['8svx'] = t
t = pipes.Template()
t.append('sox -t sndt - -t aiff -r 16000 -', '--')
table['sndt'] = t
t = pipes.Template()
t.append('sox -t sndr - -t aiff -r 16000 -', '--')
table['sndr'] = t
uncompress = pipes.Template()
uncompress.append('uncompress', '--')
class error(Exception):
pass
def toaiff(filename):
temps = []
ret = None
try:
ret = _toaiff(filename, temps)
finally:
for temp in temps[:]:
if temp != ret:
try:
os.unlink(temp)
except os.error:
pass
temps.remove(temp)
return ret
def _toaiff(filename, temps):
if filename[-2:] == '.Z':
fname = tempfile.mktemp()
temps.append(fname)
sts = uncompress.copy(filename, fname)
if sts:
raise error, filename + ': uncompress failed'
else:
fname = filename
try:
ftype = sndhdr.whathdr(fname)
if ftype:
ftype = ftype[0] # All we're interested in
except IOError, msg:
if type(msg) == type(()) and len(msg) == 2 and \
type(msg[0]) == type(0) and type(msg[1]) == type(''):
msg = msg[1]
if type(msg) != type(''):
msg = `msg`
raise error, filename + ': ' + msg
if ftype == 'aiff':
return fname
if ftype is None or not table.has_key(ftype):
raise error, \
filename + ': unsupported audio file type ' + `ftype`
temp = tempfile.mktemp()
temps.append(temp)
sts = table[ftype].copy(fname, temp)
if sts:
raise error, filename + ': conversion to aiff failed'
return temp
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/toaiff.py | Python | mit | 2,989 | 0.004015 |
# -*- coding: utf-8 -*-
""" S3 Query Construction
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("FS",
"S3FieldSelector",
"S3Joins",
"S3ResourceField",
"S3ResourceQuery",
"S3URLQuery",
"S3URLQueryParser",
)
import datetime
import re
import sys
from gluon import current
from gluon.storage import Storage
from s3dal import Field, Row
from s3fields import S3RepresentLazy
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
ogetattr = object.__getattribute__
TEXTTYPES = ("string", "text")
# =============================================================================
class S3FieldSelector(object):
""" Helper class to construct a resource query """
LOWER = "lower"
UPPER = "upper"
OPERATORS = [LOWER, UPPER]
def __init__(self, name, type=None):
""" Constructor """
if not isinstance(name, basestring) or not name:
raise SyntaxError("name required")
self.name = str(name)
self.type = type
self.op = None
# -------------------------------------------------------------------------
def __lt__(self, value):
return S3ResourceQuery(S3ResourceQuery.LT, self, value)
# -------------------------------------------------------------------------
def __le__(self, value):
return S3ResourceQuery(S3ResourceQuery.LE, self, value)
# -------------------------------------------------------------------------
def __eq__(self, value):
return S3ResourceQuery(S3ResourceQuery.EQ, self, value)
# -------------------------------------------------------------------------
def __ne__(self, value):
return S3ResourceQuery(S3ResourceQuery.NE, self, value)
# -------------------------------------------------------------------------
def __ge__(self, value):
return S3ResourceQuery(S3ResourceQuery.GE, self, value)
# -------------------------------------------------------------------------
def __gt__(self, value):
return S3ResourceQuery(S3ResourceQuery.GT, self, value)
# -------------------------------------------------------------------------
def like(self, value):
return S3ResourceQuery(S3ResourceQuery.LIKE, self, value)
# -------------------------------------------------------------------------
def belongs(self, value):
return S3ResourceQuery(S3ResourceQuery.BELONGS, self, value)
# -------------------------------------------------------------------------
def contains(self, value):
return S3ResourceQuery(S3ResourceQuery.CONTAINS, self, value)
# -------------------------------------------------------------------------
def anyof(self, value):
return S3ResourceQuery(S3ResourceQuery.ANYOF, self, value)
# -------------------------------------------------------------------------
def typeof(self, value):
return S3ResourceQuery(S3ResourceQuery.TYPEOF, self, value)
# -------------------------------------------------------------------------
def lower(self):
self.op = self.LOWER
return self
# -------------------------------------------------------------------------
def upper(self):
self.op = self.UPPER
return self
# -------------------------------------------------------------------------
def expr(self, val):
if self.op and val is not None:
if self.op == self.LOWER and \
hasattr(val, "lower") and callable(val.lower) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.lower()
elif self.op == self.UPPER and \
hasattr(val, "upper") and callable(val.upper) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.upper()
return val
# -------------------------------------------------------------------------
def represent(self, resource):
try:
rfield = S3ResourceField(resource, self.name)
except:
colname = None
else:
colname = rfield.colname
if colname:
if self.op is not None:
return "%s.%s()" % (colname, self.op)
else:
return colname
else:
return "(%s?)" % self.name
# -------------------------------------------------------------------------
@classmethod
def extract(cls, resource, row, field):
"""
Extract a value from a Row
@param resource: the resource
@param row: the Row
@param field: the field
@return: field if field is not a Field/S3FieldSelector instance,
the value from the row otherwise
"""
error = lambda fn: KeyError("Field not found: %s" % fn)
t = type(field)
if isinstance(field, Field):
colname = str(field)
tname, fname = colname.split(".", 1)
elif t is S3FieldSelector:
rfield = S3ResourceField(resource, field.name)
colname = rfield.colname
if not colname:
# unresolvable selector
raise error(field.name)
fname = rfield.fname
tname = rfield.tname
elif t is S3ResourceField:
colname = field.colname
if not colname:
# unresolved selector
return None
fname = field.fname
tname = field.tname
else:
return field
if type(row) is Row:
try:
if tname in row.__dict__:
value = ogetattr(ogetattr(row, tname), fname)
else:
value = ogetattr(row, fname)
except:
try:
value = row[colname]
except (KeyError, AttributeError):
raise error(colname)
elif fname in row:
value = row[fname]
elif colname in row:
value = row[colname]
elif tname is not None and \
tname in row and fname in row[tname]:
value = row[tname][fname]
else:
raise error(colname)
if callable(value):
# Lazy virtual field
try:
value = value()
except:
current.log.error(sys.exc_info()[1])
value = None
if hasattr(field, "expr"):
return field.expr(value)
return value
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Resolve this field against a resource
@param resource: the resource
"""
return S3ResourceField(resource, self.name)
# =============================================================================
# Short name for the S3FieldSelector class
#
FS = S3FieldSelector
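# Usage sketch (illustrative only; the selectors and the resource name
# "pr_person" are hypothetical and depend on the data model):
#
#   query = (FS("first_name").lower().like("jane%")) & (FS("~.id") > 0)
#   resource = current.s3db.resource("pr_person")
#   dal_query = query.query(resource)          # DAL-translatable part
#   url_vars = query.serialize_url(resource)   # back to URL filter variables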
# =============================================================================
class S3FieldPath(object):
""" Helper class to parse field selectors """
# -------------------------------------------------------------------------
@classmethod
def resolve(cls, resource, selector, tail=None):
"""
Resolve a selector (=field path) against a resource
@param resource: the S3Resource to resolve against
@param selector: the field selector string
@param tail: tokens to append to the selector
The general syntax for a selector is:
selector = {[alias].}{[key]$}[field|selector]
(Parts in {} are optional, | indicates alternatives)
* Alias can be:
~ refers to the resource addressed by the
preceding parts of the selector (=last
resource)
component alias of a component of the last resource
linktable alias of a link table of the last resource
table name of a table that has a foreign key for
the last resource (auto-detect the key)
key:table same as above, but specifying the foreign key
* Key can be:
key the name of a foreign key in the last resource
context a context expression
* Field can be:
fieldname the name of a field or virtual field of the
last resource
context a context expression
A "context expression" is a name enclosed in parentheses:
(context)
During parsing, context expressions get replaced by the
string which has been configured for this name for the
last resource with:
s3db.configure(tablename, context = dict(name = "string"))
With context expressions, the same selector can be used
for different resources, each time resolving into the
specific field path. However, the field addressed must
be of the same type in all resources to form valid
queries.
If a context name can not be resolved, resolve() will
still succeed - but the S3FieldPath returned will have
colname=None and ftype="context" (=unresolvable context).
"""
if not selector:
raise SyntaxError("Invalid selector: %s" % selector)
tokens = re.split("(\.|\$)", selector)
if tail:
tokens.extend(tail)
parser = cls(resource, None, tokens)
parser.original = selector
return parser
# -------------------------------------------------------------------------
def __init__(self, resource, table, tokens):
"""
Constructor - not to be called directly, use resolve() instead
@param resource: the S3Resource
@param table: the table
@param tokens: the tokens as list
"""
s3db = current.s3db
if table is None:
table = resource.table
# Initialize
self.original = None
self.tname = table._tablename
self.fname = None
self.field = None
self.ftype = None
self.virtual = False
self.colname = None
self.joins = {}
self.distinct = False
self.multiple = True
head = tokens.pop(0)
tail = None
if head and head[0] == "(" and head[-1] == ")":
# Context expression
head = head.strip("()")
self.fname = head
self.ftype = "context"
if not resource:
resource = s3db.resource(table, components=[])
context = resource.get_config("context")
if context and head in context:
tail = self.resolve(resource, context[head], tail=tokens)
else:
# unresolvable
pass
elif tokens:
# Resolve the tail
op = tokens.pop(0)
if tokens:
if op == ".":
# head is a component or linktable alias, and tokens is
# a field expression in the component/linked table
if not resource:
resource = s3db.resource(table, components=[])
ktable, join, m, d = self._resolve_alias(resource, head)
self.multiple = m
self.distinct = d
else:
# head is a foreign key in the current table and tokens is
# a field expression in the referenced table
ktable, join = self._resolve_key(table, head)
self.distinct = True
if join is not None:
self.joins[ktable._tablename] = join
tail = S3FieldPath(None, ktable, tokens)
else:
raise SyntaxError("trailing operator")
if tail is None:
# End of the expression
if self.ftype != "context":
# Expression is resolved, head is a field name:
self.field = self._resolve_field(table, head)
if not self.field:
self.virtual = True
self.ftype = "virtual"
else:
self.virtual = False
self.ftype = str(self.field.type)
self.fname = head
self.colname = "%s.%s" % (self.tname, self.fname)
else:
# Read field data from tail
self.tname = tail.tname
self.fname = tail.fname
self.field = tail.field
self.ftype = tail.ftype
self.virtual = tail.virtual
self.colname = tail.colname
self.distinct |= tail.distinct
self.multiple |= tail.multiple
self.joins.update(tail.joins)
# -------------------------------------------------------------------------
@staticmethod
def _resolve_field(table, fieldname):
"""
Resolve a field name against the table, recognizes "id" as
table._id.name, and "uid" as current.xml.UID.
@param table: the Table
@param fieldname: the field name
@return: the Field
"""
if fieldname == "uid":
fieldname = current.xml.UID
if fieldname == "id":
field = table._id
elif fieldname in table.fields:
field = ogetattr(table, fieldname)
else:
field = None
return field
# -------------------------------------------------------------------------
@staticmethod
def _resolve_key(table, fieldname):
"""
Resolve a foreign key into the referenced table and the
join and left join between the current table and the
referenced table
@param table: the current Table
@param fieldname: the fieldname of the foreign key
            @return: tuple of (referenced table, join)
            @raise: AttributeError if either the field or
                    the referenced table is not found
@raise: SyntaxError if the field is not a foreign key
"""
if fieldname in table.fields:
f = table[fieldname]
else:
raise AttributeError("key not found: %s" % fieldname)
ktablename, pkey, multiple = s3_get_foreign_key(f, m2m=False)
if not ktablename:
raise SyntaxError("%s is not a foreign key" % f)
ktable = current.s3db.table(ktablename,
AttributeError("undefined table %s" % ktablename),
db_only=True)
pkey = ktable[pkey] if pkey else ktable._id
join = [ktable.on(f == pkey)]
return ktable, join
# -------------------------------------------------------------------------
@staticmethod
def _resolve_alias(resource, alias):
"""
Resolve a table alias into the linked table (component, linktable
or free join), and the joins and left joins between the current
resource and the linked table.
@param resource: the current S3Resource
@param alias: the alias
            @return: tuple of (linked table, join, multiple, distinct),
                     the two latter being flags to indicate possible
                     ambiguous query results (needed by the query builder)
@raise: AttributeError if one of the key fields or tables
can not be found
@raise: SyntaxError if the alias can not be resolved (e.g.
                    because one of the keys isn't a foreign key, points
to the wrong table or is ambiguous)
"""
# Alias for this resource?
if alias in ("~", resource.alias):
return resource.table, None, False, False
multiple = True
linked = resource.linked
if linked and linked.alias == alias:
# It's the linked table
linktable = resource.table
ktable = linked.table
join = [ktable.on(ktable[linked.fkey] == linktable[linked.rkey])]
return ktable, join, multiple, True
s3db = current.s3db
tablename = resource.tablename
# Try to attach the component
if alias not in resource.components and \
alias not in resource.links:
_alias = alias
hook = s3db.get_component(tablename, alias)
if not hook:
_alias = s3db.get_alias(tablename, alias)
if _alias:
hook = s3db.get_component(tablename, _alias)
if hook:
resource._attach(_alias, hook)
components = resource.components
links = resource.links
if alias in components:
# Is a component
component = components[alias]
ktable = component.table
join = component._join()
multiple = component.multiple
elif alias in links:
# Is a linktable
link = links[alias]
ktable = link.table
join = link._join()
elif "_" in alias:
# Is a free join
DELETED = current.xml.DELETED
table = resource.table
tablename = resource.tablename
pkey = fkey = None
# Find the table
fkey, kname = (alias.split(":") + [None])[:2]
if not kname:
fkey, kname = kname, fkey
ktable = s3db.table(kname,
AttributeError("table not found: %s" % kname),
db_only=True)
if fkey is None:
# Autodetect left key
for fname in ktable.fields:
tn, key, m = s3_get_foreign_key(ktable[fname], m2m=False)
if not tn:
continue
if tn == tablename:
if fkey is not None:
raise SyntaxError("ambiguous foreign key in %s" %
alias)
else:
fkey = fname
if key:
pkey = key
if fkey is None:
raise SyntaxError("no foreign key for %s in %s" %
(tablename, kname))
else:
# Check left key
if fkey not in ktable.fields:
raise AttributeError("no field %s in %s" % (fkey, kname))
tn, pkey, m = s3_get_foreign_key(ktable[fkey], m2m=False)
if tn and tn != tablename:
raise SyntaxError("%s.%s is not a foreign key for %s" %
(kname, fkey, tablename))
elif not tn:
raise SyntaxError("%s.%s is not a foreign key" %
(kname, fkey))
# Default primary key
if pkey is None:
pkey = table._id.name
# Build join
query = (table[pkey] == ktable[fkey])
if DELETED in ktable.fields:
query &= ktable[DELETED] != True
join = [ktable.on(query)]
else:
raise SyntaxError("Invalid tablename: %s" % alias)
return ktable, join, multiple, True
# =============================================================================
class S3ResourceField(object):
""" Helper class to resolve a field selector against a resource """
# -------------------------------------------------------------------------
def __init__(self, resource, selector, label=None):
"""
Constructor
@param resource: the resource
@param selector: the field selector (string)
"""
self.resource = resource
self.selector = selector
lf = S3FieldPath.resolve(resource, selector)
self.tname = lf.tname
self.fname = lf.fname
self.colname = lf.colname
self._joins = lf.joins
self.distinct = lf.distinct
self.multiple = lf.multiple
self._join = None
self.field = lf.field
self.virtual = False
self.represent = s3_unicode
self.requires = None
if self.field is not None:
field = self.field
self.ftype = str(field.type)
if resource.linked is not None and self.ftype == "id":
# Always represent the link-table's ID as the
# linked record's ID => needed for data tables
self.represent = lambda i, resource=resource: \
resource.component_id(None, i)
else:
self.represent = field.represent
self.requires = field.requires
elif self.colname:
self.virtual = True
self.ftype = "virtual"
else:
self.ftype = "context"
# Fall back to the field label
if label is None:
fname = self.fname
if fname in ["L1", "L2", "L3", "L3", "L4", "L5"]:
try:
label = current.gis.get_location_hierarchy(fname)
except:
label = None
elif fname == "L0":
label = current.messages.COUNTRY
if label is None:
f = self.field
if f:
label = f.label
elif fname:
label = " ".join([s.strip().capitalize()
for s in fname.split("_") if s])
else:
label = None
self.label = label
self.show = True
# -------------------------------------------------------------------------
def __repr__(self):
""" String representation of this instance """
return "<S3ResourceField " \
"selector='%s' " \
"label='%s' " \
"table='%s' " \
"field='%s' " \
"type='%s'>" % \
(self.selector, self.label, self.tname, self.fname, self.ftype)
# -------------------------------------------------------------------------
@property
def join(self):
"""
Implicit join (Query) for this field, for backwards-compatibility
"""
if self._join is not None:
return self._join
join = self._join = {}
for tablename, joins in self._joins.items():
query = None
for expression in joins:
if query is None:
query = expression.second
else:
query &= expression.second
if query:
join[tablename] = query
return join
# -------------------------------------------------------------------------
@property
def left(self):
"""
            The left joins for this field, for backwards-compatibility
"""
return self._joins
# -------------------------------------------------------------------------
def extract(self, row, represent=False, lazy=False):
"""
Extract the value for this field from a row
@param row: the Row
@param represent: render a text representation for the value
@param lazy: return a lazy representation handle if available
"""
tname = self.tname
fname = self.fname
colname = self.colname
error = "Field not found in Row: %s" % colname
if type(row) is Row:
try:
if tname in row.__dict__:
value = ogetattr(ogetattr(row, tname), fname)
else:
value = ogetattr(row, fname)
except:
try:
value = row[colname]
except (KeyError, AttributeError):
raise KeyError(error)
elif fname in row:
value = row[fname]
elif colname in row:
value = row[colname]
elif tname is not None and \
tname in row and fname in row[tname]:
value = row[tname][fname]
else:
raise KeyError(error)
if callable(value):
# Lazy virtual field
try:
value = value()
except:
current.log.error(sys.exc_info()[1])
value = None
if represent:
renderer = self.represent
if callable(renderer):
if lazy and hasattr(renderer, "bulk"):
return S3RepresentLazy(value, renderer)
else:
return renderer(value)
else:
return s3_unicode(value)
else:
return value
# =============================================================================
class S3Joins(object):
""" A collection of joins """
def __init__(self, tablename, joins=None):
"""
Constructor
@param tablename: the name of the master table
@param joins: list of joins
"""
self.tablename = tablename
self.joins = {}
self.tables = set()
self.add(joins)
# -------------------------------------------------------------------------
def __iter__(self):
"""
Iterate over the names of all joined tables in the collection
"""
return self.joins.__iter__()
# -------------------------------------------------------------------------
def __getitem__(self, tablename):
"""
Get the list of joins for a table
@param tablename: the tablename
"""
return self.joins.__getitem__(tablename)
# -------------------------------------------------------------------------
def __setitem__(self, tablename, joins):
"""
Update the joins for a table
@param tablename: the tablename
@param joins: the list of joins for this table
"""
master = self.tablename
joins_dict = self.joins
tables = current.db._adapter.tables
joins_dict[tablename] = joins
if len(joins) > 1:
for join in joins:
try:
tname = join.first._tablename
except AttributeError:
tname = str(join.first)
if tname not in joins_dict and \
master in tables(join.second):
joins_dict[tname] = [join]
self.tables.add(tablename)
return
# -------------------------------------------------------------------------
def keys(self):
"""
Get a list of names of all joined tables
"""
return self.joins.keys()
# -------------------------------------------------------------------------
def items(self):
"""
Get a list of tuples (tablename, [joins]) for all joined tables
"""
return self.joins.items()
# -------------------------------------------------------------------------
def values(self):
"""
Get a list of joins for all joined tables
@return: a nested list like [[join, join, ...], ...]
"""
return self.joins.values()
# -------------------------------------------------------------------------
def add(self, joins):
"""
Add joins to this collection
@param joins: a join or a list/tuple of joins
@return: the list of names of all tables for which joins have
been added to the collection
"""
tablenames = set()
if joins:
if not isinstance(joins, (list, tuple)):
joins = [joins]
for join in joins:
tablename = join.first._tablename
self[tablename] = [join]
tablenames.add(tablename)
return list(tablenames)
# -------------------------------------------------------------------------
def extend(self, other):
"""
Extend this collection with the joins from another collection
@param other: the other collection (S3Joins), or a dict like
{tablename: [join, join]}
@return: the list of names of all tables for which joins have
been added to the collection
"""
if type(other) is S3Joins:
add = self.tables.add
else:
add = None
joins = self.joins if type(other) is S3Joins else self
for tablename in other:
if tablename not in self.joins:
joins[tablename] = other[tablename]
if add:
add(tablename)
return other.keys()
# -------------------------------------------------------------------------
def __repr__(self):
"""
String representation of this collection
"""
return "<S3Joins %s>" % str([str(j) for j in self.as_list()])
# -------------------------------------------------------------------------
def as_list(self, tablenames=None, aqueries=None, prefer=None):
"""
Return joins from this collection as list
@param tablenames: the names of the tables for which joins
shall be returned, defaults to all tables
in the collection. Dependencies will be
included automatically (if available)
@param aqueries: dict of accessible-queries {tablename: query}
to include in the joins; if there is no entry
for a particular table, then it will be looked
up from current.auth and added to the dict.
To prevent differential authorization of a
particular joined table, set {<tablename>: None}
in the dict
@param prefer: If any table or any of its dependencies would be
joined by this S3Joins collection, then skip this
table here (and enforce it to be joined by the
preferred collection), to prevent duplication of
left joins as inner joins:
join = inner_joins.as_list(prefer=left_joins)
left = left_joins.as_list()
@return: a list of joins, ordered by their interdependency, which
can be used as join/left parameter of Set.select()
"""
accessible_query = current.auth.s3_accessible_query
if tablenames is None:
tablenames = self.tables
else:
tablenames = set(tablenames)
skip = set()
if prefer:
preferred_joins = prefer.as_list(tablenames=tablenames)
for join in preferred_joins:
try:
tname = join.first._tablename
except AttributeError:
tname = str(join.first)
skip.add(tname)
tablenames -= skip
joins = self.joins
# Resolve dependencies
required_tables = set()
get_tables = current.db._adapter.tables
for tablename in tablenames:
if tablename not in joins or \
tablename == self.tablename or \
tablename in skip:
continue
join_list = joins[tablename]
preferred = False
dependencies = set()
for join in join_list:
join_tables = set(get_tables(join.second))
if join_tables:
if any((tname in skip for tname in join_tables)):
preferred = True
dependencies |= join_tables
if preferred:
skip.add(tablename)
skip |= dependencies
prefer.extend({tablename: join_list})
else:
required_tables.add(tablename)
required_tables |= dependencies
# Collect joins
joins_dict = {}
for tablename in required_tables:
if tablename not in joins or tablename == self.tablename:
continue
for join in joins[tablename]:
j = join
table = j.first
tname = table._tablename
if aqueries is not None and tname in tablenames:
if tname not in aqueries:
aquery = accessible_query("read", table)
aqueries[tname] = aquery
else:
aquery = aqueries[tname]
if aquery is not None:
j = join.first.on(join.second & aquery)
joins_dict[tname] = j
# Sort joins (if possible)
try:
return self.sort(joins_dict.values())
except RuntimeError:
return joins_dict.values()
# -------------------------------------------------------------------------
@classmethod
def sort(cls, joins):
"""
Sort a list of left-joins by their interdependency
@param joins: the list of joins
"""
if len(joins) <= 1:
return joins
r = list(joins)
tables = current.db._adapter.tables
append = r.append
head = None
for i in xrange(len(joins)):
join = r.pop(0)
head = join
tablenames = tables(join.second)
for j in r:
try:
tn = j.first._tablename
except AttributeError:
tn = str(j.first)
if tn in tablenames:
head = None
break
if head is not None:
break
else:
append(join)
if head is not None:
return [head] + cls.sort(r)
else:
raise RuntimeError("circular join dependency")
# =============================================================================
class S3ResourceQuery(object):
"""
Helper class representing a resource query
- unlike DAL Query objects, these can be converted to/from URL filters
"""
# Supported operators
NOT = "not"
AND = "and"
OR = "or"
LT = "lt"
LE = "le"
EQ = "eq"
NE = "ne"
GE = "ge"
GT = "gt"
LIKE = "like"
BELONGS = "belongs"
CONTAINS = "contains"
ANYOF = "anyof"
TYPEOF = "typeof"
COMPARISON = [LT, LE, EQ, NE, GE, GT,
LIKE, BELONGS, CONTAINS, ANYOF, TYPEOF]
OPERATORS = [NOT, AND, OR] + COMPARISON
# -------------------------------------------------------------------------
def __init__(self, op, left=None, right=None):
""" Constructor """
if op not in self.OPERATORS:
raise SyntaxError("Invalid operator: %s" % op)
self.op = op
self.left = left
self.right = right
# -------------------------------------------------------------------------
def __and__(self, other):
""" AND """
return S3ResourceQuery(self.AND, self, other)
# -------------------------------------------------------------------------
def __or__(self, other):
""" OR """
return S3ResourceQuery(self.OR, self, other)
# -------------------------------------------------------------------------
def __invert__(self):
""" NOT """
if self.op == self.NOT:
return self.left
else:
return S3ResourceQuery(self.NOT, self)
# -------------------------------------------------------------------------
def _joins(self, resource, left=False):
op = self.op
l = self.left
r = self.right
if op in (self.AND, self.OR):
if isinstance(l, S3ResourceQuery):
ljoins, ld = l._joins(resource, left=left)
else:
ljoins, ld = {}, False
if isinstance(r, S3ResourceQuery):
rjoins, rd = r._joins(resource, left=left)
else:
rjoins, rd = {}, False
ljoins = dict(ljoins)
ljoins.update(rjoins)
return (ljoins, ld or rd)
elif op == self.NOT:
if isinstance(l, S3ResourceQuery):
return l._joins(resource, left=left)
else:
return {}, False
joins, distinct = {}, False
if isinstance(l, S3FieldSelector):
try:
rfield = l.resolve(resource)
except (SyntaxError, AttributeError):
pass
else:
distinct = rfield.distinct
if distinct and left or not distinct and not left:
joins = rfield._joins
return (joins, distinct)
# -------------------------------------------------------------------------
def fields(self):
""" Get all field selectors involved with this query """
op = self.op
l = self.left
r = self.right
if op in (self.AND, self.OR):
lf = l.fields()
rf = r.fields()
return lf + rf
elif op == self.NOT:
return l.fields()
elif isinstance(l, S3FieldSelector):
return [l.name]
else:
return []
# -------------------------------------------------------------------------
def split(self, resource):
"""
Split this query into a real query and a virtual one (AND)
@param resource: the S3Resource
@return: tuple (DAL-translatable sub-query, virtual filter),
both S3ResourceQuery instances
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
lq, lf = l.split(resource) \
if isinstance(l, S3ResourceQuery) else (l, None)
rq, rf = r.split(resource) \
if isinstance(r, S3ResourceQuery) else (r, None)
q = lq
if rq is not None:
if q is not None:
q &= rq
else:
q = rq
f = lf
if rf is not None:
if f is not None:
f &= rf
else:
f = rf
return q, f
elif op == self.OR:
lq, lf = l.split(resource) \
if isinstance(l, S3ResourceQuery) else (l, None)
rq, rf = r.split(resource) \
if isinstance(r, S3ResourceQuery) else (r, None)
if lf is not None or rf is not None:
return None, self
else:
q = lq
if rq is not None:
if q is not None:
q |= rq
else:
q = rq
return q, None
elif op == self.NOT:
if isinstance(l, S3ResourceQuery):
if l.op == self.OR:
i = (~(l.left)) & (~(l.right))
return i.split(resource)
else:
q, f = l.split(resource)
if q is not None and f is not None:
return None, self
elif q is not None:
return ~q, None
elif f is not None:
return None, ~f
else:
return ~l, None
l = self.left
try:
if isinstance(l, S3FieldSelector):
lfield = l.resolve(resource)
else:
lfield = S3ResourceField(resource, l)
except:
lfield = None
if not lfield or lfield.field is None:
return None, self
else:
return self, None
# -------------------------------------------------------------------------
def transform(self, resource):
"""
Placeholder for transformation method
@param resource: the S3Resource
"""
# @todo: implement
return self
# -------------------------------------------------------------------------
def query(self, resource):
"""
Convert this S3ResourceQuery into a DAL query, ignoring virtual
fields (the necessary joins for this query can be constructed
with the joins() method)
@param resource: the resource to resolve the query against
"""
op = self.op
l = self.left
r = self.right
# Resolve query components
if op == self.AND:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
if l is None or r is None:
return None
elif l is False or r is False:
return l if r is False else r if l is False else False
else:
return l & r
elif op == self.OR:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
r = r.query(resource) if isinstance(r, S3ResourceQuery) else r
if l is None or r is None:
return None
elif l is False or r is False:
return l if r is False else r if l is False else False
else:
return l | r
elif op == self.NOT:
l = l.query(resource) if isinstance(l, S3ResourceQuery) else l
if l is None:
return None
elif l is False:
return False
else:
return ~l
# Resolve the fields
if isinstance(l, S3FieldSelector):
try:
rfield = S3ResourceField(resource, l.name)
except:
return None
if rfield.virtual:
return None
elif not rfield.field:
return False
lfield = l.expr(rfield.field)
elif isinstance(l, Field):
lfield = l
else:
return None # not a field at all
if isinstance(r, S3FieldSelector):
try:
rfield = S3ResourceField(resource, r.name)
except:
return None
if rfield.virtual:
return None
elif not rfield.field:
return False
rfield = r.expr(rfield.field)
else:
rfield = r
# Resolve the operator
invert = False
query_bare = self._query_bare
ftype = str(lfield.type)
if isinstance(rfield, (list, tuple)) and ftype[:4] != "list":
if op == self.EQ:
op = self.BELONGS
elif op == self.NE:
op = self.BELONGS
invert = True
elif op not in (self.BELONGS, self.TYPEOF):
query = None
for v in rfield:
q = query_bare(op, lfield, v)
if q is not None:
if query is None:
query = q
else:
query |= q
return query
# Convert date(time) strings
if ftype == "datetime" and \
isinstance(rfield, basestring):
rfield = S3TypeConverter.convert(datetime.datetime, rfield)
elif ftype == "date" and \
isinstance(rfield, basestring):
rfield = S3TypeConverter.convert(datetime.date, rfield)
query = query_bare(op, lfield, rfield)
if invert:
query = ~(query)
return query
# -------------------------------------------------------------------------
def _query_bare(self, op, l, r):
"""
Translate a filter expression into a DAL query
@param op: the operator
@param l: the left operand
@param r: the right operand
"""
if op == self.CONTAINS:
q = l.contains(r, all=True)
elif op == self.ANYOF:
# NB str/int doesn't matter here
q = l.contains(r, all=False)
elif op == self.BELONGS:
q = self._query_belongs(l, r)
elif op == self.TYPEOF:
q = self._query_typeof(l, r)
elif op == self.LIKE:
q = l.like(s3_unicode(r))
elif op == self.LT:
q = l < r
elif op == self.LE:
q = l <= r
elif op == self.EQ:
q = l == r
elif op == self.NE:
q = l != r
elif op == self.GE:
q = l >= r
elif op == self.GT:
q = l > r
else:
q = None
return q
# -------------------------------------------------------------------------
def _query_typeof(self, l, r):
"""
Translate TYPEOF into DAL expression
            @param l: the left operand
            @param r: the right operand
"""
hierarchy, field, nodeset, none = self._resolve_hierarchy(l, r)
if not hierarchy:
# Not a hierarchical query => use simple belongs
return self._query_belongs(l, r)
if not field:
# Field does not exist (=>skip subquery)
return None
# Construct the subquery
list_type = str(field.type)[:5] == "list:"
if nodeset:
if list_type:
q = (field.contains(list(nodeset)))
elif len(nodeset) > 1:
q = (field.belongs(nodeset))
else:
q = (field == tuple(nodeset)[0])
else:
q = None
if none:
# None needs special handling with older DAL versions
if not list_type:
if q is None:
q = (field == None)
else:
q |= (field == None)
if q is None:
# Values not resolvable (=subquery always fails)
q = field.belongs(set())
return q
# -------------------------------------------------------------------------
@classmethod
def _resolve_hierarchy(cls, l, r):
"""
Resolve the hierarchical lookup in a typeof-query
            @param l: the left operand
            @param r: the right operand
"""
from s3hierarchy import S3Hierarchy
tablename = l.tablename
# Connect to the hierarchy
hierarchy = S3Hierarchy(tablename)
if hierarchy.config is None:
# Reference to a hierarchical table?
ktablename, key = s3_get_foreign_key(l)[:2]
if ktablename:
hierarchy = S3Hierarchy(ktablename)
else:
key = None
list_type = str(l.type)[:5] == "list:"
if hierarchy.config is None and not list_type:
# No hierarchy configured and no list:reference
return False, None, None, None
field, keys = l, r
if not key:
s3db = current.s3db
table = s3db[tablename]
if l.name != table._id.name:
# Lookup-field rather than primary key => resolve it
# Build a filter expression for the lookup table
fs = S3FieldSelector(l.name)
if list_type:
expr = fs.contains(r)
else:
expr = cls._query_belongs(l, r, field = fs)
# Resolve filter expression into subquery
resource = s3db.resource(tablename)
if expr is not None:
subquery = expr.query(resource)
else:
subquery = None
if not subquery:
# Field doesn't exist
return True, None, None, None
# Execute query and retrieve the lookup table IDs
DELETED = current.xml.DELETED
if DELETED in table.fields:
subquery &= table[DELETED] != True
rows = current.db(subquery).select(table._id)
# Override field/keys
field = table[hierarchy.pkey.name]
keys = set([row[table._id.name] for row in rows])
nodeset, none = None, False
if keys:
# Lookup all descendant types from the hierarchy
none = False
if not isinstance(keys, (list, tuple, set)):
keys = set([keys])
nodes = set()
for node in keys:
if node is None:
none = True
else:
try:
node_id = long(node)
except ValueError:
continue
nodes.add(node_id)
if hierarchy.config is not None:
nodeset = hierarchy.findall(nodes, inclusive=True)
else:
nodeset = nodes
elif keys is None:
none = True
return True, field, nodeset, none
# -------------------------------------------------------------------------
@staticmethod
def _query_belongs(l, r, field=None):
"""
Resolve BELONGS into a DAL expression (or S3ResourceQuery if
field is an S3FieldSelector)
            @param l: the left operand
            @param r: the right operand
            @param field: alternative left operand
"""
if field is None:
field = l
expr = None
none = False
if not isinstance(r, (list, tuple, set)):
items = [r]
else:
items = r
if None in items:
none = True
items = [item for item in items if item is not None]
wildcard = False
if str(l.type) in ("string", "text"):
for item in items:
if isinstance(item, basestring):
if "*" in item and "%" not in item:
s = item.replace("*", "%")
else:
s = item
else:
try:
s = str(item)
except:
continue
if "%" in s:
wildcard = True
_expr = (field.like(s))
else:
_expr = (field == s)
if expr is None:
expr = _expr
else:
expr |= _expr
if not wildcard:
if len(items) == 1:
# Don't use belongs() for single value
expr = (field == tuple(items)[0])
elif items:
expr = (field.belongs(items))
if none:
# None needs special handling with older DAL versions
if expr is None:
expr = (field == None)
else:
expr |= (field == None)
elif expr is None:
expr = field.belongs(set())
return expr
# -------------------------------------------------------------------------
def __call__(self, resource, row, virtual=True):
"""
Probe whether the row matches the query
@param resource: the resource to resolve the query against
@param row: the DB row
@param virtual: execute only virtual queries
"""
if self.op == self.AND:
l = self.left(resource, row, virtual=False)
r = self.right(resource, row, virtual=False)
if l is None:
return r
if r is None:
return l
return l and r
elif self.op == self.OR:
l = self.left(resource, row, virtual=False)
r = self.right(resource, row, virtual=False)
if l is None:
return r
if r is None:
return l
return l or r
elif self.op == self.NOT:
l = self.left(resource, row)
if l is None:
return None
else:
return not l
real = False
left = self.left
if isinstance(left, S3FieldSelector):
try:
lfield = left.resolve(resource)
except (AttributeError, KeyError, SyntaxError):
return None
if lfield.field is not None:
real = True
elif not lfield.virtual:
# Unresolvable expression => skip
return None
else:
lfield = left
if isinstance(left, Field):
real = True
right = self.right
if isinstance(right, S3FieldSelector):
try:
rfield = right.resolve(resource)
except (AttributeError, KeyError, SyntaxError):
return None
if rfield.virtual:
real = False
elif rfield.field is None:
# Unresolvable expression => skip
return None
else:
rfield = right
if virtual and real:
return None
extract = lambda f: S3FieldSelector.extract(resource, row, f)
try:
l = extract(lfield)
r = extract(rfield)
except (KeyError, SyntaxError):
current.log.error(sys.exc_info()[1])
return None
if isinstance(left, S3FieldSelector):
l = left.expr(l)
if isinstance(right, S3FieldSelector):
r = right.expr(r)
op = self.op
invert = False
probe = self._probe
if isinstance(rfield, (list, tuple)) and \
not isinstance(lfield, (list, tuple)):
if op == self.EQ:
op = self.BELONGS
elif op == self.NE:
op = self.BELONGS
invert = True
elif op != self.BELONGS:
for v in r:
try:
r = probe(op, l, v)
except (TypeError, ValueError):
r = False
if r:
return True
return False
try:
r = probe(op, l, r)
except (TypeError, ValueError):
return False
if invert and r is not None:
return not r
else:
return r
# -------------------------------------------------------------------------
def _probe(self, op, l, r):
"""
Probe whether the value pair matches the query
@param l: the left value
@param r: the right value
"""
result = False
convert = S3TypeConverter.convert
# Fallbacks for TYPEOF
if op == self.TYPEOF:
if isinstance(l, (list, tuple, set)):
op = self.ANYOF
elif isinstance(r, (list, tuple, set)):
op = self.BELONGS
else:
op = self.EQ
if op == self.CONTAINS:
r = convert(l, r)
result = self._probe_contains(l, r)
elif op == self.ANYOF:
if not isinstance(r, (list, tuple, set)):
r = [r]
for v in r:
if isinstance(l, (list, tuple, set, basestring)):
if self._probe_contains(l, v):
return True
elif l == v:
return True
return False
elif op == self.BELONGS:
if not isinstance(r, (list, tuple, set)):
r = [r]
r = convert(l, r)
result = self._probe_contains(r, l)
elif op == self.LIKE:
pattern = re.escape(str(r)).replace("\\%", ".*").replace(".*.*", "\\%")
return re.match(pattern, str(l)) is not None
else:
r = convert(l, r)
if op == self.LT:
result = l < r
elif op == self.LE:
result = l <= r
elif op == self.EQ:
result = l == r
elif op == self.NE:
result = l != r
elif op == self.GE:
result = l >= r
elif op == self.GT:
result = l > r
return result
# -------------------------------------------------------------------------
@staticmethod
def _probe_contains(a, b):
"""
Probe whether a contains b
"""
if a is None:
return False
try:
if isinstance(a, basestring):
return str(b) in a
elif isinstance(a, (list, tuple, set)):
if isinstance(b, (list, tuple, set)):
convert = S3TypeConverter.convert
found = True
for _b in b:
if _b not in a:
found = False
for _a in a:
try:
if convert(_a, _b) == _a:
found = True
break
except (TypeError, ValueError):
continue
if not found:
break
return found
else:
return b in a
else:
return str(b) in str(a)
except:
return False
# -------------------------------------------------------------------------
def represent(self, resource):
"""
Represent this query as a human-readable string.
@param resource: the resource to resolve the query against
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
r = r.represent(resource) \
if isinstance(r, S3ResourceQuery) else str(r)
return "(%s and %s)" % (l, r)
elif op == self.OR:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
r = r.represent(resource) \
if isinstance(r, S3ResourceQuery) else str(r)
return "(%s or %s)" % (l, r)
elif op == self.NOT:
l = l.represent(resource) \
if isinstance(l, S3ResourceQuery) else str(l)
return "(not %s)" % l
else:
if isinstance(l, S3FieldSelector):
l = l.represent(resource)
elif isinstance(l, basestring):
l = '"%s"' % l
if isinstance(r, S3FieldSelector):
r = r.represent(resource)
elif isinstance(r, basestring):
r = '"%s"' % r
if op == self.CONTAINS:
return "(%s in %s)" % (r, l)
elif op == self.BELONGS:
return "(%s in %s)" % (l, r)
elif op == self.ANYOF:
return "(%s contains any of %s)" % (l, r)
elif op == self.TYPEOF:
return "(%s is a type of %s)" % (l, r)
elif op == self.LIKE:
return "(%s like %s)" % (l, r)
elif op == self.LT:
return "(%s < %s)" % (l, r)
elif op == self.LE:
return "(%s <= %s)" % (l, r)
elif op == self.EQ:
return "(%s == %s)" % (l, r)
elif op == self.NE:
return "(%s != %s)" % (l, r)
elif op == self.GE:
return "(%s >= %s)" % (l, r)
elif op == self.GT:
return "(%s > %s)" % (l, r)
else:
return "(%s ?%s? %s)" % (l, op, r)
# -------------------------------------------------------------------------
def serialize_url(self, resource=None):
"""
Serialize this query as URL query
@return: a Storage of URL variables
"""
op = self.op
l = self.left
r = self.right
url_query = Storage()
def _serialize(n, o, v, invert):
try:
quote = lambda s: s if "," not in s else '"%s"' % s
if isinstance(v, list):
v = ",".join([quote(S3TypeConverter.convert(str, val))
for val in v])
else:
v = quote(S3TypeConverter.convert(str, v))
except:
return
if "." not in n:
if resource is not None:
n = "~.%s" % n
else:
return url_query
if o == self.LIKE:
v = v.replace("%", "*")
if o == self.EQ:
operator = ""
else:
operator = "__%s" % o
if invert:
operator = "%s!" % operator
key = "%s%s" % (n, operator)
if key in url_query:
url_query[key] = "%s,%s" % (url_query[key], v)
else:
url_query[key] = v
return url_query
if op == self.AND:
lu = l.serialize_url(resource=resource)
url_query.update(lu)
ru = r.serialize_url(resource=resource)
url_query.update(ru)
elif op == self.OR:
sub = self._or()
if sub is None:
# This OR-subtree is not serializable
return url_query
n, o, v, invert = sub
_serialize(n, o, v, invert)
elif op == self.NOT:
lu = l.serialize_url(resource=resource)
for k in lu:
url_query["%s!" % k] = lu[k]
elif isinstance(l, S3FieldSelector):
_serialize(l.name, op, r, False)
return url_query
# -------------------------------------------------------------------------
def _or(self):
"""
            Helper method to URL-serialize an OR-subtree of a query in
            alternative field selector syntax, provided all sub-queries
            use the same operator and value (this is needed to URL-serialize
            an S3SearchSimpleWidget query).
"""
op = self.op
l = self.left
r = self.right
if op == self.AND:
return None
elif op == self.NOT:
lname, lop, lval, linv = l._or()
return (lname, lop, lval, not linv)
elif op == self.OR:
lvars = l._or()
rvars = r._or()
if lvars is None or rvars is None:
return None
lname, lop, lval, linv = lvars
rname, rop, rval, rinv = rvars
if lop != rop or linv != rinv:
return None
if lname == rname:
return (lname, lop, [lval, rval], linv)
elif lval == rval:
return ("%s|%s" % (lname, rname), lop, lval, linv)
else:
return None
else:
return (l.name, op, r, False)
# =============================================================================
class S3URLQuery(object):
""" URL Query Parser """
# -------------------------------------------------------------------------
@classmethod
def parse(cls, resource, vars):
"""
Construct a Storage of S3ResourceQuery from a Storage of get_vars
@param resource: the S3Resource
@param vars: the get_vars
@return: Storage of S3ResourceQuery like {alias: query}, where
alias is the alias of the component the query concerns
"""
query = Storage()
if resource is None:
return query
if not vars:
return query
subquery = cls._subquery
allof = lambda l, r: l if r is None else r if l is None else r & l
for key, value in vars.iteritems():
if key == "$filter":
# Instantiate the advanced filter parser
parser = S3URLQueryParser()
if parser.parser is None:
# not available
continue
# Multiple $filter expressions?
expressions = value if type(value) is list else [value]
# Default alias (=master)
default_alias = resource.alias
# Parse all expressions
for expression in expressions:
parsed = parser.parse(expression)
for alias in parsed:
q = parsed[alias]
qalias = alias if alias is not None else default_alias
if qalias not in query:
query[qalias] = [q]
else:
query[qalias].append(q)
# Stop here
continue
elif not("." in key or key[0] == "(" and ")" in key):
# Not a filter expression
continue
# Process old-style filters
selectors, op, invert = cls.parse_expression(key)
if type(value) is list:
# Multiple queries with the same selector (AND)
q = reduce(allof,
[subquery(selectors, op, invert, v) for v in value],
None)
else:
q = subquery(selectors, op, invert, value)
if q is None:
continue
# Append to query
if len(selectors) > 1:
aliases = [s.split(".", 1)[0] for s in selectors]
if len(set(aliases)) == 1:
alias = aliases[0]
else:
alias = resource.alias
#alias = resource.alias
else:
alias = selectors[0].split(".", 1)[0]
if alias == "~":
alias = resource.alias
if alias not in query:
query[alias] = [q]
else:
query[alias].append(q)
return query
# -------------------------------------------------------------------------
@staticmethod
def parse_url(url):
"""
Parse a URL query into get_vars
            @param url: the URL query string
@return: the get_vars (Storage)
"""
if not url:
return Storage()
elif "?" in url:
query = url.split("?", 1)[1]
elif "=" in url:
query = url
else:
return Storage()
import cgi
dget = cgi.parse_qsl(query, keep_blank_values=1)
get_vars = Storage()
for (key, value) in dget:
if key in get_vars:
if type(get_vars[key]) is list:
get_vars[key].append(value)
else:
get_vars[key] = [get_vars[key], value]
else:
get_vars[key] = value
return get_vars
# -------------------------------------------------------------------------
@staticmethod
def parse_expression(key):
"""
Parse a URL expression
@param key: the key for the URL variable
@return: tuple (selectors, operator, invert)
"""
if key[-1] == "!":
invert = True
else:
invert = False
fs = key.rstrip("!")
op = None
if "__" in fs:
fs, op = fs.split("__", 1)
op = op.strip("_")
if not op:
op = "eq"
if "|" in fs:
selectors = [s for s in fs.split("|") if s]
else:
selectors = [fs]
return selectors, op, invert
# -------------------------------------------------------------------------
@staticmethod
def parse_value(value):
"""
Parse a URL query value
@param value: the value
@return: the parsed value
"""
uquote = lambda w: w.replace('\\"', '\\"\\') \
.strip('"') \
.replace('\\"\\', '"')
NONE = ("NONE", "None")
if type(value) is not list:
value = [value]
vlist = []
for item in value:
w = ""
quote = False
ignore_quote = False
for c in s3_unicode(item):
if c == '"' and not ignore_quote:
w += c
quote = not quote
elif c == "," and not quote:
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
w = ""
else:
w += c
if c == "\\":
ignore_quote = True
else:
ignore_quote = False
if w in NONE:
w = None
else:
w = uquote(w).encode("utf-8")
vlist.append(w)
if len(vlist) == 1:
return vlist[0]
return vlist
# -------------------------------------------------------------------------
@classmethod
def _subquery(cls, selectors, op, invert, value):
"""
Construct a sub-query from URL selectors, operator and value
@param selectors: the selector(s)
@param op: the operator
@param invert: invert the query
@param value: the value
"""
v = cls.parse_value(value)
q = None
for fs in selectors:
if op == S3ResourceQuery.LIKE:
# Auto-lowercase and replace wildcard
f = S3FieldSelector(fs).lower()
if isinstance(v, basestring):
v = v.replace("*", "%").lower()
elif isinstance(v, list):
v = [x.replace("*", "%").lower() for x in v if x is not None]
else:
f = S3FieldSelector(fs)
rquery = None
try:
rquery = S3ResourceQuery(op, f, v)
except SyntaxError:
current.log.error("Invalid URL query operator: %s (sub-query ignored)" % op)
q = None
break
# Invert operation
if invert:
rquery = ~rquery
# Add to subquery
if q is None:
q = rquery
elif invert:
q &= rquery
else:
q |= rquery
return q
# =============================================================================
# Helper to combine multiple queries using AND
#
combine = lambda x, y: x & y if x is not None else y
# =============================================================================
class S3URLQueryParser(object):
""" New-style URL Filter Parser """
def __init__(self):
""" Constructor """
self.parser = None
self.ParseResults = None
self.ParseException = None
self._parser()
# -------------------------------------------------------------------------
def _parser(self):
""" Import PyParsing and define the syntax for filter expressions """
# PyParsing available?
try:
import pyparsing as pp
except ImportError:
current.log.error("Advanced filter syntax requires pyparsing, $filter ignored")
return False
# Selector Syntax
context = lambda s, l, t: t[0].replace("[", "(").replace("]", ")")
selector = pp.Word(pp.alphas + "[]~", pp.alphanums + "_.$:[]")
selector.setParseAction(context)
keyword = lambda x, y: x | pp.Keyword(y) if x else pp.Keyword(y)
# Expression Syntax
function = reduce(keyword, S3FieldSelector.OPERATORS)
expression = function + \
pp.Literal("(").suppress() + \
selector + \
pp.Literal(")").suppress()
# Comparison Syntax
comparison = reduce(keyword, S3ResourceQuery.COMPARISON)
# Value Syntax
        number = pp.Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
value = number | \
pp.Keyword("NONE") | \
pp.quotedString | \
pp.Word(pp.alphanums + pp.printables)
qe = pp.Group(pp.Group(expression | selector) +
comparison +
pp.originalTextFor(pp.delimitedList(value, combine=True)))
parser = pp.operatorPrecedence(qe, [("not", 1, pp.opAssoc.RIGHT, ),
("and", 2, pp.opAssoc.LEFT, ),
("or", 2, pp.opAssoc.LEFT, ),
])
self.parser = parser
self.ParseResults = pp.ParseResults
self.ParseException = pp.ParseException
return True
# -------------------------------------------------------------------------
def parse(self, expression):
"""
Parse a string expression and convert it into a dict
of filters (S3ResourceQueries).
            @param expression: the filter expression as string
@return: a dict of {component_alias: filter_query}
"""
query = {}
parser = self.parser
if not expression or parser is None:
return query
try:
parsed = parser.parseString(expression)
except self.ParseException:
current.log.error("Invalid URL Filter Expression: '%s'" %
expression)
else:
if parsed:
query = self.convert_expression(parsed[0])
return query
# -------------------------------------------------------------------------
def convert_expression(self, expression):
"""
Convert a parsed filter expression into a dict of
filters (S3ResourceQueries)
@param expression: the parsed filter expression (ParseResults)
            @return: a dict of {component_alias: filter_query}
"""
ParseResults = self.ParseResults
convert = self.convert_expression
if isinstance(expression, ParseResults):
first, op, second = ([None, None, None] + list(expression))[-3:]
if isinstance(first, ParseResults):
first = convert(first)
if isinstance(second, ParseResults):
second = convert(second)
if op == "not":
return self._not(second)
elif op == "and":
return self._and(first, second)
elif op == "or":
return self._or(first, second)
elif op in S3ResourceQuery.COMPARISON:
return self._query(op, first, second)
elif op in S3FieldSelector.OPERATORS and second:
selector = S3FieldSelector(second)
selector.op = op
return selector
elif op is None and second:
return S3FieldSelector(second)
else:
return None
# -------------------------------------------------------------------------
def _and(self, first, second):
"""
Conjunction of two query {component_alias: filter_query} (AND)
@param first: the first dict
@param second: the second dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
result = dict(first)
for alias, subquery in second.items():
if alias not in result:
result[alias] = subquery
else:
result[alias] &= subquery
return result
# -------------------------------------------------------------------------
def _or(self, first, second):
"""
Disjunction of two query dicts {component_alias: filter_query} (OR)
@param first: the first query dict
@param second: the second query dict
@return: the combined dict
"""
if not first:
return second
if not second:
return first
if len(first) > 1:
first = {None: reduce(combine, first.values())}
if len(second) > 1:
second = {None: reduce(combine, second.values())}
falias = first.keys()[0]
salias = second.keys()[0]
alias = falias if falias == salias else None
return {alias: first[falias] | second[salias]}
# -------------------------------------------------------------------------
def _not(self, query):
"""
Negation of a query dict
@param query: the query dict {component_alias: filter_query}
"""
if query is None:
return None
if len(query) == 1:
alias, sub = query.items()[0]
if sub.op == S3ResourceQuery.OR and alias is None:
                l = sub.left
                r = sub.right
                lalias = self._alias(l.left)
                ralias = self._alias(r.left)
                if lalias == ralias:
                    return {alias: ~sub}
                else:
                    # not(A or B) => not(A) and not(B)
                    return {lalias: ~l, ralias: ~r}
else:
if sub.op == S3ResourceQuery.NOT:
return {alias: sub.left}
else:
return {alias: ~sub}
else:
return {None: ~reduce(combine, query.values())}
# -------------------------------------------------------------------------
def _query(self, op, first, second):
"""
Create an S3ResourceQuery
@param op: the operator
@param first: the first operand (=S3FieldSelector)
@param second: the second operand (=value)
"""
if not isinstance(first, S3FieldSelector):
return {}
selector = first
alias = self._alias(selector)
value = S3URLQuery.parse_value(second.strip())
if op == S3ResourceQuery.LIKE:
if isinstance(value, basestring):
value = value.replace("*", "%").lower()
elif isinstance(value, list):
value = [x.replace("*", "%").lower() for x in value if x is not None]
return {alias: S3ResourceQuery(op, selector, value)}
# -------------------------------------------------------------------------
@staticmethod
def _alias(selector):
"""
Get the component alias from an S3FieldSelector (DRY Helper)
@param selector: the S3FieldSelector
@return: the alias as string or None for the master resource
"""
alias = None
if selector and isinstance(selector, S3FieldSelector):
prefix = selector.name.split("$", 1)[0]
if "." in prefix:
alias = prefix.split(".", 1)[0]
if alias in ("~", ""):
alias = None
return alias
# END =========================================================================
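# Illustrative usage (a sketch, not part of the original module): the field
# names below are invented, and "eq"/"like" are assumed to be members of
# S3ResourceQuery.COMPARISON.
#
#   parser = S3URLQueryParser()
#   query = parser.parse('~.name like "Test*" and office.location_id$L1 eq "X"')
#   # => {None: <query on the master resource>,
#   #     "office": <query on the "office" component>}
#
# Each key is a component alias (None standing for the master resource) and
# each value is an S3ResourceQuery that can be applied to that resource.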
| ScottBuchanan/eden | modules/s3/s3query.py | Python | mit | 82,174 | 0.001217 |
"""
A collection of utility and helper modules.
These differ from notary_utils in that they do not depend on or need to connect to the notary database.
"""
| danwent/Perspectives-Server | util/__init__.py | Python | gpl-3.0 | 156 | 0.00641 |
__author__ = 'deevarvar'
import string
import random
import os
#generate a random string
def string_generator(size=6, chars=string.ascii_letters+string.digits):
return ''.join(random.choice(chars) for _ in range(size))
#emulate touch cmd
def touchFile(fname, time=None):
with open(fname, 'a'):
os.utime(fname,time)
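# Illustrative usage (a sketch, not part of the original module; the path is
# invented):
#   string_generator(8)        -> e.g. 'aZ3kQ9xP' (random letters and digits)
#   touchFile('/tmp/example')  -> creates the file if missing and updates its
#                                 timestamps, like the shell `touch` command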
| deevarvar/myLab | book/tlpi_zhiye/utlib/ut_util.py | Python | mit | 335 | 0.01194 |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""
Raspymc is a multimedia centre exposed via a http server built with bottlepy
Copyright (C) 2013 Giancarlo Fringuello
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, sys, inspect, ConfigParser, pickle
from utils import *
from logger import *
from track_obj import *
CNF_SERVER_PATH = sys.path[0]
CNF_FOLDER_PATH = ""
CNF_PLAYLIST_PATH = CNF_SERVER_PATH + "/config/playlist.pkl"
CNF_FOLDER_PATH = CNF_SERVER_PATH + "/config/"
CNF_CONFIG_FILE = CNF_FOLDER_PATH + "config.ini"
#
# Loads the saved playlist from file
def get_playlist():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()")
l_playlist = []
try:
with open(CNF_PLAYLIST_PATH, 'rb') as l_input:
l_playlist = pickle.load(l_input)
except:
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
return l_playlist
def store_playlist(p_list):
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()")
try:
with open(CNF_PLAYLIST_PATH, 'wb') as l_output:
pickle.dump(p_list, l_output, pickle.HIGHEST_PROTOCOL)
except:
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
#
# Loads the configuration from file
def get_folder_path():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()")
global CNF_FOLDER_PATH
global CNF_PLAYLIST_PATH
    global CNF_SERVER_PATH
l_config_parser = ConfigParser.ConfigParser()
l_clean_configuration = False
if not os.path.isdir(CNF_FOLDER_PATH): # if config directory does not exist, create it
os.makedirs(CNF_FOLDER_PATH)
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", CNF_FOLDER_PATH + " did not exist, it has been created")
if os.path.isfile(CNF_CONFIG_FILE):
try:
l_config_parser.read(CNF_CONFIG_FILE)
if l_config_parser.has_section("PATH"):
if l_config_parser.has_option("PATH", "CNF_FOLDER_PATH"):
CNF_FOLDER_PATH = l_config_parser.get("PATH","CNF_FOLDER_PATH")
else:
l_clean_configuration = True
else:
# if section does not exist
l_clean_configuration = True
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "unable to load CNF_FOLDER_PATH, using home as default, new config.ini will be generated.")
except:
# if unable to read file (e.g. file damaged)
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "exception: unable to load CNF_FOLDER_PATH from " + CNF_CONFIG_FILE + ", using home path as default, new config.ini will be generated.")
else:
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "no configuration file found, new config.ini will be generated.")
if l_clean_configuration:
# cleanup config file
for l_section in l_config_parser.sections():
l_config_parser.remove_section(l_section)
l_config_parser.add_section("PATH")
l_config_parser.set("PATH", "CNF_FOLDER_PATH", os.path.expanduser("~"))
l_config_parser.write(file(CNF_CONFIG_FILE, 'w'))
if "" == CNF_FOLDER_PATH:
CNF_FOLDER_PATH = os.path.expanduser("~")
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_FOLDER_PATH = " + CNF_FOLDER_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_PLAYLIST_PATH = " + CNF_PLAYLIST_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_SERVER_PATH = " + CNF_SERVER_PATH)
return CNF_FOLDER_PATH
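# Illustrative config.ini layout read and written by get_folder_path() (a
# sketch matching the ConfigParser calls above; ConfigParser lower-cases the
# option name by default, and the path value is only an example):
#
#   [PATH]
#   cnf_folder_path = /home/pi/Music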
def get_server_path():
    return CNF_SERVER_PATH
def get_playlist_path():
return CNF_PLAYLIST_PATH | GiancarloF/raspymc_server | core/conf_manager.py | Python | gpl-3.0 | 4,528 | 0.023852 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import json
import csv
import StringIO
import io
from dictUtil import as_dict, merge_dicts
import urllib
# import jq
from itertools import izip
from logUtil import logging
# for manifest introspection only
import inspect
from pyspark import SparkContext
class FileUtil(object):
def __init__(self, sparkContext):
self.sc = sparkContext
## Support for entries into manifest
# any entry created thus
# should have spark_context, name of caller, module of caller
# untested: do not use
def makeEntry(self, **kwargs):
entry = dict(**kwargs)
entry["spark_context"] = self.sc
op = kwargs.get("operation", None)
if not op:
try:
st = inspect.stack()
# stack exists
if len(st)>=2:
# look up one stack frame, retrieve the function name[3]
op = st[1][3]
# stack frame memory leak could be very bad, so be careful
del st
except:
pass
mdl = kwargs.get("module", None)
if not mdl:
try:
st = inspect.stack()
# stack exists
if len(st)>=2:
# look up one stack frame, retrieve the module it belongs to
mdl = inspect.getmodule(st[0]).__name__
# stack frame memory leak could be very bad, so be careful
del st
except:
pass
entry["module"] = mdl
return entry
## GENERIC
## Herein:
## file_format is in {text, sequence}
## data_type is in {csv, json, jsonlines(=keyless)}
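    # Illustrative driver code (a sketch; the HDFS paths and the `sc` variable
    # are assumptions, not part of this module):
    #
    #   futil = FileUtil(sc)
    #   rdd = futil.load_file("hdfs:///data/docs.seq",
    #                         file_format="sequence", data_type="json")
    #   futil.save_file(rdd, "hdfs:///data/docs.txt",
    #                   file_format="text", data_type="json", separator="\t")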
def load_file(self, filename, file_format='sequence', data_type='json', **kwargs):
try:
handlerName = FileUtil.load_dispatch_table[(file_format, data_type)]
handler = getattr(self, handlerName)
rdd = handler(filename, **kwargs)
# TBD: return (rdd, manifestEntry)
# entry = self.makeEntry(input_filename=filename,
# input_file_format=file_format,
# input_data_type=data_type)
# return (rdd, entry)
#logging.info("Loaded {}/{} file {}: {} elements".format(file_format, data_type, filename, rdd.count()))
return rdd
except KeyError:
raise NotImplementedError("File_Format={}, data_type={}".format(file_format, data_type))
load_dispatch_table = {("sequence", "json"): "_load_sequence_json_file",
("sequence", "csv"): "_load_sequence_csv_file",
("text", "json"): "_load_text_json_file",
("text", "jsonlines"): "_load_text_jsonlines_file",
("text", "csv"): "_load_text_csv_file"}
def _load_sequence_json_file(self, filename, **kwargs):
rdd_input = self.sc.sequenceFile(filename)
rdd_json = rdd_input.mapValues(lambda x: json.loads(x))
return rdd_json
def _load_text_json_file(self, filename, separator='\t', **kwargs):
# rdd_input = self.sc.textFile(filename)
# rdd_json = rdd_input.map(lambda x: FileUtil.__parse_json_line(x, separator))
rdd_strings = self.sc.textFile(filename)
rdd_split = rdd_strings.map(lambda line: tuple(line.split(separator, 1)))
def tryJson(v):
try:
j = json.loads(v)
return j
except Exception as e:
print("failed [{}] on {}".format(str(e), v), file=sys.stderr)
rdd_json = rdd_split.mapValues(lambda v: tryJson(v))
return rdd_json
def _load_text_jsonlines_file(self, filename, keyPath='.uri', **kwargs):
rdd_strings = self.sc.textFile(filename)
def tryJson(line):
try:
obj = json.loads(line)
# We ignore all but the first occurrence of key
try:
# key = jq.jq(keyPath).transform(obj, multiple_output=False)
key = obj["uri"]
except:
key = None
if key:
# i.e., a paired RDD
return (key, obj)
else:
raise ValueError("No key (per {}) in line {}".format(keyPath, line))
except Exception as e:
print("failed [{}] on {}".format(str(e), line), file=sys.stderr)
rdd_json = rdd_strings.map(lambda line: tryJson(line))
return rdd_json
def _load_sequence_csv_file(self, filename, **kwargs):
"""Should emulate text/csv"""
raise NotImplementedError("File_Format=sequence, data_type=csv")
def _load_text_csv_file(self, filename, separator=',', **kwargs):
"""Return a pair RDD where key is taken from first column, remaining columns are named after their column id as string"""
rdd_input = self.sc.textFile(filename)
def load_csv_record(line):
input_stream = StringIO.StringIO(line)
reader = csv.reader(input_stream, delimiter=',')
# key in first column, remaining columns 1..n become dict key values
payload = reader.next()
key = payload[0]
rest = payload[1:]
# generate dict of "1": first value, "2": second value, ...
d = {}
for (cell,i) in izip(rest, range(1,1+len(rest))):
d[str(i)] = cell
# just in case, add "0": key
d["0"] = key
return (key, d)
rdd_parsed = rdd_input.map(load_csv_record)
return rdd_parsed
## SAVE
def save_file(self, rdd, filename, file_format='sequence', data_type='json', **kwargs):
try:
handlerName = FileUtil.save_dispatch_table[(file_format, data_type)]
handler = getattr(self, handlerName)
rdd = handler(rdd, filename, **kwargs)
# TBD: return (rdd, manifestEntry)
# entry = self.makeEntry(output_filename=filename,
# output_file_format=file_format,
# output_data_type=data_type)
# return (rdd, entry)
return rdd
except KeyError:
raise NotImplementedError("File_Format={}, data_type={}".format(file_format, data_type))
save_dispatch_table = {("sequence", "json"): "_save_sequence_json_file",
("sequence", "csv"): "_save_sequence_csv_file",
("text", "json"): "_save_text_json_file",
("text", "csv"): "_save_text_csv_file"}
def _save_sequence_json_file(self, rdd, filename, separator='\t', **kwargs):
# regardless of whatever it is, key is retained
rdd.mapValues(lambda x: json.dumps(x)).saveAsSequenceFile(filename)
return filename
def _save_text_json_file(self, rdd, filename, separator='\t', **kwargs):
rdd_json = rdd.map(lambda (k, v): FileUtil.__dump_as_json(k, v, separator))
# this saves the (<uri>, <serialized_json_string>) as as text repn
# perhaps a regular readable text file uri<separator>JSON will be more useful?
rdd_json.saveAsTextFile(filename)
return filename
def _save_text_csv_file(self, rdd, filename, separator='\t', encoding='utf-8', **kwargs):
        # text mode here: io.open() rejects an encoding argument in binary mode
        with io.open(filename, 'w', encoding=encoding) as f:
wrtr = csv.writer(f, delimiter=separator)
def save_csv_record(line):
wrtr.writerow(line)
rdd.foreach(save_csv_record)
return filename
def _save_sequence_csv_file(self, rdd, filename, separator='\t', **kwargs):
raise NotImplementedError("File_Format=sequence, data_type=csv")
## JSON
@staticmethod
def __parse_json_line(line, separator):
line = line.strip()
if len(line) > 0:
line_elem = line.split(separator, 2)
if len(line_elem) > 1:
return line_elem[0], json.loads(line_elem[1])
elif len(line_elem) == 1:
return '', json.loads(line_elem[0])
@staticmethod
def __dump_as_json(key, value, sep):
return key + sep + json.dumps(value)
@staticmethod
def get_json_config(config_spec):
# if it's a dict, or coercible to a dict, return the dict
try:
return as_dict(config_spec)
except TypeError:
pass
# Not a dict
config_file = None
if config_spec.startswith("http"):
# URL: fetch it
config_file = urllib.urlopen(config_spec)
else:
# string: open file with that name
config_file = open(config_spec)
config = json.load(config_file)
# Close any open files
try:
config_file.close()
except:
pass
return config
@staticmethod
def get_config(config_spec):
"""Like get_json_config but does not parse result as JSON"""
config_file = None
if config_spec.startswith("http"):
# URL: fetch it
config_file = urllib.urlopen(config_spec)
else:
# string: open file with that name
config_file = open(config_spec)
        config = config_file.read()
# Close any open files
try:
config_file.close()
except:
pass
return config
##################################################################
import argparse
def main(argv=None):
'''TEST ONLY: this is called if run from command line'''
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input_file', required=True)
parser.add_argument('--input_file_format', default='sequence')
parser.add_argument('--input_data_type', default='json')
parser.add_argument('--input_separator', default='\t')
parser.add_argument('-o','--output_dir', required=True)
parser.add_argument('--output_file_format', default='sequence')
parser.add_argument('--output_data_type', default='json')
parser.add_argument('--output_separator', default='\t')
args=parser.parse_args()
# can be inconvenient to specify tab on the command line
args.input_separator = "\t" if args.input_separator=='tab' else args.input_separator
args.output_separator = "\t" if args.output_separator=='tab' else args.output_separator
sc = SparkContext(appName="fileUtil")
fUtil = FileUtil(sc)
## CONFIG LOAD
input_kwargs = {"file_format": args.input_file_format,
"data_type": args.input_data_type}
parse_kwargs = {"separator": args.input_separator}
load_kwargs = merge_dicts(input_kwargs, parse_kwargs)
## LOAD
rdd = fUtil.load_file(args.input_file, **load_kwargs)
## CONFIG SAVE
output_kwargs = {"file_format": args.output_file_format,
"data_type": args.output_data_type}
emit_kwargs = {"separator": args.output_separator}
save_kwargs = merge_dicts(output_kwargs, emit_kwargs)
## SAVE
fUtil.save_file(rdd, args.output_dir, **save_kwargs)
if __name__ == "__main__":
"""
    Usage: fileUtil.py -i <input_file> -o <output_dir> [--input_*/--output_* format, type and separator options]
"""
main()
| usc-isi-i2/WEDC | spark_dependencies/python_lib/digSparkUtil/fileUtil.py | Python | apache-2.0 | 11,462 | 0.005496 |
''' This script is run as root by the osmc update module. '''
import apt
import socket
import sys
from datetime import datetime
import json
import os
import time
import subprocess
import traceback
from CompLogger import comprehensive_logger as clog
t = datetime
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
try:
sys.stdout = Logger("/var/tmp/OSMC_python_apt_log.txt")
except:
pass
@clog(maxlength=1500)
def call_parent(raw_message, data={}):
address = '/var/tmp/osmc.settings.update.sockfile'
print '%s %s sending response' % (t.now(), 'apt_cache_action.py')
message = (raw_message, data)
message = json.dumps(message)
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(address)
sock.sendall(message)
sock.close()
except Exception as e:
return '%s %s failed to connect to parent - %s' % (t.now(), 'apt_cache_action.py', e)
return 'response sent'
class Main(object):
def __init__(self, action):
# with apt.apt_pkg.SystemLock():
# implements a lock on the package system, so that nothing else can alter packages
print '==================================================================='
print '%s %s running' % (t.now(), 'apt_cache_action.py')
self.error_package = ''
self.error_message = ''
self.heading = 'Updater'
self.action = action
self.cache = apt.Cache()
self.block_update_file = '/var/tmp/.suppress_osmc_update_checks'
self.action_to_method = {
'update' : self.update,
'update_manual' : self.update,
'commit' : self.commit,
'fetch' : self.fetch,
'action_list' : self.action_list,
}
try:
self.act()
except Exception as e:
print '%s %s exception occurred' % (t.now(), 'apt_cache_action.py')
print '%s %s exception value : %s' % (t.now(), 'apt_cache_action.py', e)
deets = 'Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc())
# send the error to the parent (parent will kill the progress bar)
call_parent('apt_error', {'error': self.error_message, 'package': self.error_package, 'exception': deets})
self.respond()
print '%s %s exiting' % (t.now(), 'apt_cache_action.py')
print '==================================================================='
def respond(self):
call_parent('apt_cache %s complete' % self.action)
def act(self):
action = self.action_to_method.get(self.action, False)
if action:
action()
else:
print 'Action not in action_to_method dict'
#@clog()
def action_list(self):
''' This method processes a list sent in argv[2], and either installs or remove packages.
The list is sent as a string:
install_packageid1|=|install_packageid2|=|removal_packageid3'''
self.heading = 'App Store'
action_string = sys.argv[2]
action_dict = self.parse_argv2(action_string)
self.update()
self.cache.open()
for pkg in self.cache:
# mark packages as install or remove
if pkg.shortname in action_dict['install']:
pkg.mark_install()
if pkg.shortname in action_dict['removal']:
pkg.mark_delete(purge=True)
# commit
self.commit_action()
if action_dict['removal']:
# if there were removals then remove the packages that arent needed any more
self.update()
self.cache.open()
removals = False
for pkg in self.cache:
if pkg.is_auto_removable:
pkg.mark_delete(purge=True)
removals = True
if removals:
# commit
self.commit_action()
# #@clog()
def parse_argv2(self, action_string):
install = []
removal = []
actions = action_string.split('|=|')
for action in actions:
if action.startswith('install_'):
install.append(action[len('install_'):])
elif action.startswith('removal_'):
removal.append(action[len('removal_'):])
return {'install': install, 'removal': removal}
#@clog()
def update(self):
dprg = Download_Progress(partial_heading='Updating')
self.cache.update(fetch_progress=dprg, pulse_interval=1000)
# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
return '%s %s cache updated' % (t.now(), 'apt_cache_action.py')
#@clog()
def commit(self):
# check whether any packages are broken, if they are then the install needs to take place outside of Kodi
for pkg in self.cache:
if pkg.is_inst_broken or pkg.is_now_broken:
return "%s is BROKEN, cannot proceed with commit" % pkg.shortname
print '%s %s upgrading all packages' % (t.now(), 'apt_cache_action.py')
self.cache.upgrade(True)
print '%s %s committing cache' % (t.now(), 'apt_cache_action.py')
self.commit_action()
#@clog()
def commit_action(self):
dprg = Download_Progress()
iprg = Install_Progress(self)
self.cache.commit(fetch_progress=dprg, install_progress=iprg)
# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
# remove the file that blocks further update checks
try:
os.remove(self.block_update_file)
except:
return 'Failed to remove block_update_file'
return '%s %s cache committed' % (t.now(), 'apt_cache_action.py')
#@clog()
def fetch(self):
self.cache.upgrade(True)
print '%s %s fetching all packages' % (t.now(), 'apt_cache_action.py')
dprg = Download_Progress()
self.cache.fetch_archives(progress=dprg)
# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
return '%s %s all packages fetched' % (t.now(), 'apt_cache_action.py')
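# Illustrative 'action_list' invocation (a sketch; the package names are
# invented):
#   sys.argv[2] = 'install_vlc|=|install_mpd|=|removal_xterm'
#   parse_argv2() then yields {'install': ['vlc', 'mpd'], 'removal': ['xterm']}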
class Operation_Progress(apt.progress.base.OpProgress):
def __init__(self):
super(Operation_Progress, self).__init__()
def update(self):
call_parent('progress_bar', {'percent': self.percent, 'heading': self.op, 'message':self.sub_op,})
def done(self):
call_parent('progress_bar', {'kill': True})
class Install_Progress(apt.progress.base.InstallProgress):
def __init__(self, parent):
self.parent = parent
super(Install_Progress, self).__init__()
call_parent('progress_bar', {'percent': 0, 'heading': self.parent.heading, 'message':'Starting Installation'})
#@clog()
def error(self, pkg, errormsg):
print 'ERROR!!! \n%s\n' % errormsg
try:
pkgname = os.path.basename(pkg).split('_')
print 'Package affected!!! \n%s\n' % pkgname
self.parent.error_package = pkgname[0]
if len(pkgname) > 1:
self.parent.error_package += ' (' + pkgname[1] + ')'
except:
self.parent.error_package = '(unknown package)'
self.parent.error_message = errormsg
''' (Abstract) Called when a error is detected during the install. '''
# The following method should be overridden to implement progress reporting for dpkg-based runs
# i.e. calls to run() with a filename:
# def processing(self, pkg, stage):
# ''' This method is called just before a processing stage starts. The parameter pkg is the name of the
# package and the parameter stage is one of the stages listed in the dpkg manual under the
# status-fd option, i.e. "upgrade", "install" (both sent before unpacking), "configure", "trigproc",
# "remove", "purge". '''
# def dpkg_status_change(self, pkg, status):
# ''' This method is called whenever the dpkg status of the package changes. The parameter pkg is the
# name of the package and the parameter status is one of the status strings used in the status file
# (/var/lib/dpkg/status) and documented in dpkg(1). '''
# The following methods should be overridden to implement progress reporting for run() calls
# with an apt_pkg.PackageManager object as their parameter:
#@clog()
def status_change(self, pkg, percent, status):
''' This method implements progress reporting for package installation by APT and may be extended to
dpkg at a later time. This method takes two parameters: The parameter percent is a float value
describing the overall progress and the parameter status is a string describing the current status
in an human-readable manner. '''
diff = t.now() - self.pulse_time
if (diff.total_seconds() * 10) < 12:
return True
self.pulse_time = t.now()
call_parent('progress_bar', {'percent': int(percent), 'heading': self.parent.heading, 'message': status})
#@clog()
def start_update(self):
''' This method is called before the installation of any package starts. '''
self.pulse_time = t.now()
return 'Start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def finish_update(self):
''' This method is called when all changes have been applied. '''
return 'Stop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
class Download_Progress(apt.progress.base.AcquireProgress):
def __init__(self, partial_heading='Downloading'):
super(Download_Progress, self).__init__()
self.partial_heading = partial_heading
call_parent('progress_bar', {'percent': 0, 'heading': 'Downloading Update', 'message':'Starting Download',})
#@clog()
def start(self):
''' Invoked when the Acquire process starts running. '''
self.pulse_time = t.now()
return 'Start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def stop(self):
''' Invoked when the Acquire process stops running. '''
return 'Stop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def fetch(self, item):
''' Invoked when an item is being fetched. '''
dsc = item.description.split('/')
self.fetching = self.partial_heading + ': ' + dsc[-1]
# call_parent('progress_bar',{'message': 'Downloading: ' + dsc[-1]})
return 'Fetch' + item.description + '++++++++++++++++++++++++++++++'
#@clog()
def pulse(self, owner):
''' Periodically invoked as something is being downloaded. '''
# if the pulse is less than one second since the last one then ignore the pulse
# this needs to be done as the parents _daemon only checks the queue once a second
diff = t.now() - self.pulse_time
if (diff.total_seconds() * 10) < 11:
return True
else:
self.pulse_time = t.now()
print 'Pulse ==========================================='
print 'current_items', self.current_items
print 'total_items', self.total_items
print 'total_bytes', self.total_bytes
print 'fetched_bytes', self.fetched_bytes
print 'current_bytes', self.current_bytes
print 'current_cps', self.current_cps
print 'Pulse ==========================================='
pct = int(self.current_bytes / float(self.total_bytes) * 100)
cps = self.current_cps / 1024.0
if cps > 1024:
cps = '{0:.2f} MBps'.format(cps / 1024)
else:
cps = '{0:.0f} kBps'.format(cps)
cmb = self.current_bytes / 1048576.0
tmb = self.total_bytes / 1048576.0
msg = self.fetching
hdg = '{0:d} / {1:d} items -- {2:} -- {3:.1f} / {4:.1f}MB'.format(self.current_items, self.total_items, cps, cmb, tmb)
call_parent('progress_bar', {'percent': pct, 'heading': hdg, 'message': msg})
return True
#@clog()
def done(self, item):
''' Invoked when an item has finished downloading. '''
return 'Done ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
if __name__ == "__main__":
if len(sys.argv) > 1:
action = sys.argv[1]
m = Main(action)
del m
| indie1982/osmc-fixes | package/mediacenter-addon-osmc/src/script.module.osmcsetting.updates/resources/lib/apt_cache_action.py | Python | gpl-2.0 | 11,354 | 0.035406 |
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
""" This module defines the charaterizing constants of the Kademlia network
C{checkRefreshInterval} and C{udpDatagramMaxSize} are implementation-specific
constants, and do not affect general Kademlia operation.
"""
######### KADEMLIA CONSTANTS ###########
#: Small number Representing the degree of parallelism in network calls
alpha = 3
#: Maximum number of contacts stored in a bucket; this should be an even number
k = 8
#: Timeout for network operations (in seconds)
rpcTimeout = 5
# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds)
iterativeLookupDelay = rpcTimeout / 2
#: If a k-bucket has not been used for this amount of time, refresh it (in seconds)
refreshTimeout = 3600 # 1 hour
#: The interval at which nodes replicate (republish/refresh) data they are holding
replicateInterval = refreshTimeout
# The time it takes for data to expire in the network; the original publisher of the data
# will also republish the data at this time if it is still valid
dataExpireTimeout = 86400 # 24 hours
######## IMPLEMENTATION-SPECIFIC CONSTANTS ###########
#: The interval in which the node should check its whether any buckets need refreshing,
#: or whether any data needs to be republished (in seconds)
checkRefreshInterval = refreshTimeout/5
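# (with the defaults above this is 3600/5 = 720 seconds, i.e. 12 minutes)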
#: Max size of a single UDP datagram, in bytes. If a message is larger than this, it will
#: be spread accross several UDP packets.
udpDatagramMaxSize = 8192 # 8 KB
| tjgillies/distributed-draw | entangled/kademlia/constants.py | Python | lgpl-3.0 | 1,820 | 0.006593 |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
task = Table('task', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_id', Integer, default=ColumnDefault(0)),
Column('body', String),
Column('taskname', String(length=140)),
Column('timestamp', DateTime),
Column('user_id', Integer),
Column('project_id', Integer),
Column('status', String(length=10)),
Column('depth', Integer, default=ColumnDefault(0)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['task'].columns['parent_id'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['task'].columns['parent_id'].drop()
| itJunky/web-tasker.py | db_repository/versions/028_migration.py | Python | gpl-2.0 | 1,041 | 0.000961 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'zappyk'
import sys, subprocess
from gi.repository import Gtk, Gio
from gi.repository import GLib
###############################################################################
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Hello World")
self.button = Gtk.Button(label="Click Here")
self.button.connect("clicked", self.on_button_clicked)
self.add(self.button)
def on_button_clicked(self, widget):
print("Hello World")
#______________________________________________________________________________
#
def test_1():
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
###############################################################################
class Handler():
def __init__(self, cmd):
self.cmd = CommandTextView(cmd)
def on_button1_clicked(self, widget):
self.cmd.run()
pass
def on_button2_clicked(self, widget):
pass
def on_textview1_add(self, widget):
        pass
def on_window1_delete_event(self, *args):
Gtk.main_quit(*args)
###############################################################################
class CommandTextView(Gtk.TextView):
''' NICE TEXTVIEW THAT READS THE OUTPUT OF A COMMAND SYNCRONOUSLY '''
def __init__(self, command):
'''COMMAND : THE SHELL COMMAND TO SPAWN'''
super(CommandTextView, self).__init__()
self.command = command
def run(self):
''' RUNS THE PROCESS '''
proc = subprocess.Popen(self.command, stdout = subprocess.PIPE) # SPAWNING
GLib.io_add_watch(proc.stdout, # FILE DESCRIPTOR
GLib.IO_IN, # CONDITION
self.write_to_buffer) # CALLBACK
def write_to_buffer(self, fd, condition):
if condition == GLib.IO_IN: #IF THERE'S SOMETHING INTERESTING TO READ
#CZ#char = fd.read(1) # WE READ ONE BYTE PER TIME, TO AVOID BLOCKING
char = fd.read().decode("utf-8")
buff = self.get_buffer()
buff.insert_at_cursor(char) # WHEN RUNNING DON'T TOUCH THE TEXTVIEW!!
return True # FUNDAMENTAL, OTHERWISE THE CALLBACK ISN'T RECALLED
else:
return False # RAISED AN ERROR: EXIT AND I DON'T WANT TO SEE YOU ANYMORE
#______________________________________________________________________________
#
def test_2():
cmd = CommandTextView("find")
win = Gtk.Window()
win.connect("delete-event", lambda wid, event: Gtk.main_quit()) # DEFINING CALLBACKS WITH LAMBDAS
win.set_size_request(200,300)
win.add(cmd)
win.show_all()
cmd.run()
Gtk.main()
#______________________________________________________________________________
#
def test_3():
cmd = CommandTextView("find")
builder = Gtk.Builder()
builder.add_from_file("test-gui-Gtk.glade")
builder.connect_signals(Handler(cmd))
window = builder.get_object("window1")
window.show_all()
cmd.run()
Gtk.main()
###############################################################################
class HeaderBarWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Stack Demo")
#CZ#Gtk.Window.__init__(self, title="Stack Demo", type=Gtk.WINDOW_TOPLEVEL)
self.set_border_width(10)
self.set_default_size(400, 200)
#CZ#self.has_toplevel_focus()
#hb = Gtk.HeaderBar()
#hb.props.show_close_button = True
#hb.props.title = "HeaderBar example"
#self.set_titlebar(hb)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="mail-send-receive-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
#hb.pack_end(button)
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
Gtk.StyleContext.add_class(box.get_style_context(), "linked")
button = Gtk.Button()
button.add(Gtk.Arrow(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE))
box.add(button)
button = Gtk.Button()
button.add(Gtk.Arrow(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE))
box.add(button)
#hb.pack_start(box)
self.add(Gtk.TextView())
#______________________________________________________________________________
#
def test_4():
win = HeaderBarWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
#______________________________________________________________________________
#
if __name__ == '__main__':
test_4() | zappyk-github/zappyk-python | src/src_zappyk/developing/test-gui-Gtk.py | Python | gpl-2.0 | 4,640 | 0.009483 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ODB++ surface parser components
"""
import re
from collections import namedtuple
from .Decoder import DecoderOption
from .Treeifier import TreeifierRule
from .PolygonParser import Polygon
from .Structures import Polarity, polarity_map
from .Attributes import parse_attributes
__all__ = ["surface_decoder_options",
"SurfaceBeginTag", "surface_treeify_rules",
"surface_decoder_options",
"SurfaceEndTag", "Surface", "Polarity"]
Surface = namedtuple("Surface", ["polarity", "dcode", "polygons", "attributes"])
SurfaceBeginTag = namedtuple("SurfaceBeginTag", ["polarity", "dcode", "attributes"])
SurfaceEndTag = namedtuple("SurfaceEndTag", [])
# Surface syntax regular expressions
_surface_re = re.compile(r"^S\s+([PN])\s+(\d+)\s*(;\s*.+?)?$")
_surface_end_re = re.compile(r"^SE\s*$")
def _parse_surface_start(match):
"Parse a surface begin tag regex match"
polarity, dcode, attributes = match.groups()
# Parse attribute string
attributes = parse_attributes(attributes[1:]) \
if attributes is not None else {}
return SurfaceBeginTag(polarity_map[polarity],
int(dcode), attributes)
def _parse_surface_end(match):
"Parse a surface end tag regex match"
return SurfaceEndTag()
surface_decoder_options = [
DecoderOption(_surface_re, _parse_surface_start),
DecoderOption(_surface_end_re, _parse_surface_end)
]
def _treeifier_process_surface(elems):
"""Treeifier processor function for surfaces."""
polygons = []
polarity, dcode, attributes = elems[0] # Poly begin tag
for elem in elems[1:]: # Iterate everything except the end tag
if isinstance(elem, Polygon):
polygons.append(elem)
# Build polygon structure
return Surface(polarity, dcode, polygons, attributes)
surface_treeify_rules = [
TreeifierRule(SurfaceBeginTag, SurfaceEndTag, _treeifier_process_surface),
]
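# Illustrative input (a sketch; the attribute text is invented): a surface
# block in an ODB++ features file begins with a line such as
#
#   S P 0 ;.string=example
#
# which _parse_surface_start() turns into
#   SurfaceBeginTag(polarity=polarity_map["P"], dcode=0,
#                   attributes=parse_attributes(".string=example"))
# followed by Polygon records and closed by a bare "SE" line (SurfaceEndTag);
# the treeifier rule then folds the whole block into a single Surface tuple.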
| ulikoehler/ODBPy | ODBPy/SurfaceParser.py | Python | apache-2.0 | 1,978 | 0.004044 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
RESULT_TYPE_PROCESS_HIDE = 0
RESULT_TYPE_PROCESS_UNHIDE = 1
class Result:
def __init__(self):
self.__dict__['type'] = 0
self.__dict__['item'] = ''
self.__dict__['metaData'] = ''
def __getattr__(self, name):
if name == 'type':
return self.__dict__['type']
if name == 'item':
return self.__dict__['item']
if name == 'metaData':
return self.__dict__['metaData']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'type':
self.__dict__['type'] = value
elif name == 'item':
self.__dict__['item'] = value
elif name == 'metaData':
self.__dict__['metaData'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_TYPE, self.__dict__['type'])
submsg.AddStringUtf8(MSG_KEY_RESULT_ITEM, self.__dict__['item'])
submsg.AddStringUtf8(MSG_KEY_RESULT_METADATA, self.__dict__['metaData'])
mmsg.AddMessage(MSG_KEY_RESULT, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['type'] = submsg.FindU8(MSG_KEY_RESULT_TYPE)
self.__dict__['item'] = submsg.FindString(MSG_KEY_RESULT_ITEM)
self.__dict__['metaData'] = submsg.FindString(MSG_KEY_RESULT_METADATA) | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/security/cmd/hide/type_Result.py | Python | unlicense | 1,899 | 0.002633 |
# -*- coding: utf-8 -*-
#
# TaskBuster documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 16 10:01:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TaskBuster'
copyright = u'2015, Patrick Mazulo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaskBusterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TaskBuster.tex', u'TaskBuster Documentation',
u'Patrick Mazulo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taskbuster', u'TaskBuster Documentation',
[u'Patrick Mazulo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TaskBuster', u'TaskBuster Documentation',
u'Patrick Mazulo', 'TaskBuster', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mazulo/taskbuster-boilerplate | docs/conf.py | Python | mit | 8,303 | 0.006263 |
# -*- coding: utf-8 -*-
"""The plist event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
class PlistFormatter(interface.ConditionalEventFormatter):
"""Formatter for a plist key event."""
DATA_TYPE = u'plist:key'
FORMAT_STRING_SEPARATOR = u''
FORMAT_STRING_PIECES = [
u'{root}/',
u'{key}',
u' {desc}']
SOURCE_LONG = u'Plist Entry'
SOURCE_SHORT = u'PLIST'
manager.FormattersManager.RegisterFormatter(PlistFormatter)
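# Illustrative rendering (a sketch; the values are invented): with
# root='/DeviceCache', key='00-11-22-33-44-55/LastInquiryUpdate' and
# desc='Paired device', the conditional pieces above join with the empty
# separator into:
#   /DeviceCache/00-11-22-33-44-55/LastInquiryUpdate Paired device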
| ostree/plaso | plaso/formatters/plist.py | Python | apache-2.0 | 497 | 0.012072 |
import sublime, sublime_plugin
import difflib
import time
import datetime
import codecs
import os
def diff_changes(file_name, result):
try:
if "Body" in result:
server = result["Body"].splitlines()
elif "Markup" in result:
server = result["Markup"].splitlines()
local = codecs.open(file_name, "r", "utf-8").read().splitlines()
except UnicodeDecodeError:
show_diff_panel("Diff only works with UTF-8 files")
return
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
bdate_literal = result["LastModifiedDate"].split(".")[0]
server_date = datetime.datetime.strptime(bdate_literal, "%Y-%m-%dT%H:%M:%S")
local_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
diff = difflib.unified_diff(server, local, "Server", "Local ", server_date, local_date, lineterm='')
difftxt = u"\n".join(line for line in diff)
if difftxt == "":
show_diff_panel("There is no difference between %s and server" % os.path.basename(file_name))
return
show_diff_panel(difftxt)
def diff_files(file_name, other_file_name):
try:
this_file_content = codecs.open(file_name, "r", "utf-8").read().splitlines()
other_file_content = codecs.open(other_file_name, "r", "utf-8").read().splitlines()
except UnicodeDecodeError:
show_diff_panel("Diff only works with UTF-8 files")
return
diff = difflib.unified_diff(this_file_content, other_file_content, "Server", "Local ", "", "", lineterm='')
difftxt = u"\n".join(line for line in diff)
if difftxt == "":
show_diff_panel("There is no difference between %s and %s" % (
file_name,
other_file_name
))
return
show_diff_panel(difftxt)
def show_diff_panel(difftxt):
win = sublime.active_window()
v = win.create_output_panel('diff_with_server')
v.assign_syntax('Packages/Diff/Diff.tmLanguage')
v.run_command('append', {'characters': difftxt})
win.run_command("show_panel", {"panel": "output.diff_with_server"})
| xjsender/haoide | salesforce/lib/diff.py | Python | mit | 2,098 | 0.005243 |
# Sample AI client: finds a match via POST /api/games/search, then repeatedly
# submits moves to POST /api/games/submit/:gameID until an empty move is entered.
#
# id1: (darwinbot1 P@ssw0rd!! 57d748bc67d0eaf026dff431) <-- this will change with differing mongo instances
import time # for testing, this is not good
import requests # if not installed already, run python -m pip install requests OR pip install requests, whatever you normally do
r = requests.post('http://localhost:80/api/games/search', data={'devkey': "581ced5d7563227053011823", 'username': 'darwinbot2'}) # search for new game
json = r.json() # when request comes back, that means you've found a match! (validation if server goes down?)
print(json)
gameID = json['gameID']
playerID = json['playerID']
print(gameID)
print(playerID)
input = ' '
while input != '':
input = raw_input('input move: ')
r = requests.post('http://localhost:80/api/games/submit/' + gameID, data={'playerID': playerID, 'move': input, 'devkey': "581ced5d7563227053011823"}); # submit sample move
json = r.json()
print(json) | dubwub/F2016-UPE-AI | sample_AIs/darwinbot2.py | Python | mit | 1,018 | 0.016699 |
from django.apps import AppConfig
class AlarmConfig(AppConfig):
name = 'alarm'
| pythonvietnam/nms | apps/alarm/apps.py | Python | mit | 85 | 0 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core.chrome import inspector_console
from telemetry.core.chrome import inspector_memory
from telemetry.core.chrome import inspector_page
from telemetry.core.chrome import inspector_runtime
from telemetry.core.chrome import inspector_timeline
from telemetry.core.chrome import png_bitmap
from telemetry.core.chrome import websocket
class InspectorException(Exception):
pass
class InspectorBackend(object):
def __init__(self, browser, browser_backend, debugger_url):
assert debugger_url
self._browser = browser
self._browser_backend = browser_backend
self._debugger_url = debugger_url
self._socket = None
self._domain_handlers = {}
self._cur_socket_timeout = 0
self._next_request_id = 0
self._console = inspector_console.InspectorConsole(self)
self._memory = inspector_memory.InspectorMemory(self)
self._page = inspector_page.InspectorPage(self)
self._runtime = inspector_runtime.InspectorRuntime(self)
self._timeline = inspector_timeline.InspectorTimeline(self)
def __del__(self):
self.Disconnect()
def _Connect(self):
if self._socket:
return
self._socket = websocket.create_connection(self._debugger_url)
self._cur_socket_timeout = 0
self._next_request_id = 0
def Disconnect(self):
for _, handlers in self._domain_handlers.items():
_, will_close_handler = handlers
will_close_handler()
self._domain_handlers = {}
if self._socket:
self._socket.close()
self._socket = None
# General public methods.
@property
def browser(self):
return self._browser
@property
def url(self):
self.Disconnect()
return self._browser_backend.tab_list_backend.GetTabUrl(self._debugger_url)
def Activate(self):
self._Connect()
self._browser_backend.tab_list_backend.ActivateTab(self._debugger_url)
def Close(self):
self.Disconnect()
self._browser_backend.tab_list_backend.CloseTab(self._debugger_url)
# Public methods implemented in JavaScript.
def WaitForDocumentReadyStateToBeComplete(self, timeout):
util.WaitFor(
lambda: self._runtime.Evaluate('document.readyState') == 'complete',
timeout)
def WaitForDocumentReadyStateToBeInteractiveOrBetter(
self, timeout):
def IsReadyStateInteractiveOrBetter():
rs = self._runtime.Evaluate('document.readyState')
return rs == 'complete' or rs == 'interactive'
util.WaitFor(IsReadyStateInteractiveOrBetter, timeout)
@property
def screenshot_supported(self):
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking === undefined'):
return False
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking.beginWindowSnapshotPNG === undefined'):
return False
# TODO(dtu): Also check for Chrome branch number, because of a bug in
# beginWindowSnapshotPNG in older versions. crbug.com/171592
return True
def Screenshot(self, timeout):
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking === undefined'):
raise Exception("Browser was not started with --enable-gpu-benchmarking")
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking.beginWindowSnapshotPNG === undefined'):
raise Exception("Browser does not support window snapshot API.")
self._runtime.Evaluate("""
if(!window.__telemetry) {
window.__telemetry = {}
}
window.__telemetry.snapshotComplete = false;
window.__telemetry.snapshotData = null;
window.chrome.gpuBenchmarking.beginWindowSnapshotPNG(
function(snapshot) {
window.__telemetry.snapshotData = snapshot;
window.__telemetry.snapshotComplete = true;
}
);
""")
def IsSnapshotComplete():
return self._runtime.Evaluate('window.__telemetry.snapshotComplete')
util.WaitFor(IsSnapshotComplete, timeout)
snap = self._runtime.Evaluate("""
(function() {
var data = window.__telemetry.snapshotData;
delete window.__telemetry.snapshotComplete;
delete window.__telemetry.snapshotData;
return data;
})()
""")
if snap:
return png_bitmap.PngBitmap(snap['data'])
return None
# Console public methods.
@property
def message_output_stream(self): # pylint: disable=E0202
return self._console.message_output_stream
@message_output_stream.setter
def message_output_stream(self, stream): # pylint: disable=E0202
self._console.message_output_stream = stream
# Memory public methods.
def GetDOMStats(self, timeout):
dom_counters = self._memory.GetDOMCounters(timeout)
return {
'document_count': dom_counters['documents'],
'node_count': dom_counters['nodes'],
'event_listener_count': dom_counters['jsEventListeners']
}
# Page public methods.
def PerformActionAndWaitForNavigate(self, action_function, timeout):
self._page.PerformActionAndWaitForNavigate(action_function, timeout)
def Navigate(self, url, script_to_evaluate_on_commit, timeout):
self._page.Navigate(url, script_to_evaluate_on_commit, timeout)
def GetCookieByName(self, name, timeout):
return self._page.GetCookieByName(name, timeout)
# Runtime public methods.
def ExecuteJavaScript(self, expr, timeout):
self._runtime.Execute(expr, timeout)
def EvaluateJavaScript(self, expr, timeout):
return self._runtime.Evaluate(expr, timeout)
# Timeline public methods.
@property
def timeline_model(self):
return self._timeline.timeline_model
def StartTimelineRecording(self):
self._timeline.Start()
def StopTimelineRecording(self):
self._timeline.Stop()
# Methods used internally by other backends.
def DispatchNotifications(self, timeout=10):
self._Connect()
self._SetTimeout(timeout)
try:
data = self._socket.recv()
except (socket.error, websocket.WebSocketException):
if self._browser_backend.tab_list_backend.DoesDebuggerUrlExist(
self._debugger_url):
return
raise exceptions.TabCrashException()
res = json.loads(data)
logging.debug('got [%s]', data)
if 'method' in res:
self._HandleNotification(res)
def _HandleNotification(self, res):
if (res['method'] == 'Inspector.detached' and
res.get('params', {}).get('reason','') == 'replaced_with_devtools'):
self._WaitForInspectorToGoAwayAndReconnect()
return
mname = res['method']
dot_pos = mname.find('.')
domain_name = mname[:dot_pos]
if domain_name in self._domain_handlers:
try:
self._domain_handlers[domain_name][0](res)
except Exception:
import traceback
traceback.print_exc()
else:
logging.debug('Unhandled inspector message: %s', res)
def SendAndIgnoreResponse(self, req):
self._Connect()
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
self._socket.send(data)
logging.debug('sent [%s]', data)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _WaitForInspectorToGoAwayAndReconnect(self):
sys.stderr.write('The connection to Chrome was lost to the Inspector UI.\n')
sys.stderr.write('Telemetry is waiting for the inspector to be closed...\n')
self._socket.close()
self._socket = None
def IsBack():
return self._browser_backend.tab_list_backend.DoesDebuggerUrlExist(
self._debugger_url)
util.WaitFor(IsBack, 512, 0.5)
sys.stderr.write('\n')
sys.stderr.write('Inspector\'s UI closed. Telemetry will now resume.\n')
self._Connect()
def SyncRequest(self, req, timeout=10):
self._Connect()
# TODO(nduca): Listen to the timeout argument
# pylint: disable=W0613
self._SetTimeout(timeout)
self.SendAndIgnoreResponse(req)
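    # Pump incoming messages until the reply matching this request id arrives:
    # notifications are handed to their domain handlers, and replies with a
    # different id are logged and dropped.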
while True:
try:
data = self._socket.recv()
except (socket.error, websocket.WebSocketException):
if self._browser_backend.tab_list_backend.DoesDebuggerUrlExist(
self._debugger_url):
raise util.TimeoutException(
'Timed out waiting for reply. This is unusual.')
raise exceptions.TabCrashException()
res = json.loads(data)
logging.debug('got [%s]', data)
if 'method' in res:
self._HandleNotification(res)
continue
if res['id'] != req['id']:
logging.debug('Dropped reply: %s', json.dumps(res))
continue
return res
def RegisterDomain(self,
domain_name, notification_handler, will_close_handler):
"""Registers a given domain for handling notification methods.
For example, given inspector_backend:
def OnConsoleNotification(msg):
if msg['method'] == 'Console.messageAdded':
print msg['params']['message']
return
def OnConsoleClose(self):
pass
inspector_backend.RegisterDomain('Console',
OnConsoleNotification, OnConsoleClose)
"""
assert domain_name not in self._domain_handlers
self._domain_handlers[domain_name] = (notification_handler,
will_close_handler)
def UnregisterDomain(self, domain_name):
"""Unregisters a previously registered domain."""
assert domain_name in self._domain_handlers
self._domain_handlers.pop(domain_name)
| codenote/chromium-test | tools/telemetry/telemetry/core/chrome/inspector_backend.py | Python | bsd-3-clause | 9,767 | 0.009727 |
#!/usr/bin/python3
import rem_backend.query_data as qd
import rem_backend.propagation_model_estimation as pm
import threading
import _thread
__author__ = "Daniel Denkovski", "Valentin Rakovic"
__copyright__ = "Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia"
__version__ = "0.1.0"
__email__ = "{danield, valentin}@feit.ukim.edu.mk"
'''
REM console module
Showcases the REM backend capabilities of the extension
Used as console interface for users to interact with the platform
'''
def main():
run = 1;
while (run):
print("Please choose from the selection:")
print("1. WiFi device localization")
print("2. Duty cycle calculation")
print("3. Path loss model estimation")
print("0. Quit")
choice = input(" >> ")
if (choice == '0'):
run = 0
elif (choice == '1'):
print("Loc:Enter the channel of interest")
chann = input(" >> ")
dev_list = qd.get_all_active_devices_on_channel(chann,1)
try:
print("Select the index of the device of interest")
ind = 1
for row in dev_list:
print("{}. {}".format(ind,row[0]))
ind += 1
devind = input(" >> ")
print(dev_list[int(devind)-1][0])
try:
location = qd.estimate_tx_location(str(dev_list[int(devind)-1][0]),10)
print("The location of devices {} is:".format(str(dev_list[int(devind)-1][0])))
print("x:{} y:{} z:{} Pt:{} dBm".format(location[0],location[1],location[2],location[3]))
except:
print("not sufficient data for modeling")
print("")
except:
print("no devices")
print("")
elif (choice == '2'):
print("DC:Enter the channel of interest")
chann = input(" >> ")
ux, ul, dx, dy = input("provide ux ul dx dl coordinates of interest: ").split(' ')
try:
val = qd.get_duty_cycle_by_area(chann,10,ux,ul,dx,dy)
dc = val[0][0]
print("Duty cycle value for channel={} is {}".format(chann,dc))
except:
print("not sufficient data for modeling")
print("")
elif (choice == '3'):
print("PL:Enter the channel of interest")
chann = input(" >> ")
try:
val = pm.get_chann_model(10,chann)
print(val)
except:
print("not sufficient data for modeling")
print("")
if __name__=="__main__":
main()
| danieldUKIM/controllers_dockers | rem_console/REM_console.py | Python | apache-2.0 | 2,275 | 0.037528 |
"""
Support for Lutron lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lutron/
"""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.lutron import (
LutronDevice, LUTRON_DEVICES, LUTRON_CONTROLLER)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron lights."""
devs = []
for (area_name, device) in hass.data[LUTRON_DEVICES]['light']:
dev = LutronLight(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
add_entities(devs, True)
def to_lutron_level(level):
"""Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
return float((level * 100) / 255)
def to_hass_level(level):
"""Convert the given Lutron (0.0-100.0) light level to HASS (0-255)."""
return int((level * 255) / 100)
class LutronLight(LutronDevice, Light):
"""Representation of a Lutron Light, including dimmable."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the light."""
self._prev_brightness = None
super().__init__(area_name, lutron_device, controller)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of the light."""
new_brightness = to_hass_level(self._lutron_device.last_level())
if new_brightness != 0:
self._prev_brightness = new_brightness
return new_brightness
def turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs and self._lutron_device.is_dimmable:
brightness = kwargs[ATTR_BRIGHTNESS]
elif self._prev_brightness == 0:
brightness = 255 / 2
else:
brightness = self._prev_brightness
self._prev_brightness = brightness
self._lutron_device.level = to_lutron_level(brightness)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._lutron_device.level = 0
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = {'lutron_integration_id': self._lutron_device.id}
return attr
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_level() > 0
def update(self):
"""Call when forcing a refresh of the device."""
if self._prev_brightness is None:
self._prev_brightness = to_hass_level(self._lutron_device.level)
| PetePriority/home-assistant | homeassistant/components/lutron/light.py | Python | apache-2.0 | 2,793 | 0 |
import json as json_
# Template for code 200 requests so data can easily be added
def ok(d=None, *, json=True):
code = {'code': 200, 'status': 'OK', 'data': d}
if json:
code = json_.dumps(code)
return code
# The 400 codes shouldn't require any special arguments.
def invalid_request(*, json=True):
code = {'code': 400, 'status': 'MALFORMED_REQUEST'}
if json:
code = json_.dumps(code)
return code
def unknown_request(*, json=True):
code = {'code': 400, 'status': 'UNKNOWN_REQUEST'}
if json:
code = json_.dumps(code)
return code
# You can assign the internal server error a number for debugging purposes.
def internal_server_error(n=None, *, json=True):
status_string = 'INTERNAL_SERVER_ERROR'
if n is not None:
status_string += '_{}'.format(n)
code = {'code': 500, 'status': status_string}
if json:
code = json_.dumps(code)
return code
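# Illustrative usage (not part of the original module): each helper returns a JSON
# string by default, or the plain dict when json=False. Key order in the JSON string
# may vary with the Python version.
#
#   ok({'user': 'alice'})        -> '{"code": 200, "status": "OK", "data": {"user": "alice"}}'
#   unknown_request(json=False)  -> {'code': 400, 'status': 'UNKNOWN_REQUEST'}
#   internal_server_error(3)     -> '{"code": 500, "status": "INTERNAL_SERVER_ERROR_3"}'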
| TacticAlpha/basic-lan-webserver | server/status.py | Python | agpl-3.0 | 938 | 0 |
from bank_CI import BankCI
from bank_controller import BankController
from settings import DB_NAME, CREATE_TABLES, DROP_DATABASE
from sql_manager import BankDatabaseManager
def main():
manager = BankDatabaseManager.create_from_db_and_sql(DB_NAME, CREATE_TABLES, DROP_DATABASE, create_if_exists=False)
controller = BankController(manager)
command_interface = BankCI(controller)
command_interface.main_menu()
if __name__ == '__main__':
main()
| pepincho/Python101-and-Algo1-Courses | Programming-101-v3/week9/1-Money-In-The-Bank/start.py | Python | mit | 465 | 0.002151 |
# Major, Minor
VERSION = (1, 4) | duointeractive/python-bluefin | bluefin/__init__.py | Python | bsd-3-clause | 31 | 0.032258 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from netman.core.objects.interface_states import OFF, ON
class BackwardCompatibleSwitchOperations(object):
"""
Depecrated methods
"""
def remove_access_vlan(self, interface_id):
warnings.warn("Deprecated, use unset_interface_access_vlan(interface_id) instead", DeprecationWarning)
return self.unset_interface_access_vlan(interface_id)
def configure_native_vlan(self, interface_id, vlan):
warnings.warn("Deprecated, use set_interface_native_vlan(interface_id, vlan) instead", DeprecationWarning)
return self.set_interface_native_vlan(interface_id, vlan)
def remove_native_vlan(self, interface_id):
warnings.warn("Deprecated, use unset_interface_native_vlan(interface_id) instead", DeprecationWarning)
return self.unset_interface_native_vlan(interface_id)
def remove_vlan_access_group(self, vlan_number, direction):
warnings.warn("Deprecated, use unset_vlan_access_group(vlan_number, direction) instead", DeprecationWarning)
return self.unset_vlan_access_group(vlan_number, direction)
def remove_vlan_vrf(self, vlan_number):
warnings.warn("Deprecated, use unset_vlan_vrf(vlan_number) instead", DeprecationWarning)
return self.unset_vlan_vrf(vlan_number)
def remove_interface_description(self, interface_id):
warnings.warn("Deprecated, use unset_interface_description(interface_id) instead", DeprecationWarning)
return self.unset_interface_description(interface_id)
def remove_bond_description(self, number):
warnings.warn("Deprecated, use unset_bond_description(number) instead", DeprecationWarning)
return self.unset_bond_description(number)
def configure_bond_native_vlan(self, number, vlan):
warnings.warn("Deprecated, use set_bond_native_vlan(number, vlan) instead", DeprecationWarning)
return self.set_bond_native_vlan(number, vlan)
def remove_bond_native_vlan(self, number):
warnings.warn("Deprecated, use unset_bond_native_vlan(number) instead", DeprecationWarning)
return self.unset_bond_native_vlan(number)
def enable_lldp(self, interface_id, enabled):
warnings.warn("Deprecated, use set_interface_lldp_state(interface_id, enabled) instead", DeprecationWarning)
return self.set_interface_lldp_state(interface_id, enabled)
def shutdown_interface(self, interface_id):
warnings.warn("Deprecated, use set_interface_state(interface_id, state) instead", DeprecationWarning)
return self.set_interface_state(interface_id, OFF)
def openup_interface(self, interface_id):
warnings.warn("Deprecated, use set_interface_state(interface_id, state) instead", DeprecationWarning)
return self.set_interface_state(interface_id, ON)
| internap/netman | netman/core/objects/backward_compatible_switch_operations.py | Python | apache-2.0 | 3,375 | 0.003556 |
#!/usr/bin/python
#
# Script to send email notifications when a change in Galera cluster membership
# occurs.
#
# Complies with http://www.codership.com/wiki/doku.php?id=notification_command
#
# Author: Gabe Guillen <gabeguillen@outlook.com>
# Version: 1.5
# Release: 3/5/2015
# Use at your own risk. No warranties expressed or implied.
#
import os
import sys
import getopt
import smtplib
try: from email.mime.text import MIMEText
except ImportError:
# Python 2.4 (CentOS 5.x)
from email.MIMEText import MIMEText
import socket
import email.utils
# Change this to some value if you don't want your server hostname to show in
# the notification emails
THIS_SERVER = socket.gethostname()
# Server hostname or IP address
SMTP_SERVER = 'YOUR_SMTP_HERE'
SMTP_PORT = 25
# Set to True if you need SMTP over SSL
SMTP_SSL = False
# Set to True if you need to authenticate to your SMTP server
SMTP_AUTH = False
# Fill in authorization information here if True above
SMTP_USERNAME = ''
SMTP_PASSWORD = ''
# Takes a single sender
MAIL_FROM = 'YOUR_EMAIL_HERE'
# Takes a list of recipients
MAIL_TO = ['SOME_OTHER_EMAIL_HERE']
# Need Date in Header for SMTP RFC Compliance
DATE = email.utils.formatdate()
# Edit below at your own risk
################################################################################
def main(argv):
str_status = ''
str_uuid = ''
str_primary = ''
str_members = ''
str_index = ''
message = ''
usage = "Usage: " + os.path.basename(sys.argv[0]) + " --status <status str>"
usage += " --uuid <state UUID> --primary <yes/no> --members <comma-seperated"
usage += " list of the component member UUIDs> --index <n>"
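    # For reference, the database server (configured via wsrep_notify_cmd) invokes this
    # script roughly like the following; the values shown are illustrative only:
    #   galeranotify.py --status Synced --uuid <state-uuid> --primary yes \
    #       --members <uuid1>,<uuid2>,<uuid3> --index 1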
try:
opts, args = getopt.getopt(argv, "h", ["status=","uuid=",'primary=','members=','index='])
except getopt.GetoptError:
print usage
sys.exit(2)
if(len(opts) > 0):
message_obj = GaleraStatus(THIS_SERVER)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("--status"):
message_obj.set_status(arg)
elif opt in ("--uuid"):
message_obj.set_uuid(arg)
elif opt in ("--primary"):
message_obj.set_primary(arg)
elif opt in ("--members"):
message_obj.set_members(arg)
elif opt in ("--index"):
message_obj.set_index(arg)
try:
send_notification(MAIL_FROM, MAIL_TO, 'Galera Notification: ' + THIS_SERVER, DATE,
str(message_obj), SMTP_SERVER, SMTP_PORT, SMTP_SSL, SMTP_AUTH,
SMTP_USERNAME, SMTP_PASSWORD)
except Exception, e:
print "Unable to send notification: %s" % e
sys.exit(1)
else:
print usage
sys.exit(2)
sys.exit(0)
def send_notification(from_email, to_email, subject, date, message, smtp_server,
smtp_port, use_ssl, use_auth, smtp_user, smtp_pass):
msg = MIMEText(message)
msg['From'] = from_email
msg['To'] = ', '.join(to_email)
msg['Subject'] = subject
msg['Date'] = date
if(use_ssl):
mailer = smtplib.SMTP_SSL(smtp_server, smtp_port)
else:
mailer = smtplib.SMTP(smtp_server, smtp_port)
if(use_auth):
mailer.login(smtp_user, smtp_pass)
mailer.sendmail(from_email, to_email, msg.as_string())
mailer.close()
class GaleraStatus:
def __init__(self, server):
self._server = server
self._status = ""
self._uuid = ""
self._primary = ""
self._members = ""
self._index = ""
self._count = 0
def set_status(self, status):
self._status = status
self._count += 1
def set_uuid(self, uuid):
self._uuid = uuid
self._count += 1
def set_primary(self, primary):
self._primary = primary.capitalize()
self._count += 1
def set_members(self, members):
self._members = members.split(',')
self._count += 1
def set_index(self, index):
self._index = index
self._count += 1
def __str__(self):
message = "Galera running on " + self._server + " has reported the following"
message += " cluster membership change"
if(self._count > 1):
message += "s"
message += ":\n\n"
if(self._status):
message += "Status of this node: " + self._status + "\n\n"
if(self._uuid):
message += "Cluster state UUID: " + self._uuid + "\n\n"
if(self._primary):
message += "Current cluster component is primary: " + self._primary + "\n\n"
if(self._members):
message += "Current members of the component:\n"
if(self._index):
for i in range(len(self._members)):
if(i == int(self._index)):
message += "-> "
else:
message += "-- "
message += self._members[i] + "\n"
else:
message += "\n".join((" " + str(x)) for x in self._members)
message += "\n"
if(self._index):
message += "Index of this node in the member list: " + self._index + "\n"
return message
if __name__ == "__main__":
main(sys.argv[1:])
| gguillen/galeranotify | galeranotify.py | Python | gpl-2.0 | 5,427 | 0.00387 |
# making a class callable:
class a(object):
def __init__(self, a):
print "__init__"
self.a = a
def __call__(self, *args):
print "__call__"
self.a = args[0]
# Based on this code, when we call a, the __init__ function gets called. My guess
# is that we don't have an instance initialized yet; then, when we have an instance,
# we can call it.
# Initializing an obj
apple = a("Hello")
# Calling an obj
apple("Hi")
# What will this do? :
# @a
# def fuu(*args):
# print args
# fuu("hi")
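# For reference: with @a, fuu is rebound to a(fuu), so __init__ runs once and self.a
# holds the original function object. Calling fuu("hi") then goes through __call__,
# which prints "__call__" and overwrites self.a with "hi"; the original body of fuu
# (print args) never runs.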
| mr-uuid/snippets | python/classes/init_vs_call.py | Python | mit | 530 | 0.001887 |
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Robert Hammelrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Some parts of the software are a port of code provided by Rinky-Dink Electronics, Henning Karlsen,
# with the following copyright notice:
#
## Copyright (C)2015 Rinky-Dink Electronics, Henning Karlsen. All right reserved
## This library is free software; you can redistribute it and/or
## modify it under the terms of the CC BY-NC-SA 3.0 license.
## Please see the included documents for further information.
#
# Class supporting TFT LC-displays with a parallel Interface
# First example: Controller SSD1963 with a 4.3" or 7" display
#
# The minimal connection is:
# X1..X8 for data, Y9 for /Reset, Y10 for /RD, Y11 for /WR and Y12 for /RS
# Then LED must be hard tied to Vcc and /CS to GND.
#
import pyb, stm
from uctypes import addressof
from tft.driver import TFT_io
import gc
# define constants
#
RESET = const(1 << 10) ## Y9
RD = const(1 << 11) ## Y10
WR = const(0x01) ## Y11
D_C = const(0x02) ## Y12
LED = const(1 << 8) ## Y3
POWER = const(1 << 9) ## Y4
## CS is not used and must be hard tied to GND
PORTRAIT = const(1)
LANDSCAPE = const(0)
class TFT:
def __init__(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
self.tft_init(controller, lcd_type, orientation, v_flip, h_flip)
def tft_init(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
#
        # For convenience, define X1..X8 and Y9..Y12 as output ports using the python functions.
        # X1..X8 will be redefined on the fly as Input by accessing the MODER control registers
        # when needed. Y9 is treated separately, since it is used for Reset, which is done at python level
        # since it needs long delays anyhow, 5 and 15 ms vs. 10 µs.
#
# Set TFT general defaults
self.controller = controller
self.lcd_type = lcd_type
self.orientation = orientation
self.v_flip = v_flip # flip vertical
self.h_flip = h_flip # flip horizontal
self.c_flip = 0 # flip blue/red
self.rc_flip = 0 # flip row/column
self.setColor((255, 255, 255)) # set FG color to white as can be.
self.setBGColor((0, 0, 0)) # set BG to black
self.bg_buf = bytearray()
#
self.pin_led = None # deferred init Flag
self.power_control = power_control
if self.power_control:
# special treat for Power Pin
self.pin_power = pyb.Pin("Y4", pyb.Pin.OUT_PP)
self.power(True) ## switch Power on
#
pyb.delay(10)
# this may have to be moved to the controller specific section
if orientation == PORTRAIT:
self.setXY = TFT_io.setXY_P
self.drawPixel = TFT_io.drawPixel_P
else:
self.setXY = TFT_io.setXY_L
self.drawPixel = TFT_io.drawPixel_L
self.swapbytes = TFT_io.swapbytes
self.swapcolors = TFT_io.swapcolors
# ----------
for pin_name in ["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8",
"Y10", "Y11", "Y12"]:
pin = pyb.Pin(pin_name, pyb.Pin.OUT_PP) # set as output
pin.value(1) ## set high as default
# special treat for Reset
self.pin_reset = pyb.Pin("Y9", pyb.Pin.OUT_PP)
# Reset the device
self.pin_reset.value(1) ## do a hard reset
pyb.delay(10)
self.pin_reset.value(0) ## Low
pyb.delay(20)
self.pin_reset.value(1) ## set high again
pyb.delay(20)
#
# Now initialiize the LCD
# This is for the SSD1963 controller and two specific LCDs. More may follow.
# Data taken from the SSD1963 data sheet, SSD1963 Application Note and the LCD Data sheets
#
if controller == "SSD1963": # 1st approach for 480 x 272
TFT_io.tft_cmd_data(0xe2, bytearray(b'\x1d\x02\x54'), 3) # PLL multiplier, set PLL clock to 100M
# N=0x2D for 6.5MHz, 0x1D for 10MHz crystal
# PLLClock = Crystal * (Mult + 1) / (Div + 1)
# The intermediate value Crystal * (Mult + 1) must be between 250MHz and 750 MHz
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x01'), 1) # PLL Enable
pyb.delay(10)
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x03'), 1)
pyb.delay(10)
TFT_io.tft_cmd(0x01) # software reset
pyb.delay(10)
#
# Settings for the LCD
#
            # The LCDC_FPR depends on PLL clock and the recommended LCD Dot clock DCLK
#
# LCDC_FPR = (DCLK * 1048576 / PLLClock) - 1
#
# The other settings are less obvious, since the definitions of the SSD1963 data sheet and the
            # LCD data sheets differ. So what's common, even if the names may differ:
# HDP Horizontal Panel width (also called HDISP, Thd). The value store in the register is HDP - 1
# VDP Vertical Panel Width (also called VDISP, Tvd). The value stored in the register is VDP - 1
# HT Total Horizontal Period, also called HP, th... The exact value does not matter
# VT Total Vertical Period, alco called VT, tv, .. The exact value does not matter
# HPW Width of the Horizontal sync pulse, also called HS, thpw.
# VPW Width of the Vertical sync pulse, also called VS, tvpw
# Front Porch (HFP and VFP) Time between the end of display data and the sync pulse
# Back Porch (HBP and VBP Time between the start of the sync pulse and the start of display data.
# HT = FP + HDP + BP and VT = VFP + VDP + VBP (sometimes plus sync pulse width)
            # Unfortunately, the controller does not use these front/back porch times, instead it uses a starting time
# in the front porch area and defines (see also figures in chapter 13.3 of the SSD1963 data sheet)
            # HPS      Time from that horiz. starting point to the start of the horizontal display area
# LPS Time from that horiz. starting point to the horizontal sync pulse
# VPS Time from the vert. starting point to the first line
# FPS Time from the vert. starting point to the vertical sync pulse
#
# So the following relations must be held:
#
# HT > HDP + HPS
# HPS >= HPW + LPS
# HPS = Back Porch - LPS, or HPS = Horizontal back Porch
# VT > VDP + VPS
# VPS >= VPW + FPS
# VPS = Back Porch - FPS, or VPS = Vertical back Porch
#
            # LPS or FPS may have a value of zero, since the length of the front porch is determined by the
# other figures
#
            # The best is to start with the recommendations of the LCD data sheet for the back porch, pick a
            # sync pulse width and then determine the others, such that they meet the relations. Typically, these
            # values allow for some ambiguity.
#
if lcd_type == "LB04301": # Size 480x272, 4.3", 24 Bit, 4.3"
#
# Value Min Typical Max
# DotClock 5 MHZ 9 MHz 12 MHz
# HT (Hor. Total 490 531 612
# HDP (Hor. Disp) 480
# HBP (back porch) 8 43
# HFP (Fr. porch) 2 8
# HPW (Hor. sync) 1
# VT (Vert. Total) 275 288 335
# VDP (Vert. Disp) 272
# VBP (back porch) 2 12
# VFP (fr. porch) 1 4
# VPW (vert. sync) 1 10
#
# This table in combination with the relation above leads to the settings:
# HPS = 43, HPW = 8, LPS = 0, HT = 531
# VPS = 14, VPW = 10, FPS = 0, VT = 288
#
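                # Sanity check of the relations above with these numbers:
                #   HT 531 > HDP 480 + HPS 43 = 523 and HPS 43 >= HPW 8 + LPS 0
                #   VT 288 > VDP 272 + VPS 14 = 286 and VPS 14 >= VPW 10 + FPS 0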
self.disp_x_size = 479
self.disp_y_size = 271
TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x01\x70\xa3'), 3) # PLL setting for PCLK
# (9MHz * 1048576 / 100MHz) - 1 = 94371 = 0x170a3
TFT_io.tft_cmd_data_AS(0xb0, bytearray( # # LCD SPECIFICATION
[0x20, # 24 Color bits, HSync/VSync low, No Dithering
0x00, # TFT mode
self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
0x00]), 7) # Last byte only required for a serial TFT
TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x02\x13\x00\x2b\x08\x00\x00\x00'), 8)
# HSYNC, Set HT 531 HPS 43 HPW=Sync pulse 8 LPS 0
TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x01\x20\x00\x0e\x0a\x00\x00'), 7)
# VSYNC, Set VT 288 VPS 14 VPW 10 FPS 0
TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
elif lcd_type == "AT070TN92": # Size 800x480, 7", 18 Bit, lower color bits ignored
#
# Value Min Typical Max
# DotClock 26.4 MHz 33.3 MHz 46.8 MHz
# HT (Hor. Total 862 1056 1200
# HDP (Hor. Disp) 800
# HBP (back porch) 46 46 46
# HFP (Fr. porch) 16 210 254
# HPW (Hor. sync) 1 40
# VT (Vert. Total) 510 525 650
# VDP (Vert. Disp) 480
# VBP (back porch) 23 23 23
# VFP (fr. porch) 7 22 147
# VPW (vert. sync) 1 20
#
# This table in combination with the relation above leads to the settings:
# HPS = 46, HPW = 8, LPS = 0, HT = 1056
                # VPS = 23, VPW = 10, FPS = 0, VT = 525
#
self.disp_x_size = 799
self.disp_y_size = 479
TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x05\x53\xf6'), 3) # PLL setting for PCLK
# (33.3MHz * 1048576 / 100MHz) - 1 = 349174 = 0x553f6
TFT_io.tft_cmd_data_AS(0xb0, bytearray( # # LCD SPECIFICATION
[0x00, # 18 Color bits, HSync/VSync low, No Dithering/FRC
0x00, # TFT mode
self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
0x00]), 7) # Last byte only required for a serial TFT
TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x04\x1f\x00\x2e\x08\x00\x00\x00'), 8)
# HSYNC, Set HT 1056 HPS 46 HPW 8 LPS 0
TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x02\x0c\x00\x17\x08\x00\x00'), 7)
# VSYNC, Set VT 525 VPS 23 VPW 08 FPS 0
TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
else:
print("Wrong Parameter lcd_type: ", lcd_type)
return
TFT_io.tft_cmd_data_AS(0xBA, bytearray(b'\x0f'), 1) # GPIO[3:0] out 1
TFT_io.tft_cmd_data_AS(0xB8, bytearray(b'\x07\x01'), 1) # GPIO3=input, GPIO[2:0]=output
TFT_io.tft_cmd_data_AS(0xf0, bytearray(b'\x00'), 1) # Pixel data Interface 8 Bit
TFT_io.tft_cmd(0x29) # Display on
TFT_io.tft_cmd_data_AS(0xbe, bytearray(b'\x06\xf0\x01\xf0\x00\x00'), 6)
# Set PWM for B/L
            TFT_io.tft_cmd_data_AS(0xd0, bytearray(b'\x0d'), 1) # Set DBC: enable, aggressive
else:
print("Wrong Parameter controller: ", controller)
return
#
# Set character printing defaults
#
self.text_font = None
self.setTextStyle(self.color, self.BGcolor, 0, None, 0)
#
# Init done. clear Screen and switch BG LED on
#
self.text_x = self.text_y = self.text_yabs = 0
self.clrSCR() # clear the display
# self.backlight(100) ## switch BG LED on
#
# Return screen dimensions
#
def getScreensize(self):
if self.orientation == LANDSCAPE:
return (self.disp_x_size + 1, self.disp_y_size + 1)
else:
return (self.disp_y_size + 1, self.disp_x_size + 1)
#
# set backlight brightness
#
def backlight(self, percent):
# deferred init of LED PIN
if self.pin_led is None:
# special treat for BG LED
self.pin_led = pyb.Pin("Y3", pyb.Pin.OUT_PP)
self.led_tim = pyb.Timer(4, freq=500)
self.led_ch = self.led_tim.channel(3, pyb.Timer.PWM, pin=self.pin_led)
percent = max(0, min(percent, 100))
self.led_ch.pulse_width_percent(percent) # set LED
#
# switch power on/off
#
def power(self, onoff):
if self.power_control:
if onoff:
self.pin_power.value(True) ## switch power on or off
else:
self.pin_power.value(False)
#
# set the tft flip modes
#
def set_tft_mode(self, v_flip = False, h_flip = False, c_flip = False, orientation = LANDSCAPE):
self.v_flip = v_flip # flip vertical
self.h_flip = h_flip # flip horizontal
self.c_flip = c_flip # flip blue/red
self.orientation = orientation # LANDSCAPE/PORTRAIT
TFT_io.tft_cmd_data_AS(0x36,
bytearray([(self.orientation << 5) |(self.c_flip << 3) | (self.h_flip & 1) << 1 | (self.v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
#
# get the tft flip modes
#
def get_tft_mode(self):
return (self.v_flip, self.h_flip, self.c_flip, self.orientation) #
#
# set the color used for the draw commands
#
def setColor(self, fgcolor):
self.color = fgcolor
self.colorvect = bytearray(self.color) # prepare byte array
#
# Set BG color used for the draw commands
#
def setBGColor(self, bgcolor):
self.BGcolor = bgcolor
self.BGcolorvect = bytearray(self.BGcolor) # prepare byte array
self.BMPcolortable = bytearray([self.BGcolorvect[2], # create colortable
self.BGcolorvect[1], self.BGcolorvect[0],0,
self.colorvect[2], self.colorvect[1], self.colorvect[0],0])
#
# get the color used for the draw commands
#
def getColor(self):
return self.color
#
    # get the BG color used for the draw commands
#
def getBGColor(self):
return self.BGcolor
#
# Draw a single pixel at location x, y with color
# Rather slow at 40µs/Pixel
#
def drawPixel_py(self, x, y, color):
self.setXY(x, y, x, y)
TFT_io.displaySCR_AS(color, 1) #
#
# clear screen, set it to BG color.
#
def clrSCR(self, color = None):
colorvect = self.BGcolorvect if color is None else bytearray(color)
self.clrXY()
TFT_io.fillSCR_AS(colorvect, (self.disp_x_size + 1) * (self.disp_y_size + 1))
self.setScrollArea(0, self.disp_y_size + 1, 0)
self.setScrollStart(0)
self.setTextPos(0,0)
#
# reset the address range to fullscreen
#
def clrXY(self):
if self.orientation == LANDSCAPE:
self.setXY(0, 0, self.disp_x_size, self.disp_y_size)
else:
self.setXY(0, 0, self.disp_y_size, self.disp_x_size)
#
# Draw a line from x1, y1 to x2, y2 with the color set by setColor()
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
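    # The general case below is an integer-only, Bresenham-style line: t accumulates
    # the error term and decides when to step along the minor axis.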
def drawLine(self, x1, y1, x2, y2, color = None):
if y1 == y2:
self.drawHLine(x1, y1, x2 - x1 + 1, color)
elif x1 == x2:
self.drawVLine(x1, y1, y2 - y1 + 1, color)
else:
colorvect = self.colorvect if color is None else bytearray(color)
dx, xstep = (x2 - x1, 1) if x2 > x1 else (x1 - x2, -1)
dy, ystep = (y2 - y1, 1) if y2 > y1 else (y1 - y2, -1)
col, row = x1, y1
if dx < dy:
t = - (dy >> 1)
while True:
self.drawPixel(col, row, colorvect)
if row == y2:
return
row += ystep
t += dx
if t >= 0:
col += xstep
t -= dy
else:
t = - (dx >> 1)
while True:
self.drawPixel(col, row, colorvect)
if col == x2:
return
col += xstep
t += dy
if t >= 0:
row += ystep
t -= dx
#
# Draw a horizontal line with 1 Pixel width, from x,y to x + l - 1, y
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawHLine(self, x, y, l, color = None): # draw horizontal Line
colorvect = self.colorvect if color is None else bytearray(color)
if l < 0: # negative length, swap parameters
l = -l
x -= l
self.setXY(x, y, x + l - 1, y) # set display window
TFT_io.fillSCR_AS(colorvect, l)
#
# Draw a vertical line with 1 Pixel width, from x,y to x, y + l - 1
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawVLine(self, x, y, l, color = None): # draw vertical Line
colorvect = self.colorvect if color is None else bytearray(color)
if l < 0: # negative length, swap parameters
l = -l
y -= l
self.setXY(x, y, x, y + l - 1) # set display window
TFT_io.fillSCR_AS(colorvect, l)
#
# Draw rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.drawHLine(x1, y1, x2 - x1 + 1, color)
self.drawHLine(x1, y2, x2 - x1 + 1, color)
self.drawVLine(x1, y1, y2 - y1 + 1, color)
self.drawVLine(x2, y1, y2 - y1 + 1, color)
#
# Fill rectangle
# Almost straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillRectangle(self, x1, y1, x2, y2, color=None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.setXY(x1, y1, x2, y2) # set display window
if color:
TFT_io.fillSCR_AS(bytearray(color), (x2 - x1 + 1) * (y2 - y1 + 1))
else:
TFT_io.fillSCR_AS(self.colorvect, (x2 - x1 + 1) * (y2 - y1 + 1))
#
# Draw smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawClippedRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if (x2-x1) > 4 and (y2-y1) > 4:
colorvect = self.colorvect if color is None else bytearray(color)
self.drawPixel(x1 + 2,y1 + 1, colorvect)
self.drawPixel(x1 + 1,y1 + 2, colorvect)
self.drawPixel(x2 - 2,y1 + 1, colorvect)
self.drawPixel(x2 - 1,y1 + 2, colorvect)
self.drawPixel(x1 + 2,y2 - 1, colorvect)
self.drawPixel(x1 + 1,y2 - 2, colorvect)
self.drawPixel(x2 - 2,y2 - 1, colorvect)
self.drawPixel(x2 - 1,y2 - 2, colorvect)
self.drawHLine(x1 + 3, y1, x2 - x1 - 5, colorvect)
self.drawHLine(x1 + 3, y2, x2 - x1 - 5, colorvect)
self.drawVLine(x1, y1 + 3, y2 - y1 - 5, colorvect)
self.drawVLine(x2, y1 + 3, y2 - y1 - 5, colorvect)
#
# Fill smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillClippedRectangle(self, x1, y1, x2, y2, color = None):
        if x1 > x2:
            x1, x2 = x2, x1
        if y1 > y2:
            y1, y2 = y2, y1
if (x2-x1) > 4 and (y2-y1) > 4:
for i in range(((y2 - y1) // 2) + 1):
if i == 0:
self.drawHLine(x1 + 3, y1 + i, x2 - x1 - 5, color)
self.drawHLine(x1 + 3, y2 - i, x2 - x1 - 5, color)
elif i == 1:
self.drawHLine(x1 + 2, y1 + i, x2 - x1 - 3, color)
self.drawHLine(x1 + 2, y2 - i, x2 - x1 - 3, color)
elif i == 2:
self.drawHLine(x1 + 1, y1 + i, x2 - x1 - 1, color)
self.drawHLine(x1 + 1, y2 - i, x2 - x1 - 1, color)
else:
self.drawHLine(x1, y1 + i, x2 - x1 + 1, color)
self.drawHLine(x1, y2 - i, x2 - x1 + 1, color)
#
# draw a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawCircle(self, x, y, radius, color = None):
colorvect = self.colorvect if color is None else bytearray(color)
f = 1 - radius
ddF_x = 1
ddF_y = -2 * radius
x1 = 0
y1 = radius
self.drawPixel(x, y + radius, colorvect)
self.drawPixel(x, y - radius, colorvect)
self.drawPixel(x + radius, y, colorvect)
self.drawPixel(x - radius, y, colorvect)
while x1 < y1:
if f >= 0:
y1 -= 1
ddF_y += 2
f += ddF_y
x1 += 1
ddF_x += 2
f += ddF_x
self.drawPixel(x + x1, y + y1, colorvect)
self.drawPixel(x - x1, y + y1, colorvect)
self.drawPixel(x + x1, y - y1, colorvect)
self.drawPixel(x - x1, y - y1, colorvect)
self.drawPixel(x + y1, y + x1, colorvect)
self.drawPixel(x - y1, y + x1, colorvect)
self.drawPixel(x + y1, y - x1, colorvect)
self.drawPixel(x - y1, y - x1, colorvect)
#
# fill a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
# Instead of calculating x = sqrt(r*r - y*y), it searches the x
# for r*r = x*x + x*x
#
def fillCircle(self, x, y, radius, color = None):
r_square = radius * radius * 4
for y1 in range (-(radius * 2), 1):
y_square = y1 * y1
for x1 in range (-(radius * 2), 1):
if x1*x1+y_square <= r_square:
x1i = x1 // 2
y1i = y1 // 2
self.drawHLine(x + x1i, y + y1i, 2 * (-x1i), color)
self.drawHLine(x + x1i, y - y1i, 2 * (-x1i), color)
break;
#
# Draw a bitmap at x,y with size sx, sy
# mode determines the type of expected data
# mode = 1: The data contains 1 bit per pixel, mapped to fg/bg color
# unless a colortable is provided
# mode = 2: The data contains 2 bit per pixel; a colortable with 4 entries must be provided
# mode = 4: The data contains 4 bit per pixel;
# a colortable with 16 entries must be provided
# mode = 8: The data contains 8 bit per pixel;
# a colortable with 256 entries must be provided
# mode = 16: The data must contain 2 packed bytes/pixel red/green/blue in 565 format
# mode = 24: The data must contain 3 bytes/pixel red/green/blue
#
def drawBitmap(self, x, y, sx, sy, data, mode = 24, colortable = None):
self.setXY(x, y, x + sx - 1, y + sy - 1)
if mode == 24:
TFT_io.displaySCR_AS(data, sx * sy)
elif mode == 16:
TFT_io.displaySCR565_AS(data, sx * sy)
elif mode == 1:
if colortable is None:
colortable = self.BMPcolortable # create colortable
TFT_io.displaySCR_bmp(data, sx*sy, 1, colortable)
elif mode == 2:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 2, colortable)
elif mode == 4:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 4, colortable)
elif mode == 8:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 8, colortable)
#
# set scroll area to the region between the first and last line
#
def setScrollArea(self, tfa, vsa, bfa):
TFT_io.tft_cmd_data_AS(0x33, bytearray( #set scrolling range
[(tfa >> 8) & 0xff, tfa & 0xff,
(vsa >> 8) & 0xff, vsa & 0xff,
(bfa >> 8) & 0xff, bfa & 0xff]), 6)
self.scroll_tfa = tfa
self.scroll_vsa = vsa
self.scroll_bfa = bfa
self.setScrollStart(self.scroll_tfa)
x, y = self.getTextPos()
self.setTextPos(x, y) # realign pointers
#
# get scroll area of the region between the first and last line
#
def getScrollArea(self):
return self.scroll_tfa, self.scroll_vsa, self.scroll_bfa
#
# set the line which is displayed first
#
def setScrollStart(self, lline):
self.scroll_start = lline # store the logical first line
TFT_io.tft_cmd_data_AS(0x37, bytearray([(lline >> 8) & 0xff, lline & 0xff]), 2)
#
# get the line which is displayed first
#
def getScrollStart(self):
return self.scroll_start # get the logical first line
#
# Scroll vsa up/down by a number of pixels
#
def scroll(self, pixels):
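        # Advance the logical first line by 'pixels', wrapping around inside the
        # vertical scroll area (scroll_vsa) below the top fixed area (scroll_tfa).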
line = ((self.scroll_start - self.scroll_tfa + pixels) % self.scroll_vsa
+ self.scroll_tfa)
self.setScrollStart(line) # set the new line
#
# Set text position
#
def setTextPos(self, x, y, clip = False, scroll = True):
self.text_width, self.text_height = self.getScreensize() ## height possibly wrong
self.text_x = x
if self.scroll_tfa <= y < (self.scroll_tfa + self.scroll_vsa): # in scroll area ? check later for < or <=
# correct position relative to scroll start
self.text_y = (y + self.scroll_start - self.scroll_tfa)
if self.text_y >= (self.scroll_tfa + self.scroll_vsa):
self.text_y -= self.scroll_vsa
else: # absolute
self.text_y = y
self.text_yabs = y
# Hint: self.text_yabs = self.text_y - self.scroll_start) % self.scroll_vsa + self.scroll_tfa)
if clip and (self.text_x + clip) < self.text_width:
self.text_width = self.text_x + clip
self.text_scroll = scroll
#
# Get text position
#
def getTextPos(self, abs = True):
if abs:
return (self.text_x, self.text_yabs)
else:
return (self.text_x, self.text_y)
#
# Set Text Style
#
def setTextStyle(self, fgcolor=None, bgcolor=None, transparency=None, font=None, gap=None):
if font is not None:
self.text_font = font
self.text_rows = font.height()
self.text_cols = font.max_width()
if transparency is not None:
self.transparency = transparency
if gap is not None:
self.text_gap = gap
if bgcolor is not None:
self.text_bgcolor = bgcolor
if fgcolor is not None:
self.text_fgcolor = fgcolor
self.text_color = (bytearray(self.text_bgcolor)
+ bytearray(self.text_fgcolor)
+ bytearray([self.transparency]))
#
# Get Text Style: return (color, bgcolor, font, transpareny, gap)
#
def getTextStyle(self):
return (self.text_color[3:6], self.text_color[0:3],
self.transparency, self.text_font, self.text_gap)
#
# Check, if a new line is to be opened
# if yes, advance, including scrolling, and clear line, if flags is set
# Obsolete?
#
def printNewline(self, clear = False):
if (self.text_yabs + self.text_rows) >= (self.scroll_tfa + self.scroll_vsa): # does the line fit?
self.scroll(self.text_rows) # no. scroll
else: # Yes, just advance pointers
self.text_yabs += self.text_rows
self.setTextPos(self.text_x, self.text_yabs)
if clear:
self.printClrLine(2) # clear actual line
#
# Carriage Return
#
def printCR(self): # clear to end of line
self.text_x = 0
#
# clear line modes
#
def printClrLine(self, mode = 0): # clear to end of line/bol/line
if mode == 0:
self.setXY(self.text_x, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_width - self.text_x + 1) * self.text_rows)
elif mode == 1 and self.text_x > 0:
self.setXY(0, self.text_y,
self.text_x - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_x - 1) * self.text_rows)
elif mode == 2:
self.setXY(0, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.text_rows)
#
    # clear screen modes
#
def printClrSCR(self): # clear Area set by setScrollArea
self.setXY(0, self.scroll_tfa,
self.text_width - 1, self.scroll_tfa + self.scroll_vsa) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.scroll_vsa)
self.setScrollStart(self.scroll_tfa)
self.setTextPos(0, self.scroll_tfa)
#
# Print string s, returning the length of the printed string in pixels
#
def printString(self, s, bg_buf=None):
len = 0
for c in s:
cols = self.printChar(c, bg_buf)
if cols == 0: # could not print (any more)
break
len += cols
return len
#
    # Print char c using the given char bitmap at the current text position, returning the width of the printed char in pixels
#
def printChar(self, c, bg_buf=None):
# get the charactes pixel bitmap and dimensions
if self.text_font:
fmv, rows, cols = self.text_font.get_ch(c)
else:
raise AttributeError('No font selected')
cbytes, cbits = divmod(cols, 8) # Not in packed format
dcols = (cbytes + 1) * 8 if cbits else cbytes * 8 # cols for display
pix_count = dcols * rows # number of bits in the char
# test char fit
if self.text_x + cols > self.text_width: # does the char fit on the screen?
if self.text_scroll:
self.printCR() # No, then CR
self.printNewline(True) # NL: advance to the next line
else:
return 0
# Retrieve Background data if transparency is required
if self.transparency: # in case of transpareny, the frame buffer content is needed
if bg_buf is None: # buffer allocation needed?
if len(self.bg_buf) < pix_count * 3:
del(self.bg_buf)
gc.collect()
self.bg_buf = bytearray(pix_count * 3) # Make it bigger
bg_buf = self.bg_buf
self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
TFT_io.tft_read_cmd_data_AS(0x2e, bg_buf, pix_count * 3) # read background data
else:
bg_buf = 0 # dummy assignment, since None is not accepted
# Set XY range & print char
self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
TFT_io.displaySCR_charbitmap(addressof(fmv), pix_count, self.text_color, bg_buf) # display char!
#advance pointer
self.text_x += (cols + self.text_gap)
return cols + self.text_gap
| peterhinch/micropython-tft-gui | tft/driver/tft.py | Python | mit | 32,591 | 0.010034 |
# -*- coding: utf-8 -*-
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class BaiduSpider(RedisCrawlSpider):
    """Spider that reads urls from the redis queue (crawler:start_keyword)."""
name = 'baidu'
redis_key = 'crawler:start_keyword'
"""Spider that reads urls from redis queue when idle."""
rules = (
Rule(LinkExtractor("baidu.php"), callback='parse_page', follow=True),
)
def __init__(self, *args, **kwargs):
# Dynamically define the allowed domains list.
domain = kwargs.pop('domain', '')
self.allowed_domains = filter(None, domain.split(','))
super(BaiduSpider, self).__init__(*args, **kwargs)
def parse_page(self, response):
data = {
'name': response.css('title::text').extract_first(),
'url': response.url,
}
import pprint
pprint.pprint(data)
return data
| scrapycloud/scrapy-cluster | crawler/crawler/spiders/baidu.py | Python | mit | 979 | 0.002043 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Locking functionality when launching things from the command line.
Uses a pidfile.
This prevents multiple identical workflows to be launched simultaneously.
"""
from __future__ import print_function
import hashlib
import os
from luigi import six
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
cmd = 'ps -p %s -o command=' % (pid,)
with os.popen(cmd, 'r') as p:
return p.readline().strip()
def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
if my_pid is None:
my_pid = os.getpid()
my_cmd = getpcmd(my_pid)
if six.PY3:
cmd_hash = my_cmd.encode('utf8')
else:
cmd_hash = my_cmd
pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
def acquire_for(pid_dir, num_available=1):
"""
Makes sure the process is only run once at the same time with the same name.
    Notice that since we check the process name, different parameters to the same
command can spawn multiple processes at the same time, i.e. running
"/usr/bin/my_process" does not prevent anyone from launching
"/usr/bin/my_process --foo bar".
"""
my_pid, my_cmd, pid_file = get_info(pid_dir)
# Check if there is a pid file corresponding to this name
if not os.path.exists(pid_dir):
os.mkdir(pid_dir)
os.chmod(pid_dir, 0o777)
pids = set()
pid_cmds = {}
if os.path.exists(pid_file):
# There is such a file - read the pid and look up its process name
pids.update(filter(None, map(str.strip, open(pid_file))))
pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
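        # Only pids whose command line matches ours count towards the limit; pids of
        # dead processes resolve to an empty command and are dropped when the pid
        # file is rewritten below.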
matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
if len(matching_pids) >= num_available:
# We are already running under a different pid
print('Pid(s)', ', '.join(matching_pids), 'already running')
return False
else:
# The pid belongs to something else, we could
pass
pid_cmds[str(my_pid)] = my_cmd
# Write pids
pids.add(str(my_pid))
with open(pid_file, 'w') as f:
f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
# Make the file writable by all
if os.name == 'nt':
pass
else:
s = os.stat(pid_file)
if os.getuid() == s.st_uid:
os.chmod(pid_file, s.st_mode | 0o777)
return True
| torypages/luigi | luigi/lock.py | Python | apache-2.0 | 3,102 | 0.001289 |
#!/usr/bin/env python
"""alerts.py Classes for sendings alerts
"""
__author__ = "Jean-Martin Archer"
__copyright__ = "Copyright 2013, MIT License."
import smtplib
from twilio.rest import TwilioRestClient
from vendors.pushbullet.pushbullet import PushBullet
import configuration
class Alerts(object):
    """Dispatcher that sends a message through every alert channel enabled in the config."""
def __init__(self, config_path='./config/'):
self.config = configuration.load(config_path)
self.register()
def register(self):
alerts = self.config['alerts']
alerts_list = []
        if alerts['sms']['on']:
            alerts_list.append(AlertSMS(alerts['AlertSMS']))
        if alerts['pushbullet']['on']:
            alerts_list.append(AlertPushBullet(alerts['AlertPushBullet']))
        if alerts['email']['on']:
            alerts_list.append(AlertEmail(alerts['AlertEmail']))
self.alerts = alerts_list
def send(self, message):
for alert in self.alerts:
alert.send_notification(message)
class BasicAlert(object):
    """Base class for alert channels. This is more an interface/contract
    than anything else"""
def __init__(self, config):
self.config = config
self.setup()
def setup(self):
raise NotImplementedError
def send_notification(self, message):
raise NotImplementedError
class AlertEmail(BasicAlert):
    """Alert channel that sends notifications by email."""
def setup(self):
self.sender = self.config['email_sender']
self.receivers = self.config['email_receivers']
self.server = self.config['server']
def send_notification(self, message):
email_body = """From: Alert <%s>
To: Alert <%s>
Subject: %s
This is a test e-mail message.
""" % (self.sender, self.receivers, message)
try:
smtpObj = smtplib.SMTP(self.server)
smtpObj.sendmail(self.sender, self.receivers, email_body)
print "Successfully sent AlertEmail"
        except smtplib.SMTPException:
print "Error: unable to send AlertEmail"
class AlertPushBullet(BasicAlert):
    """Alert channel using PushBullet. Get your API key from
    https://www.PushBullet.com/account
Use the pyPushBullet API to know which deviceID to use.
"""
def setup(self):
self.push = PushBullet(self.config['apikey'])
def send_notification(self, message):
for device in self.config['device']:
self.push.pushNote(device, message, message)
def get_device_id(self):
print self.push.getDevices()
class AlertSMS(BasicAlert):
    """Alert channel that sends SMS using your twilio.com account."""
def setup(self):
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = self.config['twilio_sid']
auth_token = self.config['twilio_auth_token']
self.client = TwilioRestClient(account_sid, auth_token)
        self.create = self.client.sms.messages.create
def send_notification(self, message):
message = self.create(body=message,
to=self.config['to_number'],
from_=self.config["from_number"])
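# Illustrative usage (not part of the original module), assuming ./config/ contains the
# settings expected by configuration.load():
#
#   alerts = Alerts(config_path='./config/')
#   alerts.send('disk usage above 90% on host web-1')
#
# Every channel enabled under config['alerts'] receives the message.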
| j-martin/raspberry-gpio-zmq | raspzmq/alerts.py | Python | mit | 3,371 | 0.00089 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import subprocess
from collections import OrderedDict
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.tasks.protobuf_parse import ProtobufParse
from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.binaries.binary_util import BinaryUtil
from pants.fs.archive import ZIP
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
class ProtobufGen(SimpleCodegenTask):
@classmethod
def global_subsystems(cls):
return super(ProtobufGen, cls).global_subsystems() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(ProtobufGen, cls).register_options(register)
# The protoc version and the plugin names are used as proxies for the identity of the protoc
# executable environment here. Although version is an obvious proxy for the protoc binary
# itself, plugin names are less so and plugin authors must include a version in the name for
# proper invalidation of protobuf products in the face of plugin modification that affects
# plugin outputs.
register('--version', advanced=True, fingerprint=True,
help='Version of protoc. Used to create the default --javadeps and as part of '
'the path to lookup the tool with --pants-support-baseurls and '
'--pants-bootstrapdir. When changing this parameter you may also need to '
'update --javadeps.',
default='2.4.1')
register('--plugins', advanced=True, fingerprint=True, action='append',
help='Names of protobuf plugins to invoke. Protoc will look for an executable '
'named protoc-gen-$NAME on PATH.',
default=[])
register('--extra_path', advanced=True, action='append',
help='Prepend this path onto PATH in the environment before executing protoc. '
'Intended to help protoc find its plugins.',
default=None)
register('--supportdir', advanced=True,
help='Path to use for the protoc binary. Used as part of the path to lookup the'
'tool under --pants-bootstrapdir.',
default='bin/protobuf')
register('--javadeps', advanced=True, action='append',
help='Dependencies to bootstrap this task for generating java code. When changing '
'this parameter you may also need to update --version.',
default=['3rdparty:protobuf-java'])
# TODO https://github.com/pantsbuild/pants/issues/604 prep start
@classmethod
def prepare(cls, options, round_manager):
super(ProtobufGen, cls).prepare(options, round_manager)
round_manager.require_data(JarImportProducts)
round_manager.require_data('deferred_sources')
# TODO https://github.com/pantsbuild/pants/issues/604 prep finish
def __init__(self, *args, **kwargs):
"""Generates Java files from .proto files using the Google protobuf compiler."""
super(ProtobufGen, self).__init__(*args, **kwargs)
self.plugins = self.get_options().plugins
self._extra_paths = self.get_options().extra_path
@memoized_property
def protobuf_binary(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_binary(self.get_options().supportdir,
self.get_options().version,
'protoc')
@property
def javadeps(self):
return self.resolve_deps(self.get_options().javadeps)
@property
def synthetic_target_type(self):
return JavaLibrary
def synthetic_target_extra_dependencies(self, target):
deps = OrderedSet()
if target.imported_jars:
# We need to add in the proto imports jars.
jars_address = Address(os.path.relpath(self.codegen_workdir(target), get_buildroot()),
target.id + '-rjars')
jars_target = self.context.add_new_target(jars_address,
JarLibrary,
jars=target.imported_jars,
derived_from=target)
deps.update([jars_target])
deps.update(self.javadeps)
return deps
def is_gentarget(self, target):
return isinstance(target, JavaProtobufLibrary)
@classmethod
def supported_strategy_types(cls):
return [cls.IsolatedCodegenStrategy, cls.ProtobufGlobalCodegenStrategy]
def sources_generated_by_target(self, target):
genfiles = []
for source in target.sources_relative_to_source_root():
path = os.path.join(target.target_base, source)
genfiles.extend(self.calculate_genfiles(path, source))
return genfiles
def execute_codegen(self, targets):
if not targets:
return
sources_by_base = self._calculate_sources(targets)
if self.codegen_strategy.name() == 'isolated':
sources = OrderedSet()
for target in targets:
sources.update(target.sources_relative_to_buildroot())
else:
sources = OrderedSet(itertools.chain.from_iterable(sources_by_base.values()))
if not self.validate_sources_present(sources, targets):
return
bases = OrderedSet(sources_by_base.keys())
bases.update(self._proto_path_imports(targets))
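    # The --proto_path entries passed to protoc below therefore cover both the source
    # roots of the targets and the directories of any unpacked imported proto jars.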
check_duplicate_conflicting_protos(self, sources_by_base, sources, self.context.log)
for target in targets:
# NB(gm): If the strategy is set to 'isolated', then 'targets' should contain only a single
# element, which means this simply sets the output directory depending on that element.
# If the strategy is set to 'global', the target passed in as a parameter here will be
# completely arbitrary, but that's OK because the codegen_workdir function completely
# ignores the target parameter when using a global strategy.
output_dir = self.codegen_workdir(target)
break
gen_flag = '--java_out'
safe_mkdir(output_dir)
gen = '{0}={1}'.format(gen_flag, output_dir)
args = [self.protobuf_binary, gen]
if self.plugins:
for plugin in self.plugins:
# TODO(Eric Ayers) Is it a good assumption that the generated source output dir is
# acceptable for all plugins?
args.append("--{0}_out={1}".format(plugin, output_dir))
for base in bases:
args.append('--proto_path={0}'.format(base))
args.extend(sources)
# Tack on extra path entries. These can be used to find protoc plugins
protoc_environ = os.environ.copy()
if self._extra_paths:
protoc_environ['PATH'] = os.pathsep.join(self._extra_paths
+ protoc_environ['PATH'].split(os.pathsep))
self.context.log.debug('Executing: {0}'.format('\\\n '.join(args)))
process = subprocess.Popen(args, env=protoc_environ)
result = process.wait()
if result != 0:
raise TaskError('{0} ... exited non-zero ({1})'.format(self.protobuf_binary, result))
def _calculate_sources(self, targets):
gentargets = OrderedSet()
def add_to_gentargets(target):
if self.is_gentarget(target):
gentargets.add(target)
self.context.build_graph.walk_transitive_dependency_graph(
[target.address for target in targets],
add_to_gentargets,
postorder=True)
sources_by_base = OrderedDict()
# TODO(Eric Ayers) Extract this logic for general use? When using unpacked_jars it is needed
# to get the correct source root for paths outside the current BUILD tree.
for target in gentargets:
for source in target.sources_relative_to_buildroot():
base = SourceRoot.find_by_path(source)
if not base:
base, _ = target.target_base, target.sources_relative_to_buildroot()
self.context.log.debug('Could not find source root for {source}.'
' Missing call to SourceRoot.register()? Fell back to {base}.'
.format(source=source, base=base))
if base not in sources_by_base:
sources_by_base[base] = OrderedSet()
sources_by_base[base].add(source)
return sources_by_base
def _jars_to_directories(self, target):
"""Extracts and maps jars to directories containing their contents.
:returns: a set of filepaths to directories containing the contents of jar.
"""
files = set()
jar_import_products = self.context.products.get_data(JarImportProducts)
imports = jar_import_products.imports(target)
for coordinate, jar in imports:
files.add(self._extract_jar(coordinate, jar))
return files
def _extract_jar(self, coordinate, jar_path):
"""Extracts the jar to a subfolder of workdir/extracted and returns the path to it."""
with open(jar_path, 'rb') as f:
outdir = os.path.join(self.workdir, 'extracted', sha1(f.read()).hexdigest())
if not os.path.exists(outdir):
ZIP.extract(jar_path, outdir)
self.context.log.debug('Extracting jar {jar} at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
else:
self.context.log.debug('Jar {jar} already extracted at {jar_path}.'
.format(jar=coordinate, jar_path=jar_path))
return outdir
def _proto_path_imports(self, proto_targets):
for target in proto_targets:
for path in self._jars_to_directories(target):
yield os.path.relpath(path, get_buildroot())
def calculate_genfiles(self, path, source):
protobuf_parse = ProtobufParse(path, source)
protobuf_parse.parse()
return OrderedSet(self.calculate_java_genfiles(protobuf_parse))
def calculate_java_genfiles(self, protobuf_parse):
basepath = protobuf_parse.package.replace('.', os.path.sep)
classnames = {protobuf_parse.outer_class_name}
if protobuf_parse.multiple_files:
classnames |= protobuf_parse.enums | protobuf_parse.messages | protobuf_parse.services | \
set(['{name}OrBuilder'.format(name=m) for m in protobuf_parse.messages])
for classname in classnames:
yield os.path.join(basepath, '{0}.java'.format(classname))
class ProtobufGlobalCodegenStrategy(SimpleCodegenTask.GlobalCodegenStrategy):
def find_sources(self, target):
return self._task.sources_generated_by_target(target)
def _same_contents(a, b):
"""Perform a comparison of the two files"""
with open(a, 'rb') as fp_a, open(b, 'rb') as fp_b:
return fp_a.read() == fp_b.read()
def check_duplicate_conflicting_protos(task, sources_by_base, sources, log):
"""Checks if proto files are duplicate or conflicting.
There are sometimes two files with the same name on the .proto path. This causes the protobuf
compiler to stop with an error. Some repos have legitimate cases for this, and so this task
decides to just choose one to keep the entire build from failing. Sometimes, they are identical
copies. That is harmless, but if there are two files with the same name with different contents,
that is ambiguous and we want to complain loudly.
:param task: provides an implementation of the method calculate_genfiles()
:param dict sources_by_base: mapping of base to path
:param set|OrderedSet sources: set of sources
:param Context.Log log: writes error messages to the console for conflicts
"""
sources_by_genfile = {}
for base in sources_by_base.keys(): # Need to iterate over /original/ bases.
for path in sources_by_base[base]:
      if path not in sources:
continue # Check to make sure we haven't already removed it.
source = path[len(base):]
genfiles = task.calculate_genfiles(path, source)
for genfile in genfiles:
if genfile in sources_by_genfile:
# Possible conflict!
prev = sources_by_genfile[genfile]
          if prev not in sources:
# Must have been culled by an earlier pass.
continue
if not _same_contents(path, prev):
log.error('Proto conflict detected (.proto files are different):\n'
'1: {prev}\n2: {curr}'.format(prev=prev, curr=path))
else:
log.warn('Proto duplication detected (.proto files are identical):\n'
'1: {prev}\n2: {curr}'.format(prev=prev, curr=path))
log.warn(' Arbitrarily favoring proto 1.')
if path in sources:
sources.remove(path) # Favor the first version.
continue
sources_by_genfile[genfile] = path
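# Illustrative note (added, not part of the original task): calculate_java_genfiles()
# maps a parsed .proto file to the Java sources protoc will emit. For example, a file
# with `package com.example.foo` and outer class name `Bar` yields
# 'com/example/foo/Bar.java'; with `option java_multiple_files = true` it additionally
# yields one file per message, enum and service, plus a '<Message>OrBuilder.java'
# companion for each message.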
| kslundberg/pants | src/python/pants/backend/codegen/tasks/protobuf_gen.py | Python | apache-2.0 | 13,284 | 0.008883 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
configdrive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
help='Config drive format. One of iso9660 (default) or vfat'),
cfg.StrOpt('config_drive_tempdir',
default=tempfile.tempdir,
help=('Where to put temporary files associated with '
'config drive creation')),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help='Name and optionally path of the tool used for '
'ISO image creation')
]
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
# TODO(mikal): I don't think I can use utils.tempdir here, because
# I need to have the directory last longer than the scope of this
# method call
self.tempdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_gen_')
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, path, data):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'w') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, value) in instance_md.metadata_for_config_drive():
self._add_file(path, value)
LOG.debug(_('Added %(filepath)s to config drive'),
{'filepath': path})
def _make_iso9660(self, path):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
self.tempdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
mounted = False
try:
mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
_out, err = utils.trycmd('mount', '-o',
'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path, mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
# NOTE(mikal): I can't just use shutils.copytree here, because the
# destination directory already exists. This is annoying.
for ent in os.listdir(self.tempdir):
shutil.copytree(os.path.join(self.tempdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
shutil.rmtree(mountdir)
def make_drive(self, path):
"""Make the config drive.
:param path: the path to place the config drive image at
:raises ProcessExecuteError if a helper process has failed.
"""
if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path)
elif CONF.config_drive_format == 'vfat':
self._make_vfat(path)
else:
raise exception.ConfigDriveUnknownFormat(
format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
fileutils.delete_if_exists(self.imagefile)
try:
shutil.rmtree(self.tempdir)
except OSError as e:
LOG.error(_('Could not remove tmpdir: %s'), str(e))
def required_by(instance):
return instance.get('config_drive') or CONF.force_config_drive
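# Illustrative usage sketch (hypothetical path; `instance_md` is assumed to expose
# metadata_for_config_drive(), e.g. an InstanceMetadata object):
#
#   with ConfigDriveBuilder(instance_md=instance_md) as cdb:
#       cdb.make_drive('/var/lib/nova/instances/<uuid>/disk.config')
#
# On a clean exit the temporary build directory is removed by cleanup(); if an
# exception escapes the with-block, __exit__ intentionally skips cleanup.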
| ntt-sic/nova | nova/virt/configdrive.py | Python | apache-2.0 | 6,518 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnStringInfo(Model):
"""Database connection string information.
:param name: Name of connection string.
:type name: str
:param connection_string: Connection string value.
:type connection_string: str
:param type: Type of database. Possible values include: 'MySql',
'SQLServer', 'SQLAzure', 'Custom', 'NotificationHub', 'ServiceBus',
'EventHub', 'ApiHub', 'DocDb', 'RedisCache'
:type type: str or :class:`ConnectionStringType
<azure.mgmt.web.models.ConnectionStringType>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'type': {'key': 'type', 'type': 'ConnectionStringType'},
}
def __init__(self, name=None, connection_string=None, type=None):
self.name = name
self.connection_string = connection_string
self.type = type
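if __name__ == '__main__':
    # Illustrative sketch only; the values below are hypothetical.
    conn = ConnStringInfo(name='MyDb',
                          connection_string='Server=tcp:example.database.windows.net;',
                          type='SQLAzure')
    print(conn.name, conn.type)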
| rjschwei/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/conn_string_info.py | Python | mit | 1,434 | 0 |
'''
Created on Feb. 23, 2019
Utility functions to extract data from xarray Dataset or DataArray classes.
@author: Andre R. Erler, GPL v3
'''
from warnings import warn
from datetime import datetime
import os
import numpy as np
import xarray as xr
import netCDF4 as nc
from dask.diagnostics import ProgressBar
# internal imports
from geospatial.netcdf_tools import getNCAtts, geospatial_netcdf_version, zlib_default # this import should be fine
## an important option I am relying on!
xr.set_options(keep_attrs=True)
# names of valid geographic/projected coordinates
default_x_coords = dict(geo=('lon','long','longitude',), proj=('x','easting','west_east') )
default_y_coords = dict(geo=('lat','latitude',), proj=('y','northing','south_north'))
default_lon_coords = default_x_coords['geo']; default_lat_coords = default_y_coords['geo']
## helper functions
def getAtts(xvar, lraise=True):
''' return dictionary of attributed from netCDF4 or xarray '''
if isinstance(xvar,(xr.DataArray,xr.Variable,xr.Dataset)):
atts = xvar.attrs.copy()
elif isinstance(xvar,(nc.Variable,nc.Dataset)):
atts = getNCAtts(xvar)
elif lraise:
raise TypeError(xvar)
return atts
## functions to interface with rasterio
def getGeoDims(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify geographic/projected dimensions by name '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
xlon,ylat = None,None # return None, if nothing is found
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dimensions:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dimensions:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif lraise: # optionally check input
raise TypeError("Can only infer coordinates from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
pass # return None,None
return xlon,ylat
def getGeoCoords(xvar, x_coords=None, y_coords=None, lraise=True, lvars=True):
''' helper function to extract geographic/projected coordinates from xarray'''
# find dim names
xlon_dim,ylat_dim = getGeoDims(xvar, x_coords=x_coords, y_coords=y_coords, lraise=lraise)
# find coordinates
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
if xlon_dim in xvar.coords:
xlon = xvar.coords[xlon_dim] if lvars else xlon_dim
else: xlon = None
if ylat_dim in xvar.coords:
ylat = xvar.coords[ylat_dim] if lvars else ylat_dim
else: ylat = None
elif isinstance(xvar,nc.Variable) and lraise:
raise TypeError("Cannot infer coordinates from netCDF4 Variable - only Dataset!")
elif isinstance(xvar,nc.Dataset):
if xlon_dim in xvar.variables:
xlon = xvar.variables[xlon_dim] if lvars else xlon_dim
else: xlon = None
if ylat_dim in xvar.variables:
ylat = xvar.variables[ylat_dim] if lvars else ylat_dim
else: ylat = None
# optionally raise error if no coordinates are found, otherwise just return None
if lraise and (xlon is None or ylat is None):
        raise ValueError("No valid pair of geographic coordinates found:\n {}".format(xvar.dims))
# return a valid pair of geographic or projected coordinate axis
return xlon,ylat
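# Usage sketch (illustrative; assumes a dataset with 'lat'/'lon' dimensions):
#   xlon, ylat = getGeoCoords(xds)                          # coordinate variables (DataArrays)
#   xlon_name, ylat_name = getGeoCoords(xds, lvars=False)   # just the dimension names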
def isGeoVar(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify variables that have geospatial coordinates (geographic or
projected), based on xarray or netCDF4 dimension names '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
xlon,ylat = False,False
for name in dims:
if name.lower() in x_coords[coord_type]:
xlon = True; break
for name in dims:
if name.lower() in y_coords[coord_type]:
ylat = True; break
if xlon and ylat: break
# if it has a valid pair of geographic or projected coordinate axis
return ( xlon and ylat )
def isGeoCRS(xvar, lat_coords=None, lon_coords=None, lraise=True):
''' helper function to determine if we have a simple geographic lat/lon CRS (based on xarray dimension names) '''
lat,lon = False,False
if lon_coords is None: lon_coords = default_x_coords['geo']
if lat_coords is None: lat_coords = default_y_coords['geo']
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
        raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# check dimension names
for name in dims:
if name.lower() in lon_coords:
lon = True; break
for name in dims:
if name.lower() in lat_coords:
lat = True; break
# it is a geographic coordinate system if both, lat & lon are present
return ( lat and lon )
def getTransform(xvar=None, x=None, y=None, lcheck=True):
''' generate an affine transformation from xarray coordinate axes '''
from rasterio.transform import Affine # to generate Affine transform
if isinstance(xvar,(xr.DataArray,xr.Dataset,nc.Dataset)):
x,y = getGeoCoords(xvar, lraise=True)
elif xvar is None and isinstance(x,(xr.DataArray,nc.Variable)) and isinstance(y,(xr.DataArray,nc.Variable)):
pass # x and y axes are supplied directly
elif xvar:
raise TypeError('Can only infer GeoTransform from xarray Dataset or DataArray or netCDF4 Dataset\n - not from {}.'.format(xvar))
# check X-axis
if isinstance(x,xr.DataArray): x = x.data
elif isinstance(x,nc.Variable): x = x[:]
if not isinstance(x,np.ndarray):
raise TypeError(x)
diff_x = np.diff(x); dx = diff_x.min()
if lcheck and not np.isclose(dx, diff_x.max(), rtol=1.e-2):
raise ValueError("X-axis is not regular: {} - {}".format(dx, diff_x.max()))
# check Y-axis
if isinstance(y,xr.DataArray): y = y.data
elif isinstance(y,nc.Variable): y = y[:]
if not isinstance(y,np.ndarray):
raise TypeError(y)
diff_y = np.diff(y); dy = diff_y.min()
if lcheck and not np.isclose(dy, diff_y.max(), rtol=1.e-2):
raise ValueError("Y-axis is not regular. {} - {}".format(dy, diff_y.max()))
# generate transform
return Affine.from_gdal(x[0]-dx/2.,dx,0.,y[0]-dy/2.,0.,dy), (len(x),len(y))
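# Illustrative example (hypothetical regular 0.25-degree grid whose cell centers start
# at lon=-119.875, lat=30.125): getTransform returns
#   (Affine.from_gdal(-120.0, 0.25, 0.0, 30.0, 0.0, 0.25), (nlon, nlat))
# i.e. the GDAL-style origin refers to the outer cell edge, not the first cell center.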
def readCFCRS(xds, grid_mapping=None, lraise=True, lproj4=False):
''' function to generate CRS from CF-Convention grid mapping variable; only works with Datasets '''
# read CF convention string
if not isinstance(xds,(nc.Dataset,xr.Dataset)):
        raise TypeError("Only xarray or netCDF4 Datasets are supported.")
atts = getAtts(xds) # works for xarray or netCDF4
if 'Conventions' in atts:
cf_str = atts['Conventions']
if cf_str[:3] != 'CF-' or float(cf_str[3:]) < 1:
            raise ValueError("Only CF convention version 1 or later is supported; not '{}'.".format(cf_str))
elif lraise:
raise ValueError("No CF convention attribute found; this Dataset may not adhere to CF conventions.")
else:
return None # return without CRS
# find grid mapping variable
if grid_mapping:
if grid_mapping in xds.variables:
grid_type = grid_mapping
grid_atts = getAtts(xds.variables[grid_mapping])
else:
raise ValueError("Grid mapping '{}' not found in dataset.".format(grid_mapping))
else:
grid_type = None
grid_varlist = ['Lambert_Conformal']
for grid_var in grid_varlist:
if grid_var in xds.variables:
if grid_type is None:
grid_type = grid_var
grid_atts = getAtts(xds.variables[grid_var])
else:
raise ValueError("Multiple grid_mapping variables detected:",grid_type,grid_var)
if grid_type is None:
if lraise:
raise NotImplementedError("No supported grid_mapping variable detected:\n",grid_varlist)
else:
return None # return without CRS
elif grid_type == 'Lambert_Conformal':
assert grid_atts['grid_mapping_name'] == "lambert_conformal_conic", grid_atts
proj4 = ('+proj=lcc +lat_1={lat_1} +lat_2={lat_1} '.format(lat_1=grid_atts['standard_parallel'])
+ '+lat_0={lat_0} +lon_0={lon_0} '.format(lat_0=grid_atts['latitude_of_projection_origin'],
lon_0=grid_atts['longitude_of_central_meridian'])
+ '+x_0=0 +y_0=0 +a=6371229 +b=6371229 +units=m +no_defs' )
else:
raise NotImplementedError("The grid_mapping '{}' is currently not implemented/supported.".format(grid_type))
import rasterio as rio
# return either string or CRS object
if lproj4: crs = proj4
else: crs = rio.crs.CRS.from_string(proj4) # initialize from Proj4 string
return crs
def getCRS(xvar, lraise=True):
''' infer projection from a xarray Dataset or DataArray; this function assumes that either a proj4 string or
an EPSG designation is stored in the attributes of the dataset/variable. '''
from geospatial.rasterio_tools import genCRS # used to generate CRS object
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
atts = xvar.attrs
elif isinstance(xvar,(nc.Variable,nc.Dataset)):
atts = getAtts(xvar)
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # no projection
crs = None
# check CF convention
if isinstance(xvar,(xr.Dataset,nc.Dataset)):
crs = readCFCRS(xvar, lraise=False, lproj4=False)
# search for EPSG number
if crs is None:
for key,value in atts.items():
if key.upper() == 'EPSG' and value != 'n/a': crs = genCRS(value); break
# search for Proj4 string
if crs is None:
for key,value in atts.items():
if key.lower() == 'proj4' and value != 'n/a': crs = genCRS(value); break
# check for simple geographic lat/lon system
if crs is None:
if isGeoCRS(xvar, lraise=False): # error will be raised below (if desired)
crs = genCRS() # no arguments for default lat/lon
# return values
if lraise and crs is None:
raise ValueError("No projection information found in attributes.")
# return a GDAL/rasterio CRS instance
return crs
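# Usage sketch (illustrative): the CRS is inferred, in order of precedence, from
#   1) a CF-convention grid-mapping variable (requires a 'Conventions' attribute),
#   2) an 'EPSG' attribute, e.g. xds.attrs['EPSG'] = 4326, or
#   3) a 'proj4' attribute, e.g. xds.attrs['proj4'] = '+proj=longlat +ellps=WGS84 +datum=WGS84',
# and otherwise falls back to geographic lat/lon if the dimension names allow it.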
def inferGeoInfo(xvar, varname=None, crs=None, transform=None, size=None, lraise=True, lcheck=True):
    ''' infer geo-reference information from xarray DataArray or Dataset and netCDF4 Dataset '''
# CRS
_crs = getCRS(xvar, lraise=lraise)
if crs is None: crs = _crs
elif crs != _crs:
from geospatial.rasterio_tools import genCRS # used to generate CRS object
crs = genCRS(crs)
if crs != _crs:
raise ValueError("Prescribed CRS and inferred CRS are incompatible:\n{}\n{}".format(crs,_crs))
crs = _crs # for some reason EPSG ints also pass the equality test...
# geotransform & grid size
xlon,ylat = getGeoCoords(xvar, lraise=True, lvars=False)
_transform, _size = getTransform(xvar, lcheck=lraise)
if transform is None: transform = _transform
elif not transform is _transform:
raise ValueError("Prescribed and inferred Geotransform are incompatible:\n{}\n{}".format(transform,_transform))
if size is None: size = _size
elif not size is _size:
raise ValueError("Prescribed and inferred grid sizes are incompatible:\n{}\n{}".format(size,_size))
# do some checks
if lcheck:
if crs.is_projected and isGeoCRS(xvar):
raise ValueError(crs,xvar) # simple check
if isinstance(xvar,xr.Dataset) and varname:
xvar = xvar[varname]
shape = None; dims = None
if isinstance(xvar,xr.DataArray):
shape = xvar.data.shape; dims = xvar.dims
if xvar.attrs.get('dim_order',None) is False:
raise NotImplementedError("The x/lon and y/lat axes of this xarray have to be swapped:\n {}".format(xvar))
elif isinstance(xvar,nc.Dataset) and varname:
xvar = xvar.variables[varname]
shape = xvar.shape; dims = xvar.dimensions
if shape:
if shape[-2:] != (size[1],size[0]):
raise ValueError(xvar)
if dims:
if dims[-2] != ylat or dims[-1] != xlon:
raise ValueError(xvar)
# return verified georef info
return crs, transform, size
## functions that modify a dataset
def _inferVarmap(varmap=None, varatts=None, linvert=False):
''' simple function that infers a varmap using varatts, if necessary '''
if varmap is None:
varmap = dict()
if varatts is not None:
for varname,atts in varatts.items():
if 'name' in atts: varmap[varname] = atts['name']
elif not isinstance(varmap,dict):
raise TypeError(varmap)
if linvert:
varmap = {value:key for key,value in varmap.items()}
# return varmap (guaranteed to be a dict)
return varmap
def updateVariableAttrs(xds, varatts=None, varmap=None, varlist=None, **kwargs):
''' a helper function to update variable attributes, rename variables, and apply scaling factors '''
# update varatts
if varatts is None:
varatts = dict()
elif isinstance(varatts,dict):
varatts = varatts.copy()
else:
raise TypeError(varatts)
varatts.update(kwargs) # add kwargs
# generate varmap
varmap = _inferVarmap(varmap=varmap, varatts=varatts, linvert=False)
# drop variables
if varlist is not None:
drop_list = []
for varname in xds.data_vars.keys():
name = varmap.get(varname,varname)
if name not in varlist: drop_list.append(varname)
xds = xds.drop_vars(drop_list)
# update attributes (using old names)
date_str = datetime.today().strftime('%Y%m%d')
for varname,atts in varatts.items():
if varname in xds.variables:
if varname == 'time':
warn("The 'time' coordinate is handled automatically by xarray using numpy datetime64; "
+ "changing attributes can break this functionality when the dataset is saved to file. ")
var = xds.variables[varname]
attrs = var.attrs.copy()
if 'updated' not in attrs:
if 'units' in atts:
if 'units' not in attrs or attrs['units'] != atts['units']:
if 'scalefactor' in atts and atts['scalefactor'] != 1:
var *= atts['scalefactor'] # this should execute lazily...
if 'offset' in atts and atts['offset'] != 0:
var += atts['offset'] # this should execute lazily...
# transfer attributes
for key,value in atts.items():
if key not in ('scalefactor','offset'):
if key in attrs: attrs['old_'+key] = attrs[key]
attrs[key] = value
attrs['updated'] = date_str # indicate we have updated with date string
var.attrs = attrs
# actually rename (but only vars that are present and need to be renamed...)
xds = xds.rename({key:val for key,val in varmap.items() if key in xds.variables and key != val})
xds = xds.rename_dims({key:val for key,val in varmap.items() if key in xds.dims and key != val})
xds.attrs['updated'] = date_str
return xds
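# Usage sketch (illustrative; names and factors are made up):
#   varatts = dict(precip=dict(name='precipitation', units='mm/day', scalefactor=86400.))
#   xds = updateVariableAttrs(xds, varatts=varatts)
# renames 'precip' to 'precipitation', lazily rescales the values where the units
# change, and stamps the variable attributes with the update date.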
def addGeoReference(xds, proj4_string=None, x_coords=None, y_coords=None, lcreate=False, xlon_coord=None, ylat_coord=None):
''' helper function to add GDAL/rasterio-style georeferencing information to an xarray dataset;
        note that this only refers to attributes, not axes, but also includes variables '''
xlon,ylat = getGeoCoords(xds, x_coords=x_coords, y_coords=y_coords, lvars=lcreate, lraise=not lcreate)
if lcreate:
if (xlon is None and ylat is None):
assert xlon_coord is not None and ylat_coord is not None
# need to find names again...
xlon_dim,ylat_dim = getGeoDims(xds, x_coords=x_coords, y_coords=y_coords, lraise=True)
# create new xlon/ylat coordinates, based on coordinates passed down
coords = {xlon_dim:xlon_coord, ylat_dim:ylat_coord}
xds = xds.assign_coords(**coords)
elif (xlon is not None) and (ylat is not None):
xlon = xlon.name; ylat = ylat.name # from here on only need names
else:
        raise ValueError("No valid pair of geographic coordinates found:\n {}".format(xds.dims))
xds.attrs['xlon'] = xlon
xds.attrs['ylat'] = ylat
if proj4_string is None:
if isGeoVar(xds, x_coords, y_coords, lraise=True):
proj4_string = '+proj=longlat +lon_0=0 +lat_0=0 +ellps=WGS84 +datum=WGS84' # default geographic, also EPSG 4326
else:
raise ValueError("Cannot infer projection - need to provide proj4 string!")
elif isinstance(proj4_string,str):
xds.attrs['proj4'] = proj4_string
else:
raise TypeError("Cannot infer projection - need to provide proj4 string!")
for xvar in list(xds.data_vars.values()):
if isGeoVar(xvar):
xvar.attrs['proj4'] = proj4_string
xvar.attrs['xlon'] = xlon
xvar.attrs['ylat'] = ylat
xvar.attrs['dim_order'] = int( xvar.dims[-2:] == (ylat, xlon) )
# N.B.: the NetCDF-4 backend does not like Python bools
return xds
def rechunkTo2Dslices(xvar, **other_chunks):
''' convenience function to rechunk an xarray so that the horizontal dimensions are contiguous (not chunked)
N.B.: rechunking in a way that does not simply combine existing chunks seems to cause all chunks/data
to be loaded into memory (we want to avoid that); also, chunks are defined by their size, not by
their number, i.e. the definition for one large 2D chunk is (len(y),len(x)) and *not* (1,1) '''
if not isinstance(xvar,(xr.DataArray,xr.Dataset)):
raise TypeError(xvar)
# old chunk sizes
if 'chunksizes' in xvar.encoding:
chunks = {dim:cs for dim,cs in zip(xvar.sizes,xvar.encoding['chunksizes'])}
else: chunks = dict()
chunks.update(other_chunks)
# find horizontal/map dimensions
xlon = xvar.attrs['xlon']; ylat = xvar.attrs['ylat']
chunks[xlon] = xvar.sizes[xlon]; chunks[ylat] = xvar.sizes[ylat]
return xvar.chunk(chunks=chunks) # rechunk x/lon and y/lat
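# Usage sketch (illustrative): rechunkTo2Dslices(xvar, time=1) produces chunks that
# each cover the full x/lon-y/lat plane but only a single time step.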
def autoChunkXArray(xds, chunks=None, dims=None, **kwargs):
''' apply auto-chunking to an xarray object, like a Dataset or DataArray (chunks kw arg can override) '''
from geospatial.netcdf_tools import autoChunk
if dims is None:
xlon,ylat = getGeoCoords(xds)
dims = ('time', ylat.name, xlon.name)
dims = [dim for dim in dims if dim in xds.sizes]
shape = [xds.sizes[dim] for dim in dims]
cks = autoChunk(shape, **kwargs)
cks = {dim:c for dim,c in zip(dims,cks)}
if chunks: cks.update(chunks) # manually/explicitly specified chunks override
return xds.chunk(chunks=cks)
def getCommonChunks(xds, method='min'):
''' get smallest/largest/mean common denominator for chunks in dataset '''
chunk_list = dict()
# collect chunks
if isinstance(xds,xr.Dataset):
for xvar in xds.data_vars.values():
if 'chunksizes' in xvar.encoding:
for dim,cks in zip(xvar.dims,xvar.encoding['chunksizes']):
if dim in chunk_list: chunk_list[dim].append(cks)
else: chunk_list[dim] = [cks]
elif isinstance(xds,nc.Dataset):
for ncvar in xds.variables.values():
if ncvar.chunking():
for dim,cks in zip(ncvar.dimensions,ncvar.chunking()):
if dim in chunk_list: chunk_list[dim].append(cks)
else: chunk_list[dim] = [cks]
else:
raise TypeError(xds)
# reduce chunks
chunks = dict()
for dim,cks in list(chunk_list.items()):
chunks[dim] = getattr(np,method)(cks)
# return dict with chunksize for each dimension
return chunks
def computeNormals(xds, aggregation='month', time_stamp='time_stamp', lresample=False, time_name='time'):
''' function invoking lazy groupby() call and replacing the resulting time axis with a new time axis '''
lts = time_stamp and time_stamp in xds
# time stamp variable for meta data
if lts:
import pandas as pd
ts_var = xds[time_stamp].load()
period = (pd.to_datetime(ts_var.data[0]).year, (pd.to_datetime(ts_var.data[-1])+pd.Timedelta(31, unit='D')).year)
prdstr = '{:04d}-{:04d}'.format(*period)
# resample data to aggregation interval
if lresample:
if aggregation.lower() == 'month': rsi = 'MS'
else:
raise NotImplementedError(aggregation)
xds = xds.resample(time=rsi,skipna=True,).mean()
# N.B.: I am not sure to which extent resampling is necessary
# compute monthly normals
cds = xds.groupby('time.'+aggregation).mean('time')
assert len(cds['month']) == 12, cds
# convert time axis
cds = cds.rename({aggregation:time_name}) # the new time axis is named 'month'
tm = cds.coords[time_name]
tm.attrs['name'] = time_name
tm.attrs['long_name'] = 'Calendar '+aggregation.title()
tm.attrs['units'] = aggregation
# add period info for quick identification
if lts:
tm.attrs['start_date'] = str(ts_var.data[0])
tm.attrs['end_date'] = str(ts_var.data[-1])
tm.attrs['period'] = prdstr
# add attributes to dataset
cds.attrs['start_date'] = str(ts_var.data[0])
cds.attrs['end_date'] = str(ts_var.data[-1])
cds.attrs['period'] = prdstr
# return formatted climatology dataset
return cds
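# Usage sketch (illustrative, for a monthly time series with a 'time_stamp' variable):
#   clim = computeNormals(xds, aggregation='month', time_stamp='time_stamp')
# yields a 12-step climatology whose new 'time' axis is the calendar month and whose
# attributes record the averaging period (e.g. '1981-2010').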
## function to load a dataset
def _multichunkPresets(multi_chunks):
''' translate string identifiers into valid multichunk dicts, based on presets '''
if isinstance(multi_chunks,str):
if multi_chunks.lower() == 'regular': # 256 MB
multi_chunks = {dim:16 for dim in ('lat','lon','latitude','longitude','x','y',)}
multi_chunks['time'] = 8
elif multi_chunks.lower() == 'small': # 64 MB
multi_chunks = {dim:8 for dim in ('lat','lon','latitude','longitude','x','y','time')}
elif multi_chunks.lower() == 'time': # 184 MB
multi_chunks = {dim:4 for dim in ('lat','lon','latitude','longitude','x','y')}
multi_chunks['time'] = 92 # for reductions along time, we can use a higher value (8 days * 92 ~ 2 years)
else:
raise NotImplementedError(multi_chunks)
elif ( multi_chunks is not None ) and not isinstance(multi_chunks, dict):
raise TypeError(multi_chunks)
# return valid multi_chunks (dict)
return multi_chunks
def loadXArray(varname=None, varlist=None, folder=None, varatts=None, filename_pattern=None, filelist=None, default_varlist=None,
varmap=None, mask_and_scale=True, grid=None, lgeoref=True, geoargs=None, chunks=True, multi_chunks=None,
ldropAtts=False, lskip=False, filetypes=None,
compat='override', join='inner', fill_value=np.NaN, combine_attrs='no_conflicts', **kwargs):
''' function to open a dataset in one of two modes: 1) variables are stored in separate files, but in the same folder (this mainly
applies to high-resolution, high-frequency (daily) observations, e.g. SnoDAS) or 2) multiple variables are stored in different
filetypes and each is opened and then merged (usually model output); datasets are opened using xarray '''
# load variables
if filetypes is None:
lopt1 = True
# option 1: one variable per file
if varname and varlist:
raise ValueError(varname,varlist)
elif varname:
varlist = [varname] # load a single variable
elif varlist is None:
varlist = default_varlist
# add variable filetypes
# if there is a (implied) varmap, we need to apply that to variable-filetypes
ravmap = _inferVarmap(varmap=varmap, varatts=varatts, linvert=True)
filetypes = [ravmap.get(varname,varname) for varname in varlist]
# now also transform varatts and varmap
varmap_single = None if varmap is None else varmap.copy()
varatts_single = None if varatts is None else varatts.copy()
varatts = {filetype:varatts_single for filetype in filetypes}
varmap = {filetype:varmap_single for filetype in filetypes}
else:
lopt1 = False # just to remember when using option 2
## now use option 2: multiple variables per file
# expand varmap to filetypes
if varmap is None:
varmap = {filetype:None for filetype in filetypes} # no varmap
elif isinstance(varmap,dict):
filetypes_set = set(filetypes); varmap_set = set(varmap.keys())
if varmap_set.issubset(filetypes_set) or filetypes_set.issubset(varmap_set): # expand to filetypes using None
for filetype in filetypes:
if filetype in varmap_set:
if not isinstance(varmap[filetype],dict) and varmap[filetype] is not None:
raise TypeError(filetype,varmap[filetype])
else:
varmap[filetype] = None
elif any([key in filetypes for key in varmap.keys()]):
raise ValueError("It is unclear if varmap is a dict containing varmap dicts for each filetype or just one varmap dict.",varmap.keys())
if all([key in filetypes for key in varmap.keys()]): # one varmap per filetype
if not all([isinstance(value,dict) or value is None for value in varmap.values()]):
raise TypeError(varmap)
elif any([key in filetypes for key in varmap.keys()]):
raise ValueError(varmap.keys())
else:
varmap = {filetype:varmap for filetype in filetypes} # same varmap for all
else:
raise TypeError(varmap)
# expand varatts to filetypes
if varatts is None:
varatts = {filetype:None for filetype in filetypes} # no varatts
elif isinstance(varatts,dict):
filetypes_set = set(filetypes); varatts_set = set(varatts.keys())
if varatts_set.issubset(filetypes_set) or filetypes_set.issubset(varatts_set): # expand to filetypes using None
for filetype in filetypes:
if filetype in varatts_set:
if not isinstance(varatts[filetype],dict) and varatts[filetype] is not None:
raise TypeError(filetype,varatts[filetype])
else:
varatts[filetype] = None
elif any([key in filetypes for key in varatts.keys()]):
raise ValueError("It is unclear if varatts is a dict containing varatts dicts for each filetype or just one varatts dict.",varatts.keys())
else:
varatts = {filetype:varatts for filetype in filetypes} # same varatts for all
else:
raise TypeError(varatts)
# expand filename/pattern to filetypes
if filename_pattern and not filelist:
filelist = filename_pattern
if isinstance(filelist, dict):
if len(filelist) != len(filetypes):
raise ValueError(filelist)
elif isinstance(filelist, str):
filelist = {filetype:filelist for filetype in filetypes}
else:
raise ValueError(filelist)
# just some default settings that will produce chunks larger than 100 MB on 8*64*64 float chunks
multi_chunks = _multichunkPresets(multi_chunks)
orig_chunks = chunks.copy() if isinstance(chunks, dict) else chunks # deep copy or True or None
# construct dataset
ds_list = []
for filetype in filetypes:
filename = filelist[filetype].lower().format(var=filetype.lower(), type=filetype.lower()) # all lower case
filepath = '{}/{}'.format(folder,filename)
chunks = orig_chunks # reset
# apply varmap in reverse to varlist
if os.path.exists(filepath):
# load dataset
if chunks is True:
# infer chunks from NetCDF-4 file (not sure why xarray doesn't do this automatically...)
with nc.Dataset(filepath, 'r') as ncds : # open in read-only using NetCDF4 module
chunks = dict()
for varname,ncvar in ncds.variables.items():
for dim,size in zip(ncvar.dimensions,ncvar.chunking()):
                            if dim in chunks and chunks[dim] != size:
                                print("WARNING: Chunks for dimension '{}' not coherent in file:\n '{}'".format(dim, filepath))
                            chunks[dim] = size # this just keeps the last value... not necessarily always the same
if multi_chunks: # enlarge chunks with multiplier
chunks = {dim:(val*multi_chunks.get(dim,1)) for dim,val in chunks.items()}
# open dataset with xarray
#print(varname,chunks)
ds = xr.open_dataset(filepath, chunks=chunks, mask_and_scale=mask_and_scale, **kwargs)
# N.B.: the use of open_mfdataset is problematic, because it does not play nicely with chunking -
# by default it loads everything as one chunk, and it only respects chunking, if chunks are
# specified explicitly at the initial load time (later chunking seems to have no effect!)
# That being said, I don't know if this is still the case...
# rename, prune/drop vars and apply attributes
if ldropAtts: ds.attrs = dict() # drop original attributes from NC file (still add georef etc.)
if varatts or varmap:
ds = updateVariableAttrs(ds, varatts=varatts[filetype], varmap=varmap[filetype],
varlist=None if lopt1 else varlist)
ds_list.append(ds)
else:
if lskip:
print("Skipping missing dataset file '{}' ('{}')".format(filename,folder))
else:
raise IOError("The dataset file '{}' was not found in folder:\n '{}'".format(filename,folder))
# merge into new dataset
if len(ds_list) == 0:
raise ValueError("Dataset is empty - aborting! Folder: \n '{}'".format(folder))
# resolve a very common conflict caused by NCO logging
if np.sum(['history' in ds.attrs for ds in ds_list]) > 1:
for ds in ds_list:
if 'history' in ds.attrs: ds.attrs['history'] = 'conflicting sources'
xds = xr.merge(ds_list, compat=compat, join=join, fill_value=fill_value, combine_attrs=combine_attrs)
# add projection info
if lgeoref:
if geoargs is not None:
# check
if 'proj4' in xds.attrs and 'proj4_string' in geoargs:
if xds.attrs['proj4'] != geoargs['proj4_string']:
raise ValueError(xds.attrs['proj4'])
# custom options
xds = addGeoReference(xds, **geoargs)
# default options
elif 'proj4' in xds.attrs:
# read projection string
xds = addGeoReference(xds, proj4_string=xds.attrs['proj4'])
elif grid:
# load griddef from pickle
from geodata.gdal import loadPickledGridDef
griddef = loadPickledGridDef(grid=grid)
xds = addGeoReference(xds, proj4_string=griddef.projection.ExportToProj4(),)
else:
# use default lat/lon, if it works...
xds = addGeoReference(xds,)
return xds
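# Usage sketch (illustrative; folder and file pattern are hypothetical):
#   xds = loadXArray(varname='precip', folder='/data/dataset/clim',
#                    filename_pattern='{var}_monthly.nc', chunks=True,
#                    multi_chunks='regular')
# opens 'precip_monthly.nc' with NetCDF-informed (and enlarged) chunks and attaches
# the geo-referencing attributes (lgeoref=True by default).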
def saveXArray(xds, filename=None, folder=None, mode='overwrite', varlist=None, chunks=None, encoding=None, laddTime=None,
time_dim='time', time_agg=None, ltmpfile=True, lcompute=True, lprogress=True, lfeedback=True, **kwargs):
''' function to save a xarray dataset to disk, with options to add/overwrite variables, choose smart encoding,
        add timestamps, use a temp file, and handle dask functionality '''
from geospatial.netcdf_tools import addTimeStamps, addNameLengthMonth
# file path and tmp file
if folder:
filepath = '{}/{}'.format(folder,filename)
# if file exists, get varlist and chunks
if not os.path.exists(filepath) or mode.lower() in ('overwrite','write'):
# create a new file
nc_mode = 'w'
if lfeedback: print("\nExporting to new NetCDF-4 file:")
else:
# if file exists and we are appending...
nc_mode = 'a' # in most cases
ltmpfile = not lcompute # only works with new file (or dummy...)
if mode.lower() in ('add_new',):
if lfeedback: print("\nAppending to existing NetCDF-4 file (only adding new variables):")
elif mode.lower() in ('add_all',):
if lfeedback: print("\nAppending to existing NetCDF-4 file (overwriting existing variables):")
else:
raise ValueError(mode)
# determine tmp file
if ltmpfile:
tmp_filepath = filepath + ( '.tmp' if lcompute else '.test' ) # use temporary file during creation
else:
tmp_filepath = filepath
if lfeedback: print(" '{}'".format(tmp_filepath))
## handle varlist and existing variables in file
# user-supplied varlist
if varlist:
drop_vars = [xvar for xvar in xds.data_vars.keys() if xvar not in varlist]
xds = xds.drop_vars(drop_vars) # returns a shallow copy with vars removed
# handle existing
if nc_mode == 'a':
# open existing file and get encoding
with nc.Dataset(filepath, 'r') as ncds:
if chunks is None: chunks = getCommonChunks(ncds)
if mode.lower() == 'add_new':
nc_varlist = [var for var in ncds.variables.keys() if var not in ncds.dimensions]
drop_vars = [xvar for xvar in xds.data_vars.keys() if xvar in nc_varlist]
xds = xds.drop_vars(drop_vars) # returns a shallow copy with vars removed
# adding all variables and overwriting existing ones, requires no changes except nc_mode = 'a'
# setup encoding
if encoding is None:
encoding = dict(); default = None
else:
default = encoding.pop('DEFAULT',None)
for varname,xvar in xds.data_vars.items():
tmp = zlib_default.copy()
cks = tuple(1 if dim == 'time' else chunks[dim] for dim in xvar.dims)
tmp['chunksizes'] = cks # depends on variable
# N.B.: use chunk size 1 for time and as before for space; monthly chunks make sense, since
# otherwise normals will be expensive to compute (access patterns are not sequential)
        if np.issubdtype(xvar.dtype, np.inexact): tmp['_FillValue'] = np.NaN # NaN fill value for float variables
if default: tmp.update(default)
if varname not in encoding:
encoding[varname] = tmp
else:
tmp.update(encoding[varname])
encoding[varname] = tmp
#print(varname,cks,rvar.encoding)
# write to NetCDF
## write to file (with progress)
# write results to file (actually just create file)
task = xds.to_netcdf(tmp_filepath, mode=nc_mode, format='NETCDF4', unlimited_dims=['time'],
engine='netcdf4', encoding=encoding, compute=False)
if lcompute:
# execute with or without progress bar
if lprogress:
with ProgressBar():
task.compute()
else:
task.compute()
## add extras
with nc.Dataset(tmp_filepath, mode='a') as ncds:
if laddTime:
time_coord = ncds.variables[time_dim]
tatts = getNCAtts(time_coord)
tname = tatts.get('long_name','')
if tname.lower().startswith('calendar '):
# info on month for climatology
from geospatial.netcdf_tools import default_mon_name_atts
if default_mon_name_atts['name'] in ncds.variables:
                        if lfeedback: print("\nName of months variable already exists.")
else:
if lfeedback: print("\nAdding name and length of months.")
assert tatts.get('units','').lower().startswith('month'), tatts # this assumes monthly time aggregation
assert not time_agg or time_agg.lower().startswith('month')
addNameLengthMonth(ncds, time_dim=time_dim)
else:
# time stamps for transient
if time_dim+'_stamp' in ncds.variables:
if lfeedback: print("\nTime-stamp variable ('{}_stamp') already exists.".format(time_dim))
else:
time_agg = time_agg.lower()
if time_agg.endswith('ly'): time_agg = time_agg[:-2]
                        if lfeedback: print("\nAdding human-readable time-stamp variable ('{}_stamp').".format(time_dim))
addTimeStamps(ncds, units=time_agg) # add time-stamps
## make sure the spatial units are present!!! xarray seems to loose the spatial coordinate units
lgeo = isGeoCRS(ncds, lraise=True)
for coord in getGeoCoords(ncds, lvars=True, lraise=True):
if 'units' not in coord.ncattrs():
coord.setncattr('units','deg' if lgeo else 'm')
# store geospatial code version
ncds.setncattr('geospatial_netcdf_version',geospatial_netcdf_version)
# replace original file
if ltmpfile:
if lfeedback: print("\nMoving file to final destination (overwriting old file):\n '{}'".format(filepath))
if os.path.exists(filepath): os.remove(filepath)
os.rename(tmp_filepath, filepath)
else:
# just show some info and save task graph
if lfeedback:
print("\nEncoding info:")
print(encoding)
print(task)
print("\nSaving task graph to:\n '{}.svg'".format(filepath))
task.visualize(filename=filepath+'.svg') # This file is never produced
# return file path
return filepath
if __name__ == '__main__':
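    # Minimal self-test sketch (illustrative; only uses numpy and xarray):
    _demo = xr.Dataset(coords={'lat': np.arange(40.0, 45.0, 0.5),
                               'lon': np.arange(-80.0, -75.0, 0.5)})
    _xlon, _ylat = getGeoCoords(_demo)
    assert isGeoCRS(_demo)
    print('demo geographic coordinates:', _ylat.name, _xlon.name)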
pass | aerler/HGS-Tools | Python/geospatial/xarray_tools.py | Python | gpl-3.0 | 40,502 | 0.015234 |
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
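# Note (illustrative): for d = "2015" the filter string evaluates to
# "created:<2015 and created:>2014", i.e. repositories created during 2014.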
| chrismattmann/drat | distribution/src/main/resources/bin/list-ghe-repos.py | Python | apache-2.0 | 1,171 | 0 |
from django_markup.filter import MarkupFilter
class MarkdownMarkupFilter(MarkupFilter):
"""
Applies Markdown conversion to a string, and returns the HTML.
"""
title = 'Markdown'
kwargs = {'safe_mode': True}
def render(self, text, **kwargs):
if kwargs:
self.kwargs.update(kwargs)
from markdown import markdown
text = markdown(text, **self.kwargs)
# Markdowns safe_mode is deprecated. We replace it with Bleach
# to keep it backwards compatible.
# https://python-markdown.github.io/change_log/release-2.6/#safe_mode-deprecated
if self.kwargs.get('safe_mode') is True:
from bleach import clean
# fmt: off
markdown_tags = [
"h1", "h2", "h3", "h4", "h5", "h6",
"b", "i", "strong", "em", "tt",
"p", "br",
"span", "div", "blockquote", "pre", "code", "hr",
"ul", "ol", "li", "dd", "dt",
"img",
"a",
"sub", "sup",
]
markdown_attrs = {
"*": ["id"],
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
# fmt: on
text = clean(text, markdown_tags, markdown_attrs)
return text
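if __name__ == '__main__':
    # Illustrative sketch (assumes the `markdown` and `bleach` packages are installed;
    # exact HTML output depends on the installed versions).
    html = MarkdownMarkupFilter().render('**bold** <script>alert(1)</script>')
    # With safe_mode=True the disallowed <script> tag is neutralised by Bleach,
    # while '**bold**' is converted to '<strong>bold</strong>'.
    print(html)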
| bartTC/django-markup | django_markup/filter/markdown_filter.py | Python | bsd-3-clause | 1,360 | 0 |
'''
This module is part of ngs_backbone. This module provides mapping-related
analyses
Created on 15/03/2010
@author: peio
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
import os, shutil
from gzip import GzipFile
from tempfile import NamedTemporaryFile
from franklin.backbone.analysis import (Analyzer, scrape_info_from_fname,
_LastAnalysisAnalyzer)
from franklin.mapping import map_reads
from franklin.utils.cmd_utils import call
from franklin.utils.misc_utils import (NamedTemporaryDir, VersionedPath,
rel_symlink)
from franklin.backbone.specifications import (BACKBONE_BASENAMES,
PLOT_FILE_FORMAT,
BACKBONE_DIRECTORIES)
from franklin.sam import (bam2sam, add_header_and_tags_to_sam, merge_sam,
sam2bam, sort_bam_sam, standardize_sam, realign_bam,
bam_distribs, create_bam_index, bam_general_stats)
class SetAssemblyAsReferenceAnalyzer(Analyzer):
'It sets the reference assembly as mapping reference'
def run(self):
'''It runs the analysis.'''
contigs_path = self._get_input_fpaths()['contigs']
contigs_ext = contigs_path.extension
reference_dir = self._create_output_dirs()['result']
reference_fpath = os.path.join(reference_dir,
BACKBONE_BASENAMES['mapping_reference'] + '.' + \
contigs_ext)
if os.path.exists(reference_fpath):
os.remove(reference_fpath)
rel_symlink(contigs_path.last_version, reference_fpath)
def _get_basename(fpath):
'It returns the base name without path and extension'
return os.path.splitext(os.path.basename(fpath))[0]
class MappingAnalyzer(Analyzer):
'It performs the mapping of the sequences to the reference'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
project_settings = self._project_settings
settings = project_settings['Mappers']
tmp_dir = project_settings['General_settings']['tmpdir']
project_path = project_settings['General_settings']['project_path']
unmapped_fhand = None
if 'keep_unmapped_reads_in_bam' in settings:
if settings['keep_unmapped_reads_in_bam'] == False:
unmapped_fpath = os.path.join(project_path,
BACKBONE_DIRECTORIES['mappings'][0],
BACKBONE_BASENAMES['unmapped_list'])
unmapped_fhand = GzipFile(unmapped_fpath, 'w')
inputs = self._get_input_fpaths()
reads_fpaths = inputs['reads']
output_dir = self._create_output_dirs(timestamped=True)['result']
# define color and sequence references
reference_path = inputs['reference']
mapping_index_dir = inputs['mapping_index']
#print reference_path, mapping_index_dir
#memory for the java programs
java_mem = self._project_settings['Other_settings']['java_memory']
picard_path = self._project_settings['Other_settings']['picard_path']
for read_fpath in reads_fpaths:
mapping_parameters = {}
read_info = scrape_info_from_fname(read_fpath)
platform = read_info['pl']
#which maper are we using for this platform
mapper = settings['mapper_for_%s' % platform]
(reference_fpath,
color_space) = self._prepare_mapper_index(mapping_index_dir,
reference_path,
platform, mapper)
mapping_parameters['unmapped_fhand'] = unmapped_fhand
mapping_parameters['colorspace'] = color_space
out_bam_fpath = os.path.join(output_dir,
read_fpath.basename + '.bam')
if platform in ('454', 'sanger'):
mapping_parameters['reads_length'] = 'long'
else:
mapping_parameters['reads_length'] = 'short'
if not os.path.exists(out_bam_fpath):
mapping_parameters['threads'] = self.threads
mapping_parameters['java_conf'] = {'java_memory':java_mem,
'picard_path':picard_path}
mapping_parameters['tmp_dir'] = tmp_dir
map_reads(mapper,
reads_fpath=read_fpath.last_version,
reference_fpath=reference_fpath,
out_bam_fpath=out_bam_fpath,
parameters=mapping_parameters)
# Now we run the select _last mapping
self._spawn_analysis(DEFINITIONS['_select_last_mapping'],
silent=self._silent)
self._log({'analysis_finished':True})
def _prepare_mapper_index(self, mapping_index_dir, reference_path, platform,
mapper):
'It creates reference_fpath depending on the mapper and the platform'
kind = 'color' if platform == 'solid' else 'sequence'
color_space = True if kind == 'color' else False
mapping_index_dir = mapping_index_dir[0].original_path
index_dir = mapping_index_dir % (mapper, kind)
if not os.path.exists(index_dir):
os.mkdir(index_dir)
reference_fpath = reference_path.last_version
reference_fname = os.path.basename(reference_fpath)
index_fpath = os.path.join(index_dir, reference_fname)
if not os.path.exists(index_fpath):
rel_symlink(reference_fpath, index_fpath)
return index_fpath, color_space
class MergeBamAnalyzer(Analyzer):
'It performs the merge of various bams into only one'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
settings = self._project_settings
project_path = settings['General_settings']['project_path']
tmp_dir = settings['General_settings']['tmpdir']
inputs = self._get_input_fpaths()
bam_paths = inputs['bams']
reference_path = inputs['reference']
output_dir = self._create_output_dirs()['result']
merged_bam_path = VersionedPath(os.path.join(output_dir,
BACKBONE_BASENAMES['merged_bam']))
merged_bam_fpath = merged_bam_path.next_version
#Do we have to add the default qualities to the sam file?
#do we have characters different from ACTGN?
add_qualities = settings['Sam_processing']['add_default_qualities']
#memory for the java programs
java_mem = settings['Other_settings']['java_memory']
picard_path = settings['Other_settings']['picard_path']
if add_qualities:
default_sanger_quality = settings['Other_settings']['default_sanger_quality']
default_sanger_quality = int(default_sanger_quality)
else:
default_sanger_quality = None
temp_dir = NamedTemporaryDir()
for bam_path in bam_paths:
bam_basename = bam_path.basename
temp_sam = NamedTemporaryFile(prefix='%s.' % bam_basename,
suffix='.sam')
sam_fpath = os.path.join(temp_dir.name, bam_basename + '.sam')
bam2sam(bam_path.last_version, temp_sam.name)
sam_fhand = open(sam_fpath, 'w')
# First we need to create the sam with added tags and headers
add_header_and_tags_to_sam(temp_sam, sam_fhand)
temp_sam.close()
sam_fhand.close()
#the standardization
temp_sam2 = NamedTemporaryFile(prefix='%s.' % bam_basename,
suffix='.sam', delete=False)
standardize_sam(open(sam_fhand.name), temp_sam2,
default_sanger_quality,
add_def_qual=add_qualities,
only_std_char=True)
temp_sam2.flush()
shutil.move(temp_sam2.name, sam_fhand.name)
temp_sam2.close()
get_sam_fpaths = lambda dir_: [os.path.join(dir_, fname) for fname in os.listdir(dir_) if fname.endswith('.sam')]
# Once the headers are ready we are going to merge
sams = get_sam_fpaths(temp_dir.name)
sams = [open(sam) for sam in sams]
temp_sam = NamedTemporaryFile(suffix='.sam')
reference_fhand = open(reference_path.last_version)
try:
merge_sam(sams, temp_sam, reference_fhand)
except Exception:
if os.path.exists(merged_bam_fpath):
os.remove(merged_bam_fpath)
raise
reference_fhand.close()
# close files
for sam in sams:
sam.close()
# Convert sam into a bam,(Temporary)
temp_bam = NamedTemporaryFile(suffix='.bam')
sam2bam(temp_sam.name, temp_bam.name)
# finally we need to order the bam
#print 'unsorted.bam', temp_bam.name
#raw_input()
sort_bam_sam(temp_bam.name, merged_bam_fpath,
java_conf={'java_memory':java_mem,
'picard_path':picard_path}, tmp_dir=tmp_dir )
temp_bam.close()
temp_sam.close()
create_bam_index(merged_bam_fpath)
self._log({'analysis_finished':True})
class CalmdBamAnalyzer(Analyzer):
'It runs samtools calmd '
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
inputs = self._get_input_fpaths()
bam_path = inputs['bam']
bam_fpath = bam_path.last_version
reference_fpath = inputs['reference'].last_version
out_fhand = open(bam_path.next_version, 'w')
cmd = ['samtools', 'calmd', '-Abr', bam_fpath, reference_fpath]
call(cmd, raise_on_error=True, stdout=out_fhand)
create_bam_index(out_fhand.name)
out_fhand.close()
self._log({'analysis_finished':True})
class RealignBamAnalyzer(Analyzer):
'It realigns the bam using GATK'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
settings = self._project_settings
project_path = settings['General_settings']['project_path']
tmp_dir = settings['General_settings']['tmpdir']
inputs = self._get_input_fpaths()
bam_path = inputs['bam']
bam_fpath = bam_path.last_version
reference_path = inputs['reference']
#memory for the java programs
osettings = settings['Other_settings']
java_mem = osettings['java_memory']
picard_path = osettings['picard_path']
gatk_path = osettings['gatk_path']
#we need a temporary path
temp_bam = NamedTemporaryFile(suffix='.bam')
temp_bam_fpath = temp_bam.name
temp_bam.close()
#do the realigment
realign_bam(bam_fpath=bam_fpath,
reference_fpath=reference_path.last_version,
out_bam_fpath=temp_bam_fpath,
java_conf={'java_memory':java_mem,
'picard_path':picard_path,
'gatk_path':gatk_path},
threads=self.threads,
tmp_dir=tmp_dir)
#a new version for the original bam
out_bam_fpath = bam_path.next_version
shutil.move(temp_bam_fpath, out_bam_fpath)
self._log({'analysis_finished':True})
class BamStatsAnalyzer(Analyzer):
'It makes the stats of the mapping'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
settings = self._project_settings
self._create_output_dirs()['result']
project_name = settings['General_settings']['project_name']
sample_size = settings['Sam_stats']['sampling_size']
project_path = settings['General_settings']['project_path']
inputs = self._get_input_fpaths()
bam_path = inputs['bam']
bam_fpath = bam_path.last_version
bam_fhand = open(bam_fpath)
out_dir = os.path.abspath(self._get_output_dirs()['result'])
summary_fname = os.path.join(out_dir,
BACKBONE_BASENAMES['statistics_file'])
summary_fhand = open(summary_fname, 'w')
        # File handle for the unmapped reads list
unmapped_fpath = os.path.join(project_path,
BACKBONE_DIRECTORIES['mappings'][0],
BACKBONE_BASENAMES['unmapped_list'])
if os.path.exists(unmapped_fpath):
unmapped_fhand = GzipFile(unmapped_fpath)
else:
unmapped_fhand = None
#The general statistics
bam_general_stats(bam_fhand, summary_fhand, unmapped_fhand)
for kind in ('coverage', 'mapq'):
basename = os.path.join(out_dir, "%s" % (project_name))
bam_fhand.seek(0)
bam_distribs(bam_fhand, kind, basename=basename,
sample_size=sample_size, summary_fhand=summary_fhand,
plot_file_format=PLOT_FILE_FORMAT)
bam_fhand.close()
if unmapped_fhand is not None:
unmapped_fhand.close()
DEFINITIONS = {
'set_assembly_as_reference':
{'inputs':{
'contigs':
{'directory': 'assembly_result',
'file': 'contigs'},
},
'outputs':{'result':{'directory': 'mapping_reference'}},
'analyzer': SetAssemblyAsReferenceAnalyzer,
},
'mapping':
{'inputs':{
'reads':
{'directory': 'cleaned_reads',
'file_kinds': 'sequence_files'},
'reference':
{'directory': 'mapping_reference',
'file': 'mapping_reference'},
'mapping_index':
{'directory': 'mapping_index'},
},
'outputs':{'result':{'directory': 'mappings_by_readgroup'}},
'analyzer': MappingAnalyzer,
},
'_select_last_mapping':
{'inputs':{'analyses_dir':{'directory': 'mappings'}},
'outputs':{'result':{'directory': 'mapping_result',
'create':False}},
'analyzer': _LastAnalysisAnalyzer,
},
'merge_bams':
{'inputs':{
'bams':
{'directory': 'mappings_by_readgroup',
'file_kinds': 'bam'},
'reference':
{'directory': 'mapping_reference',
'file': 'mapping_reference'},
},
'outputs':{'result':{'directory': 'mapping_result'}},
'analyzer': MergeBamAnalyzer,
},
'realign_bam':
{'inputs':{
'bam':
{'directory': 'mapping_result',
'file': 'merged_bam'},
'reference':
{'directory': 'mapping_reference',
'file': 'mapping_reference'},
},
'outputs':{'result':{'directory': 'mapping_result'}},
'analyzer': RealignBamAnalyzer,
},
'calmd_bam':
{'inputs':{
'bam':
{'directory': 'mapping_result',
'file': 'merged_bam'},
'reference':
{'directory': 'mapping_reference',
'file': 'mapping_reference'},
},
'outputs':{'result':{'directory': 'mapping_result'}},
'analyzer': CalmdBamAnalyzer,
},
'mapping_stats':
{'inputs':{
'bam':
{'directory': 'mapping_result',
'file': 'merged_bam'},
},
'outputs':{'result':{'directory': 'mapping_stats'}},
'analyzer': BamStatsAnalyzer,
},
}
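# Illustrative lookups only (the backbone runner that actually consumes
# DEFINITIONS lives elsewhere in franklin and is an assumption here):
#
#   definition = DEFINITIONS['merge_bams']
#   definition['analyzer']                        # -> MergeBamAnalyzer
#   definition['inputs']['bams']['directory']     # -> 'mappings_by_readgroup'
#   definition['outputs']['result']['directory']  # -> 'mapping_result'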
| JoseBlanca/franklin | franklin/backbone/mapping.py | Python | agpl-3.0 | 16,787 | 0.005957 |
import os
from setuptools import setup
from setuptools import find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='liblinesdk',
version='0.1.0',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='LINE API Python SDK.',
long_description=README,
url='https://www.example.com/',
author='LINE Corporation',
author_email='matthew.r.tanudjaja@linecorp.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mrexmelle/liblinesdk-py | setup.py | Python | mit | 1,211 | 0.000826 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import uuid
import collections
import moosetree
import MooseDocs
from ..common import exceptions
from ..base import components, MarkdownReader, LatexRenderer, Extension
from ..tree import tokens, html, latex
from . import core
def make_extension(**kwargs):
return FloatExtension(**kwargs)
Float = tokens.newToken('Float', img=False, bottom=False, command='figure')
FloatCaption = tokens.newToken('FloatCaption', key='', prefix='', number='?')
ModalLink = tokens.newToken('ModalLink', bookmark=True, bottom=False, close=True)
ModalLinkTitle = tokens.newToken('ModalLinkTitle')
ModalLinkContent = tokens.newToken('ModalLinkContent')
def create_float(parent, extension, reader, page, settings, bottom=False, img=False,
token_type=Float, **kwargs):
"""
Helper for optionally creating a float based on the existence of caption and/or id.
Inputs:
parent: The parent token that float should be placed
extension: The extension object (to extract 'prefix' from config items)
reader: The Reader object for tokenization of the heading
page: The Page object for passing to the tokenization routine
settings: The command settings to extract a local 'prefix'
bottom[True|False]: Set flag on the float for placing the caption at the bottom
img[True|False]: Set to True if the contents are an image (Materialize only)
token_type: The type of Token object to create; it should derive from Float
"""
cap, _ = _add_caption(None, extension, reader, page, settings)
if cap:
flt = token_type(parent, img=img, bottom=bottom, **kwargs)
cap.parent = flt
return flt
return parent
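# Hypothetical usage from a command-style extension (illustration only; the
# calling object, its attributes, and the image token are assumptions, not part
# of this module):
#
#   flt = create_float(parent, self.extension, reader, page, self.settings,
#                      bottom=True, img=True)
#   # ...tokenize the image/table content with 'flt' as its parent...
#   # If neither a caption nor an id was supplied, create_float returns 'parent'
#   # unchanged, so the content is emitted without a Float wrapper.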
def caption_settings():
"""Return settings necessary for captions."""
settings = dict()
settings['caption'] = (None, "The caption text for the float object.")
settings['prefix'] = (None, "The numbered caption label to include prior to the caption text.")
return settings
def _add_caption(parent, extension, reader, page, settings):
"""Helper for adding captions to float tokens."""
cap = settings['caption']
key = settings['id']
prefix = settings.get('prefix')
if prefix is None:
prefix = extension.get('prefix', None)
if prefix is None:
msg = "The 'prefix' must be supplied via the settings or the extension configuration."
raise exceptions.MooseDocsException(msg)
caption = None
if key:
caption = FloatCaption(parent, key=key, prefix=prefix)
if cap:
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
elif cap:
caption = FloatCaption(parent)
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
return caption, prefix
def create_modal(parent, title=None, content=None, **kwargs):
"""
Create the necessary Modal tokens for creating modal windows with materialize.
"""
modal = ModalLink(parent.root, **kwargs)
if isinstance(title, str):
ModalLinkTitle(modal, string=title)
elif isinstance(title, tokens.Token):
title.parent = ModalLinkTitle(modal)
if isinstance(content, str):
ModalLinkContent(modal, string=content)
elif isinstance(content, tokens.Token):
content.parent = ModalLinkContent(modal)
return parent
def create_modal_link(parent, title=None, content=None, string=None, **kwargs):
"""
Create the necessary tokens to create a link to a modal window with materialize.
"""
kwargs.setdefault('bookmark', str(uuid.uuid4()))
link = core.Link(parent,
url='#{}'.format(kwargs['bookmark']),
class_='modal-trigger',
string=string)
create_modal(parent, title, content, **kwargs)
return link
class FloatExtension(Extension):
"""
Provides ability to add caption float elements (e.g., figures, table, etc.). This is only a
base extension. It does not provide tables for example, just the tools to make floats
in a uniform manner.
"""
def extend(self, reader, renderer):
renderer.add('Float', RenderFloat())
renderer.add('FloatCaption', RenderFloatCaption())
renderer.add('ModalLink', RenderModalLink())
renderer.add('ModalLinkTitle', RenderModalLinkTitle())
renderer.add('ModalLinkContent', RenderModalLinkContent())
if isinstance(renderer, LatexRenderer):
renderer.addPackage('caption', labelsep='period')
def postTokenize(self, page, ast):
"""Set float number for each counter."""
counts = page.get('counts', collections.defaultdict(int))
for node in moosetree.iterate(ast, lambda n: n.name == 'FloatCaption'):
prefix = node.get('prefix', None)
if prefix is not None:
counts[prefix] += 1
node['number'] = counts[prefix]
key = node.get('key')
if key:
shortcut = core.Shortcut(ast.root, key=key, link='#{}'.format(key))
# TODO: This is a bit of a hack to get Figure~\ref{} etc. working in general
if isinstance(self.translator.renderer, LatexRenderer):
shortcut['prefix'] = prefix.title()
else:
tokens.String(shortcut, content='{} {}'.format(prefix.title(), node['number']))
page['counts'] = counts
class RenderFloat(components.RenderComponent):
def createHTML(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('moose-float-div')
if token['bottom']:
cap = token(0)
cap.parent = None # Guarantees that "cap" is removed from the current tree
cap.parent = token
return div
def createMaterialize(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('card moose-float')
content = html.Tag(div, 'div')
if token['img']:
content.addClass('card-image')
else:
content.addClass('card-content')
if token['bottom']:
cap = token(0)
cap.parent = None
cap.parent = token
return content
def createLatex(self, parent, token, page):
env = latex.Environment(parent, token['command'])
style = latex.parse_style(token)
width = style.get('width', None)
if width and token(0).name == 'Image':
token(0).set('style', 'width:{};'.format(width))
if style.get('text-align', None) == 'center':
latex.Command(env, 'centering')
return env
class RenderFloatCaption(components.RenderComponent):
def createHTML(self, parent, token, page):
caption = html.Tag(parent, 'p', class_="moose-caption")
prefix = token.get('prefix', None)
if prefix:
heading = html.Tag(caption, 'span', class_="moose-caption-heading")
html.String(heading, content="{} {}: ".format(prefix, token['number']))
return html.Tag(caption, 'span', class_="moose-caption-text")
def createLatex(self, parent, token, page):
caption = latex.Command(parent, 'caption')
if token['key']:
latex.Command(caption, 'label', string=token['key'], escape=True)
return caption
class RenderModalLink(core.RenderLink):
def createLatex(self, parent, token, page):
return None
def createHTML(self, parent, token, page):
return None
def createMaterialize(self, parent, token, page):
cls = "modal bottom-sheet" if token['bottom'] else "modal"
modal = html.Tag(parent, 'div', class_=cls, id_=token['bookmark'])
modal.addClass('moose-modal')
modal_content = html.Tag(modal, 'div', class_="modal-content")
if token['close']:
footer = html.Tag(modal, 'div', class_='modal-footer')
html.Tag(footer, 'a', class_='modal-close btn-flat', string='Close')
return modal_content
class RenderModalLinkTitle(components.RenderComponent):
def createHTML(self, parent, token, page):
return None
def createMaterialize(self, parent, token, page):
return html.Tag(parent, 'h4')
def createLatex(self, parent, token, page):
return None
class RenderModalLinkContent(components.RenderComponent):
def createHTML(self, parent, token, page):
return None
def createMaterialize(self, parent, token, page):
return parent
def createLatex(self, parent, token, page):
return None
| nuclear-wizard/moose | python/MooseDocs/extensions/floats.py | Python | lgpl-2.1 | 8,900 | 0.004494 |
# Copyright (c) 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import sqlalchemy as sa
from sqlalchemy.orm import exc as db_exc
from congress.db import api as db
from congress.db import model_base
from congress.db import utils as db_utils
class DSTableData(model_base.BASE):
__tablename__ = 'dstabledata'
ds_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
tablename = sa.Column(sa.String(255), nullable=False, primary_key=True)
# choose long length compatible with MySQL, SQLite, Postgres
tabledata = sa.Column(sa.Text(), nullable=False)
@db_utils.retry_on_db_error
def store_ds_table_data(ds_id, tablename, tabledata, session=None):
session = session or db.get_session()
tabledata = _json_encode_table_data(tabledata)
with session.begin(subtransactions=True):
new_row = session.merge(DSTableData(
ds_id=ds_id,
tablename=tablename,
tabledata=tabledata))
return new_row
@db_utils.retry_on_db_error
def delete_ds_table_data(ds_id, tablename=None, session=None):
session = session or db.get_session()
if tablename is None:
return session.query(DSTableData).filter(
DSTableData.ds_id == ds_id).delete()
else:
return session.query(DSTableData).filter(
DSTableData.ds_id == ds_id,
DSTableData.tablename == tablename).delete()
@db_utils.retry_on_db_error
def get_ds_table_data(ds_id, tablename=None, session=None):
session = session or db.get_session()
try:
if tablename is None:
rows = session.query(DSTableData).filter(
DSTableData.ds_id == ds_id)
return_list = []
for row in rows:
return_list.append(
{'tablename': row.tablename,
'tabledata': _json_decode_table_data(row.tabledata)})
return return_list
else:
return _json_decode_table_data(session.query(DSTableData).filter(
DSTableData.ds_id == ds_id,
DSTableData.tablename == tablename).one().tabledata)
except db_exc.NoResultFound:
pass
def _json_encode_table_data(tabledata):
tabledata = list(tabledata)
for i in range(0, len(tabledata)):
tabledata[i] = list(tabledata[i])
return json.dumps(tabledata)
def _json_decode_table_data(json_tabledata):
tabledata = json.loads(json_tabledata)
for i in range(0, len(tabledata)):
tabledata[i] = tuple(tabledata[i])
return set(tabledata)
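# Round-trip sketch (illustrative, not part of the original module): rows are
# serialized as a JSON list of lists and restored as a set of tuples.
#
#   >>> _json_decode_table_data(_json_encode_table_data({(1, 'a'), (2, 'b')}))
#   {(1, 'a'), (2, 'b')}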
| openstack/congress | congress/db/db_ds_table_data.py | Python | apache-2.0 | 3,233 | 0 |
"""Automatic keyboard layout switcher"""
import functools
import logging
import subprocess
from typing import Iterable
from typing import Set
import xkbgroup
import swytcher.settings as settings
import swytcher.xwindow as xwindow
from swytcher.util import suppress_err
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Move this to swytcher.system
@suppress_err(FileNotFoundError, log)
def notify(title: str, msg: str = '') -> None: # pragma: no cover
"""Use notify-send (if available) to inform user of layout switch."""
if not settings.NOTIFY:
return
cmd = [
'notify-send',
'--urgency=low',
'--expire-time=2000',
title,
msg
]
subprocess.call(cmd)
def change_layout(xkb: xkbgroup.XKeyboard, layout: str) -> bool:
"""Set layout; returns True if layout was changed, False otherwise"""
if xkb.group_name == layout: # check against current layout
log.debug("%r is already the active layout", layout)
return False # don't change layout if it's already correct
log.info("setting layout %r", layout)
xkb.group_name = layout
notify("Changed layout", layout)
return True
def _match_substrings(name_list: Iterable[str],
substrings: Iterable[str]) -> set:
"""Substring filter match"""
found_matches = set()
for name in name_list:
for substring in substrings:
if substring in name:
log.debug("Substring filter match: %r in %r", substring, name)
found_matches.update([name])
return found_matches
def matches(name_list: Iterable[str], strings: Iterable[str],
substrings: Iterable[str]) -> Set[str]:
"""Returns True if any of the strings in the two filters `strings` and
`substrings` occur in `name_list`."""
matched = (set(strings) & set(name_list) or
_match_substrings(name_list, substrings or {}))
if matched:
log.debug('%r matched %r from %r or %r',
name_list, matched, strings, substrings)
return matched
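# Worked example (illustrative only): exact names win via set intersection,
# otherwise the substring filter is tried; plain `in` is case sensitive.
#
#   >>> matches(['emacs', 'Emacs'], {'Emacs'}, {'term'})
#   {'Emacs'}
#   >>> matches(['xterm', 'XTerm'], set(), {'term'})
#   {'xterm'}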
def change_callback(name_list, xkb, layouts: list) -> None: # pragma: no cover
"""Event handler when active window is changed"""
# NOTE: These extracted variables should be removed later
primary_filter = layouts[0]['strings']
primary_substrings = layouts[0]['substrings']
primary = layouts[0]['name']
secondary_filter = layouts[1]['strings']
secondary_substrings = layouts[1]['substrings']
secondary = layouts[1]['name']
# matched_layout = match_layout(name_list, layouts)
# if matched_layout:
# change_layout(xkb, matched_layout)
# else:
# change_layout(xkb, last_remembered_layout_for_window)
if matches(name_list, secondary_filter, secondary_substrings):
change_layout(xkb, secondary)
elif matches(name_list, primary_filter, primary_substrings):
change_layout(xkb, primary)
else:
log.debug("%r: No match, using default layout", name_list)
change_layout(xkb, xkb.groups_names[0])
def main(args=None): # pragma: no cover
"""Main"""
if not args:
pass
xkb = xkbgroup.XKeyboard()
layouts = settings.setup_layouts(xkb, settings.CONFIG_INI)
log.info("Layouts configured by setxkbmap: %s", layouts)
partial_cb = functools.partial(change_callback, xkb=xkb, layouts=layouts)
xwindow.run(partial_cb)
| eddie-dunn/swytcher | swytcher/swytcher.py | Python | mit | 3,436 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.version_privacy_level'
db.add_column('projects_project', 'version_privacy_level',
self.gf('django.db.models.fields.CharField')(default='public', max_length=20),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.version_privacy_level'
db.delete_column('projects_project', 'version_privacy_level')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 17, 885486)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 17, 885212)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['projects']
| d0ugal/readthedocs.org | readthedocs/projects/migrations/0028_add_version_default_privacy.py | Python | mit | 9,508 | 0.007678 |
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
def iterate(self,**kwargs):
self.iter = 0
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nclosed):
for a in xrange(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**kwargs):
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nalpha):
for a in xrange(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nbeta):
for a in xrange(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**kwargs):
return oep_hf(atoms,orbs,**kwargs)
def oep_hf(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
See notes on options and other args in oep routine.
"""
return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs)
def oep(atoms,orbs,energy_func,grad_func=None,**kwargs):
"""oep - Form the optimized effective potential for a given energy expression
oep(atoms,orbs,energy_func,grad_func=None,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
energy_func The function that returns the energy for the given method
grad_func The function that returns the force for the given method
Options
-------
verbose False Output terse information to stdout (default)
True Print out additional information
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
verbose = kwargs.get('verbose')
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
opt_method = kwargs.get('opt_method',settings.OEPOptMethod)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = fminBFGS(energy_func,b,grad_func,
(nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
logger=logging)
energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=1)
return energy,orbe,orbs
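# Typical call pattern (this mirrors test_old() at the bottom of this module):
# a converged RHF calculation provides the guess orbitals that seed the OEP
# optimization.
#
#   E_hf, orbe_hf, orbs_hf = rhf(mol, bfs=bfs, integrals=(S, h, Ints))
#   E_exx, orbe_exx, orbs_exx = exx(mol, orbs_hf, bfs=bfs, integrals=(S, h, Ints))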
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the energy for the OEP/HF functional
Options:
return_flag 0 Just return the energy
1 Return energy, orbe, orbs
2 Return energy, orbe, orbs, F
"""
return_flag = kwargs.get('return_flag')
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
if ETemp:
efermi = get_efermi(nel,orbe,ETemp)
occs = get_fermi_occs(efermi,orbe,ETemp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,ETemp)
else:
D = mkdens(orbs,0,nocc)
F = get_fock(D,Ints,h)
energy = trace2(h+F,D)+Enuke
if ETemp: energy += entropy
iref = nel/2
gap = 627.51*(orbe[iref]-orbe[iref-1])
logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
% (energy,sqrt(dot(b,b)),gap))
#logging.debug("%s" % orbe)
if return_flag == 1:
return energy,orbe,orbs
elif return_flag == 2:
return energy,orbe,orbs,F
return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in xrange(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum the appropriate terms to get the b gradient
for i in xrange(nocc):
for a in xrange(nocc,norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return_flag = kwargs.get('return_flag')
if return_flag == 1:
return energy,bp
elif return_flag == 2:
return energy,bp,orbe,orbs
return bp
def get_Hoep(b,H0,Gij):
Hoep = H0
# Add the contributions from the gaussian potential functions
# H[ij] += b[g]*<ibf|g|jbf>
for g in xrange(len(b)):
Hoep = Hoep + b[g]*Gij[g]
return Hoep
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_hf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = kwargs.get('bvec')
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = zeros(nbf,'d')
eold = 0
for iter in xrange(maxiter):
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
D = mkdens(orbs,0,nocc)
Vhf = get2JmK(Ints,D)
energy = trace2(2*h+Vhf,D)+Enuke
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
dV_ao = Vhf-Vfa
dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
Gkt = zeros((nbf,nbf),'d')
for k in xrange(nbf):
# This didn't work; in fact, it made things worse:
Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
for l in xrange(nbf):
Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
# This should actually be a pseudoinverse...
b = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**kwargs):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_uhf(atoms,orbs,**kwargs)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = kwargs.get('maxiter',settings.OEPIters)
tol = kwargs.get('tol',settings.OEPTolerance)
ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
bfs = getbasis(atoms,**kwargs)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = kwargs.get('pbfs')
if not pbfs: pbfs = bfs
npbf = len(pbfs)
S,h,Ints = getints(bfs,atoms,**kwargs)
nel = atoms.get_nel()
nclosed,nopen = atoms.get_closedopen()
nalpha,nbeta = nclosed+nopen,nclosed
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
ba = zeros(npbf,'d')
bb = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
J0 = getJ(Ints,D0)
Vfa = ((nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in xrange(maxiter):
Hoepa = get_Hoep(ba,H0,Gij)
        Hoepb = get_Hoep(bb,H0,Gij)
orbea,orbsa = geigh(Hoepa,S)
orbeb,orbsb = geigh(Hoepb,S)
if ETemp:
efermia = get_efermi(2*nalpha,orbea,ETemp)
occsa = get_fermi_occs(efermia,orbea,ETemp)
Da = mkdens_occs(orbsa,occsa)
efermib = get_efermi(2*nbeta,orbeb,ETemp)
occsb = get_fermi_occs(efermib,orbeb,ETemp)
Db = mkdens_occs(orbsb,occsb)
entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
else:
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
J = getJ(Ints,Da) + getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
+Enuke
if ETemp: energy += entropy
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
# Do alpha and beta separately
# Alphas
dV_ao = J-Ka-Vfa
dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
# This should actually be a pseudoinverse...
ba = solve(X,c)
# Betas
dV_ao = J-Kb-Vfa
dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
# This should actually be a pseudoinverse...
bb = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
from PyQuante.Molecule import Molecule
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
#mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
# units='Angstrom')
mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
bfs = getbasis(mol)
S,h,Ints = getints(bfs,mol)
print "after integrals"
E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
print "RHF energy = ",E_hf
E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
return
def test():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
solver = HFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = EXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=40000)
return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
test()
utest()
| berquist/PyQuante | PyQuante/OEP.py | Python | bsd-3-clause | 25,427 | 0.019664 |
import logging
import time
import os
# will change these to specific imports once code is more final
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
BRUSH_WHITE = QBrush(QColor(255, 255, 255), Qt.SolidPattern)
BRUSH_GREEN = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
BRUSH_BLACK = QBrush(QColor(0, 0, 0), Qt.SolidPattern)
BRUSH_DARK_PURPLE = QBrush(QColor(128, 0, 255), Qt.SolidPattern)
class DeviceNode:
__slots__ = ["_callback", "_name", "_data", "_type", "_brush", "q_name", "q_state", "sub_properties",
"sub_properties_appended", "q_time_added", "log"]
def __init__(self):
self._callback = None
self._name = ""
self._data = {}
self._type = ""
self._brush = BRUSH_BLACK
self.q_name = QStandardItem()
self.q_state = QStandardItem()
self.sub_properties = {}
self.sub_properties_appended = False
self.q_time_added = QStandardItem()
self.q_time_added.setData(time.perf_counter(), Qt.DisplayRole)
self.q_name.setDragEnabled(True)
self.q_state.setData("", Qt.DisplayRole)
self.log = logging.getLogger('Device')
def setName(self, name):
self._name = name
self.q_name.setData(str(self._name), Qt.DisplayRole)
self.log = logging.getLogger('Device {}'.format(self._name))
self.q_state.emitDataChanged()
def setData(self, data):
"""Set data of device."""
if data == self._data:
# do nothing if data did not change
return
if not isinstance(data, dict):
data = {}
if self._callback:
self._callback()
self._data = data
state_str = str(list(self._data.values())[0])
if len(self._data) > 1:
state_str = state_str + " {…}"
self.q_state.setData(state_str, Qt.DisplayRole)
for row in self._data:
if not self.sub_properties_appended:
q_property = QStandardItem()
q_value = QStandardItem()
self.sub_properties.update({row: [q_property, q_value]})
self.q_name.appendRow(self.sub_properties.get(row))
self.sub_properties.get(row)[0].setData(str(row), Qt.DisplayRole)
self.sub_properties.get(row)[1].setData(str(self._data.get(row)), Qt.DisplayRole)
self.sub_properties_appended = True
self.q_state.emitDataChanged()
self._brush = self._calculate_colored_brush()
def setType(self, type):
self._type = type
self._brush = self._calculate_colored_brush()
self.q_state.emitDataChanged()
def get_row(self):
return [self.q_name, self.q_state, self.q_time_added]
def data(self):
return self._data
def type(self):
return self._type
def get_colored_brush(self) -> QBrush:
"""Return colored brush for device."""
return self._brush
def _calculate_color_gamma_correction(self, color):
"""Perform gamma correction.
        Feel free to fiddle with these constants until it feels right.
With gamma = 0.5 and constant a = 18, the top 54 values are lost,
but the bottom 25% feels much more normal.
"""
gamma = 0.5
a = 18
corrected = []
for value in color:
if value < 0 or value > 255:
self.log.warning("Got value %s for brightness which outside the expected range", value)
value = 0
value = int(pow(value, gamma) * a)
if value > 255:
value = 255
corrected.append(value)
return corrected
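    # Worked example for the correction above (values approximate, illustration
    # only): with gamma=0.5 and a=18,
    #   value=25  -> int(sqrt(25)  * 18) = 90
    #   value=100 -> int(sqrt(100) * 18) = 180
    #   value=255 -> int(sqrt(255) * 18) = 287 -> clamped to 255
    # so dim channels are boosted noticeably while bright ones saturate early.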
def _calculate_colored_brush(self):
if self._type == 'light':
color = self.data()['color']
if color == [0, 0, 0]:
# shortcut for black
return BRUSH_BLACK
color = self._calculate_color_gamma_correction(color)
elif self._type == 'switch':
state = self.data()['state']
if state:
return BRUSH_GREEN
else:
return BRUSH_BLACK
elif self._type == 'diverter':
state = self.data()['active']
if state:
return BRUSH_DARK_PURPLE
else:
return BRUSH_BLACK
else:
# Get first parameter and draw as white if it evaluates True
state = bool(list(self.data().values())[0])
if state:
return BRUSH_WHITE
else:
return BRUSH_BLACK
return QBrush(QColor(*color), Qt.SolidPattern)
def set_change_callback(self, callback):
if self._callback:
# raise AssertionError("Can only have one callback")
old_callback = self._callback
self._callback = callback
return old_callback
else:
self._callback = callback
self.q_state.emitDataChanged()
class DeviceDelegate(QStyledItemDelegate):
def __init__(self):
self.size = None
super().__init__()
def paint(self, painter, view, index):
super().paint(painter, view, index)
color = None
state = None
balls = None
found = False
text = ''
# src_index = index.model().mapToSource(index)
# src_index_model = src_index.model()
# print(index.data())
# print(src_index_model.data())
data = []
try:
data = index.model().itemFromIndex(index).data()
# src_index = index.model().mapToSource(index)
# data = index.model().data(src_index)
except:
pass
num_circles = 1
# return
if index.column() == 0:
return
try:
if 'color' in data:
color = data['color']
found = True
except TypeError:
return
try:
if 'brightness' in data:
color = [data['brightness']]*3
found = True
except TypeError:
return
try:
if 'state' in data:
text = str(data['state'])
found = True
except TypeError:
return
try:
if 'complete' in data:
state = not data['complete']
found = True
except TypeError:
return
try:
if 'enabled' in data:
state = data['enabled']
found = True
except TypeError:
return
try:
if 'balls' in data:
balls = data['balls']
found = True
except TypeError:
return
try:
if 'balls_locked' in data:
balls = data['balls_locked']
found = True
except TypeError:
return
try:
if 'num_balls_requested' in data:
text += 'Requested: {} '.format(
data['num_balls_requested'])
found = True
except TypeError:
return
try:
if 'unexpected_balls' in data:
text += 'Unexpected: {} '.format(
data['unexpected_balls'])
found = True
except TypeError:
return
if not found:
return
text += " " + str(data)
painter.save()
painter.setRenderHint(QPainter.Antialiasing, True)
painter.setPen(QPen(QColor(100, 100, 100), 1, Qt.SolidLine))
if color:
painter.setBrush(QBrush(QColor(*color), Qt.SolidPattern))
elif state is True:
painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
elif state is False:
painter.setBrush(QBrush(QColor(255, 255, 255), Qt.SolidPattern))
elif isinstance(balls, int):
painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
num_circles = balls
x_offset = 0
for _ in range(num_circles):
painter.drawEllipse(
view.rect.x() + x_offset, view.rect.y(), 14, 14)
x_offset += 20
if text:
painter.drawText(view.rect.x() + x_offset, view.rect.y() + 12,
str(text))
self.size = QSize(len(text) * 10, 20)
painter.restore()
def sizeHint(self, QStyleOptionViewItem, QModelIndex):
if self.size:
return self.size
else:
# Calling super() here seems to result in a segfault on close sometimes.
# return super().sizeHint(QStyleOptionViewItem, QModelIndex)
return QSize(80, 20)
class DeviceWindow(QWidget):
__slots__ = ["mpfmn", "ui", "model", "log", "already_hidden", "added_index", "device_states",
"device_type_widgets", "_debug_enabled"]
def __init__(self, mpfmon):
self.mpfmon = mpfmon
super().__init__()
self.ui = None
self.model = None
self.draw_ui()
self.attach_model()
self.attach_signals()
self.log = logging.getLogger('Core')
self.already_hidden = False
self.added_index = 0
self.device_states = dict()
self.device_type_widgets = dict()
self._debug_enabled = self.log.isEnabledFor(logging.DEBUG)
def draw_ui(self):
# Load ui file from ./ui/
ui_path = os.path.join(os.path.dirname(__file__), "ui", "searchable_tree.ui")
self.ui = uic.loadUi(ui_path, self)
self.ui.setWindowTitle('Devices')
self.ui.move(self.mpfmon.local_settings.value('windows/devices/pos',
QPoint(200, 200)))
self.ui.resize(self.mpfmon.local_settings.value('windows/devices/size',
QSize(300, 600)))
# Disable option "Sort", select first item.
# TODO: Store and load selected sort index to local_settings
self.ui.sortComboBox.model().item(0).setEnabled(False)
self.ui.sortComboBox.setCurrentIndex(1)
self.ui.treeView.setAlternatingRowColors(True)
def attach_signals(self):
assert (self.ui is not None)
self.ui.treeView.expanded.connect(self.resize_columns_to_content)
self.ui.treeView.collapsed.connect(self.resize_columns_to_content)
self.ui.filterLineEdit.textChanged.connect(self.filter_text)
self.ui.sortComboBox.currentIndexChanged.connect(self.change_sort)
def attach_model(self):
assert (self.ui is not None)
self.treeview = self.ui.treeView
self.model = QStandardItemModel()
self.model.setHorizontalHeaderLabels(["Device", "Data"])
self.treeview.setDragDropMode(QAbstractItemView.DragOnly)
# self.treeview.setItemDelegateForColumn(1, DeviceDelegate())
# Resizing to contents causes huge performance losses. Only resize when rows expanded or collapsed.
# self.treeview.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.filtered_model = QSortFilterProxyModel(self)
self.filtered_model.setSourceModel(self.model)
self.filtered_model.setRecursiveFilteringEnabled(True)
self.filtered_model.setFilterCaseSensitivity(False)
self.treeview.setModel(self.filtered_model)
def resize_columns_to_content(self):
self.ui.treeView.resizeColumnToContents(0)
self.ui.treeView.resizeColumnToContents(1)
def process_device_update(self, name, state, changes, type):
del changes
if self._debug_enabled:
self.log.debug("Device Update: %s.%s: %s", type, name, state)
if type not in self.device_states:
self.device_states[type] = dict()
item = QStandardItem(type)
self.device_type_widgets[type] = item
self.model.appendRow([item, QStandardItem(), QStandardItem(str(time.perf_counter()))])
if name not in self.device_states[type]:
node = DeviceNode()
node.setName(name)
node.setData(state)
node.setType(type)
self.device_states[type][name] = node
self.device_type_widgets[type].appendRow(node.get_row())
self.mpfmon.pf.create_widget_from_config(node, type, name)
else:
self.device_states[type][name].setData(state)
self.ui.treeView.setColumnHidden(2, True)
def filter_text(self, string):
wc_string = "*" + str(string) + "*"
self.filtered_model.setFilterWildcard(wc_string)
self.ui.treeView.resizeColumnToContents(0)
self.ui.treeView.resizeColumnToContents(1)
def change_sort(self, index=1):
self.model.layoutAboutToBeChanged.emit()
self.filtered_model.beginResetModel()
# This is a bit sloppy and probably should be reworked.
if index == 1: # Received up
self.filtered_model.sort(2, Qt.AscendingOrder)
elif index == 2: # Received down
self.filtered_model.sort(2, Qt.DescendingOrder)
elif index == 3: # Name up
self.filtered_model.sort(0, Qt.AscendingOrder)
elif index == 4: # Name down
self.filtered_model.sort(0, Qt.DescendingOrder)
self.filtered_model.endResetModel()
self.model.layoutChanged.emit()
def closeEvent(self, event):
super().closeEvent(event)
self.mpfmon.write_local_settings()
event.accept()
self.mpfmon.check_if_quit()
| missionpinball/mpf-monitor | mpfmonitor/core/devices.py | Python | mit | 13,716 | 0.000948 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ING UNI CT Telegram Bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
from utils import utility
from unidecode import unidecode
import json
# need to load configuration from file
ROOMS_FILE='utils/rooms.json'
COURSES_FILE='utils/courses.json'
PROFESSORS_FILE='utils/professors.json'
CLASSROOMS_FILE='utils/classrooms.json'
EXAMS_FILE='utils/exams.json'
## Other files
TOKEN_FILE='token.conf'
LOG_FILE='ingbot.log'
##global variables
rooms={}
courses={}
professors={}
classrooms={}
exams=[]
# loading token from file
tokenconf = open(TOKEN_FILE, 'r').read()
tokenconf = tokenconf.replace("\n", "")
TOKEN = tokenconf
# Enable logging
logging.basicConfig(filename=LOG_FILE,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
#define command handlers
def start_handler(bot, update):
newmsg = "Ing UniCT Telegram Bot\nLista Comandi:\n\t"\
"/orari <cld> <anno> Orario delle lezioni\n\t"\
"/esami <id cds> Elenco degli esami\n\t"\
"/corso <nome>\n\t/prof <cognome o nome> Informazioni sul professore\n\t"\
"/insegnamento <nome_insegnamento> Informazioni su un insegnamento\n\t"\
"/aula <numero> Indicazioni sull'ubicazione di un'aula\n\t"\
"/segreteria Informazioni sugli orari della segreteria studenti\n\t"\
"/cus Informazioni sul CUS"
newmsg += "\n\n\nATTENZIONE : Tutti i dati sono ricavati dal sito di Ingegneria,"\
" il bot non ha alcuna responsabilita' sulla corretteza di questi dati!!!\n"
    developmode = '\n\n\n Il bot è in via di sviluppo se vuoi contribuire vai su:'\
                  ' https://github.com/gabrik/ingunict-bot\nOppure contatta @Gakbri '
bot.sendMessage(update.message.chat_id, text=newmsg+developmode)
def help_handler(bot, update):
    start_handler(bot, update)
def schedule_handler(bot, update):
bot.sendMessage(update.message.chat_id, text='Orari temporaneamente non disponibili')
def professors_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)>=2:
professor_name = unidecode(" ".join(msg[1:]))
if len(professor_name)>3:
search_result = [professor for professor in professors if professor_name.upper() in professor['Nome'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d professori '\
'con la tua ricerca' % len(search_result))
descr=""
for p in search_result:
descr += "Nome: %s\nQualifica: %s\nDipartimento: %s\n" % (p['Nome'], p['Qualifica'], p['Dipartimento'])
descr+= "Indirizzo: %s\nEmail: %s\nTelefono: %s\n" % (p['Indirizzo'], p['Email'], p['Telefono'])
descr+= "Sito: %s\nSSD: %s\n\n" % (p['Sito'], p['SSD'])
bot.sendMessage(update.message.chat_id,text= descr)
else:
bot.sendMessage(update.message.chat_id, text='Professore non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire il professore su cui ottenere informazioni!\n/prof <nome cognome>")
def classroom_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
insegnamento_name=unidecode(" ".join(msg[1:]))
if len(insegnamento_name)>3:
search_result=[insegnamento for insegnamento in classrooms if insegnamento_name.upper() in insegnamento['Nome'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d insegnamenti con la tua ricerca' % len(search_result))
descr=""
for m in search_result:
doc=''.join([docente+'\n' for docente in m['Docenti']])
descr += "Nome: %s\nSemestre: %s\nCorso di Laurea: %s\n" % (m['Nome'], m['Semestre'], m['Corso di Laurea'])
descr+= "Anno: %s\nDocenti: %s\nSSD: %s\n" % (m['Anno'], doc, m['SSD'])
descr+= "CFU: %s\n\n" % (m['CFU'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text='Insegnamento non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire l'insegnamento su cui ottenere informazioni!\n/insegnamento <nome>")
def room_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
key = msg[1].upper().strip()
if key in rooms:
aula = rooms[key]
bot.sendMessage(update.message.chat_id, text='Aula %s , Edificio %s, Piano %s' % (key, aula['Edificio'], aula['Piano']))
else:
bot.sendMessage(update.message.chat_id, text='Aula non trovata')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire l'aula su cui ottenere informazioni!\n/aula <nome>")
def courses_handler(bot,update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
nome_corso = unidecode(msg[1])
if len(nome_corso)>3:
search_result = [corso for corso in courses if nome_corso.upper() in corso['Denominazione'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d corsi con la tua ricerca' % len(search_result))
descr=""
for corso in search_result:
descr+="Nome: %s\nID: %s\n" % (corso['Denominazione'], corso['ID'])
descr+="Codice: %s\nOrdinamento: %s\n Tipo: %s\n\n" % (corso['Codice'], corso['Ordinamento'], corso['Tipo'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text='Corso non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire il corso su cui ottenere informazioni!\n/corso <nome>")
def exams_handler(bot,update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
cds_id = unidecode(msg[1])
search_result=[esame for esame in exams if cds_id==str(esame['CDS_ID'])]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d esami con la tua ricerca' % len(search_result))
for esame in search_result:
descr="Materia: %s\nData: %s\nOra: %s\n" % (esame['Insegnamento'], esame['Data'], esame['Ora'])
descr+='Aula: %s\n Scaglione: %s\nTipo: %s\nTipo Appello:%s\n\n' % (esame['Aula'], esame['Scaglione'], esame['Tipo Esame'], esame['Appello'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text="Corso non trovato verifica di aver inserito l'id corretto")
else:
bot.sendMessage(update.message.chat_id, text="Inserisci l'id del corso, lo puoi conoscere usando il comando corsi")
def secretary_handler(bot, update):
newmsg = "Carriera Studenti - Settore tecnico - scientifico\n\nVia S. Sofia, 64 - Edificio 11 C.U. 95135 Catania\n\nTel.:095-738 6104/2051"
newmsg+= "\n\n Orari\n\n"
newmsg+= "Lunedì 10.00 - 12.30\n"
newmsg= "Martedì 10.00 - 12.30 e 15.00 - 16.30\n"
newmsg+= "Mercoledì Chiusura\n"
newmsg+= "Giovedì 10.00 - 12.30 e 15.00 - 16.30\n"
newmsg+= "Venerdì 10.00 - 12.30\n"
newmsg+= "\n\n Telefonare solo nelle fasce orarie di apertura"
newmsg+= "\n\n Mail: settore.tecnicoscientifico@unict.it"
newmsg+= "\n\n Per ulteriori infomazioni : http://www.unict.it/content/coordinamento-settori-carriere-studenti"
bot.sendMessage(update.message.chat_id, text=newmsg)
def cus_handler(bot, update):
newmsg="CUS CATANIA:\n\nViale A. Doria n° 6 - 95125 Catania\n\ntel. 095336327- fax 095336478\n\n"\
"CUS Catania - info@cuscatania.it\n\n"\
"Segreteria studenti:\ntel. 095/336327 (int. 0) - segreteriastudenti@cuscatania.it "
bot.sendMessage(update.message.chat_id, text=newmsg)
def error_handler(bot, update, error):
logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
# loading data from files
logger.info('[LOADING] rooms from "%s"' % ROOMS_FILE)
global rooms
rooms = utility.load_rooms(ROOMS_FILE)
logger.info('[ DONE ] loading rooms')
logger.info('[LOADING] courses from "%s"' % COURSES_FILE)
global courses
courses = utility.load_courses(COURSES_FILE)
logger.info('[ DONE ] loading courses')
logger.info('[LOADING] professors from "%s"' % PROFESSORS_FILE)
global professors
professors = utility.load_professors(PROFESSORS_FILE)
logger.info('[ DONE ] loading professors')
logger.info('[LOADING] classrooms from "%s"' % CLASSROOMS_FILE)
global classrooms
classrooms = utility.load_classrooms(CLASSROOMS_FILE)
logger.info('[ DONE ] loading classrooms')
logger.info('[LOADING] exams from "%s"' % EXAMS_FILE)
global exams
exams = utility.load_exams(EXAMS_FILE)
logger.info('[ DONE ] loading exams')
#setting up bot
updater = Updater(TOKEN)
dp = updater.dispatcher
#setting handlers
dp.add_handler(CommandHandler("start", start_handler))
dp.add_handler(CommandHandler("help", start_handler))
dp.add_handler(CommandHandler("prof", professors_handler))
dp.add_handler(CommandHandler("corso", courses_handler))
dp.add_handler(CommandHandler("esami", exams_handler))
dp.add_handler(CommandHandler("orari", schedule_handler))
dp.add_handler(CommandHandler("insegnamento", classroom_handler))
dp.add_handler(CommandHandler("aula", room_handler))
dp.add_handler(CommandHandler("segreteria", secretary_handler))
dp.add_handler(CommandHandler("cus", cus_handler))
dp.add_error_handler(error_handler)
updater.start_polling()
logger.info('[ INFO ] Bot started!')
updater.idle()
if __name__ == '__main__':
main()
| gabrik/ingunict-bot | ingbot.py | Python | apache-2.0 | 9,550 | 0.030389 |
from setuptools import setup, find_packages
with open('README.rst') as f:
description = f.read()
setup(
name='knitty-gritty',
version='0.0.2',
description='A tool for managing knitting machine patterns',
long_description=description,
url='https://github.com/mhallin/knitty-gritty',
author='Magnus Hallin',
author_email='mhallin@gmail.com',
license='BSD',
packages=find_packages(),
install_requires=[
'click>=2.4,<2.5',
'Pillow>=2.5,<2.6',
'pyserial>=2.7,<2.8',
],
extras_require={
'dev': [
'flake8>=2.2,<2.3',
'mccabe>=0.2,<0.3',
'pep8>=1.5,<1.6',
'pip-tools>=0.3,<0.4',
'pyflakes>=0.8.1,<0.9',
'wheel>=0.24,<0.25',
],
},
entry_points={
'console_scripts': [
'knitty-gritty = knittygritty.main:cli'
],
},
)
| mhallin/knitty-gritty | setup.py | Python | bsd-3-clause | 918 | 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "stockpile"
__summary__ = "Generic file storage abstraction"
__uri__ = "https://github.com/dstufft/stockpile/"
__version__ = "0.1"
__author__ = "Donald Stufft"
__email__ = "donald.stufft@gmail.com"
__license__ = "Simplified BSD"
__copyright__ = "Copyright 2012 Donald Stufft"
| pombredanne/stockpile | stockpile/__about__.py | Python | bsd-2-clause | 539 | 0 |
'''
Integration Test for scheduler reboot VM in HA mode.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import test_stub
import time
import os
vm = None
node1_ip = None
node2_ip = None
def test():
global vm
global node1_ip
vm = test_stub.create_basic_vm()
vm.check()
start_date = int(time.time())
schd = vm_ops.reboot_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_reboot_vm_scheduler', start_date+60, 30)
node1_ip = os.environ.get('node1Ip')
node2_ip = os.environ.get('node2Ip')
test_util.test_logger("shutdown node: %s" % (node1_ip))
cmd = "init 0"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
test_util.test_logger("wait for 2 minutes to see if http api still works well")
time.sleep(180)
test_stub.exercise_connection(600)
time.sleep(180)
scheduler_execution_count = 0
for i in range(0, 30):
for j in range(0, 6):
if test_lib.lib_find_in_remote_management_server_log(node1_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count += 1
if test_lib.lib_find_in_remote_management_server_log(node2_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count -= 1
if abs(scheduler_execution_count) < 5:
        test_util.test_fail('VM reboot scheduler is expected to execute more than 5 times, while it only executed %s times' % (scheduler_execution_count))
schd_ops.delete_scheduler(schd.uuid)
vm.destroy()
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
test_util.test_pass('Scheduler Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global node1_ip
if vm:
try:
vm.destroy()
except:
pass
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
| zstackio/zstack-woodpecker | integrationtest/vm/ha/test_one_node_shutdown_with_scheduler.py | Python | apache-2.0 | 2,920 | 0.004452 |
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
class stock_internal_transfer(osv.osv):
_name = 'stock.internal.transfer'
_inherit = ['mail.thread', 'ir.needaction_mixin']
# def create(self, cr, uid, vals, context=None):
# data = super(stock_internal_transfer, self).create(cr, uid, vals, context=context)
# if self.pool.get('res.users').browse(cr,uid,uid).company_id.transit_location_id:
# raise osv.except_osv(_('Error!'), _('Please setup your stock transit location in Setting - Warehouse'))
# return data
def action_cancel(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'cancel'
})
return True
def action_draft(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'draft'
})
return True
def action_send(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'send'
})
return True
def action_receive(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'done'
})
return True
def do_enter_wizard(self, cr, uid, ids, context):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
created_id = self.pool['wizard.stock.internal.transfer'].create(cr, uid, {'transfer_id': len(ids) and ids[0] or False}, context)
return self.pool['wizard.stock.internal.transfer'].wizard_view(cr, uid, created_id, context)
_columns = {
'name' : fields.char('Reference', track_visibility='onchange'),
'date' : fields.datetime('Date', track_visibility='onchange'),
'source_warehouse_id' : fields.many2one('stock.warehouse', 'Source Warehouse', track_visibility='onchange'),
'dest_warehouse_id' : fields.many2one('stock.warehouse', 'Destination Warehouse', track_visibility='onchange'),
'state' : fields.selection([('cancel', 'Cancel'), ('draft', 'Draft'), ('send', 'Send'), ('done', 'Done')], 'Status', track_visibility='onchange'),
'line_ids' : fields.one2many('stock.internal.transfer.line', 'transfer_id', 'Stock Internal Transfer Line'),
'picking_ids' : fields.one2many('stock.picking', 'transfer_id', 'Picking'),
'backorder_id' : fields.many2one('stock.internal.transfer', 'Backorder'),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.internal.transfer'),
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state' : lambda *a: 'draft',
}
class stock_internal_transfer_line(osv.osv):
_name = 'stock.internal.transfer.line'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def product_id_change(self, cr, uid, ids, product_id, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom_id': False,
}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uom_id': product_uom_id}
return result
_columns = {
'name' : fields.char('Reference', track_visibility='onchange'),
'product_id' : fields.many2one('product.product', 'Product', track_visibility='onchange'),
'product_qty' : fields.float('Quantity', track_visibility='onchange'),
'product_uom_id' : fields.many2one('product.uom', 'Unit of Measure', track_visibility='onchange'),
'state' : fields.selection([('cancel', 'Cancel'), ('draft', 'Draft'), ('send', 'Send'), ('done', 'Done')], 'Status', track_visibility='onchange'),
'transfer_id' : fields.many2one('stock.internal.transfer', 'Transfer', track_visibility='onchange'),
}
_defaults = {
'state' : lambda *a: 'draft',
'product_qty' : lambda *a: 1,
} | dendyyangky/sgeede_b2b | sgeede_internal_transfer/stock_internal_transfer.py | Python | unlicense | 4,237 | 0.029974 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from earwigbot import exceptions
from earwigbot import wiki
__all__ = ["Task"]
class Task:
"""
**EarwigBot: Base Bot Task**
This package provides built-in wiki bot "tasks" EarwigBot runs. Additional
tasks can be installed as plugins in the bot's working directory.
This class (import with ``from earwigbot.tasks import Task``) can be
subclassed to create custom bot tasks.
To run a task, use :py:meth:`bot.tasks.start(name, **kwargs)
<earwigbot.managers.TaskManager.start>`. ``**kwargs`` get passed to the
Task's :meth:`run` method.
"""
name = None
number = 0
def __init__(self, bot):
"""Constructor for new tasks.
This is called once immediately after the task class is loaded by
the task manager (in :py:meth:`tasks.load()
<earwigbot.managers._ResourceManager.load>`). Don't override this
        directly; if you do, remember to call ``super().__init__()`` first.
Use :py:meth:`setup` for typical task-init/setup needs.
"""
self.bot = bot
self.config = bot.config
self.logger = bot.tasks.logger.getChild(self.name)
number = self.config.tasks.get(self.name, {}).get("number")
if number is not None:
self.number = number
self.setup()
def __repr__(self):
"""Return the canonical string representation of the Task."""
res = "Task(name={0!r}, number={1!r}, bot={2!r})"
return res.format(self.name, self.number, self.bot)
def __str__(self):
"""Return a nice string representation of the Task."""
res = "<Task {0} ({1}) of {2}>"
return res.format(self.name, self.number, self.bot)
def setup(self):
"""Hook called immediately after the task is loaded.
Does nothing by default; feel free to override.
"""
pass
def run(self, **kwargs):
"""Main entry point to run a given task.
This is called directly by :py:meth:`tasks.start()
<earwigbot.managers.TaskManager.start>` and is the main way to make a
task do stuff. *kwargs* will be any keyword arguments passed to
:py:meth:`~earwigbot.managers.TaskManager.start`, which are entirely
optional.
"""
pass
def unload(self):
"""Hook called immediately before the task is unloaded.
Does nothing by default; feel free to override.
"""
pass
def make_summary(self, comment):
"""Make an edit summary by filling in variables in a config value.
:py:attr:`config.wiki["summary"] <earwigbot.config.BotConfig.wiki>` is
used, where ``$2`` is replaced by the main summary body, given by the
*comment* argument, and ``$1`` is replaced by the task number.
If the config value is not found, we'll just return *comment* as-is.
"""
try:
summary = self.bot.config.wiki["summary"]
except KeyError:
return comment
return summary.replace("$1", str(self.number)).replace("$2", comment)
def shutoff_enabled(self, site=None):
"""Return whether on-wiki shutoff is enabled for this task.
We check a certain page for certain content. This is determined by
our config file: :py:attr:`config.wiki["shutoff"]["page"]
<earwigbot.config.BotConfig.wiki>` is used as the title, with any
embedded ``$1`` replaced by our username and ``$2`` replaced by the
task number; and :py:attr:`config.wiki["shutoff"]["disabled"]
<earwigbot.config.BotConfig.wiki>` is used as the content.
If the page has that exact content or the page does not exist, then
shutoff is "disabled", meaning the bot is supposed to run normally, and
we return ``False``. If the page's content is something other than
what we expect, shutoff is enabled, and we return ``True``.
If a site is not provided, we'll try to use :py:attr:`self.site <site>`
if it's set. Otherwise, we'll use our default site.
"""
if not site:
if hasattr(self, "site"):
site = getattr(self, "site")
else:
site = self.bot.wiki.get_site()
try:
cfg = self.config.wiki["shutoff"]
except KeyError:
return False
title = cfg.get("page", "User:$1/Shutoff/Task $2")
username = site.get_user().name
title = title.replace("$1", username).replace("$2", str(self.number))
page = site.get_page(title)
try:
content = page.get()
except exceptions.PageNotFoundError:
return False
if content == cfg.get("disabled", "run"):
return False
self.logger.warn("Emergency task shutoff has been enabled!")
return True
| earwig/earwigbot | earwigbot/tasks/__init__.py | Python | mit | 5,993 | 0.000167 |
from ... import Something
from . import data
try:
from ... import Lala
except ImportError:
pass | arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/test/regrtest_data/beyond_top/__init__.py | Python | apache-2.0 | 108 | 0.009259 |
# pylint:disable-msg=R0201
"""docstring"""
__revision__ = ''
class Interface:
"""base class for interfaces"""
class IMachin(Interface):
"""docstring"""
def truc(self):
"""docstring"""
def troc(self, argument):
"""docstring"""
class Correct1:
"""docstring"""
__implements__ = IMachin
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self, argument):
"""docstring"""
pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self, argument):
"""docstring"""
print argument
class MissingMethod:
"""docstring"""
__implements__ = IMachin,
def __init__(self):
pass
def troc(self, argument):
"""docstring"""
print argument
def other(self):
"""docstring"""
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self):
"""docstring"""
pass
class InterfaceCantBeFound:
"""docstring"""
__implements__ = undefined
def __init__(self):
"""only to make pylint happier"""
def please(self):
"""public method 1/2"""
def besilent(self):
"""public method 2/2"""
class InterfaceCanNowBeFound:
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
def __init__(self):
"""only to make pylint happier"""
def please(self):
"""public method 1/2"""
def besilent(self):
"""public method 2/2"""
| dbbhattacharya/kitsune | vendor/packages/pylint/test/input/func_interfaces.py | Python | bsd-3-clause | 1,802 | 0.010544 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-15 14:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0008_auto_20171115_1443'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='level',
field=models.PositiveIntegerField(default=1),
),
]
| Petrole/MaturePyRobots | WebPyRobot/backend/migrations/0009_userprofile_level.py | Python | gpl-3.0 | 460 | 0 |
from office import Office
from bank import Bank
bank = Bank("Open Bank")
office = Office("Timisoara", bank)
office.open()
| vtemian/university_projects | practic_stage/hmw7/main.py | Python | apache-2.0 | 125 | 0 |
#
# common.py
#
# Copyright (C) 2009 Justin Noah <justinnoah@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def get_resource(filename):
import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename))
| justinnoah/autobot | autobot/common.py | Python | apache-2.0 | 1,761 | 0.001136 |
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
===================
dMRI: Preprocessing
===================
Introduction
============
This script, dmri_preprocessing.py, demonstrates how to prepare dMRI data
for tractography and connectivity analysis with nipype.
We perform this analysis using the FSL course data, which can be acquired from
here: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
Can be executed in command line using ``python dmri_preprocessing.py``
Import necessary modules from nipype.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as niu # utility
import nipype.algorithms.misc as misc
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces import fsl
from nipype.interfaces import ants
"""
Load the specific nipype workflows for preprocessing of dMRI data:
:class:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline`,
as the data include a *b0* volume with reverse encoding direction
(*P>>>A*, or *y*), in contrast with the general acquisition encoding
that is *A>>>P* or *-y* (in RAS systems).
"""
from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline, remove_bias
"""
Map field names into individual subject runs
"""
info = dict(dwi=[['subject_id', 'dwidata']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']],
dwi_rev=[['subject_id', 'nodif_PA']])
infosource = pe.Node(interface=niu.IdentityInterface(fields=['subject_id']),
name="infosource")
# Set the subject 1 identifier in subject_list,
# we choose the preproc dataset as it contains uncorrected files.
subject_list = ['subj1_preproc']
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``infosource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`~nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
outfields=list(info.keys())), name='datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fdt1')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz',
dwi_rev='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
An inputnode is used to pass the data obtained by the data grabber to the
actual processing functions
"""
inputnode = pe.Node(niu.IdentityInterface(fields=["dwi", "bvecs", "bvals",
"dwi_rev"]), name="inputnode")
"""
Setup for dMRI preprocessing
============================
In this section we initialize the appropriate workflow for preprocessing of
diffusion images.
Artifacts correction
--------------------
We will use the combination of ``topup`` and ``eddy`` as suggested by FSL.
In order to configure the susceptibility distortion correction (SDC), we first
write the specific parameters of our echo-planar imaging (EPI) images.
Particularly, we look into the ``acqparams.txt`` file of the selected subject
to gather the encoding direction, acceleration factor (in parallel sequences
it is > 1), and readout time or echospacing.
"""
epi_AP = {'echospacing': 66.5e-3, 'enc_dir': 'y-'}
epi_PA = {'echospacing': 66.5e-3, 'enc_dir': 'y'}
prep = all_fsl_pipeline(epi_params=epi_AP, altepi_params=epi_PA)
"""
Bias field correction
---------------------
Finally, we set up a node to correct for a single multiplicative bias field
computed from the *b0* image, as suggested in [Jeurissen2014]_.
"""
bias = remove_bias()
"""
Connect nodes in workflow
=========================
We create a higher-level workflow to connect the nodes. Note that the arguments
of the ``connect`` function are laid out in a non-standard style to aid
readability.
"""
wf = pe.Workflow(name="dMRI_Preprocessing")
wf.base_dir = os.path.abspath('preprocessing_dmri_tutorial')
wf.connect([
(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, prep, [('dwi', 'inputnode.in_file'),
('dwi_rev', 'inputnode.alt_file'),
('bvals', 'inputnode.in_bval'),
('bvecs', 'inputnode.in_bvec')]),
(prep, bias, [('outputnode.out_file', 'inputnode.in_file'),
('outputnode.out_mask', 'inputnode.in_mask')]),
(datasource, bias, [('bvals', 'inputnode.in_bval')])
])
"""
Run the workflow as command line executable
"""
if __name__ == '__main__':
wf.run()
wf.write_graph()
| BrainIntensive/OnlineBrainIntensive | resources/nipype/nipype/examples/dmri_preprocessing.py | Python | mit | 5,464 | 0.000549 |
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from distutils.version import LooseVersion
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton, QDialog)
from electrum_mona import version
from electrum_mona import constants
from electrum_mona import ecc
from electrum_mona.i18n import _
from electrum_mona.util import make_aiohttp_session
from electrum_mona.logging import Logger
from electrum_mona.network import Network
class UpdateCheck(QDialog, Logger):
url = "https://electrum-mona.org/version"
download_url = "https://electrum-mona.org"
VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
"MUJ1nBxpAzdGdNhTN1x3MCtyeBa4DbdqpK",
)
def __init__(self, *, latest_version=None):
QDialog.__init__(self)
self.setWindowTitle('Electrum - ' + _('Update Check'))
self.content = QVBoxLayout()
self.content.setContentsMargins(*[10]*4)
self.heading_label = QLabel()
self.content.addWidget(self.heading_label)
self.detail_label = QLabel()
self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
self.detail_label.setOpenExternalLinks(True)
self.content.addWidget(self.detail_label)
self.pb = QProgressBar()
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.content.addWidget(self.pb)
versions = QHBoxLayout()
versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
versions.addWidget(self.latest_version_label)
self.content.addLayout(versions)
self.update_view(latest_version)
self.update_check_thread = UpdateCheckThread()
self.update_check_thread.checked.connect(self.on_version_retrieved)
self.update_check_thread.failed.connect(self.on_retrieval_failed)
self.update_check_thread.start()
close_button = QPushButton(_("Close"))
close_button.clicked.connect(self.close)
self.content.addWidget(close_button)
self.setLayout(self.content)
self.show()
def on_version_retrieved(self, version):
self.update_view(version)
def on_retrieval_failed(self):
self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
self.pb.hide()
@staticmethod
def is_newer(latest_version):
return latest_version > LooseVersion(version.ELECTRUM_VERSION)
def update_view(self, latest_version=None):
if latest_version:
self.pb.hide()
self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
if self.is_newer(latest_version):
self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
self.detail_label.setText(_("You can download the new version from {}.").format(url))
else:
self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
self.detail_label.setText(_("You are already on the latest version of Electrum."))
else:
self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
self.detail_label.setText(_("Please wait while Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
checked = pyqtSignal(object)
failed = pyqtSignal()
def __init__(self):
QThread.__init__(self)
Logger.__init__(self)
self.network = Network.get_instance()
async def get_update_info(self):
# note: Use long timeout here as it is not critical that we get a response fast,
# and it's bad not to get an update notification just because we did not wait enough.
async with make_aiohttp_session(proxy=self.network.proxy, timeout=120) as session:
async with session.get(UpdateCheck.url) as result:
signed_version_dict = await result.json(content_type=None)
# example signed_version_dict:
# {
# "version": "3.9.9",
# "signatures": {
# "MRkEwoPcvSPaC5WNtQMa7NGPy2tBKbp3Bm": "H84UFTdaBswxTrNty0gLlWiQEQhJA2Se5xVdhR9zFirKYg966IXEkC7km6phIJq+2CT3KwvKuj8YKaSCy1fErwg="
# }
# }
version_num = signed_version_dict['version']
sigs = signed_version_dict['signatures']
for address, sig in sigs.items():
if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
continue
sig = base64.b64decode(sig)
msg = version_num.encode('utf-8')
if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
net=constants.BitcoinMainnet):
self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
break
else:
raise Exception('no valid signature for version announcement')
return LooseVersion(version_num.strip())
def run(self):
if not self.network:
self.failed.emit()
return
try:
update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), self.network.asyncio_loop).result()
except Exception as e:
self.logger.info(f"got exception: '{repr(e)}'")
self.failed.emit()
else:
self.checked.emit(update_info)
| wakiyamap/electrum-mona | electrum_mona/gui/qt/update_checker.py | Python | mit | 6,116 | 0.00327 |
"""Common code for Withings."""
import asyncio
from dataclasses import dataclass
import datetime
from datetime import timedelta
from enum import Enum, IntEnum
import logging
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from aiohttp.web import Response
import requests
from withings_api import AbstractWithingsApi
from withings_api.common import (
AuthFailedException,
GetSleepSummaryField,
MeasureGroupAttribs,
MeasureType,
MeasureTypes,
NotifyAppli,
SleepGetSummaryResponse,
UnauthorizedException,
query_measure_groups,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_WEBHOOK_ID,
HTTP_UNAUTHORIZED,
MASS_KILOGRAMS,
PERCENTAGE,
SPEED_METERS_PER_SECOND,
TIME_SECONDS,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.config_entry_oauth2_flow import (
AUTH_CALLBACK_PATH,
AbstractOAuth2Implementation,
LocalOAuth2Implementation,
OAuth2Session,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.network import get_url
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import dt
from . import const
from .const import Measurement
_LOGGER = logging.getLogger(const.LOG_NAMESPACE)
NOT_AUTHENTICATED_ERROR = re.compile(
f"^{HTTP_UNAUTHORIZED},.*",
re.IGNORECASE,
)
DATA_UPDATED_SIGNAL = "withings_entity_state_updated"
MeasurementData = Dict[Measurement, Any]
class NotAuthenticatedError(HomeAssistantError):
"""Raise when not authenticated with the service."""
class ServiceError(HomeAssistantError):
"""Raise when the service has an error."""
class UpdateType(Enum):
"""Data update type."""
POLL = "poll"
WEBHOOK = "webhook"
@dataclass
class WithingsAttribute:
"""Immutable class for describing withings sensor data."""
measurement: Measurement
measute_type: Enum
friendly_name: str
unit_of_measurement: str
icon: Optional[str]
platform: str
enabled_by_default: bool
update_type: UpdateType
@dataclass
class WithingsData:
"""Represents value and meta-data from the withings service."""
attribute: WithingsAttribute
value: Any
@dataclass
class WebhookConfig:
"""Config for a webhook."""
id: str
url: str
enabled: bool
@dataclass
class StateData:
"""State data held by data manager for retrieval by entities."""
unique_id: str
state: Any
WITHINGS_ATTRIBUTES = [
WithingsAttribute(
Measurement.WEIGHT_KG,
MeasureType.WEIGHT,
"Weight",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_MASS_KG,
MeasureType.FAT_MASS_WEIGHT,
"Fat Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_FREE_MASS_KG,
MeasureType.FAT_FREE_MASS,
"Fat Free Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.MUSCLE_MASS_KG,
MeasureType.MUSCLE_MASS,
"Muscle Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BONE_MASS_KG,
MeasureType.BONE_MASS,
"Bone Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEIGHT_M,
MeasureType.HEIGHT,
"Height",
const.UOM_LENGTH_M,
"mdi:ruler",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.TEMP_C,
MeasureType.TEMPERATURE,
"Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BODY_TEMP_C,
MeasureType.BODY_TEMPERATURE,
"Body Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SKIN_TEMP_C,
MeasureType.SKIN_TEMPERATURE,
"Skin Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_RATIO_PCT,
MeasureType.FAT_RATIO,
"Fat Ratio",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.DIASTOLIC_MMHG,
MeasureType.DIASTOLIC_BLOOD_PRESSURE,
"Diastolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SYSTOLIC_MMGH,
MeasureType.SYSTOLIC_BLOOD_PRESSURE,
"Systolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEART_PULSE_BPM,
MeasureType.HEART_RATE,
"Heart Pulse",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SPO2_PCT,
MeasureType.SP02,
"SP02",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HYDRATION,
MeasureType.HYDRATION,
"Hydration",
MASS_KILOGRAMS,
"mdi:water",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.PWV,
MeasureType.PULSE_WAVE_VELOCITY,
"Pulse Wave Velocity",
SPEED_METERS_PER_SECOND,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_BREATHING_DISTURBANCES_INTENSITY,
GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY,
"Breathing disturbances intensity",
"",
"",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_DEEP_DURATION_SECONDS,
GetSleepSummaryField.DEEP_SLEEP_DURATION,
"Deep sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOSLEEP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_SLEEP,
"Time to sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOWAKEUP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_WAKEUP,
"Time to wakeup",
TIME_SECONDS,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_AVERAGE,
GetSleepSummaryField.HR_AVERAGE,
"Average heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_MAX,
GetSleepSummaryField.HR_MAX,
"Maximum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_MIN,
GetSleepSummaryField.HR_MIN,
"Minimum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_LIGHT_DURATION_SECONDS,
GetSleepSummaryField.LIGHT_SLEEP_DURATION,
"Light sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_REM_DURATION_SECONDS,
GetSleepSummaryField.REM_SLEEP_DURATION,
"REM sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_AVERAGE,
GetSleepSummaryField.RR_AVERAGE,
"Average respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_MAX,
GetSleepSummaryField.RR_MAX,
"Maximum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_RESPIRATORY_RATE_MIN,
GetSleepSummaryField.RR_MIN,
"Minimum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SCORE,
GetSleepSummaryField.SLEEP_SCORE,
"Sleep score",
const.SCORE_POINTS,
"mdi:medal",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SNORING,
GetSleepSummaryField.SNORING,
"Snoring",
"",
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_SNORING_EPISODE_COUNT,
GetSleepSummaryField.SNORING_EPISODE_COUNT,
"Snoring episode count",
"",
None,
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_WAKEUP_COUNT,
GetSleepSummaryField.WAKEUP_COUNT,
"Wakeup count",
const.UOM_FREQUENCY,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_WAKEUP_DURATION_SECONDS,
GetSleepSummaryField.WAKEUP_DURATION,
"Wakeup time",
TIME_SECONDS,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
# Webhook measurements.
WithingsAttribute(
Measurement.IN_BED,
NotifyAppli.BED_IN,
"In bed",
"",
"mdi:bed",
BINARY_SENSOR_DOMAIN,
True,
UpdateType.WEBHOOK,
),
]
WITHINGS_MEASUREMENTS_MAP: Dict[Measurement, WithingsAttribute] = {
attr.measurement: attr for attr in WITHINGS_ATTRIBUTES
}
WITHINGS_MEASURE_TYPE_MAP: Dict[
Union[NotifyAppli, GetSleepSummaryField, MeasureType], WithingsAttribute
] = {attr.measute_type: attr for attr in WITHINGS_ATTRIBUTES}
class ConfigEntryWithingsApi(AbstractWithingsApi):
"""Withing API that uses HA resources."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
implementation: AbstractOAuth2Implementation,
):
"""Initialize object."""
self._hass = hass
self._config_entry = config_entry
self._implementation = implementation
self.session = OAuth2Session(hass, config_entry, implementation)
def _request(
self, path: str, params: Dict[str, Any], method: str = "GET"
) -> Dict[str, Any]:
"""Perform an async request."""
asyncio.run_coroutine_threadsafe(
self.session.async_ensure_token_valid(), self._hass.loop
)
access_token = self._config_entry.data["token"]["access_token"]
response = requests.request(
method,
f"{self.URL}/{path}",
params=params,
headers={"Authorization": f"Bearer {access_token}"},
)
return response.json()
def json_message_response(message: str, message_code: int) -> Response:
"""Produce common json output."""
return HomeAssistantView.json({"message": message, "code": message_code}, 200)
class WebhookAvailability(IntEnum):
"""Represents various statuses of webhook availability."""
SUCCESS = 0
CONNECT_ERROR = 1
HTTP_ERROR = 2
NOT_WEBHOOK = 3
class WebhookUpdateCoordinator:
"""Coordinates webhook data updates across listeners."""
def __init__(self, hass: HomeAssistant, user_id: int) -> None:
"""Initialize the object."""
self._hass = hass
self._user_id = user_id
self._listeners: List[CALLBACK_TYPE] = []
self.data: MeasurementData = {}
def async_add_listener(self, listener: CALLBACK_TYPE) -> Callable[[], None]:
"""Add a listener."""
self._listeners.append(listener)
@callback
def remove_listener() -> None:
self.async_remove_listener(listener)
return remove_listener
def async_remove_listener(self, listener: CALLBACK_TYPE) -> None:
"""Remove a listener."""
self._listeners.remove(listener)
def update_data(self, measurement: Measurement, value: Any) -> None:
"""Update the data object and notify listeners the data has changed."""
self.data[measurement] = value
self.notify_data_changed()
def notify_data_changed(self) -> None:
"""Notify all listeners the data has changed."""
for listener in self._listeners:
listener()
class DataManager:
"""Manage withing data."""
def __init__(
self,
hass: HomeAssistant,
profile: str,
api: ConfigEntryWithingsApi,
user_id: int,
webhook_config: WebhookConfig,
):
"""Initialize the data manager."""
self._hass = hass
self._api = api
self._user_id = user_id
self._profile = profile
self._webhook_config = webhook_config
self._notify_subscribe_delay = datetime.timedelta(seconds=5)
self._notify_unsubscribe_delay = datetime.timedelta(seconds=1)
self._is_available = True
self._cancel_interval_update_interval: Optional[CALLBACK_TYPE] = None
self._cancel_configure_webhook_subscribe_interval: Optional[
CALLBACK_TYPE
] = None
self._api_notification_id = f"withings_{self._user_id}"
self.subscription_update_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="subscription_update_coordinator",
update_interval=timedelta(minutes=120),
update_method=self.async_subscribe_webhook,
)
self.poll_data_update_coordinator = DataUpdateCoordinator[
Dict[MeasureType, Any]
](
hass,
_LOGGER,
name="poll_data_update_coordinator",
update_interval=timedelta(minutes=120)
if self._webhook_config.enabled
else timedelta(minutes=10),
update_method=self.async_get_all_data,
)
self.webhook_update_coordinator = WebhookUpdateCoordinator(
self._hass, self._user_id
)
self._cancel_subscription_update: Optional[Callable[[], None]] = None
self._subscribe_webhook_run_count = 0
@property
def webhook_config(self) -> WebhookConfig:
"""Get the webhook config."""
return self._webhook_config
@property
def user_id(self) -> int:
"""Get the user_id of the authenticated user."""
return self._user_id
@property
def profile(self) -> str:
"""Get the profile."""
return self._profile
def async_start_polling_webhook_subscriptions(self) -> None:
"""Start polling webhook subscriptions (if enabled) to reconcile their setup."""
self.async_stop_polling_webhook_subscriptions()
def empty_listener() -> None:
pass
self._cancel_subscription_update = (
self.subscription_update_coordinator.async_add_listener(empty_listener)
)
def async_stop_polling_webhook_subscriptions(self) -> None:
"""Stop polling webhook subscriptions."""
if self._cancel_subscription_update:
self._cancel_subscription_update()
self._cancel_subscription_update = None
async def _do_retry(self, func, attempts=3) -> Any:
"""Retry a function call.
Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.
"""
exception = None
for attempt in range(1, attempts + 1):
_LOGGER.debug("Attempt %s of %s", attempt, attempts)
try:
return await func()
except Exception as exception1: # pylint: disable=broad-except
await asyncio.sleep(0.1)
exception = exception1
continue
if exception:
raise exception
async def async_subscribe_webhook(self) -> None:
"""Subscribe the webhook to withings data updates."""
return await self._do_retry(self._async_subscribe_webhook)
async def _async_subscribe_webhook(self) -> None:
_LOGGER.debug("Configuring withings webhook")
# On first startup, perform a fresh re-subscribe. Withings stops pushing data
# if the webhook fails enough times but they don't remove the old subscription
# config. This ensures the subscription is setup correctly and they start
# pushing again.
if self._subscribe_webhook_run_count == 0:
_LOGGER.debug("Refreshing withings webhook configs")
await self.async_unsubscribe_webhook()
self._subscribe_webhook_run_count += 1
# Get the current webhooks.
response = await self._hass.async_add_executor_job(self._api.notify_list)
subscribed_applis = frozenset(
[
profile.appli
for profile in response.profiles
if profile.callbackurl == self._webhook_config.url
]
)
# Determine what subscriptions need to be created.
ignored_applis = frozenset({NotifyAppli.USER})
to_add_applis = frozenset(
[
appli
for appli in NotifyAppli
if appli not in subscribed_applis and appli not in ignored_applis
]
)
# Subscribe to each one.
for appli in to_add_applis:
_LOGGER.debug(
"Subscribing %s for %s in %s seconds",
self._webhook_config.url,
appli,
self._notify_subscribe_delay.total_seconds(),
)
# Withings will HTTP HEAD the callback_url and needs some downtime
# between each call or there is a higher chance of failure.
await asyncio.sleep(self._notify_subscribe_delay.total_seconds())
await self._hass.async_add_executor_job(
self._api.notify_subscribe, self._webhook_config.url, appli
)
async def async_unsubscribe_webhook(self) -> None:
"""Unsubscribe webhook from withings data updates."""
return await self._do_retry(self._async_unsubscribe_webhook)
async def _async_unsubscribe_webhook(self) -> None:
# Get the current webhooks.
response = await self._hass.async_add_executor_job(self._api.notify_list)
# Revoke subscriptions.
for profile in response.profiles:
_LOGGER.debug(
"Unsubscribing %s for %s in %s seconds",
profile.callbackurl,
profile.appli,
self._notify_unsubscribe_delay.total_seconds(),
)
# Quick calls to Withings can result in the service returning errors. Give them
# some time to cool down.
await asyncio.sleep(self._notify_subscribe_delay.total_seconds())
await self._hass.async_add_executor_job(
self._api.notify_revoke, profile.callbackurl, profile.appli
)
async def async_get_all_data(self) -> Optional[Dict[MeasureType, Any]]:
"""Update all withings data."""
try:
return await self._do_retry(self._async_get_all_data)
except Exception as exception:
# User is not authenticated.
if isinstance(
exception, (UnauthorizedException, AuthFailedException)
) or NOT_AUTHENTICATED_ERROR.match(str(exception)):
context = {
const.PROFILE: self._profile,
"userid": self._user_id,
"source": "reauth",
}
# Check if reauth flow already exists.
flow = next(
iter(
flow
for flow in self._hass.config_entries.flow.async_progress()
if flow.context == context
),
None,
)
if flow:
return
# Start a reauth flow.
await self._hass.config_entries.flow.async_init(
const.DOMAIN,
context=context,
)
return
raise exception
async def _async_get_all_data(self) -> Optional[Dict[MeasureType, Any]]:
_LOGGER.info("Updating all withings data")
return {
**await self.async_get_measures(),
**await self.async_get_sleep_summary(),
}
async def async_get_measures(self) -> Dict[MeasureType, Any]:
"""Get the measures data."""
_LOGGER.debug("Updating withings measures")
response = await self._hass.async_add_executor_job(self._api.measure_get_meas)
# Sort from oldest to newest.
groups = sorted(
query_measure_groups(
response, MeasureTypes.ANY, MeasureGroupAttribs.UNAMBIGUOUS
),
key=lambda group: group.created.datetime,
reverse=False,
)
return {
WITHINGS_MEASURE_TYPE_MAP[measure.type].measurement: round(
float(measure.value * pow(10, measure.unit)), 2
)
for group in groups
for measure in group.measures
}
async def async_get_sleep_summary(self) -> Dict[MeasureType, Any]:
"""Get the sleep summary data."""
_LOGGER.debug("Updating withing sleep summary")
now = dt.utcnow()
yesterday = now - datetime.timedelta(days=1)
yesterday_noon = datetime.datetime(
yesterday.year,
yesterday.month,
yesterday.day,
12,
0,
0,
0,
datetime.timezone.utc,
)
def get_sleep_summary() -> SleepGetSummaryResponse:
return self._api.sleep_get_summary(
lastupdate=yesterday_noon,
data_fields=[
GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY,
GetSleepSummaryField.DEEP_SLEEP_DURATION,
GetSleepSummaryField.DURATION_TO_SLEEP,
GetSleepSummaryField.DURATION_TO_WAKEUP,
GetSleepSummaryField.HR_AVERAGE,
GetSleepSummaryField.HR_MAX,
GetSleepSummaryField.HR_MIN,
GetSleepSummaryField.LIGHT_SLEEP_DURATION,
GetSleepSummaryField.REM_SLEEP_DURATION,
GetSleepSummaryField.RR_AVERAGE,
GetSleepSummaryField.RR_MAX,
GetSleepSummaryField.RR_MIN,
GetSleepSummaryField.SLEEP_SCORE,
GetSleepSummaryField.SNORING,
GetSleepSummaryField.SNORING_EPISODE_COUNT,
GetSleepSummaryField.WAKEUP_COUNT,
GetSleepSummaryField.WAKEUP_DURATION,
],
)
response = await self._hass.async_add_executor_job(get_sleep_summary)
# Set the default to empty lists.
raw_values: Dict[GetSleepSummaryField, List[int]] = {
field: [] for field in GetSleepSummaryField
}
# Collect the raw data.
for serie in response.series:
data = serie.data
for field in GetSleepSummaryField:
raw_values[field].append(data._asdict()[field.value])
values: Dict[GetSleepSummaryField, float] = {}
def average(data: List[int]) -> float:
return sum(data) / len(data)
def set_value(field: GetSleepSummaryField, func: Callable) -> None:
non_nones = [
value for value in raw_values.get(field, []) if value is not None
]
values[field] = func(non_nones) if non_nones else None
set_value(GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY, average)
set_value(GetSleepSummaryField.DEEP_SLEEP_DURATION, sum)
set_value(GetSleepSummaryField.DURATION_TO_SLEEP, average)
set_value(GetSleepSummaryField.DURATION_TO_WAKEUP, average)
set_value(GetSleepSummaryField.HR_AVERAGE, average)
set_value(GetSleepSummaryField.HR_MAX, average)
set_value(GetSleepSummaryField.HR_MIN, average)
set_value(GetSleepSummaryField.LIGHT_SLEEP_DURATION, sum)
set_value(GetSleepSummaryField.REM_SLEEP_DURATION, sum)
set_value(GetSleepSummaryField.RR_AVERAGE, average)
set_value(GetSleepSummaryField.RR_MAX, average)
set_value(GetSleepSummaryField.RR_MIN, average)
set_value(GetSleepSummaryField.SLEEP_SCORE, max)
set_value(GetSleepSummaryField.SNORING, average)
set_value(GetSleepSummaryField.SNORING_EPISODE_COUNT, sum)
set_value(GetSleepSummaryField.WAKEUP_COUNT, sum)
set_value(GetSleepSummaryField.WAKEUP_DURATION, average)
return {
WITHINGS_MEASURE_TYPE_MAP[field].measurement: round(value, 4)
if value is not None
else None
for field, value in values.items()
}
async def async_webhook_data_updated(self, data_category: NotifyAppli) -> None:
"""Handle scenario when data is updated from a webook."""
_LOGGER.debug("Withings webhook triggered")
if data_category in {
NotifyAppli.WEIGHT,
NotifyAppli.CIRCULATORY,
NotifyAppli.SLEEP,
}:
await self.poll_data_update_coordinator.async_request_refresh()
elif data_category in {NotifyAppli.BED_IN, NotifyAppli.BED_OUT}:
self.webhook_update_coordinator.update_data(
Measurement.IN_BED, data_category == NotifyAppli.BED_IN
)
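# Illustrative sketch (not part of the original integration): the None-aware
# aggregation that async_get_sleep_summary() above applies to each sleep field,
# reduced to plain Python. The field names, sample rows, and aggregator labels
# below are hypothetical stand-ins for the Withings sleep-summary series.
def _aggregate_sleep_series(series, aggregators):
    raw = {field: [row.get(field) for row in series] for field in aggregators}
    def _average(values):
        return sum(values) / len(values)
    def _reduce(values, func):
        non_nones = [value for value in values if value is not None]
        return func(non_nones) if non_nones else None
    funcs = {"sum": sum, "average": _average, "max": max}
    return {field: _reduce(raw[field], funcs[name]) for field, name in aggregators.items()}
assert _aggregate_sleep_series(
    [
        {"deep_sleep_duration": 3600, "hr_average": 52, "sleep_score": 78},
        {"deep_sleep_duration": 1800, "hr_average": None, "sleep_score": 81},
    ],
    {"deep_sleep_duration": "sum", "hr_average": "average", "sleep_score": "max"},
) == {"deep_sleep_duration": 5400, "hr_average": 52.0, "sleep_score": 81}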
def get_attribute_unique_id(attribute: WithingsAttribute, user_id: int) -> str:
"""Get a entity unique id for a user's attribute."""
return f"withings_{user_id}_{attribute.measurement.value}"
async def async_get_entity_id(
hass: HomeAssistant, attribute: WithingsAttribute, user_id: int
) -> Optional[str]:
"""Get an entity id for a user's attribute."""
entity_registry: EntityRegistry = (
await hass.helpers.entity_registry.async_get_registry()
)
unique_id = get_attribute_unique_id(attribute, user_id)
entity_id = entity_registry.async_get_entity_id(
attribute.platform, const.DOMAIN, unique_id
)
if entity_id is None:
_LOGGER.error("Cannot find entity id for unique_id: %s", unique_id)
return None
return entity_id
class BaseWithingsSensor(Entity):
"""Base class for withings sensors."""
def __init__(self, data_manager: DataManager, attribute: WithingsAttribute) -> None:
"""Initialize the Withings sensor."""
self._data_manager = data_manager
self._attribute = attribute
self._profile = self._data_manager.profile
self._user_id = self._data_manager.user_id
self._name = f"Withings {self._attribute.measurement.value} {self._profile}"
self._unique_id = get_attribute_unique_id(self._attribute, self._user_id)
self._state_data: Optional[Any] = None
@property
def should_poll(self) -> bool:
"""Return False to indicate HA should not poll for changes."""
return False
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def available(self) -> bool:
"""Return True if entity is available."""
if self._attribute.update_type == UpdateType.POLL:
return self._data_manager.poll_data_update_coordinator.last_update_success
if self._attribute.update_type == UpdateType.WEBHOOK:
return self._data_manager.webhook_config.enabled and (
self._attribute.measurement
in self._data_manager.webhook_update_coordinator.data
)
return True
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._unique_id
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._attribute.unit_of_measurement
@property
def icon(self) -> str:
"""Icon to use in the frontend, if any."""
return self._attribute.icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._attribute.enabled_by_default
@callback
def _on_poll_data_updated(self) -> None:
self._update_state_data(
self._data_manager.poll_data_update_coordinator.data or {}
)
@callback
def _on_webhook_data_updated(self) -> None:
self._update_state_data(
self._data_manager.webhook_update_coordinator.data or {}
)
def _update_state_data(self, data: MeasurementData) -> None:
"""Update the state data."""
self._state_data = data.get(self._attribute.measurement)
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Register update dispatcher."""
if self._attribute.update_type == UpdateType.POLL:
self.async_on_remove(
self._data_manager.poll_data_update_coordinator.async_add_listener(
self._on_poll_data_updated
)
)
self._on_poll_data_updated()
elif self._attribute.update_type == UpdateType.WEBHOOK:
self.async_on_remove(
self._data_manager.webhook_update_coordinator.async_add_listener(
self._on_webhook_data_updated
)
)
self._on_webhook_data_updated()
async def async_get_data_manager(
hass: HomeAssistant, config_entry: ConfigEntry
) -> DataManager:
"""Get the data manager for a config entry."""
hass.data.setdefault(const.DOMAIN, {})
hass.data[const.DOMAIN].setdefault(config_entry.entry_id, {})
config_entry_data = hass.data[const.DOMAIN][config_entry.entry_id]
if const.DATA_MANAGER not in config_entry_data:
profile = config_entry.data.get(const.PROFILE)
_LOGGER.debug("Creating withings data manager for profile: %s", profile)
config_entry_data[const.DATA_MANAGER] = DataManager(
hass,
profile,
ConfigEntryWithingsApi(
hass=hass,
config_entry=config_entry,
implementation=await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, config_entry
),
),
config_entry.data["token"]["userid"],
WebhookConfig(
id=config_entry.data[CONF_WEBHOOK_ID],
url=config_entry.data[const.CONF_WEBHOOK_URL],
enabled=config_entry.data[const.CONF_USE_WEBHOOK],
),
)
return config_entry_data[const.DATA_MANAGER]
def get_data_manager_by_webhook_id(
hass: HomeAssistant, webhook_id: str
) -> Optional[DataManager]:
"""Get a data manager by it's webhook id."""
return next(
iter(
[
data_manager
for data_manager in get_all_data_managers(hass)
if data_manager.webhook_config.id == webhook_id
]
),
None,
)
def get_all_data_managers(hass: HomeAssistant) -> Tuple[DataManager, ...]:
"""Get all configured data managers."""
return tuple(
[
config_entry_data[const.DATA_MANAGER]
for config_entry_data in hass.data[const.DOMAIN].values()
if const.DATA_MANAGER in config_entry_data
]
)
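# Illustrative sketch (not part of the original integration): the nested
# hass.data lookup used by get_all_data_managers() and
# get_data_manager_by_webhook_id() above, with a plain dict standing in for
# hass.data and strings standing in for DataManager objects. All keys and
# values here are hypothetical.
_fake_hass_data = {
    "withings": {
        "entry-1": {"data_manager": "manager-a"},
        "entry-2": {},  # config entry without a data manager yet
        "entry-3": {"data_manager": "manager-b"},
    }
}
def _all_managers(hass_data):
    return tuple(
        entry_data["data_manager"]
        for entry_data in hass_data["withings"].values()
        if "data_manager" in entry_data
    )
assert set(_all_managers(_fake_hass_data)) == {"manager-a", "manager-b"}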
def async_remove_data_manager(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Remove a data manager for a config entry."""
del hass.data[const.DOMAIN][config_entry.entry_id][const.DATA_MANAGER]
async def async_create_entities(
hass: HomeAssistant,
entry: ConfigEntry,
create_func: Callable[[DataManager, WithingsAttribute], Entity],
platform: str,
) -> List[Entity]:
"""Create withings entities from config entry."""
data_manager = await async_get_data_manager(hass, entry)
return [
create_func(data_manager, attribute)
for attribute in get_platform_attributes(platform)
]
def get_platform_attributes(platform: str) -> Tuple[WithingsAttribute, ...]:
"""Get withings attributes used for a specific platform."""
return tuple(
[
attribute
for attribute in WITHINGS_ATTRIBUTES
if attribute.platform == platform
]
)
class WithingsLocalOAuth2Implementation(LocalOAuth2Implementation):
"""Oauth2 implementation that only uses the external url."""
@property
def redirect_uri(self) -> str:
"""Return the redirect uri."""
url = get_url(self.hass, allow_internal=False, prefer_cloud=True)
return f"{url}{AUTH_CALLBACK_PATH}"
| turbokongen/home-assistant | homeassistant/components/withings/common.py | Python | apache-2.0 | 34,960 | 0.00083 |
"""
QFileDialog objects can only be run in the main thread.
"""
### imports ###################################################################
import os
import time
from PySide2 import QtCore
from PySide2 import QtWidgets
from pycmds.project import project_globals as g
from pycmds.project import classes as pc
### FileDialog object #########################################################
directory_filepath = pc.Mutex()
open_filepath = pc.Mutex()
save_filepath = pc.Mutex()
class FileDialog(QtCore.QObject):
update_ui = QtCore.Signal()
queue_emptied = QtCore.Signal()
def __init__(self, enqueued_object, busy_object):
QtCore.QObject.__init__(self)
self.name = "file_dialog"
self.enqueued = enqueued_object
self.busy = busy_object
@QtCore.Slot(str, list)
def dequeue(self, method, inputs):
"""
Slot to accept enqueued commands from main thread.
Method passed as qstring, inputs as list of [args, kwargs].
Calls own method with arguments from inputs.
"""
self.update_ui.emit()
method = str(method) # method passed as qstring
args, kwargs = inputs
if g.debug.read():
print(self.name, " dequeue:", method, inputs, self.busy.read())
self.enqueued.pop()
getattr(self, method)(*args, **kwargs)
if not self.enqueued.read():
self.queue_emptied.emit()
self.check_busy()
def check_busy(self):
"""
        decides if the file dialog handler is done and handles writing of 'busy' to False
"""
# must always write busy whether answer is True or False
if self.enqueued.read():
time.sleep(0.1) # don't loop like crazy
self.busy.write(True)
else:
self.busy.write(False)
self.update_ui.emit()
def clean(self, out):
"""
takes the output and returns a string that has the properties I want
"""
out = str(out)
out = out.replace("/", os.sep)
return out
def getExistingDirectory(self, inputs=[]):
caption, directory, options = inputs
options = QtWidgets.QFileDialog.ShowDirsOnly
out = self.clean(
QtWidgets.QFileDialog.getExistingDirectory(
g.main_window.read(), caption, str(directory), options
)
)
directory_filepath.write(out)
def getOpenFileName(self, inputs=[]):
caption, directory, options = inputs
out = self.clean(
QtWidgets.QFileDialog.getOpenFileName(
g.main_window.read(), caption, str(directory), options
)[0]
)
open_filepath.write(out)
def getSaveFileName(self, inputs=[]):
caption, directory, savefilter, selectedfilter, options = inputs
out = self.clean(
QtWidgets.QFileDialog.getSaveFileName(
g.main_window.read(), caption, directory, savefilter, selectedfilter, options,
)[0]
)
save_filepath.write(out)
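# Illustrative sketch (not part of this module): the getattr-based command
# dispatch used by FileDialog.dequeue() above, stripped of the Qt signal/slot
# plumbing. The class and method names are hypothetical stand-ins.
class _ToyDispatcher:
    def greet(self, name, punctuation="!"):
        return "hello " + name + punctuation
    def dequeue(self, method, inputs):
        args, kwargs = inputs
        return getattr(self, str(method))(*args, **kwargs)
assert _ToyDispatcher().dequeue("greet", [["world"], {"punctuation": "?"}]) == "hello world?"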
busy = pc.Busy()
enqueued = pc.Enqueued()
file_dialog = FileDialog(enqueued, busy)
q = pc.Q(enqueued, busy, file_dialog)
### thread-safe file dialog methods ###########################################
# the q method only works between different threads
# call directly if the calling object is in the main thread
def dir_dialog(caption, directory, options=None):
inputs = [caption, directory, options]
if QtCore.QThread.currentThread() == g.main_thread.read():
file_dialog.getExistingDirectory(inputs)
else:
q.push("getExistingDirectory", inputs)
while busy.read():
time.sleep(0.1)
return directory_filepath.read()
def open_dialog(caption, directory, options):
inputs = [caption, directory, options]
if QtCore.QThread.currentThread() == g.main_thread.read():
file_dialog.getOpenFileName(inputs)
else:
q.push("getOpenFileName", inputs)
while busy.read():
time.sleep(0.1)
return open_filepath.read()
def save_dialog(caption, directory, savefilter, selectedfilter, options):
inputs = [caption, directory, savefilter, selectedfilter, options]
if QtCore.QThread.currentThread() == g.main_thread.read():
file_dialog.getSaveFileName(inputs)
else:
q.push("getSaveFileName", inputs)
while busy.read():
time.sleep(0.1)
return save_filepath.read()
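# Illustrative sketch (not part of this module): the "push a request, then
# block until the main thread services it" hand-off used by dir_dialog,
# open_dialog, and save_dialog above, reduced to plain threading primitives.
# The worker function, queue, and result names are hypothetical stand-ins.
import threading as _threading
import queue as _queue
_requests = _queue.Queue()
_result = {}
def _main_thread_worker():
    # stands in for the main-thread FileDialog servicing one queued request
    caption = _requests.get()
    _result["path"] = "/tmp/chosen-by-" + caption
    _requests.task_done()
def _ask_for_path(caption):
    _requests.put(caption)  # like q.push("getSaveFileName", inputs)
    _requests.join()  # like polling busy.read() until the dialog finishes
    return _result["path"]
_worker = _threading.Thread(target=_main_thread_worker)
_worker.start()
assert _ask_for_path("user") == "/tmp/chosen-by-user"
_worker.join()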
| wright-group/PyCMDS | pycmds/project/file_dialog_handler.py | Python | mit | 4,481 | 0.000893 |
#!usr/bin/env python
#
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Kenneth Lavrsen for the Open2300 implementation:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/WebHome
# description of the station communication interface:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSAPI
# memory map:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSMemoryMap
#
# Thanks to Russell Stuart for the ws2300 python implementation:
# http://ace-host.stuart.id.au/russell/files/ws2300/
# and the map of the station memory:
# http://ace-host.stuart.id.au/russell/files/ws2300/memory_map_2300.txt
#
# This implementation copies directly from Russell Stuart's implementation,
# but only the parts required to read from and write to the weather station.
"""Classes and functions for interfacing with WS-23xx weather stations.
LaCrosse made a number of stations in the 23xx series, including:
WS-2300, WS-2308, WS-2310, WS-2315, WS-2317, WS-2357
The stations were also sold as the TFA Matrix and TechnoLine 2350.
The WWVB receiver is located in the console.
To synchronize the console and sensors, press and hold the PLUS key for 2
seconds. When console is not synchronized no data will be received.
To do a factory reset, press and hold PRESSURE and WIND for 5 seconds.
A single bucket tip is 0.0204 in (0.518 mm).
The station has 175 history records. That is just over 7 days of data with
the default history recording interval of 60 minutes.
The station supports both wireless and wired communication between the
sensors and a station console. Wired connection updates data every 8 seconds.
Wireless connection updates data in 16 to 128 second intervals, depending on
wind speed and rain activity.
The connection type can be one of 0=cable, 3=lost, 15=wireless
sensor update frequency:
32 seconds when wind speed > 22.36 mph (wireless)
128 seconds when wind speed < 22.36 mph (wireless)
10 minutes (wireless after 5 failed attempts)
8 seconds (wired)
console update frequency:
15 seconds (pressure/temperature)
20 seconds (humidity)
It is possible to increase the rate of wireless updates:
http://www.wxforum.net/index.php?topic=2196.0
Sensors are connected by unshielded phone cables. RF interference can cause
random spikes in data, with one symptom being values of 25.5 m/s or 91.8 km/h
for the wind speed. To reduce the number of spikes in data, replace with
shielded cables:
http://www.lavrsen.dk/sources/weather/windmod.htm
The station records wind speed and direction, but has no notion of gust.
The station calculates windchill and dewpoint.
The station has a serial connection to the computer.
This driver does not keep the serial port open for long periods. Instead, the
driver opens the serial port, reads data, then closes the port.
This driver polls the station. Use the polling_interval parameter to specify
how often to poll for data. If not specified, the polling interval will adapt
based on connection type and status.
USB-Serial Converters
With a USB-serial converter one can connect the station to a computer with
only USB ports, but not every converter will work properly. Perhaps the two
most common converters are based on the Prolific and FTDI chipsets. Many
people report better luck with the FTDI-based converters. Some converters
that use the Prolific chipset (PL2303) will work, but not all of them.
Known to work: ATEN UC-232A
Bounds checking
wind speed: 0-113 mph
wind direction: 0-360
humidity: 0-100
temperature: ok if not -22F and humidity is valid
dewpoint: ok if not -22F and humidity is valid
barometer: 25-35 inHg
rain rate: 0-10 in/hr
Discrepancies Between Implementations
As of December 2013, there are significant differences between the open2300,
wview, and ws2300 implementations. Current version numbers are as follows:
open2300 1.11
ws2300 1.8
wview 5.20.2
History Interval
The factory default is 60 minutes. The value stored in the console is one
less than the actual value (in minutes). So for the factory default of 60,
the console stores 59. The minimum interval is 1.
ws2300.py reports the actual value from the console, e.g., 59 when the
interval is 60. open2300 reports the interval, e.g., 60 when the interval
is 60. wview ignores the interval.
Detecting Bogus Sensor Values
wview queries the station 3 times for each sensor then accepts the value only
if the three values were close to each other.
open2300 sleeps 10 seconds if a wind measurement indicates invalid or overflow.
The ws2300.py implementation includes overflow and validity flags for values
from the wind sensors. It does not retry based on invalid or overflow.
Wind Speed
There is disagreement about how to calculate wind speed and how to determine
whether the wind speed is valid.
This driver introduces a WindConversion object that uses open2300/wview
decoding so that wind speeds match that of open2300/wview. ws2300 1.8
incorrectly uses bcd2num instead of bin2num. This bug is fixed in this driver.
The memory map indicates the following:
addr smpl description
0x527 0 Wind overflow flag: 0 = normal
0x528 0 Wind minimum code: 0=min, 1=--.-, 2=OFL
0x529 0 Windspeed: binary nibble 0 [m/s * 10]
0x52A 0 Windspeed: binary nibble 1 [m/s * 10]
0x52B 0 Windspeed: binary nibble 2 [m/s * 10]
0x52C 8 Wind Direction = nibble * 22.5 degrees
0x52D 8 Wind Direction 1 measurement ago
0x52E 9 Wind Direction 2 measurement ago
0x52F 8 Wind Direction 3 measurement ago
0x530 7 Wind Direction 4 measurement ago
0x531 7 Wind Direction 5 measurement ago
0x532 0
wview 5.20.2 implementation (wview apparently copied from open2300):
read 3 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
fail
} else {
dir = (x[2] >> 4) * 22.5
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0 * 2.23693629)
maxdir = dir
maxspeed = speed
}
open2300 1.10 implementation:
read 6 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
0x52a x[3]
0x52b x[4]
0x52c x[5]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
sleep 10
} else {
dir = x[2] >> 4
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0)
dir0 = (x[2] >> 4) * 22.5
dir1 = (x[3] & 0xf) * 22.5
dir2 = (x[3] >> 4) * 22.5
dir3 = (x[4] & 0xf) * 22.5
dir4 = (x[4] >> 4) * 22.5
dir5 = (x[5] & 0xf) * 22.5
}
ws2300.py 1.8 implementation:
read 1 nibble starting at 0x527
read 1 nibble starting at 0x528
read 4 nibble starting at 0x529
read 3 nibble starting at 0x529
read 1 nibble starting at 0x52c
read 1 nibble starting at 0x52d
read 1 nibble starting at 0x52e
read 1 nibble starting at 0x52f
read 1 nibble starting at 0x530
read 1 nibble starting at 0x531
0x527 overflow
0x528 validity
0x529 speed[0]
0x52a speed[1]
0x52b speed[2]
0x52c dir[0]
speed: ((x[2] * 100 + x[1] * 10 + x[0]) % 1000) / 10
velocity: (x[2] * 100 + x[1] * 10 + x[0]) / 10
dir = data[0] * 22.5
speed = (bcd2num(data) % 10**3 + 0) / 10**1
velocity = (bcd2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
bcd2num([a,b,c]) -> c*100+b*10+a
"""
# TODO: use pyserial instead of LinuxSerialPort
# TODO: put the __enter__ and __exit__ scaffolding on serial port, not Station
# FIXME: unless we can get setTime to work, just ignore the console clock
# FIXME: detect bogus wind speed/direction
# i see these when the wind instrument is disconnected:
# ws 26.399999
# wsh 21
# w0 135
from __future__ import with_statement
import syslog
import time
import string
import fcntl
import os
import select
import struct
import termios
import tty
import weeutil.weeutil
import weewx
import weewx.drivers
import weewx.units
import weewx.wxformulas
DRIVER_NAME = 'WS23xx'
DRIVER_VERSION = '0.22'
def loader(config_dict, _):
return WS23xxDriver(config_dict=config_dict, **config_dict[DRIVER_NAME])
def configurator_loader(_):
return WS23xxConfigurator()
def confeditor_loader():
return WS23xxConfEditor()
DEFAULT_PORT = '/dev/ttyUSB0'
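# Illustrative sketch (not part of the original driver): the open2300/wview
# wind decoding described in the module docstring above, applied to a
# hypothetical read of the three raw bytes at 0x527..0x529.
def _decode_wind_wview(x):
    """Return (speed_mph, direction_deg), or None for an invalid reading."""
    if x[0] != 0x00 or (x[1] == 0xFF and (x[2] & 0xF) in (0, 1)):
        return None
    speed_ms = (((x[2] & 0xF) << 8) + x[1]) / 10.0
    return (speed_ms * 2.23693629, (x[2] >> 4) * 22.5)
_speed_mph, _direction = _decode_wind_wview([0x00, 0x39, 0x40])
assert abs(_speed_mph - 12.75) < 0.01  # 5.7 m/s converted to mph
assert _direction == 90.0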
def logmsg(dst, msg):
syslog.syslog(dst, 'ws23xx: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logcrt(msg):
logmsg(syslog.LOG_CRIT, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
class WS23xxConfigurator(weewx.drivers.AbstractConfigurator):
def add_options(self, parser):
super(WS23xxConfigurator, self).add_options(parser)
parser.add_option("--info", dest="info", action="store_true",
help="display weather station configuration")
parser.add_option("--current", dest="current", action="store_true",
help="get the current weather conditions")
parser.add_option("--history", dest="nrecords", type=int, metavar="N",
help="display N history records")
parser.add_option("--history-since", dest="recmin",
type=int, metavar="N",
help="display history records since N minutes ago")
parser.add_option("--clear-memory", dest="clear", action="store_true",
help="clear station memory")
parser.add_option("--set-time", dest="settime", action="store_true",
help="set the station clock to the current time")
parser.add_option("--set-interval", dest="interval",
type=int, metavar="N",
help="set the station archive interval to N minutes")
def do_options(self, options, parser, config_dict, prompt):
self.station = WS23xxDriver(**config_dict[DRIVER_NAME])
if options.current:
self.show_current()
elif options.nrecords is not None:
self.show_history(count=options.nrecords)
elif options.recmin is not None:
ts = int(time.time()) - options.recmin * 60
self.show_history(ts=ts)
elif options.settime:
self.set_clock(prompt)
elif options.interval is not None:
self.set_interval(options.interval, prompt)
elif options.clear:
self.clear_history(prompt)
else:
self.show_info()
self.station.closePort()
def show_info(self):
"""Query the station then display the settings."""
print 'Querying the station for the configuration...'
config = self.station.getConfig()
for key in sorted(config):
print '%s: %s' % (key, config[key])
def show_current(self):
"""Get current weather observation."""
print 'Querying the station for current weather data...'
for packet in self.station.genLoopPackets():
print packet
break
def show_history(self, ts=None, count=0):
"""Show the indicated number of records or records since timestamp"""
print "Querying the station for historical records..."
for i, r in enumerate(self.station.genStartupRecords(since_ts=ts,
count=count)):
print r
if count and i > count:
break
def set_clock(self, prompt):
"""Set station clock to current time."""
ans = None
while ans not in ['y', 'n']:
v = self.station.getTime()
vstr = weeutil.weeutil.timestamp_to_string(v)
print "Station clock is", vstr
if prompt:
ans = raw_input("Set station clock (y/n)? ")
else:
print "Setting station clock"
ans = 'y'
if ans == 'y':
self.station.setTime()
v = self.station.getTime()
vstr = weeutil.weeutil.timestamp_to_string(v)
print "Station clock is now", vstr
elif ans == 'n':
print "Set clock cancelled."
def set_interval(self, interval, prompt):
print "Changing the interval will clear the station memory."
v = self.station.getArchiveInterval()
ans = None
while ans not in ['y', 'n']:
print "Interval is", v
if prompt:
ans = raw_input("Set interval to %d minutes (y/n)? " % interval)
else:
print "Setting interval to %d minutes" % interval
ans = 'y'
if ans == 'y':
self.station.setArchiveInterval(interval)
v = self.station.getArchiveInterval()
print "Interval is now", v
elif ans == 'n':
print "Set interval cancelled."
def clear_history(self, prompt):
ans = None
while ans not in ['y', 'n']:
v = self.station.getRecordCount()
print "Records in memory:", v
if prompt:
ans = raw_input("Clear console memory (y/n)? ")
else:
print 'Clearing console memory'
ans = 'y'
if ans == 'y':
self.station.clearHistory()
v = self.station.getRecordCount()
print "Records in memory:", v
elif ans == 'n':
print "Clear memory cancelled."
class WS23xxDriver(weewx.drivers.AbstractDevice):
"""Driver for LaCrosse WS23xx stations."""
def __init__(self, **stn_dict):
"""Initialize the station object.
port: The serial port, e.g., /dev/ttyS0 or /dev/ttyUSB0
[Required. Default is /dev/ttyS0]
polling_interval: How often to poll the station, in seconds.
[Optional. Default is 8 (wired) or 30 (wireless)]
model: Which station model is this?
[Optional. Default is 'LaCrosse WS23xx']
"""
self._last_rain = None
self._last_cn = None
self._poll_wait = 60
self.model = stn_dict.get('model', 'LaCrosse WS23xx')
self.port = stn_dict.get('port', DEFAULT_PORT)
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 30))
self.polling_interval = stn_dict.get('polling_interval', None)
if self.polling_interval is not None:
self.polling_interval = int(self.polling_interval)
loginf('driver version is %s' % DRIVER_VERSION)
loginf('serial port is %s' % self.port)
loginf('polling interval is %s' % self.polling_interval)
@property
def hardware_name(self):
return self.model
# weewx wants the archive interval in seconds, but the console uses minutes
@property
def archive_interval(self):
return self.getArchiveInterval() * 60
# def closePort(self):
# pass
def genLoopPackets(self):
ntries = 0
while ntries < self.max_tries:
ntries += 1
try:
with WS23xx(self.port) as s:
data = s.get_raw_data(SENSOR_IDS)
packet = data_to_packet(data, int(time.time() + 0.5),
last_rain=self._last_rain)
self._last_rain = packet['rainTotal']
ntries = 0
yield packet
if self.polling_interval is not None:
self._poll_wait = self.polling_interval
if data['cn'] != self._last_cn:
conn_info = get_conn_info(data['cn'])
loginf("connection changed from %s to %s" %
(get_conn_info(self._last_cn)[0], conn_info[0]))
self._last_cn = data['cn']
if self.polling_interval is None:
loginf("using %s second polling interval"
" for %s connection" %
(conn_info[1], conn_info[0]))
self._poll_wait = conn_info[1]
time.sleep(self._poll_wait)
except Ws2300.Ws2300Exception, e:
logerr("Failed attempt %d of %d to get LOOP data: %s" %
(ntries, self.max_tries, e))
logdbg("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
msg = "Max retries (%d) exceeded for LOOP data" % self.max_tries
logerr(msg)
raise weewx.RetriesExceeded(msg)
def genArchiveRecords(self, since_ts, count=0):
with WS23xx(self.port) as s:
last_rain = None
for ts, data in s.gen_records(since_ts=since_ts, count=count):
record = data_to_packet(data, ts, last_rain=last_rain)
record['interval'] = data['interval']
last_rain = record['rainTotal']
yield record
# def getTime(self) :
# with WS23xx(self.port) as s:
# return s.get_time()
# def setTime(self):
# with WS23xx(self.port) as s:
# s.set_time()
def getArchiveInterval(self):
with WS23xx(self.port) as s:
return s.get_archive_interval()
def setArchiveInterval(self, interval):
with WS23xx(self.port) as s:
s.set_archive_interval(interval)
def getConfig(self):
with WS23xx(self.port) as s:
data = s.get_raw_data(Measure.IDS.keys())
fdata = {}
for key in data:
fdata[Measure.IDS[key].name] = data[key]
return fdata
def getRecordCount(self):
with WS23xx(self.port) as s:
return s.get_record_count()
def clearHistory(self):
with WS23xx(self.port) as s:
s.clear_memory()
# ids for current weather conditions and connection type
SENSOR_IDS = ['it','ih','ot','oh','pa','wind','rh','rt','dp','wc','cn']
# polling interval, in seconds, for various connection types
POLLING_INTERVAL = {0: ("cable", 8), 3: ("lost", 60), 15: ("wireless", 30)}
def get_conn_info(conn_type):
return POLLING_INTERVAL.get(conn_type, ("unknown", 60))
def data_to_packet(data, ts, last_rain=None):
"""Convert raw data to format and units required by weewx.
station weewx (metric)
temperature degree C degree C
humidity percent percent
uv index unitless unitless
pressure mbar mbar
wind speed m/s km/h
wind dir degree degree
wind gust None
wind gust dir None
rain mm cm
    rain rate         mm/h       cm/h
"""
packet = dict()
packet['usUnits'] = weewx.METRIC
packet['dateTime'] = ts
packet['inTemp'] = data['it']
packet['inHumidity'] = data['ih']
packet['outTemp'] = data['ot']
packet['outHumidity'] = data['oh']
packet['pressure'] = data['pa']
ws, wd, wso, wsv = data['wind']
if wso == 0 and wsv == 0:
packet['windSpeed'] = ws
if packet['windSpeed'] is not None:
packet['windSpeed'] *= 3.6 # weewx wants km/h
packet['windDir'] = wd if packet['windSpeed'] else None
else:
loginf('invalid wind reading: speed=%s dir=%s overflow=%s invalid=%s' %
(ws, wd, wso, wsv))
packet['windSpeed'] = None
packet['windDir'] = None
packet['windGust'] = None
packet['windGustDir'] = None
packet['rainTotal'] = data['rt']
if packet['rainTotal'] is not None:
packet['rainTotal'] /= 10 # weewx wants cm
packet['rain'] = weewx.wxformulas.calculate_rain(
packet['rainTotal'], last_rain)
# station provides some derived variables
packet['rainRate'] = data['rh']
if packet['rainRate'] is not None:
packet['rainRate'] /= 10 # weewx wants cm/hr
packet['dewpoint'] = data['dp']
packet['windchill'] = data['wc']
return packet
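# Illustrative sketch (not part of the original driver): the unit conversions
# performed by data_to_packet() above, applied to hypothetical sensor readings.
# Station units on the left, weewx METRIC units on the right.
_wind_speed_ms = 5.0  # station reports wind speed in m/s
_rain_total_mm = 120.0  # station reports rain totals in mm
_rain_rate_mm_per_hr = 2.5  # station reports rain rate in mm/h
assert abs(_wind_speed_ms * 3.6 - 18.0) < 1e-9  # weewx wants km/h
assert abs(_rain_total_mm / 10 - 12.0) < 1e-9  # weewx wants cm
assert abs(_rain_rate_mm_per_hr / 10 - 0.25) < 1e-9  # weewx wants cm/h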
class WS23xx(object):
"""Wrap the Ws2300 object so we can easily open serial port, read/write,
close serial port without all of the try/except/finally scaffolding."""
def __init__(self, port):
logdbg('create LinuxSerialPort')
self.serial_port = LinuxSerialPort(port)
logdbg('create Ws2300')
self.ws = Ws2300(self.serial_port)
def __enter__(self):
logdbg('station enter')
return self
def __exit__(self, type, value, traceback):
logdbg('station exit')
self.ws = None
self.close()
def close(self):
logdbg('close LinuxSerialPort')
self.serial_port.close()
self.serial_port = None
def set_time(self, ts):
"""Set station time to indicated unix epoch."""
logdbg('setting station clock to %s' %
weeutil.weeutil.timestamp_to_string(ts))
for m in [Measure.IDS['sd'], Measure.IDS['st']]:
data = m.conv.value2binary(ts)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_time(self):
"""Return station time as unix epoch."""
data = self.get_raw_data(['sw'])
ts = int(data['sw'])
logdbg('station clock is %s' % weeutil.weeutil.timestamp_to_string(ts))
return ts
def set_archive_interval(self, interval):
"""Set the archive interval in minutes."""
if int(interval) < 1:
raise ValueError('archive interval must be greater than zero')
logdbg('setting hardware archive interval to %s minutes' % interval)
interval -= 1
for m,v in [(Measure.IDS['hi'],interval), # archive interval in minutes
(Measure.IDS['hc'],1), # time till next sample in minutes
(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_archive_interval(self):
"""Return archive interval in minutes."""
data = self.get_raw_data(['hi'])
x = 1 + int(data['hi'])
logdbg('station archive interval is %s minutes' % x)
return x
def clear_memory(self):
"""Clear station memory."""
logdbg('clearing console memory')
for m,v in [(Measure.IDS['hn'],0)]: # number of valid records
data = m.conv.value2binary(v)
cmd = m.conv.write(data, None)
self.ws.write_safe(m.address, *cmd[1:])
def get_record_count(self):
data = self.get_raw_data(['hn'])
x = int(data['hn'])
logdbg('record count is %s' % x)
return x
def gen_records(self, since_ts=None, count=None, use_computer_clock=True):
"""Get latest count records from the station from oldest to newest. If
count is 0 or None, return all records.
The station has a history interval, and it records when the last
history sample was saved. So as long as the interval does not change
between the first and last records, we are safe to infer timestamps
for each record. This assumes that if the station loses power then
the memory will be cleared.
There is no timestamp associated with each record - we have to guess.
The station tells us the time until the next record and the epoch of
the latest record, based on the station's clock. So we can use that
or use the computer clock to guess the timestamp for each record.
To ensure accurate data, the first record must be read within one
minute of the initial read and the remaining records must be read
within numrec * interval minutes.
"""
logdbg("gen_records: since_ts=%s count=%s clock=%s" %
(since_ts, count, use_computer_clock))
measures = [Measure.IDS['hi'], Measure.IDS['hw'],
Measure.IDS['hc'], Measure.IDS['hn']]
raw_data = read_measurements(self.ws, measures)
interval = 1 + int(measures[0].conv.binary2value(raw_data[0])) # minute
latest_ts = int(measures[1].conv.binary2value(raw_data[1])) # epoch
time_to_next = int(measures[2].conv.binary2value(raw_data[2])) # minute
numrec = int(measures[3].conv.binary2value(raw_data[3]))
now = int(time.time())
cstr = 'station'
if use_computer_clock:
latest_ts = now - (interval - time_to_next) * 60
cstr = 'computer'
logdbg("using %s clock with latest_ts of %s" %
(cstr, weeutil.weeutil.timestamp_to_string(latest_ts)))
if not count:
count = HistoryMeasure.MAX_HISTORY_RECORDS
if since_ts is not None:
count = int((now - since_ts) / (interval * 60))
logdbg("count is %d to satisfy timestamp of %s" %
(count, weeutil.weeutil.timestamp_to_string(since_ts)))
if count == 0:
return
if count > numrec:
count = numrec
if count > HistoryMeasure.MAX_HISTORY_RECORDS:
count = HistoryMeasure.MAX_HISTORY_RECORDS
# station is about to overwrite first record, so skip it
if time_to_next <= 1 and count == HistoryMeasure.MAX_HISTORY_RECORDS:
count -= 1
logdbg("downloading %d records from station" % count)
HistoryMeasure.set_constants(self.ws)
measures = [HistoryMeasure(n) for n in range(count-1, -1, -1)]
raw_data = read_measurements(self.ws, measures)
last_ts = latest_ts - (count-1) * interval * 60
for measure, nybbles in zip(measures, raw_data):
value = measure.conv.binary2value(nybbles)
data_dict = {
'interval': interval,
'it': value.temp_indoor,
'ih': value.humidity_indoor,
'ot': value.temp_outdoor,
'oh': value.humidity_outdoor,
'pa': value.pressure_absolute,
'rt': value.rain,
'wind': (value.wind_speed, value.wind_direction, 0, 0),
'rh': None, # no rain rate in history
'dp': None, # no dewpoint in history
'wc': None, # no windchill in history
}
yield last_ts, data_dict
last_ts += interval * 60
def get_raw_data(self, labels):
"""Get raw data from the station, return as dictionary."""
measures = [Measure.IDS[m] for m in labels]
raw_data = read_measurements(self.ws, measures)
data_dict = dict(zip(labels, [m.conv.binary2value(d) for m, d in zip(measures, raw_data)]))
return data_dict
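# Illustrative sketch (not part of the original driver): the timestamp
# inference used by WS23xx.gen_records() above. The oldest downloaded record
# is stamped latest_ts - (count - 1) * interval * 60 and each later record is
# one interval newer. The interval, count, and epoch below are hypothetical.
_interval_minutes = 60
_count = 4
_latest_ts = 1000000  # epoch of the newest history record
_timestamps = [
    _latest_ts - (_count - 1 - n) * _interval_minutes * 60 for n in range(_count)
]
assert _timestamps == [989200, 992800, 996400, 1000000]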
# =============================================================================
# The following code was adapted from ws2300.py by Russell Stuart
# =============================================================================
VERSION = "1.8 2013-08-26"
#
# Debug options.
#
DEBUG_SERIAL = False
#
# A fatal error.
#
class FatalError(StandardError):
source = None
message = None
cause = None
def __init__(self, source, message, cause=None):
self.source = source
self.message = message
self.cause = cause
StandardError.__init__(self, message)
#
# The serial port interface. We can talk to the Ws2300 over anything
# that implements this interface.
#
class SerialPort(object):
#
# Discard all characters waiting to be read.
#
def clear(self): raise NotImplementedError()
#
# Close the serial port.
#
def close(self): raise NotImplementedError()
#
# Wait for all characters to be sent.
#
def flush(self): raise NotImplementedError()
#
    # Read a character, waiting for at most timeout seconds. Return the
# character read, or None if the timeout occurred.
#
def read_byte(self, timeout): raise NotImplementedError()
#
# Release the serial port. Closes it until it is used again, when
# it is automatically re-opened. It need not be implemented.
#
def release(self): pass
#
# Write characters to the serial port.
#
def write(self, data): raise NotImplementedError()
#
# A Linux Serial port. Implements the Serial interface on Linux.
#
class LinuxSerialPort(SerialPort):
SERIAL_CSIZE = {
"7": tty.CS7,
"8": tty.CS8, }
SERIAL_PARITIES= {
"e": tty.PARENB,
"n": 0,
"o": tty.PARENB|tty.PARODD, }
SERIAL_SPEEDS = {
"300": tty.B300,
"600": tty.B600,
"1200": tty.B1200,
"2400": tty.B2400,
"4800": tty.B4800,
"9600": tty.B9600,
"19200": tty.B19200,
"38400": tty.B38400,
"57600": tty.B57600,
"115200": tty.B115200, }
SERIAL_SETTINGS = "2400,n,8,1"
device = None # string, the device name.
orig_settings = None # class, the original ports settings.
select_list = None # list, The serial ports
serial_port = None # int, OS handle to device.
settings = None # string, the settings on the command line.
#
# Initialise ourselves.
#
def __init__(self,device,settings=SERIAL_SETTINGS):
self.device = device
self.settings = settings.split(",")
self.settings.extend([None,None,None])
self.settings[0] = self.__class__.SERIAL_SPEEDS.get(self.settings[0], None)
self.settings[1] = self.__class__.SERIAL_PARITIES.get(self.settings[1].lower(), None)
self.settings[2] = self.__class__.SERIAL_CSIZE.get(self.settings[2], None)
if len(self.settings) != 7 or None in self.settings[:3]:
raise FatalError(self.device, 'Bad serial settings "%s".' % settings)
self.settings = self.settings[:4]
#
# Open the port.
#
try:
self.serial_port = os.open(self.device, os.O_RDWR)
except EnvironmentError, e:
raise FatalError(self.device, "can't open tty device - %s." % str(e))
try:
fcntl.flock(self.serial_port, fcntl.LOCK_EX)
self.orig_settings = tty.tcgetattr(self.serial_port)
setup = self.orig_settings[:]
setup[0] = tty.INPCK
setup[1] = 0
setup[2] = tty.CREAD|tty.HUPCL|tty.CLOCAL|reduce(lambda x,y: x|y, self.settings[:3])
setup[3] = 0 # tty.ICANON
setup[4] = self.settings[0]
setup[5] = self.settings[0]
setup[6] = ['\000']*len(setup[6])
setup[6][tty.VMIN] = 1
setup[6][tty.VTIME] = 0
tty.tcflush(self.serial_port, tty.TCIOFLUSH)
#
# Restart IO if stopped using software flow control (^S/^Q). This
# doesn't work on FreeBSD.
#
try:
tty.tcflow(self.serial_port, tty.TCOON|tty.TCION)
except termios.error:
pass
tty.tcsetattr(self.serial_port, tty.TCSAFLUSH, setup)
#
# Set DTR low and RTS high and leave other control lines untouched.
#
arg = struct.pack('I', 0)
arg = fcntl.ioctl(self.serial_port, tty.TIOCMGET, arg)
portstatus = struct.unpack('I', arg)[0]
portstatus = portstatus & ~tty.TIOCM_DTR | tty.TIOCM_RTS
arg = struct.pack('I', portstatus)
fcntl.ioctl(self.serial_port, tty.TIOCMSET, arg)
self.select_list = [self.serial_port]
except Exception:
os.close(self.serial_port)
raise
def close(self):
if self.orig_settings:
tty.tcsetattr(self.serial_port, tty.TCSANOW, self.orig_settings)
os.close(self.serial_port)
def read_byte(self, timeout):
ready = select.select(self.select_list, [], [], timeout)
if not ready[0]:
return None
return os.read(self.serial_port, 1)
#
# Write a string to the port.
#
def write(self, data):
os.write(self.serial_port, data)
#
# Flush the input buffer.
#
def clear(self):
tty.tcflush(self.serial_port, tty.TCIFLUSH)
#
# Flush the output buffer.
#
def flush(self):
tty.tcdrain(self.serial_port)
#
# This class reads and writes bytes to a Ws2300. It is passed something
# that implements the Serial interface. The major routines are:
#
# Ws2300() - Create one of these objects that talks over the serial port.
# read_batch() - Reads data from the device using a scatter/gather interface.
# write_safe() - Writes data to the device.
#
class Ws2300(object):
#
# An exception for us.
#
class Ws2300Exception(StandardError):
def __init__(self, *args):
StandardError.__init__(self, *args)
#
# Constants we use.
#
MAXBLOCK = 30
MAXRETRIES = 50
MAXWINDRETRIES= 20
WRITENIB = 0x42
SETBIT = 0x12
UNSETBIT = 0x32
WRITEACK = 0x10
SETACK = 0x04
UNSETACK = 0x0C
RESET_MIN = 0x01
RESET_MAX = 0x02
MAX_RESETS = 100
#
# Instance data.
#
log_buffer = None # list, action log
log_mode = None # string, Log mode
    log_nest = None # int, Nesting of log actions
serial_port = None # string, SerialPort port to use
#
# Initialise ourselves.
#
def __init__(self,serial_port):
self.log_buffer = []
self.log_nest = 0
self.serial_port = serial_port
#
# Write data to the device.
#
def write_byte(self,data):
if self.log_mode != 'w':
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'w'
self.log("%02x" % ord(data))
self.serial_port.write(data)
#
# Read a byte from the device.
#
def read_byte(self, timeout=1.0):
if self.log_mode != 'r':
self.log_mode = 'r'
self.log(':')
result = self.serial_port.read_byte(timeout)
if result == None:
self.log("--")
else:
self.log("%02x" % ord(result))
return result
#
# Remove all pending incoming characters.
#
def clear_device(self):
if self.log_mode != 'e':
self.log(' ')
self.log_mode = 'c'
self.log("C")
self.serial_port.clear()
#
# Write a reset string and wait for a reply.
#
def reset_06(self):
self.log_enter("re")
try:
for retry in range(self.__class__.MAX_RESETS):
self.clear_device()
self.write_byte('\x06')
#
# Occasionally 0, then 2 is returned. If 0 comes back,
# continue reading as this is more efficient than sending
            # an out-of-sync reset and letting the data reads restore
# synchronization. Occasionally, multiple 2's are returned.
# Read with a fast timeout until all data is exhausted, if
# we got a 2 back at all, we consider it a success.
#
success = False
answer = self.read_byte()
while answer != None:
if answer == '\x02':
success = True
answer = self.read_byte(0.05)
if success:
return
msg = "Reset failed, %d retries, no response" % self.__class__.MAX_RESETS
raise self.Ws2300Exception(msg)
finally:
self.log_exit()
#
# Encode the address.
#
def write_address(self,address):
for digit in range(4):
byte = chr((address >> (4 * (3-digit)) & 0xF) * 4 + 0x82)
self.write_byte(byte)
ack = chr(digit * 16 + (ord(byte) - 0x82) // 4)
answer = self.read_byte()
if ack != answer:
self.log("??")
return False
return True
#
# Write data, checking the reply.
#
def write_data(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("wd")
try:
if not self.write_address(nybble_address):
return None
if encode_constant == None:
encode_constant = self.WRITENIB
encoded_data = ''.join([
chr(nybbles[i]*4 + encode_constant)
for i in range(len(nybbles))])
ack_constant = {
self.SETBIT: self.SETACK,
self.UNSETBIT: self.UNSETACK,
self.WRITENIB: self.WRITEACK
}[encode_constant]
self.log(",")
for i in range(len(encoded_data)):
self.write_byte(encoded_data[i])
answer = self.read_byte()
if chr(nybbles[i] + ack_constant) != answer:
self.log("??")
return None
return True
finally:
self.log_exit()
#
    # Reset the device and write a command, verifying it was written correctly.
#
def write_safe(self,nybble_address,nybbles,encode_constant=None):
self.log_enter("ws")
try:
for retry in range(self.MAXRETRIES):
self.reset_06()
command_data = self.write_data(nybble_address,nybbles,encode_constant)
if command_data != None:
return command_data
raise self.Ws2300Exception("write_safe failed, retries exceeded")
finally:
self.log_exit()
#
    # A total kludge this, but it's the easiest way to force the 'computer
# time' to look like a normal ws2300 variable, which it most definitely
# isn't, of course.
#
def read_computer_time(self,nybble_address,nybble_count):
now = time.time()
tm = time.localtime(now)
tu = time.gmtime(now)
year2 = tm[0] % 100
datetime_data = (
tu[5]%10, tu[5]//10, tu[4]%10, tu[4]//10, tu[3]%10, tu[3]//10,
tm[5]%10, tm[5]//10, tm[4]%10, tm[4]//10, tm[3]%10, tm[3]//10,
tm[2]%10, tm[2]//10, tm[1]%10, tm[1]//10, year2%10, year2//10)
address = nybble_address+18
return datetime_data[address:address+nybble_count]
#
# Read 'length' nybbles at address. Returns: (nybble_at_address, ...).
# Can't read more than MAXBLOCK nybbles at a time.
#
def read_data(self,nybble_address,nybble_count):
if nybble_address < 0:
return self.read_computer_time(nybble_address,nybble_count)
self.log_enter("rd")
try:
if nybble_count < 1 or nybble_count > self.MAXBLOCK:
StandardError("Too many nybbles requested")
bytes = (nybble_count + 1) // 2
if not self.write_address(nybble_address):
return None
#
            # Write the number of bytes we want to read.
#
encoded_data = chr(0xC2 + bytes*4)
self.write_byte(encoded_data)
answer = self.read_byte()
check = chr(0x30 + bytes)
if answer != check:
self.log("??")
return None
#
# Read the response.
#
self.log(", :")
response = ""
for i in range(bytes):
answer = self.read_byte()
if answer == None:
return None
response += answer
#
# Read and verify checksum
#
answer = self.read_byte()
checksum = sum([ord(b) for b in response]) % 256
if chr(checksum) != answer:
self.log("??")
return None
flatten = lambda a,b: a + (ord(b) % 16, ord(b) / 16)
return reduce(flatten, response, ())[:nybble_count]
finally:
self.log_exit()
#
# Read a batch of blocks. Batches is a list of data to be read:
# [(address_of_first_nybble, length_in_nybbles), ...]
# returns:
# [(nybble_at_address, ...), ...]
#
def read_batch(self,batches):
self.log_enter("rb start")
self.log_exit()
try:
if [b for b in batches if b[0] >= 0]:
self.reset_06()
result = []
for batch in batches:
address = batch[0]
data = ()
for start_pos in range(0,batch[1],self.MAXBLOCK):
for retry in range(self.MAXRETRIES):
bytes = min(self.MAXBLOCK, batch[1]-start_pos)
response = self.read_data(address + start_pos, bytes)
if response != None:
break
self.reset_06()
if response == None:
raise self.Ws2300Exception("read failed, retries exceeded")
data += response
result.append(data)
return result
finally:
self.log_enter("rb end")
self.log_exit()
#
# Reset the device, read a block of nybbles at the passed address.
#
def read_safe(self,nybble_address,nybble_count):
self.log_enter("rs")
try:
return self.read_batch([(nybble_address,nybble_count)])[0]
finally:
self.log_exit()
#
# Debug logging of serial IO.
#
def log(self, s):
if not DEBUG_SERIAL:
return
self.log_buffer[-1] = self.log_buffer[-1] + s
def log_enter(self, action):
if not DEBUG_SERIAL:
return
self.log_nest += 1
if self.log_nest == 1:
if len(self.log_buffer) > 1000:
del self.log_buffer[0]
self.log_buffer.append("%5.2f %s " % (time.time() % 100, action))
self.log_mode = 'e'
def log_exit(self):
if not DEBUG_SERIAL:
return
self.log_nest -= 1
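# Illustrative sketch (not part of the original driver): the address encoding
# used by Ws2300.write_address() above. Each of the four address nybbles is
# sent as 0x82 + 4 * nybble and acknowledged with 16 * digit_position + nybble.
# The address below is hypothetical.
def _encode_address(address):
    return [((address >> (4 * (3 - digit))) & 0xF) * 4 + 0x82 for digit in range(4)]
def _expected_acks(address):
    return [digit * 16 + ((address >> (4 * (3 - digit))) & 0xF) for digit in range(4)]
assert _encode_address(0x0346) == [0x82, 0x8E, 0x92, 0x9A]
assert _expected_acks(0x0346) == [0x00, 0x13, 0x24, 0x36]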
#
# Print a data block.
#
def bcd2num(nybbles):
digits = list(nybbles)[:]
digits.reverse()
return reduce(lambda a,b: a*10 + b, digits, 0)
def num2bcd(number, nybble_count):
result = []
for i in range(nybble_count):
result.append(int(number % 10))
number //= 10
return tuple(result)
def bin2num(nybbles):
digits = list(nybbles)
digits.reverse()
return reduce(lambda a,b: a*16 + b, digits, 0)
def num2bin(number, nybble_count):
result = []
number = int(number)
for i in range(nybble_count):
result.append(number % 16)
number //= 16
return tuple(result)
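# Illustrative sketch (not part of the original driver): round trips through
# the nybble helpers above. Nybbles are stored least-significant first;
# bcd2num/num2bcd treat each nybble as a decimal digit, bin2num/num2bin as a
# hexadecimal digit. The sample values are hypothetical.
assert bcd2num((3, 2, 1)) == 123  # decimal digits: 1*100 + 2*10 + 3
assert num2bcd(123, 3) == (3, 2, 1)
assert bin2num((0xF, 0x1)) == 31  # hex digits: 0x1F
assert num2bin(31, 2) == (15, 1)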
#
# A "Conversion" encapsulates a unit of measurement on the Ws2300. Eg
# temperature, or wind speed.
#
class Conversion(object):
description = None # Description of the units.
nybble_count = None # Number of nybbles used on the WS2300
units = None # Units name (eg hPa).
#
# Initialise ourselves.
# units - text description of the units.
# nybble_count- Size of stored value on ws2300 in nybbles
# description - Description of the units
#
def __init__(self, units, nybble_count, description):
self.description = description
self.nybble_count = nybble_count
self.units = units
#
# Convert the nybbles read from the ws2300 to our internal value.
#
def binary2value(self, data): raise NotImplementedError()
#
# Convert our internal value to nybbles that can be written to the ws2300.
#
def value2binary(self, value): raise NotImplementedError()
#
# Print value.
#
def str(self, value): raise NotImplementedError()
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s): raise NotImplementedError()
#
# Transform data into something that can be written. Returns:
# (new_bytes, ws2300.write_safe_args, ...)
# This only becomes tricky when less than a nybble is written.
#
def write(self, data, nybble):
return (data, data)
#
    # Test if the nybbles read from the Ws2300 are sensible. Sometimes a
# communications error will make it past the weak checksums the Ws2300
# uses. This optional function implements another layer of checking -
# does the value returned make sense. Returns True if the value looks
# like garbage.
#
def garbage(self, data):
return False
#
# For values stored as binary numbers.
#
class BinConversion(Conversion):
mult = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, mult=1, check=None):
Conversion.__init__(self, units, nybble_count, description)
self.mult = mult
self.scale = scale
self.units = units
def binary2value(self, data):
return (bin2num(data) * self.mult) / 10.0**self.scale
def value2binary(self, value):
return num2bin(int(value * 10**self.scale) // self.mult, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For values stored as BCD numbers.
#
class BcdConversion(Conversion):
offset = None
scale = None
units = None
def __init__(self, units, nybble_count, scale, description, offset=0):
Conversion.__init__(self, units, nybble_count, description)
self.offset = offset
self.scale = scale
self.units = units
def binary2value(self, data):
num = bcd2num(data) % 10**self.nybble_count + self.offset
return float(num) / 10**self.scale
def value2binary(self, value):
return num2bcd(int(value * 10**self.scale) - self.offset, self.nybble_count)
def str(self, value):
return "%.*f" % (self.scale, value)
def parse(self, s):
return float(s)
#
# For pressures. Add a garbage check.
#
class PressureConversion(BcdConversion):
def __init__(self):
BcdConversion.__init__(self, "hPa", 5, 1, "pressure")
def garbage(self, data):
value = self.binary2value(data)
return value < 900 or value > 1200
#
# For values that represent a date.
#
class ConversionDate(Conversion):
format = None
def __init__(self, nybble_count, format):
description = format
for xlate in "%Y:yyyy,%m:mm,%d:dd,%H:hh,%M:mm,%S:ss".split(","):
description = description.replace(*xlate.split(":"))
Conversion.__init__(self, "", nybble_count, description)
self.format = format
def str(self, value):
return time.strftime(self.format, time.localtime(value))
def parse(self, s):
return time.mktime(time.strptime(s, self.format))
class DateConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%Y-%m-%d")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[2] + tm[1] * 100 + (tm[0]-2000) * 10000
return num2bcd(dt, self.nybble_count)
class DatetimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 11, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 1000000000 % 100 + 2000,
x // 10000000 % 100,
x // 100000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dow = tm[6] + 1
dt = tm[4]+(tm[3]+(dow+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*10)*100)*100
return num2bcd(dt, self.nybble_count)
class UnixtimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 12, "%Y-%m-%d %H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x //10000000000 % 100 + 2000,
x // 100000000 % 100,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5]+(tm[4]+(tm[3]+(tm[2]+(tm[1]+(tm[0]-2000)*100)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimestampConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 10, "%Y-%m-%d %H:%M")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
x // 100000000 % 100 + 2000,
x // 1000000 % 100,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0,
0))
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[4] + (tm[3] + (tm[2] + (tm[1] + (tm[0]-2000)*100)*100)*100)*100
return num2bcd(dt, self.nybble_count)
class TimeConversion(ConversionDate):
def __init__(self):
ConversionDate.__init__(self, 6, "%H:%M:%S")
def binary2value(self, data):
x = bcd2num(data)
return time.mktime((
0,
0,
0,
x // 10000 % 100,
x // 100 % 100,
x % 100,
0,
0,
0)) - time.timezone
def value2binary(self, value):
tm = time.localtime(value)
dt = tm[5] + tm[4]*100 + tm[3]*10000
return num2bcd(dt, self.nybble_count)
def parse(self, s):
return time.mktime((0,0,0) + time.strptime(s, self.format)[3:]) + time.timezone
class WindDirectionConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "deg", 1, "North=0 clockwise")
def binary2value(self, data):
return data[0] * 22.5
def value2binary(self, value):
return (int((value + 11.25) / 22.5),)
def str(self, value):
return "%g" % value
def parse(self, s):
return float(s)
class WindVelocityConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d", 4, "wind speed and direction")
def binary2value(self, data):
return (bin2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
def value2binary(self, value):
return num2bin(value[0]*10, 3) + num2bin((value[1] + 11.5) / 22.5, 1)
def str(self, value):
return "%.1f,%g" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
# The ws2300 1.8 implementation does not calculate wind speed correctly -
# it uses bcd2num instead of bin2num. This conversion object uses bin2num
# decoding and it reads all wind data in a single transaction so that we do
# not suffer coherency problems.
class WindConversion(Conversion):
def __init__(self):
Conversion.__init__(self, "ms,d,o,v", 12, "wind speed, dir, validity")
def binary2value(self, data):
overflow = data[0]
validity = data[1]
speed = bin2num(data[2:5]) / 10.0
direction = data[5] * 22.5
return (speed, direction, overflow, validity)
def str(self, value):
return "%.1f,%g,%s,%s" % value
def parse(self, s):
return tuple([float(x) for x in s.split(",")])
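# Illustrative sketch (not part of the original driver): why WindConversion
# above must decode the three speed nybbles with bin2num rather than bcd2num.
# The raw nybbles below are hypothetical (least-significant first).
_speed_nybbles = (5, 2, 1)
assert bin2num(_speed_nybbles) / 10.0 == 29.3  # correct: 0x125 = 293 -> 29.3 m/s
assert bcd2num(_speed_nybbles) / 10.0 == 12.5  # ws2300 1.8 bug: 125 -> 12.5 m/s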
#
# For non-numerical values.
#
class TextConversion(Conversion):
constants = None
def __init__(self, constants):
items = constants.items()[:]
items.sort()
fullname = ",".join([c[1]+"="+str(c[0]) for c in items]) + ",unknown-X"
Conversion.__init__(self, "", 1, fullname)
self.constants = constants
def binary2value(self, data):
return data[0]
def value2binary(self, value):
return (value,)
def str(self, value):
result = self.constants.get(value, None)
if result != None:
return result
return "unknown-%d" % value
def parse(self, s):
result = [c[0] for c in self.constants.items() if c[1] == s]
if result:
return result[0]
return None
#
# For values that are represented by one bit.
#
class ConversionBit(Conversion):
bit = None
desc = None
def __init__(self, bit, desc):
self.bit = bit
self.desc = desc
Conversion.__init__(self, "", 1, desc[0] + "=0," + desc[1] + "=1")
def binary2value(self, data):
return data[0] & (1 << self.bit) and 1 or 0
def value2binary(self, value):
return (value << self.bit,)
def str(self, value):
return self.desc[value]
def parse(self, s):
return [c[0] for c in self.desc.items() if c[1] == s][0]
class BitConversion(ConversionBit):
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Since Ws2300.write_safe() only writes nybbles and we have just one bit,
# we have to insert that bit into the data_read so it can be written as
# a nybble.
#
def write(self, data, nybble):
data = (nybble & ~(1 << self.bit) | data[0],)
return (data, data)
class AlarmSetConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"off", 1:"on"})
class AlarmActiveConversion(BitConversion):
bit = None
desc = None
def __init__(self, bit):
BitConversion.__init__(self, bit, {0:"inactive", 1:"active"})
#
# For values that are represented by one bit, and must be written as
# a single bit.
#
class SetresetConversion(ConversionBit):
bit = None
def __init__(self, bit, desc):
ConversionBit.__init__(self, bit, desc)
#
# Setreset bits use a special write mode.
#
def write(self, data, nybble):
if data[0] == 0:
operation = Ws2300.UNSETBIT
else:
operation = Ws2300.SETBIT
return ((nybble & ~(1 << self.bit) | data[0],), [self.bit], operation)
#
# Conversion for history. This kludge makes history fit into the framework
# used for all the other measures.
#
class HistoryConversion(Conversion):
class HistoryRecord(object):
temp_indoor = None
temp_outdoor = None
pressure_absolute = None
humidity_indoor = None
humidity_outdoor = None
rain = None
wind_speed = None
wind_direction = None
def __str__(self):
return "%4.1fc %2d%% %4.1fc %2d%% %6.1fhPa %6.1fmm %2dm/s %5g" % (
self.temp_indoor, self.humidity_indoor,
self.temp_outdoor, self.humidity_outdoor,
self.pressure_absolute, self.rain,
self.wind_speed, self.wind_direction)
def parse(cls, s):
rec = cls()
toks = [tok.rstrip(string.ascii_letters + "%/") for tok in s.split()]
rec.temp_indoor = float(toks[0])
rec.humidity_indoor = int(toks[1])
rec.temp_outdoor = float(toks[2])
rec.humidity_outdoor = int(toks[3])
rec.pressure_absolute = float(toks[4])
rec.rain = float(toks[5])
rec.wind_speed = int(toks[6])
rec.wind_direction = int((float(toks[7]) + 11.25) / 22.5) % 16
return rec
parse = classmethod(parse)
def __init__(self):
Conversion.__init__(self, "", 19, "history")
def binary2value(self, data):
value = self.__class__.HistoryRecord()
n = bin2num(data[0:5])
value.temp_indoor = (n % 1000) / 10.0 - 30
value.temp_outdoor = (n - (n % 1000)) / 10000.0 - 30
n = bin2num(data[5:10])
value.pressure_absolute = (n % 10000) / 10.0
if value.pressure_absolute < 500:
value.pressure_absolute += 1000
value.humidity_indoor = (n - (n % 10000)) / 10000.0
value.humidity_outdoor = bcd2num(data[10:12])
value.rain = bin2num(data[12:15]) * 0.518
value.wind_speed = bin2num(data[15:18])
value.wind_direction = bin2num(data[18:19]) * 22.5
return value
def value2binary(self, value):
result = ()
n = int((value.temp_indoor + 30) * 10.0 + (value.temp_outdoor + 30) * 10000.0 + 0.5)
result = result + num2bin(n, 5)
n = value.pressure_absolute % 1000
n = int(n * 10.0 + value.humidity_indoor * 10000.0 + 0.5)
result = result + num2bin(n, 5)
result = result + num2bcd(value.humidity_outdoor, 2)
result = result + num2bin(int((value.rain + 0.518/2) / 0.518), 3)
result = result + num2bin(value.wind_speed, 3)
result = result + num2bin(value.wind_direction, 1)
return result
#
# Print value.
#
def str(self, value):
return str(value)
#
# Convert the string produced by "str()" back to the value.
#
def parse(self, s):
return self.__class__.HistoryRecord.parse(s)
#
# Various conversions we know about.
#
conv_ala0 = AlarmActiveConversion(0)
conv_ala1 = AlarmActiveConversion(1)
conv_ala2 = AlarmActiveConversion(2)
conv_ala3 = AlarmActiveConversion(3)
conv_als0 = AlarmSetConversion(0)
conv_als1 = AlarmSetConversion(1)
conv_als2 = AlarmSetConversion(2)
conv_als3 = AlarmSetConversion(3)
conv_buzz = SetresetConversion(3, {0:'on', 1:'off'})
conv_lbck = SetresetConversion(0, {0:'off', 1:'on'})
conv_date = DateConversion()
conv_dtme = DatetimeConversion()
conv_utme = UnixtimeConversion()
conv_hist = HistoryConversion()
conv_stmp = TimestampConversion()
conv_time = TimeConversion()
conv_wdir = WindDirectionConversion()
conv_wvel = WindVelocityConversion()
conv_conn = TextConversion({0:"cable", 3:"lost", 15:"wireless"})
conv_fore = TextConversion({0:"rainy", 1:"cloudy", 2:"sunny"})
conv_spdu = TextConversion({0:"m/s", 1:"knots", 2:"beaufort", 3:"km/h", 4:"mph"})
conv_tend = TextConversion({0:"steady", 1:"rising", 2:"falling"})
conv_wovr = TextConversion({0:"no", 1:"overflow"})
conv_wvld = TextConversion({0:"ok", 1:"invalid", 2:"overflow"})
conv_lcon = BinConversion("", 1, 0, "contrast")
conv_rec2 = BinConversion("", 2, 0, "record number")
conv_humi = BcdConversion("%", 2, 0, "humidity")
conv_pres = PressureConversion()
conv_rain = BcdConversion("mm", 6, 2, "rain")
conv_temp = BcdConversion("C", 4, 2, "temperature", -3000)
conv_per2 = BinConversion("s", 2, 1, "time interval", 5)
conv_per3 = BinConversion("min", 3, 0, "time interval")
conv_wspd = BinConversion("m/s", 3, 1, "speed")
conv_wind = WindConversion()
#
# Define a measurement on the Ws2300. This encapsulates:
# - The names (abbrev and long) of the thing being measured, eg wind speed.
# - The location it can be found at in the Ws2300's memory map.
# - The Conversion used to represent the figure.
#
class Measure(object):
IDS = {} # map, Measures defined. {id: Measure, ...}
NAMES = {} # map, Measures defined. {name: Measure, ...}
address = None # int, Nybble address in the Ws2300
conv = None # object, Type of value
id = None # string, Short name
name = None # string, Long name
reset = None # string, Id of measure used to reset this one
def __init__(self, address, id, conv, name, reset=None):
self.address = address
self.conv = conv
self.reset = reset
        if id is not None:
            self.id = id
            assert id not in self.__class__.IDS
            self.__class__.IDS[id] = self
        if name is not None:
            self.name = name
            assert name not in self.__class__.NAMES
            self.__class__.NAMES[name] = self
def __hash__(self):
return hash(self.id)
def __cmp__(self, other):
if isinstance(other, Measure):
return cmp(self.id, other.id)
return cmp(type(self), type(other))
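#
# Once defined, measures can be looked up through the class registries, e.g.
# (illustrative):
#   Measure.IDS["ot"]          # the "out temp" measure
#   Measure.NAMES["out temp"]  # the same Measure instance
#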
#
# Conversion for raw Hex data. These are created as needed.
#
class HexConversion(Conversion):
def __init__(self, nybble_count):
Conversion.__init__(self, "", nybble_count, "hex data")
def binary2value(self, data):
return data
def value2binary(self, value):
return value
def str(self, value):
return ",".join(["%x" % nybble for nybble in value])
def parse(self, s):
toks = s.replace(","," ").split()
for i in range(len(toks)):
s = list(toks[i])
s.reverse()
toks[i] = ''.join(s)
list_str = list(''.join(toks))
self.nybble_count = len(list_str)
return tuple([int(nybble) for nybble in list_str])
#
# The raw nybble measure.
#
class HexMeasure(Measure):
def __init__(self, address, id, conv, name):
self.address = address
self.name = name
self.conv = conv
#
# A History record. Again a kludge to make history fit into the framework
# developed for the other measurements. History records are identified
# by their record number. Record number 0 is the most recently written
# record, record number 1 is the next most recently written and so on.
#
class HistoryMeasure(Measure):
HISTORY_BUFFER_ADDR = 0x6c6 # int, Address of the first history record
    MAX_HISTORY_RECORDS = 0xaf # int, Max number of history records stored
LAST_POINTER = None # int, Pointer to last record
RECORD_COUNT = None # int, Number of records in use
recno = None # int, The record number this represents
conv = conv_hist
def __init__(self, recno):
self.recno = recno
def set_constants(cls, ws2300):
measures = [Measure.IDS["hp"], Measure.IDS["hn"]]
data = read_measurements(ws2300, measures)
cls.LAST_POINTER = int(measures[0].conv.binary2value(data[0]))
cls.RECORD_COUNT = int(measures[1].conv.binary2value(data[1]))
set_constants = classmethod(set_constants)
def id(self):
return "h%03d" % self.recno
id = property(id)
def name(self):
return "history record %d" % self.recno
name = property(name)
def offset(self):
if self.LAST_POINTER is None:
raise StandardError("HistoryMeasure.set_constants hasn't been called")
return (self.LAST_POINTER - self.recno) % self.MAX_HISTORY_RECORDS
offset = property(offset)
def address(self):
return self.HISTORY_BUFFER_ADDR + self.conv.nybble_count * self.offset
address = property(address)
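#
# Illustrative sketch of how history records are read (assumes ws2300 is an
# open Ws2300 instance):
#   HistoryMeasure.set_constants(ws2300)        # reads "hp" and "hn" first
#   newest = HistoryMeasure(0)                  # record 0 = most recent
#   data = read_measurements(ws2300, [newest])
#   record = newest.conv.binary2value(data[0])  # a HistoryRecord
#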
#
# The measurements we know about. This is all of them documented in
# memory_map_2300.txt, bar the history. History is handled specially.
# And of course, the "c?"'s aren't real measures at all - it's the current
# time on this machine.
#
Measure( -18, "ct", conv_time, "this computer's time")
Measure( -12, "cw", conv_utme, "this computer's date time")
Measure( -6, "cd", conv_date, "this computer's date")
Measure(0x006, "bz", conv_buzz, "buzzer")
Measure(0x00f, "wsu", conv_spdu, "wind speed units")
Measure(0x016, "lb", conv_lbck, "lcd backlight")
Measure(0x019, "sss", conv_als2, "storm warn alarm set")
Measure(0x019, "sts", conv_als0, "station time alarm set")
Measure(0x01a, "phs", conv_als3, "pressure max alarm set")
Measure(0x01a, "pls", conv_als2, "pressure min alarm set")
Measure(0x01b, "oths", conv_als3, "out temp max alarm set")
Measure(0x01b, "otls", conv_als2, "out temp min alarm set")
Measure(0x01b, "iths", conv_als1, "in temp max alarm set")
Measure(0x01b, "itls", conv_als0, "in temp min alarm set")
Measure(0x01c, "dphs", conv_als3, "dew point max alarm set")
Measure(0x01c, "dpls", conv_als2, "dew point min alarm set")
Measure(0x01c, "wchs", conv_als1, "wind chill max alarm set")
Measure(0x01c, "wcls", conv_als0, "wind chill min alarm set")
Measure(0x01d, "ihhs", conv_als3, "in humidity max alarm set")
Measure(0x01d, "ihls", conv_als2, "in humidity min alarm set")
Measure(0x01d, "ohhs", conv_als1, "out humidity max alarm set")
Measure(0x01d, "ohls", conv_als0, "out humidity min alarm set")
Measure(0x01e, "rhhs", conv_als1, "rain 1h alarm set")
Measure(0x01e, "rdhs", conv_als0, "rain 24h alarm set")
Measure(0x01f, "wds", conv_als2, "wind direction alarm set")
Measure(0x01f, "wshs", conv_als1, "wind speed max alarm set")
Measure(0x01f, "wsls", conv_als0, "wind speed min alarm set")
Measure(0x020, "siv", conv_ala2, "icon alarm active")
Measure(0x020, "stv", conv_ala0, "station time alarm active")
Measure(0x021, "phv", conv_ala3, "pressure max alarm active")
Measure(0x021, "plv", conv_ala2, "pressure min alarm active")
Measure(0x022, "othv", conv_ala3, "out temp max alarm active")
Measure(0x022, "otlv", conv_ala2, "out temp min alarm active")
Measure(0x022, "ithv", conv_ala1, "in temp max alarm active")
Measure(0x022, "itlv", conv_ala0, "in temp min alarm active")
Measure(0x023, "dphv", conv_ala3, "dew point max alarm active")
Measure(0x023, "dplv", conv_ala2, "dew point min alarm active")
Measure(0x023, "wchv", conv_ala1, "wind chill max alarm active")
Measure(0x023, "wclv", conv_ala0, "wind chill min alarm active")
Measure(0x024, "ihhv", conv_ala3, "in humidity max alarm active")
Measure(0x024, "ihlv", conv_ala2, "in humidity min alarm active")
Measure(0x024, "ohhv", conv_ala1, "out humidity max alarm active")
Measure(0x024, "ohlv", conv_ala0, "out humidity min alarm active")
Measure(0x025, "rhhv", conv_ala1, "rain 1h alarm active")
Measure(0x025, "rdhv", conv_ala0, "rain 24h alarm active")
Measure(0x026, "wdv", conv_ala2, "wind direction alarm active")
Measure(0x026, "wshv", conv_ala1, "wind speed max alarm active")
Measure(0x026, "wslv", conv_ala0, "wind speed min alarm active")
Measure(0x027, None, conv_ala3, "pressure max alarm active alias")
Measure(0x027, None, conv_ala2, "pressure min alarm active alias")
Measure(0x028, None, conv_ala3, "out temp max alarm active alias")
Measure(0x028, None, conv_ala2, "out temp min alarm active alias")
Measure(0x028, None, conv_ala1, "in temp max alarm active alias")
Measure(0x028, None, conv_ala0, "in temp min alarm active alias")
Measure(0x029, None, conv_ala3, "dew point max alarm active alias")
Measure(0x029, None, conv_ala2, "dew point min alarm active alias")
Measure(0x029, None, conv_ala1, "wind chill max alarm active alias")
Measure(0x029, None, conv_ala0, "wind chill min alarm active alias")
Measure(0x02a, None, conv_ala3, "in humidity max alarm active alias")
Measure(0x02a, None, conv_ala2, "in humidity min alarm active alias")
Measure(0x02a, None, conv_ala1, "out humidity max alarm active alias")
Measure(0x02a, None, conv_ala0, "out humidity min alarm active alias")
Measure(0x02b, None, conv_ala1, "rain 1h alarm active alias")
Measure(0x02b, None, conv_ala0, "rain 24h alarm active alias")
Measure(0x02c, None, conv_ala2, "wind direction alarm active alias")
Measure(0x02c, None, conv_ala2, "wind speed max alarm active alias")
Measure(0x02c, None, conv_ala2, "wind speed min alarm active alias")
Measure(0x200, "st", conv_time, "station set time", reset="ct")
Measure(0x23b, "sw", conv_dtme, "station current date time")
Measure(0x24d, "sd", conv_date, "station set date", reset="cd")
Measure(0x266, "lc", conv_lcon, "lcd contrast (ro)")
Measure(0x26b, "for", conv_fore, "forecast")
Measure(0x26c, "ten", conv_tend, "tendency")
Measure(0x346, "it", conv_temp, "in temp")
Measure(0x34b, "itl", conv_temp, "in temp min", reset="it")
Measure(0x350, "ith", conv_temp, "in temp max", reset="it")
Measure(0x354, "itlw", conv_stmp, "in temp min when", reset="sw")
Measure(0x35e, "ithw", conv_stmp, "in temp max when", reset="sw")
Measure(0x369, "itla", conv_temp, "in temp min alarm")
Measure(0x36e, "itha", conv_temp, "in temp max alarm")
Measure(0x373, "ot", conv_temp, "out temp")
Measure(0x378, "otl", conv_temp, "out temp min", reset="ot")
Measure(0x37d, "oth", conv_temp, "out temp max", reset="ot")
Measure(0x381, "otlw", conv_stmp, "out temp min when", reset="sw")
Measure(0x38b, "othw", conv_stmp, "out temp max when", reset="sw")
Measure(0x396, "otla", conv_temp, "out temp min alarm")
Measure(0x39b, "otha", conv_temp, "out temp max alarm")
Measure(0x3a0, "wc", conv_temp, "wind chill")
Measure(0x3a5, "wcl", conv_temp, "wind chill min", reset="wc")
Measure(0x3aa, "wch", conv_temp, "wind chill max", reset="wc")
Measure(0x3ae, "wclw", conv_stmp, "wind chill min when", reset="sw")
Measure(0x3b8, "wchw", conv_stmp, "wind chill max when", reset="sw")
Measure(0x3c3, "wcla", conv_temp, "wind chill min alarm")
Measure(0x3c8, "wcha", conv_temp, "wind chill max alarm")
Measure(0x3ce, "dp", conv_temp, "dew point")
Measure(0x3d3, "dpl", conv_temp, "dew point min", reset="dp")
Measure(0x3d8, "dph", conv_temp, "dew point max", reset="dp")
Measure(0x3dc, "dplw", conv_stmp, "dew point min when", reset="sw")
Measure(0x3e6, "dphw", conv_stmp, "dew point max when", reset="sw")
Measure(0x3f1, "dpla", conv_temp, "dew point min alarm")
Measure(0x3f6, "dpha", conv_temp, "dew point max alarm")
Measure(0x3fb, "ih", conv_humi, "in humidity")
Measure(0x3fd, "ihl", conv_humi, "in humidity min", reset="ih")
Measure(0x3ff, "ihh", conv_humi, "in humidity max", reset="ih")
Measure(0x401, "ihlw", conv_stmp, "in humidity min when", reset="sw")
Measure(0x40b, "ihhw", conv_stmp, "in humidity max when", reset="sw")
Measure(0x415, "ihla", conv_humi, "in humidity min alarm")
Measure(0x417, "ihha", conv_humi, "in humidity max alarm")
Measure(0x419, "oh", conv_humi, "out humidity")
Measure(0x41b, "ohl", conv_humi, "out humidity min", reset="oh")
Measure(0x41d, "ohh", conv_humi, "out humidity max", reset="oh")
Measure(0x41f, "ohlw", conv_stmp, "out humidity min when", reset="sw")
Measure(0x429, "ohhw", conv_stmp, "out humidity max when", reset="sw")
Measure(0x433, "ohla", conv_humi, "out humidity min alarm")
Measure(0x435, "ohha", conv_humi, "out humidity max alarm")
Measure(0x497, "rd", conv_rain, "rain 24h")
Measure(0x49d, "rdh", conv_rain, "rain 24h max", reset="rd")
Measure(0x4a3, "rdhw", conv_stmp, "rain 24h max when", reset="sw")
Measure(0x4ae, "rdha", conv_rain, "rain 24h max alarm")
Measure(0x4b4, "rh", conv_rain, "rain 1h")
Measure(0x4ba, "rhh", conv_rain, "rain 1h max", reset="rh")
Measure(0x4c0, "rhhw", conv_stmp, "rain 1h max when", reset="sw")
Measure(0x4cb, "rhha", conv_rain, "rain 1h max alarm")
Measure(0x4d2, "rt", conv_rain, "rain total", reset=0)
Measure(0x4d8, "rtrw", conv_stmp, "rain total reset when", reset="sw")
Measure(0x4ee, "wsl", conv_wspd, "wind speed min", reset="ws")
Measure(0x4f4, "wsh", conv_wspd, "wind speed max", reset="ws")
Measure(0x4f8, "wslw", conv_stmp, "wind speed min when", reset="sw")
Measure(0x502, "wshw", conv_stmp, "wind speed max when", reset="sw")
Measure(0x527, "wso", conv_wovr, "wind speed overflow")
Measure(0x528, "wsv", conv_wvld, "wind speed validity")
Measure(0x529, "wv", conv_wvel, "wind velocity")
Measure(0x529, "ws", conv_wspd, "wind speed")
Measure(0x52c, "w0", conv_wdir, "wind direction")
Measure(0x52d, "w1", conv_wdir, "wind direction 1")
Measure(0x52e, "w2", conv_wdir, "wind direction 2")
Measure(0x52f, "w3", conv_wdir, "wind direction 3")
Measure(0x530, "w4", conv_wdir, "wind direction 4")
Measure(0x531, "w5", conv_wdir, "wind direction 5")
Measure(0x533, "wsla", conv_wspd, "wind speed min alarm")
Measure(0x538, "wsha", conv_wspd, "wind speed max alarm")
Measure(0x54d, "cn", conv_conn, "connection type")
Measure(0x54f, "cc", conv_per2, "connection time till connect")
Measure(0x5d8, "pa", conv_pres, "pressure absolute")
Measure(0x5e2, "pr", conv_pres, "pressure relative")
Measure(0x5ec, "pc", conv_pres, "pressure correction")
Measure(0x5f6, "pal", conv_pres, "pressure absolute min", reset="pa")
Measure(0x600, "prl", conv_pres, "pressure relative min", reset="pr")
Measure(0x60a, "pah", conv_pres, "pressure absolute max", reset="pa")
Measure(0x614, "prh", conv_pres, "pressure relative max", reset="pr")
Measure(0x61e, "plw", conv_stmp, "pressure min when", reset="sw")
Measure(0x628, "phw", conv_stmp, "pressure max when", reset="sw")
Measure(0x63c, "pla", conv_pres, "pressure min alarm")
Measure(0x650, "pha", conv_pres, "pressure max alarm")
Measure(0x6b2, "hi", conv_per3, "history interval")
Measure(0x6b5, "hc", conv_per3, "history time till sample")
Measure(0x6b8, "hw", conv_stmp, "history last sample when")
Measure(0x6c2, "hp", conv_rec2, "history last record pointer",reset=0)
Measure(0x6c4, "hn", conv_rec2, "history number of records", reset=0)
# get all of the wind info in a single invocation
Measure(0x527, "wind", conv_wind, "wind")
#
# Read the requests.
#
def read_measurements(ws2300, read_requests):
if not read_requests:
return []
#
# Optimise what we have to read.
#
batches = [(m.address, m.conv.nybble_count) for m in read_requests]
batches.sort()
index = 1
addr = {batches[0][0]: 0}
while index < len(batches):
same_sign = (batches[index-1][0] < 0) == (batches[index][0] < 0)
same_area = batches[index-1][0] + batches[index-1][1] + 6 >= batches[index][0]
if not same_sign or not same_area:
addr[batches[index][0]] = index
index += 1
continue
addr[batches[index][0]] = index-1
batches[index-1] = batches[index-1][0], batches[index][0] + batches[index][1] - batches[index-1][0]
del batches[index]
#
# Read the data.
#
nybbles = ws2300.read_batch(batches)
#
# Return the data read in the order it was requested.
#
results = []
for measure in read_requests:
index = addr[measure.address]
offset = measure.address - batches[index][0]
results.append(nybbles[index][offset:offset+measure.conv.nybble_count])
return results
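#
# Illustrative example (given an open Ws2300 instance ws2300): requesting two
# measures whose addresses lie within a few nybbles of each other, such as
# "it" (0x346) and "itl" (0x34b), results in a single merged batch read:
#   data = read_measurements(ws2300, [Measure.IDS["it"], Measure.IDS["itl"]])
#   in_temp = Measure.IDS["it"].conv.binary2value(data[0])
#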
class WS23xxConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WS23xx]
# This section is for the La Crosse WS-2300 series of weather stations.
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = /dev/ttyUSB0
# The station model, e.g., 'LaCrosse WS2317' or 'TFA Primus'
model = LaCrosse WS23xx
# The driver to use:
driver = weewx.drivers.ws23xx
"""
def prompt_for_settings(self):
print "Specify the serial port on which the station is connected, for"
print "example /dev/ttyUSB0 or /dev/ttyS0."
port = self._prompt('port', '/dev/ttyUSB0')
return {'port': port}
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ws23xx.py
if __name__ == '__main__':
import optparse
usage = """%prog [options] [--debug] [--help]"""
syslog.openlog('ws23xx', syslog.LOG_PID | syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))
port = DEFAULT_PORT
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--debug', dest='debug', action='store_true',
help='display diagnostic information while running')
parser.add_option('--port', dest='port', metavar='PORT',
help='serial port to which the station is connected')
parser.add_option('--readings', dest='readings', action='store_true',
help='display sensor readings')
parser.add_option("--records", dest="records", type=int, metavar="N",
help="display N station records, oldest to newest")
parser.add_option('--help-measures', dest='hm', action='store_true',
help='display measure names')
parser.add_option('--measure', dest='measure', type=str,
metavar="MEASURE", help='display single measure')
(options, args) = parser.parse_args()
if options.version:
print "ws23xx driver version %s" % DRIVER_VERSION
exit(1)
if options.debug is not None:
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
if options.port:
port = options.port
with WS23xx(port) as s:
if options.readings:
data = s.get_raw_data(SENSOR_IDS)
print data
if options.records is not None:
for ts,record in s.gen_records(count=options.records):
print ts,record
if options.measure:
data = s.get_raw_data([options.measure])
print data
if options.hm:
for m in Measure.IDS:
print "%s\t%s" % (m, Measure.IDS[m].name)
| knnniggett/weewx | bin/weewx/drivers/ws23xx.py | Python | gpl-3.0 | 77,294 | 0.004463 |
from __future__ import unicode_literals
import logging
import os
import string
import urllib
import urlparse
import glib
logger = logging.getLogger(__name__)
XDG_DIRS = {
'XDG_CACHE_DIR': glib.get_user_cache_dir(),
'XDG_CONFIG_DIR': glib.get_user_config_dir(),
'XDG_DATA_DIR': glib.get_user_data_dir(),
'XDG_MUSIC_DIR': glib.get_user_special_dir(glib.USER_DIRECTORY_MUSIC),
}
# XDG_MUSIC_DIR can be None, so filter out any bad data.
XDG_DIRS = dict((k, v) for k, v in XDG_DIRS.items() if v is not None)
def get_or_create_dir(dir_path):
if not isinstance(dir_path, bytes):
raise ValueError('Path is not a bytestring.')
dir_path = expand_path(dir_path)
if os.path.isfile(dir_path):
raise OSError(
'A file with the same name as the desired dir, '
'"%s", already exists.' % dir_path)
elif not os.path.isdir(dir_path):
logger.info('Creating dir %s', dir_path)
os.makedirs(dir_path, 0755)
return dir_path
def get_or_create_file(file_path, mkdir=True, content=None):
if not isinstance(file_path, bytes):
raise ValueError('Path is not a bytestring.')
file_path = expand_path(file_path)
if mkdir:
get_or_create_dir(os.path.dirname(file_path))
if not os.path.isfile(file_path):
logger.info('Creating file %s', file_path)
with open(file_path, 'w') as fh:
if content:
fh.write(content)
return file_path
def path_to_uri(path):
"""
Convert OS specific path to file:// URI.
Accepts either unicode strings or bytestrings. The encoding of any
bytestring will be maintained so that :func:`uri_to_path` can return the
same bytestring.
Returns a file:// URI as an unicode string.
"""
if isinstance(path, unicode):
path = path.encode('utf-8')
path = urllib.quote(path)
return urlparse.urlunsplit((b'file', b'', path, b'', b''))
def uri_to_path(uri):
"""
Convert an URI to a OS specific path.
Returns a bytestring, since the file path can contain chars with other
encoding than UTF-8.
If we had returned these paths as unicode strings, you wouldn't be able to
look up the matching dir or file on your file system because the exact path
would be lost by ignoring its encoding.
"""
if isinstance(uri, unicode):
uri = uri.encode('utf-8')
return urllib.unquote(urlparse.urlsplit(uri).path)
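# Illustrative round trip (Python 2 bytestring semantics; the path's original
# byte encoding survives the conversion):
#   uri = path_to_uri(b'/tmp/\xc3\xa6')    # file URI with the bytes quoted as %C3%A6
#   uri_to_path(uri) == b'/tmp/\xc3\xa6'   # True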
def split_path(path):
parts = []
while True:
path, part = os.path.split(path)
if part:
parts.insert(0, part)
if not path or path == b'/':
break
return parts
def expand_path(path):
# TODO: document as we want people to use this.
if not isinstance(path, bytes):
raise ValueError('Path is not a bytestring.')
try:
path = string.Template(path).substitute(XDG_DIRS)
except KeyError:
return None
path = os.path.expanduser(path)
path = os.path.abspath(path)
return path
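# Illustrative example (assuming a typical Linux setup where the XDG config
# dir is ~/.config and the user is "alice"):
#   expand_path(b'$XDG_CONFIG_DIR/mopidy')  # => b'/home/alice/.config/mopidy'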
def find_files(path):
"""
Finds all files within a path.
    Directories and files with names starting with ``.`` are ignored.
    :returns: yields paths to files, relative to ``path``, as bytestrings
"""
if isinstance(path, unicode):
path = path.encode('utf-8')
if os.path.isfile(path):
return
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
for dirname in dirnames:
if dirname.startswith(b'.'):
# Skip hidden dirs by modifying dirnames inplace
dirnames.remove(dirname)
for filename in filenames:
if filename.startswith(b'.'):
# Skip hidden files
continue
yield os.path.relpath(os.path.join(dirpath, filename), path)
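# Illustrative example: if /music contains album/song.mp3 and .cache/foo, only
# the non-hidden file is yielded, relative to the base path:
#   list(find_files(b'/music'))  # => [b'album/song.mp3']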
def check_file_path_is_inside_base_dir(file_path, base_path):
assert not file_path.endswith(os.sep), (
'File path %s cannot end with a path separator' % file_path)
# Expand symlinks
real_base_path = os.path.realpath(base_path)
real_file_path = os.path.realpath(file_path)
    # Use dir of file for prefix comparison, so we don't accept
# /tmp/foo.m3u as being inside /tmp/foo, simply because they have a
# common prefix, /tmp/foo, which matches the base path, /tmp/foo.
real_dir_path = os.path.dirname(real_file_path)
# Check if dir of file is the base path or a subdir
common_prefix = os.path.commonprefix([real_base_path, real_dir_path])
assert common_prefix == real_base_path, (
'File path %s must be in %s' % (real_file_path, real_base_path))
# FIXME replace with mock usage in tests.
class Mtime(object):
def __init__(self):
self.fake = None
def __call__(self, path):
if self.fake is not None:
return self.fake
return int(os.stat(path).st_mtime)
def set_fake_time(self, time):
self.fake = time
def undo_fake(self):
self.fake = None
mtime = Mtime()
| abarisain/mopidy | mopidy/utils/path.py | Python | apache-2.0 | 5,006 | 0.0002 |
def First_Part(orbits):
Orbits = dict()
checksum = 0
for orbit in orbits:
od, og = orbit.split(')')
Orbits[og] = od
for og in Orbits.keys():
while 1:
try:
og = Orbits[og]
checksum += 1
except KeyError:
break
return checksum
def Second_Part(orbits):
Orbits = dict()
for orbit in orbits:
od, og = orbit.split(')')
Orbits[og] = od
oPast = ["YOU"]
oCurr = [Orbits["YOU"]]
oNext = list()
dist = 0
while "SAN" not in oCurr:
for o in oCurr:
oNext += ([Orbits[o]] if o != "COM" else []) + [i for i in Orbits.keys() if Orbits[i] == o and i not in oPast]
oCurr = oNext
oNext = list()
oPast += oCurr
dist += 1
return dist - 1
Orbits = '''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN'''.split('\n')
Orbits = open("Inputs/Day_06.txt", 'r').read().split('\n')[:-1]
print(First_Part(Orbits))
print(Second_Part(Orbits))
| ImpregnableProgrammer/Advent-of-Code | 2019/Day_06.py | Python | gpl-3.0 | 1,071 | 0.006536 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: J.Y Han
# start
# spawn-fcgi -d /users/hanjiyun/project/geeksoho -f /users/hanjiyun/project/geeksoho/application.py -a 127.0.0.1 -p 9001
#stop
# kill `pgrep -f "/users/hanjiyun/project/geeksoho/application.py"`
import os
import web
import rediswebpy
from web.contrib.template import render_jinja
import misc
db = web.database(dbn='mysql', db='geeksoho', user='geeksoho', passwd='geeksoho')
urls = (
'/', 'index',
'/test', 'test'
)
# controllers
# ===============
class index:
"""Home"""
def GET(self):
# return pjax('jobs.html')
jobsList = GetJobs()
return render.jobs(jobsList=jobsList)
def POST(self):
data = web.input(title='', link='', company='', company_weibo='', company_website='', city='', salary='', intro='')
CreatNewJob(data)
raise web.seeother('/')
class test:
"""test"""
def GET(self):
# return pjax('test.html')
return render.test()
# models
# =============
def CreatNewJob(data):
db.insert(
'jobs',
title = data.title,
link = data.link,
company = data.company,
company_weibo = data.company_weibo,
company_website = data.company_website,
city = data.city,
salary = data.salary,
intro = data.intro)
def GetJobs():
return db.select('jobs', limit = 100, order='id DESC')
# globals = get_all_functions(misc)
app = web.application(urls, globals())
web.config.debug = True
cache = False
session = web.session.Session(app, rediswebpy.RedisStore(), initializer={'count': 0})
render = render_jinja(
    'templates', # Template path.
    encoding = 'utf-8', # Encoding.
)
myFilters = {'filter_tags': misc.filter_tags,}
render._lookup.filters.update(myFilters)
if __name__ == "__main__":
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
app.run() | naoyeye/geeksoho | application.py | Python | mit | 1,969 | 0.019457 |
from __future__ import with_statement
from ofxclient.account import Account
from configparser import ConfigParser
import os
import os.path
try:
import keyring
KEYRING_AVAILABLE = True
except:
KEYRING_AVAILABLE = False
try:
DEFAULT_CONFIG = os.path.expanduser(os.path.join('~', 'ofxclient.ini'))
except:
DEFAULT_CONFIG = None
class SecurableConfigParser(ConfigParser):
""":py:class:`ConfigParser.ConfigParser` subclass that knows how to store
options marked as secure into the OS specific
keyring/keychain.
To mark an option as secure, the caller must call
'set_secure' at least one time for the particular
option and from then on it will be seen as secure
and will be stored / retrieved from the keychain.
Example::
from ofxclient.config import SecurableConfigParser
# password will not be saved in the config file
c = SecurableConfigParser()
c.add_section('Info')
c.set('Info','username','bill')
c.set_secure('Info','password','s3cre7')
with open('config.ini','w') as fp:
c.write(fp)
"""
_secure_placeholder = '%{secured}'
def __init__(self, keyring_name='ofxclient',
keyring_available=KEYRING_AVAILABLE, **kwargs):
ConfigParser.__init__(self, interpolation = None)
self.keyring_name = keyring_name
self.keyring_available = keyring_available
self._unsaved = {}
self.keyring_name = keyring_name
def is_secure_option(self, section, option):
"""Test an option to see if it is secured or not.
:param section: section id
:type section: string
:param option: option name
:type option: string
        :rtype: boolean
        Returns ``True`` if the option is secured, ``False`` otherwise.
"""
if not self.has_section(section):
return False
if not self.has_option(section, option):
return False
if ConfigParser.get(self, section, option) == self._secure_placeholder:
return True
return False
def has_secure_option(self, section, option):
"""See is_secure_option"""
return self.is_secure_option(section, option)
def items(self, section):
"""Get all items for a section. Subclassed, to ensure secure
items come back with the unencrypted data.
:param section: section id
:type section: string
"""
items = []
for k, v in ConfigParser.items(self, section):
if self.is_secure_option(section, k):
v = self.get(section, k)
items.append((k, v))
return items
def secure_items(self, section):
"""Like items() but only return secure items.
:param section: section id
:type section: string
"""
return [x
for x in self.items(section)
if self.is_secure_option(section, x[0])]
def set(self, section, option, value):
"""Set an option value. Knows how to set options properly marked
as secure."""
if self.is_secure_option(section, option):
self.set_secure(section, option, value)
else:
ConfigParser.set(self, section, option, value)
def set_secure(self, section, option, value):
"""Set an option and mark it as secure.
Any subsequent uses of 'set' or 'get' will also
now know that this option is secure as well.
"""
if self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('set', value)
value = self._secure_placeholder
ConfigParser.set(self, section, option, value)
def get(self, section, option, *args):
"""Get option value from section. If an option is secure,
populates the plain text."""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
if self._unsaved.get(s_option, [''])[0] == 'set':
return self._unsaved[s_option][1]
else:
return keyring.get_password(self.keyring_name, s_option)
return ConfigParser.get(self, section, option, *args)
def remove_option(self, section, option):
"""Removes the option from ConfigParser as well as
the secure storage backend
"""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('delete', None)
ConfigParser.remove_option(self, section, option)
def write(self, *args):
"""See ConfigParser.write(). Also writes secure items to keystore."""
ConfigParser.write(self, *args)
if self.keyring_available:
for key, thing in self._unsaved.items():
action = thing[0]
value = thing[1]
if action == 'set':
keyring.set_password(self.keyring_name, key, value)
elif action == 'delete':
try:
keyring.delete_password(self.keyring_name, key)
except:
pass
self._unsaved = {}
class OfxConfig(object):
"""Default config file handler for other tools to use.
This can read and write from the default config which is
$USERS_HOME/ofxclient.ini
:param file_name: absolute path to a config file (optional)
:type file_name: string or None
Example usage::
from ofxclient.config import OfxConfig
from ofxclient import Account
a = Account()
c = OfxConfig(file_name='/tmp/new.ini')
c.add_account(a)
c.save()
account_list = c.accounts()
one_account = c.account( a.local_id() )
"""
def __init__(self, file_name=None):
self.secured_field_names = [
'institution.username',
'institution.password'
]
f = file_name or DEFAULT_CONFIG
if f is None:
raise ValueError('file_name is required')
self._load(f)
def reload(self):
"""Reload the config file from disk"""
return self._load()
def accounts(self):
"""List of confgured :py:class:`ofxclient.Account` objects"""
return [self._section_to_account(s)
for s in self.parser.sections()]
def encrypted_accounts(self):
return [a
for a in self.accounts()
if self.is_encrypted_account(a.local_id())]
def unencrypted_accounts(self):
return [a
for a in self.accounts()
if not self.is_encrypted_account(a.local_id())]
def account(self, id):
"""Get :py:class:`ofxclient.Account` by section id"""
if self.parser.has_section(id):
return self._section_to_account(id)
return None
def add_account(self, account):
"""Add Account to config (does not save)"""
serialized = account.serialize()
section_items = flatten_dict(serialized)
section_id = section_items['local_id']
if not self.parser.has_section(section_id):
self.parser.add_section(section_id)
for key in sorted(section_items):
self.parser.set(section_id, key, section_items[key])
self.encrypt_account(id=section_id)
return self
def encrypt_account(self, id):
"""Make sure that certain fields are encrypted."""
for key in self.secured_field_names:
value = self.parser.get(id, key)
self.parser.set_secure(id, key, value)
return self
def is_encrypted_account(self, id):
"""Are all fields for the account id encrypted?"""
for key in self.secured_field_names:
if not self.parser.is_secure_option(id, key):
return False
return True
def remove_account(self, id):
"""Add Account from config (does not save)"""
if self.parser.has_section(id):
self.parser.remove_section(id)
return True
return False
def save(self):
"""Save changes to config file"""
with open(self.file_name, 'w') as fp:
self.parser.write(fp)
return self
def _load(self, file_name=None):
self.parser = None
file_name = file_name or self.file_name
if not os.path.exists(file_name):
with open(file_name, 'a'):
os.utime(file_name, None)
self.file_name = file_name
conf = SecurableConfigParser()
conf.readfp(open(self.file_name))
self.parser = conf
return self
def _section_to_account(self, section):
section_items = dict(self.parser.items(section))
serialized = unflatten_dict(section_items)
return Account.deserialize(serialized)
def unflatten_dict(dict_, prefix=None, separator='.'):
    ret = {}
    for k, v in dict_.items():
key_parts = k.split(separator)
if len(key_parts) == 1:
ret[k] = v
else:
first = key_parts[0]
rest = key_parts[1:]
temp = ret.setdefault(first, {})
for idx, part in enumerate(rest):
if (idx+1) == len(rest):
temp[part] = v
else:
temp = temp.setdefault(part, {})
return ret
def flatten_dict(dict_, prefix=None, separator='.'):
ret = {}
for k, v in dict_.items():
if prefix:
flat_key = separator.join([prefix, k])
else:
flat_key = k
if isinstance(v, dict):
deflated = flatten_dict(v, prefix=flat_key)
for dk, dv in deflated.items():
ret[dk] = dv
else:
ret[flat_key] = v
return ret
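# Illustrative round trip, as used when accounts are serialized into and
# parsed back out of the config file:
#   flatten_dict({'institution': {'username': 'bill'}})
#   # => {'institution.username': 'bill'}
#   unflatten_dict({'institution.username': 'bill'})
#   # => {'institution': {'username': 'bill'}}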
| jbms/ofxclient | ofxclient/config.py | Python | mit | 9,882 | 0.000506 |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from uuid import uuid4
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir, pardir))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrest"))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrestazure"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "Paging"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from msrestazure.azure_exceptions import CloudError
from msrest.authentication import BasicTokenAuthentication
from autorestpagingtestservice import AutoRestPagingTestService
from autorestpagingtestservice.models import PagingGetMultiplePagesWithOffsetOptions
class PagingTests(unittest.TestCase):
def setUp(self):
cred = BasicTokenAuthentication({"access_token" :str(uuid4())})
self.client = AutoRestPagingTestService(cred, base_url="http://localhost:3000")
self.client._client._adapter.add_hook("request", self.client._client._adapter._test_pipeline)
return super(PagingTests, self).setUp()
def test_paging_happy_path(self):
pages = self.client.paging.get_single_pages()
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 1)
self.assertEqual(items[0].properties.id, 1)
self.assertEqual(items[0].properties.name, "Product")
pages = self.client.paging.get_multiple_pages()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages.reset()
more_items = [i for i in pages]
eq = [e for e in items if e not in more_items]
self.assertEqual(len(eq), 0)
with self.assertRaises(GeneratorExit):
pages.next()
pages = self.client.paging.get_multiple_pages_retry_first()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages = self.client.paging.get_multiple_pages_retry_second()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages = self.client.paging.get_single_pages(raw=True)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 1)
self.assertEqual(items, pages.raw.output)
pages = self.client.paging.get_multiple_pages(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
self.assertIsNotNone(pages.raw.response)
options = PagingGetMultiplePagesWithOffsetOptions(100)
pages = self.client.paging.get_multiple_pages_with_offset(paging_get_multiple_pages_with_offset_options=options)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
self.assertEqual(items[-1].properties.id, 110)
pages = self.client.paging.get_multiple_pages_retry_first(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
pages = self.client.paging.get_multiple_pages_retry_second(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
def test_paging_sad_path(self):
pages = self.client.paging.get_single_pages_failure()
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure()
self.assertIsNotNone(pages.next_link)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure_uri()
with self.assertRaises(ValueError):
items = [i for i in pages]
pages = self.client.paging.get_single_pages_failure(raw=True)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure(raw=True)
self.assertIsNotNone(pages.next_link)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure_uri(raw=True)
with self.assertRaises(ValueError):
items = [i for i in pages]
if __name__ == '__main__':
unittest.main()
| sharadagarwal/autorest | AutoRest/Generators/Python/Azure.Python.Tests/AcceptanceTests/paging_tests.py | Python | mit | 6,270 | 0.003191 |
import os
from threading import RLock
from path import Path
import mtt.config as base_config # noqa
__all__ = ['config', 'lock']
class ConfigAccessor:
def __init__(self, configuration_items):
self.config = configuration_items
def update(self, other):
        self.config.update(other.config if hasattr(other, 'config') else other)
def __getattr__(self, item):
if item in self.config:
return self.config[item]
raise AttributeError(f'Unknown configuration option \'{item}\'')
def __getitem__(self, item):
try:
return self.config[item]
except KeyError:
raise KeyError(f'Unknown configuration option \'{item}\'')
def get_variables_in_module(module_name: str) -> ConfigAccessor:
module = globals().get(module_name, None)
module_type = type(os)
class_type = type(Path)
variables = {}
if module:
variables = {key: value for key, value in module.__dict__.items()
if not (key.startswith('__') or key.startswith('_'))
and not isinstance(value, module_type)
and not isinstance(value, class_type)}
return ConfigAccessor(variables)
config = get_variables_in_module('base_config')
try:
import mtt.user_config as user_config # noqa
config.update(get_variables_in_module('user_config'))
except ImportError:
pass
lock = RLock()
| halcy/MastodonToTwitter | mtt/__init__.py | Python | mit | 1,421 | 0 |
import unittest
from integration.thrift.test_thrift import ThriftTestCase
from scales.thriftmux import ThriftMux
class ThriftMuxTestCase(ThriftTestCase):
BUILDER = ThriftMux
if __name__ == '__main__':
unittest.main()
| steveniemitz/scales | test/integration/thrift/test_mux.py | Python | mit | 224 | 0.017857 |
from mediadrop.lib.auth.group_based_policy import *
| kgao/MediaDrop | mediacore/lib/auth/group_based_policy.py | Python | gpl-3.0 | 52 | 0 |
import os
from queue import Queue
from bears.python.requirements.PySafetyBear import PySafetyBear
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.testing.BearTestHelper import generate_skip_decorator
def get_testfile_path(name):
return os.path.join(os.path.dirname(__file__),
'PySafety_test_files',
name)
def load_testfile(name):
return open(get_testfile_path(name)).readlines()
@generate_skip_decorator(PySafetyBear)
class PySafetyBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = PySafetyBear(self.section, Queue())
def test_without_vulnerability(self):
self.check_validity(self.uut, ['lxml==3.6.0'])
def test_with_vulnerability(self):
self.check_invalidity(self.uut, ['bottle==0.10.1'])
def test_with_cve_vulnerability(self):
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
file_contents = [file_contents[0]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'bottle<0.12.10 is vulnerable '
'to CVE-2016-9964 and your project '
'is using 0.10.0.',
file=get_testfile_path(file_name),
line=1,
column=9,
end_line=1,
end_column=15,
severity=RESULT_SEVERITY.NORMAL,
additional_info='redirect() in bottle.py '
'in bottle 0.12.10 doesn\'t filter '
'a "\\r\\n" sequence, which leads '
'to a CRLF attack, as demonstrated '
'by a redirect("233\\r\\nSet-Cookie: '
'name=salt") call.'),
Result.from_values('PySafetyBear',
'bottle>=0.10,<0.10.12 is vulnerable to '
'CVE-2014-3137 and your project is '
'using 0.10.0.',
file=get_testfile_path(file_name),
line=1,
column=9,
end_line=1,
end_column=15,
severity=RESULT_SEVERITY.NORMAL,
additional_info='Bottle 0.10.x before 0.10.12,'
' 0.11.x before 0.11.7, and 0.12.x before'
' 0.12.6 does not properly limit content'
' types, which allows remote attackers to'
' bypass intended access restrictions via an'
' accepted Content-Type followed by a ;'
' (semi-colon) and a Content-Type that'
' would not be accepted, as demonstrated in'
' YouCompleteMe to execute arbitrary code.')],
filename=get_testfile_path(file_name))
def test_without_cve_vulnerability(self):
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
file_contents = [file_contents[1]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'locustio<0.7 is vulnerable to pyup.io-25878 '
'and your project is using 0.5.1.',
file=get_testfile_path(file_name),
line=1,
column=11,
end_line=1,
end_column=16,
severity=RESULT_SEVERITY.NORMAL,
additional_info='locustio before '
'0.7 uses pickle.',
)],
filename=get_testfile_path(file_name))
def test_with_cve_ignore(self):
self.section.append(Setting('cve_ignore', 'CVE-2016-9964, '
'CVE-2014-3137'))
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
# file_contents = [file_contents[0]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'locustio<0.7 is vulnerable to pyup.io-25878 '
'and your project is using 0.5.1.',
file=get_testfile_path(file_name),
line=2,
column=11,
end_line=2,
end_column=16,
severity=RESULT_SEVERITY.NORMAL,
additional_info='locustio before '
'0.7 uses pickle.',
)],
filename=get_testfile_path(file_name))
def test_with_no_requirements(self):
self.check_validity(self.uut, [])
def test_with_no_pinned_requirements(self):
self.check_validity(self.uut, ['foo'])
| coala/coala-bears | tests/python/requirements/PySafetyBearWithoutMockTest.py | Python | agpl-3.0 | 5,749 | 0 |
import datetime
import uuid
from flask import current_app as app
from flask import url_for
from database import db
from sqlalchemy import Column, DateTime, String, Text
from sqlalchemy.dialects.postgresql import UUID
class Poster(db.Model):
__tablename__ = 'posters'
id = Column(UUID(as_uuid=True), primary_key=True)
title = Column(String(400), nullable=False, default='Untitled')
authors = Column(Text)
abstract = Column(Text)
source_url = Column(String(400), nullable=False)
download_url = Column(String(400), nullable=False)
presented_at = Column(String(200))
    created_at = Column('create_date', DateTime, default=datetime.datetime.now)
id_admin = Column(UUID(as_uuid=True), unique=True, nullable=False)
email = Column(String(50))
def __init__(self, title, source_url, download_url, authors=None,
abstract=None, presented_at=None):
self.id = uuid.uuid4()
self.title = title
self.authors = authors
self.abstract = abstract
self.source_url = source_url
self.download_url = download_url
self.presented_at = presented_at
self.id_admin = uuid.uuid4()
def __repr__(self):
        return '<Poster {}>'.format(str(self.id))
def serialize(self):
return {
'id': self.id,
'title': self.title,
'authors': self.authors,
'abstract': self.abstract,
'source_url': self.source_url,
'download_url': self.download_url,
'presented_at': self.presented_at,
'created_at': self.created_at.isoformat(),
'thumbnail_url': self.thumbnail_url(),
}
def public_url(self, absolute=False):
return url_for('get_poster', id=self.id, _external=absolute)
def admin_url(self, absolute=False):
return url_for('edit_poster', id_admin=self.id_admin, _external=absolute)
def qrcode_svg_url(self, absolute=False):
return url_for('get_qrcode_svg', id=self.id, _external=absolute)
def qrcode_png_url(self, absolute=False):
return url_for('get_qrcode_png', id=self.id, _external=absolute)
def is_image(self):
return self.download_url.endswith('.png') or self.download_url.endswith('.jpg')
def viewable_download_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
if self.is_image() or self.download_url.startswith(cloudinary):
return self.download_url
return '{}/image/fetch/{}'.format(cloudinary, self.download_url)
def thumbnail_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
transformations = 'c_thumb,w_370,h_200,f_png'
if self.download_url.startswith(cloudinary):
return self.download_url.replace('/upload/', '/upload/{}/'.format(transformations))
return '{}/image/fetch/{}/{}'.format(cloudinary, transformations, self.download_url)
| TailorDev/pauling | api/models.py | Python | mit | 2,935 | 0.001704 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, 2014, Pyhrol, pyhrol@rambler.ru
# GEO: N55.703431,E37.623324 .. N48.742359,E44.536997
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 4. Neither the name of the Pyhrol nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import example_0050
try:
example_0050.function_with_keywords()
except TypeError as ex:
print '***', ex
try:
example_0050.function_with_keywords(arg1 = 1)
except TypeError as ex:
print '***', ex
example_0050.function_with_keywords(counter = 1, description = "One")
example_0050.function_with_keywords(description = "Two", counter = 2)
| dyomas/pyhrol | examples/example_0050.py | Python | bsd-3-clause | 1,983 | 0.00706 |
#!/usr/bin/env python
#
# fdaemon.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Author: Tarun Kumar <reach.tarun.here AT gmail.com>
# NOTE: THIS IS AN INITIAL RELEASE AND IS LIKELY TO BE UNSTABLE
import ConfigParser
import logging
import os
import time
from django.core.management.base import BaseCommand, CommandError
from interface.models import *
from django.core import serializers
import pymongo
import gridfs
from bson import ObjectId
from bson.json_util import loads,dumps
import json
from bson import json_util
from interface.producer import Producer
import pika
STATUS_NEW = 0 # identifies local status of task
STATUS_PROCESSING = 1
STATUS_FAILED = 2
STATUS_COMPLETED = 3
STATUS_TIMEOUT = 4
NEW_SCAN_TASK = 1 # identifies data being sent to back end
SEND_ANY = 'Any'
ANY_QUEUE = 'any_queue'
PRIVATE_QUEUE = 'private_queue'
RPC_PORT = 5672
config = ConfigParser.ConfigParser()
config.read(os.path.join(settings.BASE_DIR, "conf", "backend.conf"))
BACKEND_HOST = config.get('backend', 'host', 'localhost')
# mongodb connection settings
client = pymongo.MongoClient()
db = client.thug
dbfs = client.thugfs
fs = gridfs.GridFS(dbfs)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    active_scans = [] # List of started threads waiting for a result to be returned from backend or timeout
def fetch_new_tasks(self):
return Task.objects.filter(status__exact=STATUS_NEW).order_by('submitted_on')
def fetch_pending_tasks(self):
return Task.objects.filter(status__exact=STATUS_PROCESSING)
# Task.objects.filter(status__exact=STATUS_PROCESSING).update(status=STATUS_NEW)
def mark_as_running(self, task):
logger.debug("[{}] Marking task as running".format(task.id))
task.started_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_PROCESSING
task.save()
def mark_as_failed(self, task):
logger.debug("[{}] Marking task as failed".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_FAILED
task.save()
def mark_as_timeout(self, task):
logger.debug("[{}] Marking task timeout".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_TIMEOUT
task.save()
def mark_as_completed(self, task):
logger.debug("[{}] Marking task as completed".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_COMPLETED
task.save()
def renderTaskDetail(self, pkval):
return dumps(
loads(
serializers.serialize(
'json',
[Task.objects.get(pk=pkval), ]
)
)[0]
)
def post_new_task(self, task):
temp1 = loads(self.renderTaskDetail(task.id))
temp = temp1['fields']
backend = temp.pop("backend")
temp.pop("user")
temp.pop("sharing_model")
temp.pop("plugin_status")
temp.pop("sharing_groups")
temp.pop("star")
temp["frontend_id"] = temp1.pop("pk")
temp["task"] = NEW_SCAN_TASK
logger.debug("Posting task {}".format(temp["frontend_id"]))
if backend == SEND_ANY:
# start the thread to post the scan on any queue
scan = Producer(json.dumps(temp),
BACKEND_HOST,
RPC_PORT,
ANY_QUEUE,
temp["frontend_id"])
scan.start()
self.active_scans.append(scan)
self.mark_as_running(task)
else:
# start the thread to post the scan on private queue
scan = Producer(json.dumps(temp),
backend,
RPC_PORT,
PRIVATE_QUEUE,
temp["frontend_id"])
scan.start()
self.active_scans.append(scan)
self.mark_as_running(task)
def search_samples_dict_list(self, search_id,sample_dict):
# returns new gridfs sample_id
for x in sample_dict:
if x["_id"] == search_id:
return x["sample_id"]
def retrieve_save_document(self, response, files):
# now files for locations
for x in response["locations"]:
if x['content_id'] is not None:
dfile = [
item["data"] for item in files
if str(item["content_id"]) == x["content_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
                # now change id in response
x['location_id'] = new_fs_id
# now for samples
for x in response["samples"]:
dfile = [
item["data"] for item in files
if str(item["sample_id"]) == x["sample_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
            # now change id in response
x['sample_id'] = new_fs_id
# same for pcaps
for x in response["pcaps"]:
if x['content_id'] is not None:
dfile = [
item["data"] for item in files
if str(item["content_id"]) == x["content_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
                # now change id in response
x['content_id'] = new_fs_id
        # for vt, andro etc. point sample_id to gridfs id
# check for issues in this
for x in response["virustotal"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["honeyagent"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["androguard"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["peepdf"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
# remove id from all samples and pcaps
for x in response["samples"]:
x.pop("_id")
response.pop("_id")
frontend_analysis_id = db.analysiscombo.insert(response)
return frontend_analysis_id
def process_response(self, task):
analysis = json.loads(task.response, object_hook=decoder)
if analysis["status"] is STATUS_COMPLETED:
logger.info("Task Completed")
analysis_response = analysis["data"]
files = json_util.loads(analysis["files"])
local_task = Task.objects.get(id=analysis_response["frontend_id"])
frontend_analysis_id = self.retrieve_save_document(analysis_response,
files)
local_task.object_id = frontend_analysis_id
local_task.save()
self.mark_as_completed(local_task)
self.active_scans.remove(task)
else:
logger.info("Task Failed")
local_scan = Task.objects.get(id=analysis["data"])
self.mark_as_failed(local_scan)
self.active_scans.remove(task)
def handle(self, *args, **options):
logger.debug("Starting up frontend daemon")
while True:
logger.debug("Fetching new tasks to post to backend.")
tasks = self.fetch_new_tasks()
logger.debug("Got {} new tasks".format(len(tasks)))
for task in tasks:
self.post_new_task(task)
logger.debug("Checking for complete tasks")
for task in self.active_scans:
if task.thread_exception is None:
if hasattr(task, 'response') and task.response is not None:
self.process_response(task)
else:
if task.thread_exception == pika.exceptions.ConnectionClosed:
logger.info("Cannot make connection to backend via {} {} {}".format(task.host,
task.port,
task.routing_key))
self.mark_as_failed(Task.objects.filter(pk=int(task.frontend_id))[0])
self.active_scans.remove(task)
if task.thread_exception == pika.exceptions.ProbableAuthenticationError or \
task.thread_exception == pika.exceptions.ProbableAccessDeniedError:
logger.info("Task {} Authentication Error".format(int(task.frontend_id)))
self.mark_as_failed(Task.objects.filter(pk=int(task.frontend_id))[0])
self.active_scans.remove(task)
if task.thread_exception == TimeOutException:
logger.info("Task {} took too long to reply".format(int(task.frontend_id)))
self.mark_as_timeout(Task.objects.filter(pk=int(task.frontend_id))[0])
self.active_scans.remove(task)
logger.debug("Sleeping for {} seconds".format(6))
time.sleep(6)
| Dennisparchkov/rumal | interface/management/commands/fdaemon.py | Python | gpl-2.0 | 10,312 | 0.001649 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hhlregistrations', '0004_auto_20150411_1935'),
]
operations = [
migrations.AddField(
model_name='event',
name='payment_due',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='event',
name='require_registration',
field=models.BooleanField(default=False),
),
]
| hacklab-fi/hhlevents | hhlevents/apps/hhlregistrations/migrations/0005_auto_20150412_1806.py | Python | bsd-3-clause | 592 | 0 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error
import time
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import StringIO
import mmap
import random
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def map_file(self, file, start): # Initialize with bytes from file
self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
self.read_cursor = start
def seek_file(self, position):
self.read_cursor = position
def close_file(self):
self.input.close()
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
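    # Illustrative round-trips for the compact-size encoding described in
    # read_string above (a sketch added for clarity, not in the original file);
    # hex shows the bytes produced by write_compact_size:
    #   252    -> fc               (single byte)
    #   253    -> fd fd00          (0xfd marker + 2-byte little-endian)
    #   70000  -> fe 70110100      (0xfe marker + 4-byte little-endian)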
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def parse_redeemScript(bytes):
dec = [ x for x in script_GetOp(bytes.decode('hex')) ]
# 2 of 2
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex') ]
return 2, pubkeys
# 2 of 3
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex'), dec[3][1].encode('hex') ]
return 2, pubkeys
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
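# Illustrative usage of the opcodes enumeration above (added sketch; the values
# match the standard Bitcoin opcode numbers):
#   opcodes.OP_DUP       -> 118 (0x76)
#   opcodes.OP_CHECKSIG  -> 172 (0xac)
#   opcodes.whatis(118)  -> 'OP_DUP'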
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False;
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
s = []
for sig in x_sig:
if sig[-2:] == '01':
s.append(sig[:-2])
else:
assert sig == NO_SIGNATURE
s.append(None)
return s
def is_extended_pubkey(x_pubkey):
return x_pubkey[0:2] in ['fe', 'ff']
def x_to_xpub(x_pubkey):
if x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub
def parse_xpub(x_pubkey):
if x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
pubkey = BIP32_Account.derive_pubkey_from_xpub(xpub, s[0], s[1])
elif x_pubkey[0:2] == 'fe':
from account import OldAccount
mpk, s = OldAccount.parse_xpubkey(x_pubkey)
pubkey = OldAccount.get_pubkey_from_mpk(mpk.decode('hex'), s[0], s[1])
else:
pubkey = x_pubkey
return pubkey
def parse_scriptSig(d, bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return
# payto_pubkey
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
d['address'] = "(pubkey)"
d['signatures'] = [sig]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
x_pubkey = decoded[1][1].encode('hex')
try:
signatures = parse_sig([sig])
pubkey = parse_xpub(x_pubkey)
except:
import traceback
traceback.print_exc(file=sys.stdout)
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = public_key_to_bc_address(pubkey.decode('hex'))
return
# p2sh transaction, 2 of n
match = [ opcodes.OP_0 ]
while len(match) < len(decoded):
match.append(opcodes.OP_PUSHDATA4)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_sig = map(lambda x:x[1].encode('hex'), decoded[1:-1])
d['signatures'] = parse_sig(x_sig)
d['num_sig'] = 2
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
match_2of2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
match_2of3 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match_2of2):
x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ]
elif match_decoded(dec2, match_2of3):
x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ]
else:
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['x_pubkeys'] = x_pubkeys
pubkeys = map(parse_xpub, x_pubkeys)
d['pubkeys'] = pubkeys
redeemScript = Transaction.multisig_script(pubkeys,2)
d['redeemScript'] = redeemScript
d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), 9)
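# Illustrative p2sh scriptSig layout handled by the multisig branch above
# (sketch only): 00 <push sig1> <push sig2> <push redeemScript>, i.e. OP_0
# followed by the signatures and the serialized redeem script.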
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return 'pubkey', decoded[0][1].encode('hex')
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return 'address', hash_160_to_bc_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return 'address', hash_160_to_bc_address(decoded[1][1],9)
# OP_RETURN
match = [ opcodes.OP_RETURN, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
return 'op_return', decoded[1][1]
return "(None)", "(None)"
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
d['scriptSig'] = scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
if prevout_hash == '00'*32:
d['is_coinbase'] = True
else:
d['is_coinbase'] = False
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
type, address = get_address_from_output_script(scriptPubKey)
d['type'] = type
d['address'] = address
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = []
for i in xrange(n_vin):
d['inputs'].append(parse_input(vds))
n_vout = vds.read_compact_size()
d['outputs'] = []
for i in xrange(n_vout):
d['outputs'].append(parse_output(vds, i))
d['lockTime'] = vds.read_uint32()
return d
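# Illustrative serialized-transaction layout consumed by deserialize above (sketch):
#   4-byte version | varint #inputs | inputs | varint #outputs | outputs | 4-byte locktime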
push_script = lambda x: op_push(len(x)/2) + x
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, inputs, outputs, locktime=0):
self.inputs = inputs
self.outputs = outputs
self.locktime = locktime
self.raw = None
@classmethod
def deserialize(klass, raw):
self = klass([],[])
self.update(raw)
return self
def update(self, raw):
d = deserialize(raw)
self.raw = raw
self.inputs = d['inputs']
self.outputs = map(lambda x: (x['type'], x['address'], x['value']), d['outputs'])
self.locktime = d['lockTime']
@classmethod
def sweep(klass, privkeys, network, to_address, fee):
inputs = []
for privkey in privkeys:
pubkey = public_key_from_private_key(privkey)
address = address_from_private_key(privkey)
u = network.synchronous_get([ ('blockchain.address.listunspent',[address])])[0]
pay_script = klass.pay_script('address', address)
for item in u:
item['scriptPubKey'] = pay_script
item['redeemPubkey'] = pubkey
item['address'] = address
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [None]
item['signatures'] = [None]
item['num_sig'] = 1
inputs += u
if not inputs:
return
total = sum( map(lambda x:int(x.get('value')), inputs) ) - fee
outputs = [('address', to_address, total)]
self = klass(inputs, outputs)
self.sign({ pubkey:privkey })
return self
@classmethod
def multisig_script(klass, public_keys, num=None):
n = len(public_keys)
if num is None: num = n
        # supports only "2 of 2" and "2 of 3" transactions
assert num <= n and n in [2,3]
if num==2:
s = '52'
elif num == 3:
s = '53'
else:
raise
for k in public_keys:
s += op_push(len(k)/2)
s += k
if n==2:
s += '52'
elif n==3:
s += '53'
else:
raise
s += 'ae'
return s
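    # Illustrative output of multisig_script (added sketch): a 2-of-2 script
    # assembles to 52 <push pk1> <pk1> <push pk2> <pk2> 52 ae, i.e.
    # OP_2 <pubkey1> <pubkey2> OP_2 OP_CHECKMULTISIG.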
@classmethod
def pay_script(self, type, addr):
if type == 'op_return':
h = addr.encode('hex')
return '6a' + push_script(h)
else:
assert type == 'address'
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == 50:
script = '76a9' # op_dup, op_hash_160
script += push_script(hash_160.encode('hex'))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == 9:
script = 'a9' # op_hash_160
script += push_script(hash_160.encode('hex'))
script += '87' # op_equal
else:
raise
return script
def serialize(self, for_sig=None):
# for_sig:
# -1 : do not sign, estimate length
# i>=0 : sign input i
# None : add all signatures
inputs = self.inputs
outputs = self.outputs
s = int_to_hex(1,4) # version
s += var_int( len(inputs) ) # number of inputs
for i in range(len(inputs)):
txin = inputs[i]
s += txin['prevout_hash'].decode('hex')[::-1].encode('hex') # prev hash
s += int_to_hex(txin['prevout_n'],4) # prev index
p2sh = txin.get('redeemScript') is not None
num_sig = txin['num_sig']
address = txin['address']
x_signatures = txin['signatures']
signatures = filter(lambda x: x is not None, x_signatures)
is_complete = len(signatures) == num_sig
if for_sig in [-1, None]:
# if we have enough signatures, we use the actual pubkeys
# use extended pubkeys (with bip32 derivation)
sig_list = []
if for_sig == -1:
# we assume that signature will be 0x48 bytes long
pubkeys = txin['pubkeys']
sig_list = [ "00"* 0x48 ] * num_sig
elif is_complete:
pubkeys = txin['pubkeys']
for signature in signatures:
sig_list.append(signature + '01')
else:
pubkeys = txin['x_pubkeys']
for signature in x_signatures:
sig_list.append((signature + '01') if signature is not None else NO_SIGNATURE)
sig_list = ''.join( map( lambda x: push_script(x), sig_list))
if not p2sh:
script = sig_list
script += push_script(pubkeys[0])
else:
script = '00' # op_0
script += sig_list
redeem_script = self.multisig_script(pubkeys,2)
script += push_script(redeem_script)
elif for_sig==i:
script = txin['redeemScript'] if p2sh else self.pay_script('address', address)
else:
script = ''
s += var_int( len(script)/2 ) # script length
s += script
s += "ffffffff" # sequence
s += var_int( len(outputs) ) # number of outputs
for output in outputs:
type, addr, amount = output
s += int_to_hex( amount, 8) # amount
script = self.pay_script(type, addr)
s += var_int( len(script)/2 ) # script length
s += script # script
s += int_to_hex(0,4) # lock time
if for_sig is not None and for_sig != -1:
s += int_to_hex(1, 4) # hash type
return s
def tx_for_sig(self,i):
return self.serialize(for_sig = i)
def hash(self):
return Hash(self.raw.decode('hex') )[::-1].encode('hex')
def add_signature(self, i, pubkey, sig):
print_error("adding signature for", pubkey)
txin = self.inputs[i]
pubkeys = txin['pubkeys']
ii = pubkeys.index(pubkey)
txin['signatures'][ii] = sig
txin['x_pubkeys'][ii] = pubkey
self.inputs[i] = txin
self.raw = self.serialize()
def add_input(self, input):
self.inputs.append(input)
self.raw = None
def input_value(self):
return sum([x['value'] for x in self.inputs])
def output_value(self):
return sum([ x[2] for x in self.outputs])
def get_fee(self):
return self.input_value() - self.output_value()
def signature_count(self):
r = 0
s = 0
for txin in self.inputs:
if txin.get('is_coinbase'):
continue
signatures = filter(lambda x: x is not None, txin['signatures'])
s += len(signatures)
r += txin['num_sig']
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def inputs_to_sign(self):
from account import BIP32_Account, OldAccount
xpub_list = []
addr_list = set()
for txin in self.inputs:
x_signatures = txin['signatures']
signatures = filter(lambda x: x is not None, x_signatures)
if len(signatures) == txin['num_sig']:
# input is complete
continue
for k, x_pubkey in enumerate(txin['x_pubkeys']):
if x_signatures[k] is not None:
# this pubkey already signed
continue
if x_pubkey[0:2] == 'ff':
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
xpub_list.append((xpub,sequence))
elif x_pubkey[0:2] == 'fe':
xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
xpub_list.append((xpub,sequence))
else:
addr_list.add(txin['address'])
return addr_list, xpub_list
def sign(self, keypairs):
print_error("tx.sign(), keypairs:", keypairs)
for i, txin in enumerate(self.inputs):
# continue if this txin is complete
signatures = filter(lambda x: x is not None, txin['signatures'])
num = txin['num_sig']
if len(signatures) == num:
continue
redeem_pubkeys = txin['pubkeys']
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
for pubkey in redeem_pubkeys:
if pubkey in keypairs.keys():
# add signature
sec = keypairs[pubkey]
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
self.add_signature(i, pubkey, sig.encode('hex'))
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def add_pubkey_addresses(self, txlist):
for i in self.inputs:
if i.get("address") == "(pubkey)":
prev_tx = txlist.get(i.get('prevout_hash'))
if prev_tx:
address, value = prev_tx.get_outputs()[i.get('prevout_n')]
print_error("found pay-to-pubkey address:", address)
i["address"] = address
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs:
if type == 'address':
addr = x
elif type == 'pubkey':
addr = public_key_to_bc_address(x.decode('hex'))
elif type == 'op_return':
try:
addr = 'OP_RETURN: "' + x.decode('utf8') + '"'
except:
addr = 'OP_RETURN: "' + x.encode('hex') + '"'
else:
addr = "(None)"
o.append((addr,v))
return o
def get_output_addresses(self):
return map(lambda x:x[0], self.get_outputs())
def has_address(self, addr):
found = False
for txin in self.inputs:
if addr == txin.get('address'):
found = True
break
if addr in self.get_output_addresses():
found = True
return found
def get_value(self, addresses, prevout_values):
# return the balance for that tx
is_relevant = False
is_send = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in self.inputs:
addr = item.get('address')
if addr in addresses:
is_send = True
is_relevant = True
key = item['prevout_hash'] + ':%d'%item['prevout_n']
value = prevout_values.get( key )
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_send: is_partial = False
for addr, value in self.get_outputs():
v_out += value
if addr in addresses:
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_send:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
is_send = v < 0
else:
# all inputs are mine
fee = v_out - v_in
return is_relevant, is_send, v, fee
def as_dict(self):
import json
out = {
"hex":str(self),
"complete":self.is_complete()
}
return out
def required_fee(self, verifier):
# see https://en.bitcoin.it/wiki/Transaction_fees
threshold = 57600000*4
size = len(self.serialize(-1))/2
fee = 0
for o in self.get_outputs():
value = o[1]
if value < DUST_SOFT_LIMIT:
fee += MIN_RELAY_TX_FEE
sum = 0
for i in self.inputs:
age = verifier.get_confirmations(i["prevout_hash"])[0]
sum += i["value"] * age
priority = sum / size
print_error(priority, threshold)
if size < 5000 and fee == 0 and priority > threshold:
return 0
fee += (1 + size / 1000) * MIN_RELAY_TX_FEE
print_error(fee)
return fee
| wozz/electrum-myr | lib/transaction.py | Python | gpl-3.0 | 30,650 | 0.006754 |
from django.apps import AppConfig
class DocenteConfig(AppConfig):
name = 'docente'
| Bleno/sisgestor-django | docente/apps.py | Python | mit | 89 | 0 |
import unittest
import json
from privacyidea.app import create_app
from privacyidea.models import db
from privacyidea.lib.resolver import (save_resolver)
from privacyidea.lib.realm import (set_realm)
from privacyidea.lib.user import User
from privacyidea.lib.auth import create_db_admin
from privacyidea.api.lib.postpolicy import DEFAULT_POLICY_TEMPLATE_URL
PWFILE = "tests/testdata/passwords"
class FakeFlaskG():
policy_object = None
class MyTestCase(unittest.TestCase):
resolvername1 = "resolver1"
resolvername2 = "Resolver2"
resolvername3 = "reso3"
realm1 = "realm1"
realm2 = "realm2"
serials = ["SE1", "SE2", "SE3"]
otpkey = "3132333435363738393031323334353637383930"
@classmethod
def setUpClass(cls):
cls.app = create_app('testing', "")
cls.app_context = cls.app.app_context()
cls.app_context.push()
db.create_all()
# Create an admin for tests.
create_db_admin(cls.app, "testadmin", "admin@test.tld", "testpw")
def setUp_user_realms(self):
# create user realm
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm1,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm1,
resolver=self.resolvername1)
user_str = "%s" % user
self.assertTrue(user_str == "<root.resolver1@realm1>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "%r" % user
expected = "User(login='root', realm='realm1', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
def setUp_user_realm2(self):
# create user realm
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm2,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm2,
resolver=self.resolvername1)
user_str = "%s" % user
self.assertTrue(user_str == "<root.resolver1@realm2>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "%r" % user
expected = "User(login='root', realm='realm2', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
@classmethod
def tearDownClass(cls):
db.session.remove()
db.drop_all()
cls.app_context.pop()
def setUp(self):
self.authenticate()
def authenticate(self):
with self.app.test_request_context('/auth',
data={"username": "testadmin",
"password": "testpw"},
method='POST'):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status"), res.data)
self.at = result.get("value").get("token")
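    # Note (illustrative sketch, not part of the original file): test methods in
    # subclasses typically send this admin token in the Authorization header, e.g.
    #   with self.app.test_request_context('/token/', method='GET',
    #                                      headers={'Authorization': self.at}):
    #       res = self.app.full_dispatch_request()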
def authenticate_selfserive_user(self):
with self.app.test_request_context('/auth',
method='POST',
data={"username":
"selfservice@realm1",
"password": "test"}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status"), res.data)
# In self.at_user we store the user token
self.at_user = result.get("value").get("token")
# check that this is a user
role = result.get("value").get("role")
self.assertTrue(role == "user", result)
self.assertEqual(result.get("value").get("realm"), "realm1")
# Test logout time
self.assertEqual(result.get("value").get("logout_time"), 120)
self.assertEqual(result.get("value").get("policy_template_url"),
DEFAULT_POLICY_TEMPLATE_URL)
| woddx/privacyidea | tests/base.py | Python | agpl-3.0 | 4,803 | 0.000625 |
#------------------------------------------------------------------------------
__author__ = 'James T. Dietrich'
__contact__ = 'james.t.dietrich@dartmouth.edu'
__copyright__ = '(c) James Dietrich 2016'
__license__ = 'MIT'
__date__ = 'Wed Nov 16 11:33:39 2016'
__version__ = '1.0'
__status__ = "initial release"
__url__ = "https://github.com/geojames/..."
"""
Name: Week6-1_Matplotlib_Adv.py
Compatibility: Python 3.5
Description:    Advanced Matplotlib examples: meshgrid, 3D surface and contour plots, TeX-formatted text, and twin y-axes
URL: https://github.com/geojames/...
Requires: libraries
Dev ToDo:
AUTHOR: James T. Dietrich
ORGANIZATION: Dartmouth College
Contact: james.t.dietrich@dartmouth.edu
Copyright: (c) James Dietrich 2016
"""
#------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# MESHGRID
#
# Meshgrid is a command/function that allows you to easily build X and Y
# grids from 1-D arrays/vectors which can be used to evaluate equations
# in 2D or 3D space
# different conventions for naming meshed variables
# x > xv
# x > xx
# x > xg
#
# meshgrid takes to 1-D arrays of X and Y coordinates and returns two X and Y
# "meshes" 2D arrays that cover the X and Y spaces
x = np.linspace(-10.,10.,30)
y = np.linspace(-10.,10.,30)
xg, yg = np.meshgrid(x,y)
r = np.sqrt((xg**2 + yg**2))
z = np.sin(r) * xg**2
plt.pcolor(z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xg, yg, z,rstride=1, cstride=1, cmap='coolwarm')
# 3D Subplots
fig = plt.figure(figsize=plt.figaspect(0.33333))
ax1 = fig.add_subplot(1, 3, 1)
ax1.pcolor(z, cmap = 'hot')
ax2 = fig.add_subplot(1, 3, 2, projection='3d')
ax2.plot_surface(xg, yg, z,rstride=1, cstride=1, cmap='hot')
ax3 = fig.add_subplot(1, 3, 3, projection='3d')
ax3.contour(xg,yg,z)
#%% Formatted text for plots
#
# Matplotlib
# http://matplotlib.org/users/mathtext.html#mathtext-tutorial
# it basically uses TeX syntax and formatting codes
def f(x,y):
return (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 -y**2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
xx,yy = np.meshgrid(x, y)
#plt.axes([0.025, 0.025, 0.95, 0.95])
plt.contourf(xx, yy, f(xx, yy), 8, alpha=.75, cmap=plt.cm.hot)
C = plt.contour(xx, yy, f(xx, yy), 8, colors='black', linewidth=0.5)
plt.clabel(C, inline=1, fontsize=10)
plt.text (-2.5,-2,r'$\frac{1-x}{2 + x^5 + y^3} \times e^{(-x^2 -y^2)}$',fontsize=20)
plt.xlabel(r'$\mathbf{Bold \ x}$ x', fontsize=20)
plt.ylabel(r'$\mathit{Y-Label}$', fontsize=20)
plt.title('Regular ' r'$\mathbf{Bold}$ $\mathit{and \ italic}$ words')
#%% Double Y Axis Plots (from the Matplotlib Gallery)
fig, ax1 = plt.subplots()
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('exp', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
s2 = np.sin(2*np.pi*t)
ax2.plot(t, s2, 'r.')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r') | geojames/Dart_EnvGIS | Week6-2_Matplotlib_Adv.py | Python | mit | 3,114 | 0.008992 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import os
from knack.log import get_logger
from azure.cli.core.commands import LongRunningOperation
from azure.cli.command_modules.vm.custom import set_vm, _compute_client_factory, _is_linux_os
from azure.cli.command_modules.vm._vm_utils import get_key_vault_base_url, create_keyvault_data_plane_client
_DATA_VOLUME_TYPE = 'DATA'
_ALL_VOLUME_TYPE = 'ALL'
_STATUS_ENCRYPTED = 'Encrypted'
logger = get_logger(__name__)
vm_extension_info = {
'Linux': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryptionForLinux',
'version': '1.1',
'legacy_version': '0.1'
},
'Windows': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryption',
'version': '2.2',
'legacy_version': '1.1'
}
}
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
if not ade_ext_info:
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
if use_instance_view:
exts = vm.instance_view.extensions or []
r = next((e for e in exts if e.type and e.type.lower().startswith(ade_ext_info['publisher'].lower()) and
e.name.lower() == ade_ext_info['name'].lower()), None)
else:
exts = vm.resources or []
r = next((e for e in exts if (e.publisher.lower() == ade_ext_info['publisher'].lower() and
e.type_properties_type.lower() == ade_ext_info['name'].lower())), None)
return r
def _detect_ade_status(vm):
if vm.storage_profile.os_disk.encryption_settings:
return False, True
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
ade = _find_existing_ade(vm, ade_ext_info=ade_ext_info)
if ade is None:
return False, False
if ade.type_handler_version.split('.')[0] == ade_ext_info['legacy_version'].split('.')[0]:
return False, True
return True, False # we believe impossible to have both old & new ADE
def encrypt_vm(cmd, resource_group_name, vm_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
aad_client_id=None,
aad_client_secret=None, aad_client_cert_thumbprint=None,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
encrypt_format_all=False,
force=False):
from msrestazure.tools import parse_resource_id
from knack.util import CLIError
# pylint: disable=no-member
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
is_linux = _is_linux_os(vm)
backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
_, has_old_ade = _detect_ade_status(vm)
use_new_ade = not aad_client_id and not has_old_ade
extension = vm_extension_info['Linux' if is_linux else 'Windows']
if not use_new_ade and not aad_client_id:
raise CLIError('Please provide --aad-client-id')
# 1. First validate arguments
if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')
if volume_type is None:
if not is_linux:
volume_type = _ALL_VOLUME_TYPE
elif vm.storage_profile.data_disks:
raise CLIError('VM has data disks, please supply --volume-type')
else:
volume_type = 'OS'
# sequence_version should be unique
sequence_version = uuid.uuid4()
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(
cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vm, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key: # if key name and not key url
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
# The following logic was mostly ported from xplat-cli
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
'KeyEncryptionKeyURL': key_encryption_key,
'KeyEncryptionAlgorithm': key_encryption_algorithm,
'SequenceVersion': sequence_version,
}
if use_new_ade:
public_config.update({
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
})
else:
public_config.update({
'AADClientID': aad_client_id,
'AADClientCertThumbprint': aad_client_cert_thumbprint,
})
ade_legacy_private_config = {
'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
}
VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
'KeyVaultKeyReference', 'SubResource')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
type_properties_type=extension['name'],
protected_settings=None if use_new_ade else ade_legacy_private_config,
type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.begin_create_or_update(
resource_group_name, vm_name, extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
# verify the extension was ok
extension_result = compute_client.virtual_machine_extensions.get(
resource_group_name, vm_name, extension['name'], 'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError('Extension needed for disk encryption was not provisioned correctly')
if not use_new_ade:
if not (extension_result.instance_view.statuses and
extension_result.instance_view.statuses[0].message):
raise CLIError('Could not find url pointing to the secret for disk encryption')
# 3. update VM's storage profile with the secrets
status_url = extension_result.instance_view.statuses[0].message
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
secret_ref = KeyVaultSecretReference(secret_url=status_url,
source_vault=SubResource(id=disk_encryption_keyvault))
key_encryption_key_obj = None
if key_encryption_key:
key_encryption_key_obj = KeyVaultKeyReference(key_url=key_encryption_key,
source_vault=SubResource(id=key_encryption_keyvault))
disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,
key_encryption_key=key_encryption_key_obj,
enabled=True)
if vm_encrypted:
# stop the vm before update if the vm is already encrypted
logger.warning("Deallocating the VM before updating encryption settings...")
compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result()
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
if vm_encrypted:
# and start after the update
logger.warning("Restarting the VM after the update...")
compute_client.virtual_machines.start(resource_group_name, vm_name).result()
if is_linux and volume_type != _DATA_VOLUME_TYPE:
old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
logger.warning("The encryption request was accepted. Please use 'show' command to monitor "
"the progress. %s", "" if use_new_ade else old_ade_msg)
def decrypt_vm(cmd, resource_group_name, vm_name, volume_type=None, force=False):
from knack.util import CLIError
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# 1. be nice, figure out the default volume type and also verify VM will not be busted
if is_linux:
if volume_type:
if not force and volume_type != _DATA_VOLUME_TYPE:
raise CLIError("Only Data disks can have encryption disabled in a Linux VM. "
"Use '--force' to ignore the warning")
else:
volume_type = _DATA_VOLUME_TYPE
elif volume_type is None:
volume_type = _ALL_VOLUME_TYPE
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# sequence_version should be incremented since encryptions occurred before
sequence_version = uuid.uuid4()
# 2. update the disk encryption extension
# The following logic was mostly ported from xplat-cli
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
'SequenceVersion': sequence_version,
}
VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models(
'VirtualMachineExtension', 'DiskEncryptionSettings')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
virtual_machine_extension_type=extension['name'],
type_handler_version=extension['version'] if has_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.begin_create_or_update(resource_group_name,
vm_name,
extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, vm_name,
extension['name'],
'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError("Extension updating didn't succeed")
if not has_new_ade:
# 3. Remove the secret from VM's storage profile
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
disk_encryption_settings = DiskEncryptionSettings(enabled=False)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
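# Illustrative counterpart for decrypt_vm above (sketch):
#   az vm encryption disable -g MyResourceGroup -n MyVm --volume-type DATA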
def _show_vm_encryption_status_thru_new_ade(vm_instance_view):
ade = _find_existing_ade(vm_instance_view, use_instance_view=True)
disk_infos = []
for div in vm_instance_view.instance_view.disks or []:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')],
})
return {
'status': ade.statuses if ade else None,
'substatus': ade.substatuses if ade else None,
'disks': disk_infos
}
def show_vm_encryption_status(cmd, resource_group_name, vm_name):
encryption_status = {
'osDisk': 'NotEncrypted',
'osDiskEncryptionSettings': None,
'dataDisk': 'NotEncrypted',
'osType': None
}
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return None
if has_new_ade:
return _show_vm_encryption_status_thru_new_ade(vm)
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# The following logic was mostly ported from xplat-cli
os_type = 'Linux' if is_linux else 'Windows'
encryption_status['osType'] = os_type
extension = vm_extension_info[os_type]
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name,
vm_name,
extension['name'],
'instanceView')
logger.debug(extension_result)
if extension_result.instance_view and extension_result.instance_view.statuses:
encryption_status['progressMessage'] = extension_result.instance_view.statuses[0].message
substatus_message = None
if getattr(extension_result.instance_view, 'substatuses', None):
substatus_message = extension_result.instance_view.substatuses[0].message
encryption_status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings
import json
if is_linux:
try:
message_object = json.loads(substatus_message)
except Exception: # pylint: disable=broad-except
message_object = None # might be from outdated extension
if message_object and ('os' in message_object):
encryption_status['osDisk'] = message_object['os']
else:
encryption_status['osDisk'] = 'Unknown'
if message_object and 'data' in message_object:
encryption_status['dataDisk'] = message_object['data']
else:
encryption_status['dataDisk'] = 'Unknown'
else:
# Windows - get os and data volume encryption state from the vm model
if (encryption_status['osDiskEncryptionSettings'] and
encryption_status['osDiskEncryptionSettings'].enabled and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key.secret_url):
encryption_status['osDisk'] = _STATUS_ENCRYPTED
else:
encryption_status['osDisk'] = 'Unknown'
if extension_result.provisioning_state == 'Succeeded':
volume_type = extension_result.settings.get('VolumeType', None)
about_data_disk = not volume_type or volume_type.lower() != 'os'
if about_data_disk and extension_result.settings.get('EncryptionOperation', None) == 'EnableEncryption':
encryption_status['dataDisk'] = _STATUS_ENCRYPTED
return encryption_status
def _get_keyvault_key_url(cli_ctx, keyvault_name, key_name):
client = create_keyvault_data_plane_client(cli_ctx)
result = client.get_key(get_key_vault_base_url(cli_ctx, keyvault_name), key_name, '')
return result.key.kid # pylint: disable=no-member
def _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force):
if is_linux:
volume_type = volume_type or _DATA_VOLUME_TYPE
if volume_type != _DATA_VOLUME_TYPE:
            msg = 'OS disk encryption is not yet supported for Linux VM scale sets'
if force:
logger.warning(msg)
else:
from knack.util import CLIError
raise CLIError(msg)
else:
volume_type = volume_type or _ALL_VOLUME_TYPE
return volume_type
def encrypt_vmss(cmd, resource_group_name, vmss_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
force=False):
from msrestazure.tools import parse_resource_id
# pylint: disable=no-member
UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. First validate arguments
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
(parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key:
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'KeyEncryptionKeyURL': key_encryption_key or '',
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption'
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type_properties_type=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
exts = [ext]
# remove any old ade extensions set by this command and add the new one.
vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
if vmss_ext_profile and vmss_ext_profile.extensions:
exts.extend(old_ext for old_ext in vmss.virtual_machine_profile.extension_profile.extensions
if old_ext.type != ext.type or old_ext.name != ext.name)
vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=exts)
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. be nice, figure out the default volume type
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# 2. update the disk encryption extension
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type_properties_type=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
if (not vmss.virtual_machine_profile.extension_profile or
not vmss.virtual_machine_profile.extension_profile.extensions):
extensions = []
else:
extensions = vmss.virtual_machine_profile.extension_profile.extensions
ade_extension = [x for x in extensions if
x.type_properties_type.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()] # pylint: disable=line-too-long
if not ade_extension:
from knack.util import CLIError
raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))
index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
vmss.virtual_machine_profile.extension_profile.extensions[index] = ext
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
def _show_post_action_message(resource_group_name, vmss_name, manual_mode, enable):
    msg = ''
    if manual_mode:
msg = ("With manual upgrade mode, you will need to run 'az vmss update-instances -g {} -n {} "
"--instance-ids \"*\"' to propagate the change.\n".format(resource_group_name, vmss_name))
msg += ("Note, {} encryption will take a while to finish. Please query the status using "
"'az vmss encryption show -g {} -n {}'. For Linux VM, you will lose the access during the period".format(
'enabling' if enable else 'disabling', resource_group_name, vmss_name))
logger.warning(msg)
def show_vmss_encryption_status(cmd, resource_group_name, vmss_name):
client = _compute_client_factory(cmd.cli_ctx)
vm_instances = list(client.virtual_machine_scale_set_vms.list(resource_group_name, vmss_name,
select='instanceView', expand='instanceView'))
result = []
for instance in vm_instances:
view = instance.instance_view
disk_infos = []
vm_enc_info = {
'id': instance.id,
'disks': disk_infos
}
for div in view.disks:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')]
})
result.append(vm_enc_info)
return result
def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vm_or_vmss, force):
def _report_client_side_validation_error(msg):
if force:
logger.warning("WARNING: %s %s", msg, "Encryption might fail.")
else:
from knack.util import CLIError
raise CLIError("ERROR: {}".format(msg))
resource_type = "VMSS" if vm_or_vmss.type.lower().endswith("virtualmachinescalesets") else "VM"
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
disk_vault_resource_info = parse_resource_id(disk_vault_id)
key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])
# ensure vault has 'EnabledForDiskEncryption' permission
if not key_vault.properties or not key_vault.properties.enabled_for_disk_encryption:
_report_client_side_validation_error("Keyvault '{}' is not enabled for disk encryption.".format(
disk_vault_resource_info['resource_name']))
if kek_vault_id:
kek_vault_info = parse_resource_id(kek_vault_id)
if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
client.get(kek_vault_info['resource_group'], kek_vault_info['name'])
    # verify subscription matches
vm_vmss_resource_info = parse_resource_id(vm_or_vmss.id)
if vm_vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
_report_client_side_validation_error("{} {}'s subscription does not match keyvault's subscription."
.format(resource_type, vm_vmss_resource_info['name']))
# verify region matches
if key_vault.location.replace(' ', '').lower() != vm_or_vmss.location.replace(' ', '').lower():
_report_client_side_validation_error(
"{} {}'s region does not match keyvault's region.".format(resource_type, vm_vmss_resource_info['name']))
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/vm/disk_encryption.py | Python | mit | 27,499 | 0.004291 |
import tornado.web
import traceback
from plugins.bases.plugin import PluginBase
import os
import sys
import gdata
import gdata.youtube
import gdata.youtube.service
class HandlersBase(tornado.web.RequestHandler, PluginBase):
# Every handler must have a web path, override this in this fashion
WEB_PATH = r"/"
STORE_ATTRS = True
STORE_UNREF = True
# Specifies what JS and CSS files to load from templates/bootstrap/[css|js]
JS_FILES = []
CSS_FILES = []
# Used as a default for every page
PAGE_TITLE = "Home"
def initialize(self, **kwargs):
self.sysconf = kwargs.get("sysconf", None)
def get_template_path(self):
return "%s/templates" % os.path.dirname(os.path.realpath(sys.argv[0]))
# Initialize YouTube reference to perform actions
def yt_instance(self):
self.yt_service = gdata.youtube.service.YouTubeService()
self.yt_service.ssl = True
self.yt_service.developer_key = self.sysconf.devid
self.yt_service.client_id = self.sysconf.clientid
self.yt_service.email = self.sysconf.gmail
self.yt_service.password = self.sysconf.gpass
self.yt_service.source = self.sysconf.clientid
self.yt_service.ProgrammaticLogin()
# Simple class property to return the playlist URI
@property
def yt_plist_uri(self):
return "http://gdata.youtube.com/feeds/api/playlists/%s" % self.sysconf.playlist
# Return the data about the playlist
def yt_playlist(self):
return self.yt_service.GetYouTubePlaylistVideoFeed(uri=self.yt_plist_uri)
# Get total number of videos in playlist
def yt_playlist_count(self):
plist = self.yt_playlist()
entry = []
for e in plist.entry:
entry.append(e)
return len(entry)
# Wrapper to get upload token for YouTube video post request
def yt_uploadtoken(self, mg):
video_entry = gdata.youtube.YouTubeVideoEntry(media=mg)
response = self.yt_service.GetFormUploadToken(video_entry)
return (response[0], response[1])
# This defines various aspects of the video
def yt_mediagroup(self, title, desc):
return gdata.media.Group(
title = gdata.media.Title(text=title),
description = gdata.media.Description(description_type='plain', text=desc),
keywords=gdata.media.Keywords(text='amber, eric, wedding, 2013, october, 31, halloween'),
category=[gdata.media.Category(
text='People',
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label='People')],
player=None
)
# Adds a video to playlist
def yt_vid2pl(self, vidid, title, desc):
video_entry = self.yt_service.AddPlaylistVideoEntryToPlaylist(
self.yt_plist_uri, vidid, title, desc
)
if isinstance(video_entry, gdata.youtube.YouTubePlaylistVideoEntry):
return 1
return 0
"""
show
Wrapper around RequestHandler's render function to make rendering these templates easier/better.
This way the class just has to specify what special CSS and/or JavaScript files to load (see handlers/main),
and it is automatically passed to the template engine to parse and deal with.
Easier management and use IMO.
"""
def show(self, templ, **kwargs):
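        # Illustrative call from a page handler (handler/template names here are
        # hypothetical, not taken from this repo):
        #     class MainHandler(HandlersBase):
        #         WEB_PATH = r"/"
        #         def get(self):
        #             self.show("main", videos=self.yt_playlist())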
# What JavaScript files to load?
js = ["jquery", "bootstrap.min", "common", "jquery.prettyPhoto"]
js.extend(self.JS_FILES)
# CSS files we want for the particular page
css = ["common", "prettyPhoto"]
css.extend(self.CSS_FILES)
# We pass specifics to the page as well as any uniques via kwargs passed from here
self.render("%s.html" % templ,
js=js, css=css,
page_title=self.PAGE_TITLE,
plistid=self.sysconf.playlist,
**kwargs)
def write_error(self, status_code, **kwargs):
path = os.path.dirname(os.path.realpath(sys.argv[0]))
_,err,_ = kwargs['exc_info']
        msg = "Unfortunately an error has occurred. If you believe this is in error, please contact support.<br /><br />"
msg += "Error: %s" % (err)
self.show("%s/templates/message" % path, path=path, message=msg)
| anzenehansen/wedding-photos | plugins/bases/handlers.py | Python | mpl-2.0 | 4,489 | 0.008911 |
"""Initial
Revision ID: cfbd6d35cab
Revises:
Create Date: 2015-03-04 04:13:56.547992
"""
# revision identifiers, used by Alembic.
revision = 'cfbd6d35cab'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
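    # Tables are created parents-first (location, organization, care_site, provider,
    # person, ...) so the clinical event and cost tables can declare their foreign keys.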
op.create_table('location',
sa.Column('city', sa.String(length=50), nullable=True),
sa.Column('zip', sa.String(length=9), nullable=True),
sa.Column('county', sa.String(length=50), nullable=True),
sa.Column('state', sa.String(length=2), nullable=True),
sa.Column('address_1', sa.String(length=100), nullable=True),
sa.Column('address_2', sa.String(length=100), nullable=True),
sa.Column('location_source_value', sa.String(length=300), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('location_id', name=op.f('pk_location'))
)
op.create_index(op.f('ix_location_location_id'), 'location', ['location_id'], unique=False, postgresql_ops={})
op.create_table('cohort',
sa.Column('cohort_end_date', sa.DateTime(), nullable=True),
sa.Column('cohort_id', sa.Integer(), nullable=False),
sa.Column('subject_id', sa.Integer(), nullable=False),
sa.Column('stop_reason', sa.String(length=100), nullable=True),
sa.Column('cohort_concept_id', sa.Integer(), nullable=False),
sa.Column('cohort_start_date', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('cohort_id', name=op.f('pk_cohort'))
)
op.create_index(op.f('ix_cohort_cohort_id'), 'cohort', ['cohort_id'], unique=False, postgresql_ops={})
op.create_table('organization',
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('place_of_service_concept_id', sa.Integer(), nullable=True),
sa.Column('place_of_service_source_value', sa.String(length=100), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('organization_source_value', sa.String(length=50), nullable=False),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_organization_location_id_location')),
sa.PrimaryKeyConstraint('organization_id', name=op.f('pk_organization')),
sa.UniqueConstraint('organization_source_value', name=op.f('uq_organization_organization_source_value'))
)
op.create_index(op.f('ix_organization_location_id'), 'organization', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_organization_organization_id'), 'organization', ['organization_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_organization_organization_source_value_place_of_service_source_value'), 'organization', ['organization_source_value', 'place_of_service_source_value'], unique=False, postgresql_ops={u'place_of_service_source_value': u'varchar_pattern_ops', u'organization_source_value': u'varchar_pattern_ops'})
op.create_table('care_site',
sa.Column('place_of_service_source_value', sa.String(length=100), nullable=True),
sa.Column('place_of_service_concept_id', sa.Integer(), nullable=True),
sa.Column('care_site_source_value', sa.String(length=100), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_care_site_location_id_location')),
sa.ForeignKeyConstraint(['organization_id'], [u'organization.organization_id'], name=op.f('fk_care_site_organization_id_organization')),
sa.PrimaryKeyConstraint('care_site_id', name=op.f('pk_care_site')),
sa.UniqueConstraint('organization_id', 'care_site_source_value', name=op.f('uq_care_site_organization_id_care_site_source_value'))
)
op.create_index(op.f('ix_care_site_care_site_id'), 'care_site', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_care_site_location_id'), 'care_site', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_care_site_organization_id'), 'care_site', ['organization_id'], unique=False, postgresql_ops={})
op.create_table('provider',
sa.Column('provider_id', sa.Integer(), nullable=False),
sa.Column('npi', sa.String(length=20), nullable=True),
sa.Column('specialty_concept_id', sa.Integer(), nullable=True),
sa.Column('provider_source_value', sa.String(length=100), nullable=False),
sa.Column('dea', sa.String(length=20), nullable=True),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('specialty_source_value', sa.String(length=300), nullable=True),
sa.ForeignKeyConstraint(['care_site_id'], [u'care_site.care_site_id'], name=op.f('fk_provider_care_site_id_care_site')),
sa.PrimaryKeyConstraint('provider_id', name=op.f('pk_provider'))
)
op.create_index(op.f('ix_provider_care_site_id'), 'provider', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_provider_provider_id'), 'provider', ['provider_id'], unique=False, postgresql_ops={})
op.create_table('person',
sa.Column('provider_id', sa.Integer(), nullable=True),
sa.Column('ethnicity_concept_id', sa.Integer(), nullable=True),
sa.Column('ethnicity_source_value', sa.String(length=50), nullable=True),
sa.Column('person_source_value', sa.String(length=100), nullable=False),
sa.Column('month_of_birth', sa.Numeric(precision=2, scale=0), nullable=True),
sa.Column('pn_time_of_birth', sa.DateTime(), nullable=True),
sa.Column('day_of_birth', sa.Numeric(precision=2, scale=0), nullable=True),
sa.Column('year_of_birth', sa.Numeric(precision=4, scale=0), nullable=False),
sa.Column('gender_source_value', sa.String(length=50), nullable=True),
sa.Column('race_source_value', sa.String(length=50), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('gender_concept_id', sa.Integer(), nullable=False),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('race_concept_id', sa.Integer(), nullable=True),
sa.Column('pn_gestational_age', sa.Numeric(precision=4, scale=2), nullable=True),
sa.ForeignKeyConstraint(['care_site_id'], [u'care_site.care_site_id'], name=op.f('fk_person_care_site_id_care_site')),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_person_location_id_location')),
sa.ForeignKeyConstraint(['provider_id'], [u'provider.provider_id'], name=op.f('fk_person_provider_id_provider')),
sa.PrimaryKeyConstraint('person_id', name=op.f('pk_person')),
sa.UniqueConstraint('person_source_value', name=op.f('uq_person_person_source_value'))
)
op.create_index(op.f('ix_person_care_site_id'), 'person', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_location_id'), 'person', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_person_id'), 'person', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_provider_id'), 'person', ['provider_id'], unique=False, postgresql_ops={})
op.create_table('death',
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('death_type_concept_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('death_date', sa.DateTime(), nullable=False),
sa.Column('cause_of_death_concept_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('cause_of_death_source_value', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_death_person_id_person')),
sa.PrimaryKeyConstraint('person_id', 'death_type_concept_id', 'cause_of_death_concept_id', name=op.f('pk_death'))
)
op.create_index(op.f('ix_death_person_id'), 'death', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_death_person_id_death_type_concept_id_cause_of_death_concept_id'), 'death', ['person_id', 'death_type_concept_id', 'cause_of_death_concept_id'], unique=False, postgresql_ops={})
op.create_table('visit_occurrence',
sa.Column('provider_id', sa.Integer(), nullable=True),
sa.Column('place_of_service_concept_id', sa.Integer(), nullable=False),
sa.Column('visit_start_date', sa.DateTime(), nullable=False),
sa.Column('place_of_service_source_value', sa.String(length=100), nullable=True),
sa.Column('visit_end_date', sa.DateTime(), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('care_site_id', sa.Integer(), nullable=True),
sa.Column('visit_occurrence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_visit_occurrence_person_id_person')),
sa.PrimaryKeyConstraint('visit_occurrence_id', name=op.f('pk_visit_occurrence'))
)
op.create_index(op.f('ix_visit_occurrence_person_id'), 'visit_occurrence', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_visit_occurrence_person_id_visit_start_date'), 'visit_occurrence', ['person_id', 'visit_start_date'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_visit_occurrence_visit_occurrence_id'), 'visit_occurrence', ['visit_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('payer_plan_period',
sa.Column('plan_source_value', sa.String(length=100), nullable=True),
sa.Column('family_source_value', sa.String(length=100), nullable=True),
sa.Column('payer_plan_period_id', sa.Integer(), nullable=False),
sa.Column('payer_plan_period_end_date', sa.DateTime(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('payer_source_value', sa.String(length=100), nullable=True),
sa.Column('payer_plan_period_start_date', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_payer_plan_period_person_id_person')),
sa.PrimaryKeyConstraint('payer_plan_period_id', name=op.f('pk_payer_plan_period'))
)
op.create_index(op.f('ix_payer_plan_period_payer_plan_period_id'), 'payer_plan_period', ['payer_plan_period_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_payer_plan_period_person_id'), 'payer_plan_period', ['person_id'], unique=False, postgresql_ops={})
op.create_table('drug_era',
sa.Column('drug_era_end_date', sa.DateTime(), nullable=False),
sa.Column('drug_era_start_date', sa.DateTime(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('drug_era_id', sa.Integer(), nullable=False),
sa.Column('drug_exposure_count', sa.Numeric(precision=4, scale=0), nullable=True),
sa.Column('drug_type_concept_id', sa.Integer(), nullable=False),
sa.Column('drug_concept_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_drug_era_person_id_person')),
sa.PrimaryKeyConstraint('drug_era_id', name=op.f('pk_drug_era'))
)
op.create_index(op.f('ix_drug_era_drug_era_id'), 'drug_era', ['drug_era_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_era_person_id'), 'drug_era', ['person_id'], unique=False, postgresql_ops={})
op.create_table('observation_period',
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('observation_period_end_date', sa.DateTime(), nullable=True),
sa.Column('observation_period_start_date', sa.DateTime(), nullable=False),
sa.Column('observation_period_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_observation_period_person_id_person')),
sa.PrimaryKeyConstraint('observation_period_id', name=op.f('pk_observation_period'))
)
op.create_index(op.f('ix_observation_period_observation_period_id'), 'observation_period', ['observation_period_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_period_person_id'), 'observation_period', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_period_person_id_observation_period_start_date'), 'observation_period', ['person_id', 'observation_period_start_date'], unique=False, postgresql_ops={})
op.create_table('condition_era',
sa.Column('condition_concept_id', sa.Integer(), nullable=False),
sa.Column('condition_occurrence_count', sa.Numeric(precision=4, scale=0), nullable=True),
sa.Column('condition_era_id', sa.Integer(), nullable=False),
sa.Column('condition_type_concept_id', sa.Integer(), nullable=False),
sa.Column('condition_era_start_date', sa.DateTime(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('condition_era_end_date', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_condition_era_person_id_person')),
sa.PrimaryKeyConstraint('condition_era_id', name=op.f('pk_condition_era'))
)
op.create_index(op.f('ix_condition_era_condition_era_id'), 'condition_era', ['condition_era_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_condition_era_person_id'), 'condition_era', ['person_id'], unique=False, postgresql_ops={})
op.create_table('observation',
sa.Column('range_high', sa.Numeric(precision=14, scale=3), nullable=True),
sa.Column('observation_concept_id', sa.Integer(), nullable=False),
sa.Column('range_low', sa.Numeric(precision=14, scale=3), nullable=True),
sa.Column('observation_id', sa.Integer(), nullable=False),
sa.Column('relevant_condition_concept_id', sa.Integer(), nullable=True),
sa.Column('observation_time', sa.DateTime(), nullable=True),
sa.Column('unit_concept_id', sa.Integer(), nullable=True),
sa.Column('value_as_number', sa.Numeric(precision=14, scale=3), nullable=True),
sa.Column('observation_source_value', sa.String(length=100), nullable=True),
sa.Column('value_as_string', sa.String(length=4000), nullable=True),
sa.Column('observation_type_concept_id', sa.Integer(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('observation_date', sa.DateTime(), nullable=False),
sa.Column('value_as_concept_id', sa.Integer(), nullable=True),
sa.Column('associated_provider_id', sa.Integer(), nullable=True),
sa.Column('visit_occurrence_id', sa.Integer(), nullable=True),
sa.Column('units_source_value', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['associated_provider_id'], [u'provider.provider_id'], name=op.f('fk_observation_associated_provider_id_provider')),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_observation_person_id_person')),
sa.ForeignKeyConstraint(['visit_occurrence_id'], [u'visit_occurrence.visit_occurrence_id'], name=op.f('fk_observation_visit_occurrence_id_visit_occurrence')),
sa.PrimaryKeyConstraint('observation_id', name=op.f('pk_observation'))
)
op.create_index(op.f('ix_observation_associated_provider_id'), 'observation', ['associated_provider_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_observation_id'), 'observation', ['observation_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_person_id'), 'observation', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_person_id_observation_concept_id'), 'observation', ['person_id', 'observation_concept_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_observation_visit_occurrence_id'), 'observation', ['visit_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('drug_exposure',
sa.Column('refills', sa.Numeric(precision=3, scale=0), nullable=True),
sa.Column('stop_reason', sa.String(length=100), nullable=True),
sa.Column('relevant_condition_concept_id', sa.Integer(), nullable=True),
sa.Column('days_supply', sa.Numeric(precision=4, scale=0), nullable=True),
sa.Column('drug_exposure_start_date', sa.DateTime(), nullable=False),
sa.Column('prescribing_provider_id', sa.Integer(), nullable=True),
sa.Column('sig', sa.String(length=500), nullable=True),
sa.Column('drug_exposure_id', sa.Integer(), nullable=False),
sa.Column('drug_source_value', sa.String(length=100), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('drug_exposure_end_date', sa.DateTime(), nullable=True),
sa.Column('visit_occurrence_id', sa.Integer(), nullable=True),
sa.Column('drug_type_concept_id', sa.Integer(), nullable=False),
sa.Column('drug_concept_id', sa.Integer(), nullable=False),
sa.Column('quantity', sa.Numeric(precision=4, scale=0), nullable=True),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_drug_exposure_person_id_person')),
sa.ForeignKeyConstraint(['prescribing_provider_id'], [u'provider.provider_id'], name=op.f('fk_drug_exposure_prescribing_provider_id_provider')),
sa.ForeignKeyConstraint(['visit_occurrence_id'], [u'visit_occurrence.visit_occurrence_id'], name=op.f('fk_drug_exposure_visit_occurrence_id_visit_occurrence')),
sa.PrimaryKeyConstraint('drug_exposure_id', name=op.f('pk_drug_exposure'))
)
op.create_index(op.f('ix_drug_exposure_drug_exposure_id'), 'drug_exposure', ['drug_exposure_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_exposure_person_id'), 'drug_exposure', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_exposure_prescribing_provider_id'), 'drug_exposure', ['prescribing_provider_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_exposure_visit_occurrence_id'), 'drug_exposure', ['visit_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('procedure_occurrence',
sa.Column('procedure_concept_id', sa.Integer(), nullable=False),
sa.Column('relevant_condition_concept_id', sa.Integer(), nullable=True),
sa.Column('procedure_date', sa.DateTime(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('procedure_type_concept_id', sa.Integer(), nullable=False),
sa.Column('procedure_source_value', sa.String(length=100), nullable=True),
sa.Column('procedure_occurrence_id', sa.Integer(), nullable=False),
sa.Column('associated_provider_id', sa.Integer(), nullable=True),
sa.Column('visit_occurrence_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['associated_provider_id'], [u'provider.provider_id'], name=op.f('fk_procedure_occurrence_associated_provider_id_provider')),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_procedure_occurrence_person_id_person')),
sa.ForeignKeyConstraint(['visit_occurrence_id'], [u'visit_occurrence.visit_occurrence_id'], name=op.f('fk_procedure_occurrence_visit_occurrence_id_visit_occurrence')),
sa.PrimaryKeyConstraint('procedure_occurrence_id', name=op.f('pk_procedure_occurrence'))
)
op.create_index(op.f('ix_procedure_occurrence_associated_provider_id'), 'procedure_occurrence', ['associated_provider_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_procedure_occurrence_person_id'), 'procedure_occurrence', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_procedure_occurrence_procedure_occurrence_id'), 'procedure_occurrence', ['procedure_occurrence_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_procedure_occurrence_visit_occurrence_id'), 'procedure_occurrence', ['visit_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('condition_occurrence',
sa.Column('condition_concept_id', sa.Integer(), nullable=False),
sa.Column('condition_type_concept_id', sa.Integer(), nullable=False),
sa.Column('stop_reason', sa.String(length=100), nullable=True),
sa.Column('condition_start_date', sa.DateTime(), nullable=False),
sa.Column('condition_end_date', sa.DateTime(), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('condition_source_value', sa.String(length=100), nullable=True),
sa.Column('condition_occurrence_id', sa.Integer(), nullable=False),
sa.Column('associated_provider_id', sa.Integer(), nullable=True),
sa.Column('visit_occurrence_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['associated_provider_id'], [u'provider.provider_id'], name=op.f('fk_condition_occurrence_associated_provider_id_provider')),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_condition_occurrence_person_id_person')),
sa.ForeignKeyConstraint(['visit_occurrence_id'], [u'visit_occurrence.visit_occurrence_id'], name=op.f('fk_condition_occurrence_visit_occurrence_id_visit_occurrence')),
sa.PrimaryKeyConstraint('condition_occurrence_id', name=op.f('pk_condition_occurrence'))
)
op.create_index(op.f('ix_condition_occurrence_associated_provider_id'), 'condition_occurrence', ['associated_provider_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_condition_occurrence_condition_occurrence_id'), 'condition_occurrence', ['condition_occurrence_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_condition_occurrence_person_id'), 'condition_occurrence', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_condition_occurrence_visit_occurrence_id'), 'condition_occurrence', ['visit_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('procedure_cost',
sa.Column('total_out_of_pocket', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('revenue_code_source_value', sa.String(length=100), nullable=True),
sa.Column('paid_toward_deductible', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('revenue_code_concept_id', sa.Integer(), nullable=True),
sa.Column('payer_plan_period_id', sa.Integer(), nullable=True),
sa.Column('paid_by_payer', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('procedure_cost_id', sa.Integer(), nullable=False),
sa.Column('paid_copay', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('paid_coinsurance', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('paid_by_coordination_benefits', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('procedure_occurrence_id', sa.Integer(), nullable=False),
sa.Column('total_paid', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('disease_class_concept_id', sa.Integer(), nullable=True),
sa.Column('disease_class_source_value', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['payer_plan_period_id'], [u'payer_plan_period.payer_plan_period_id'], name=op.f('fk_procedure_cost_payer_plan_period_id_payer_plan_period')),
sa.ForeignKeyConstraint(['procedure_occurrence_id'], [u'procedure_occurrence.procedure_occurrence_id'], name=op.f('fk_procedure_cost_procedure_occurrence_id_procedure_occurrence')),
sa.PrimaryKeyConstraint('procedure_cost_id', name=op.f('pk_procedure_cost'))
)
op.create_index(op.f('ix_procedure_cost_payer_plan_period_id'), 'procedure_cost', ['payer_plan_period_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_procedure_cost_procedure_cost_id'), 'procedure_cost', ['procedure_cost_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_procedure_cost_procedure_occurrence_id'), 'procedure_cost', ['procedure_occurrence_id'], unique=False, postgresql_ops={})
op.create_table('drug_cost',
sa.Column('total_out_of_pocket', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('paid_toward_deductible', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('payer_plan_period_id', sa.Integer(), nullable=True),
sa.Column('drug_cost_id', sa.Integer(), nullable=False),
sa.Column('paid_by_payer', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('drug_exposure_id', sa.Integer(), nullable=False),
sa.Column('paid_copay', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('paid_coinsurance', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('paid_by_coordination_benefits', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('average_wholesale_price', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('ingredient_cost', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('total_paid', sa.Numeric(precision=8, scale=2), nullable=True),
sa.Column('dispensing_fee', sa.Numeric(precision=8, scale=2), nullable=True),
sa.ForeignKeyConstraint(['drug_exposure_id'], [u'drug_exposure.drug_exposure_id'], name=op.f('fk_drug_cost_drug_exposure_id_drug_exposure')),
sa.ForeignKeyConstraint(['payer_plan_period_id'], [u'payer_plan_period.payer_plan_period_id'], name=op.f('fk_drug_cost_payer_plan_period_id_payer_plan_period')),
sa.PrimaryKeyConstraint('drug_cost_id', name=op.f('pk_drug_cost'))
)
op.create_index(op.f('ix_drug_cost_drug_cost_id'), 'drug_cost', ['drug_cost_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_cost_drug_exposure_id'), 'drug_cost', ['drug_exposure_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_drug_cost_payer_plan_period_id'), 'drug_cost', ['payer_plan_period_id'], unique=False, postgresql_ops={})
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_drug_cost_payer_plan_period_id'), table_name='drug_cost')
op.drop_index(op.f('ix_drug_cost_drug_exposure_id'), table_name='drug_cost')
op.drop_index(op.f('ix_drug_cost_drug_cost_id'), table_name='drug_cost')
op.drop_table('drug_cost')
op.drop_index(op.f('ix_procedure_cost_procedure_occurrence_id'), table_name='procedure_cost')
op.drop_index(op.f('ix_procedure_cost_procedure_cost_id'), table_name='procedure_cost')
op.drop_index(op.f('ix_procedure_cost_payer_plan_period_id'), table_name='procedure_cost')
op.drop_table('procedure_cost')
op.drop_index(op.f('ix_condition_occurrence_visit_occurrence_id'), table_name='condition_occurrence')
op.drop_index(op.f('ix_condition_occurrence_person_id'), table_name='condition_occurrence')
op.drop_index(op.f('ix_condition_occurrence_condition_occurrence_id'), table_name='condition_occurrence')
op.drop_index(op.f('ix_condition_occurrence_associated_provider_id'), table_name='condition_occurrence')
op.drop_table('condition_occurrence')
op.drop_index(op.f('ix_procedure_occurrence_visit_occurrence_id'), table_name='procedure_occurrence')
op.drop_index(op.f('ix_procedure_occurrence_procedure_occurrence_id'), table_name='procedure_occurrence')
op.drop_index(op.f('ix_procedure_occurrence_person_id'), table_name='procedure_occurrence')
op.drop_index(op.f('ix_procedure_occurrence_associated_provider_id'), table_name='procedure_occurrence')
op.drop_table('procedure_occurrence')
op.drop_index(op.f('ix_drug_exposure_visit_occurrence_id'), table_name='drug_exposure')
op.drop_index(op.f('ix_drug_exposure_prescribing_provider_id'), table_name='drug_exposure')
op.drop_index(op.f('ix_drug_exposure_person_id'), table_name='drug_exposure')
op.drop_index(op.f('ix_drug_exposure_drug_exposure_id'), table_name='drug_exposure')
op.drop_table('drug_exposure')
op.drop_index(op.f('ix_observation_visit_occurrence_id'), table_name='observation')
op.drop_index(op.f('ix_observation_person_id_observation_concept_id'), table_name='observation')
op.drop_index(op.f('ix_observation_person_id'), table_name='observation')
op.drop_index(op.f('ix_observation_observation_id'), table_name='observation')
op.drop_index(op.f('ix_observation_associated_provider_id'), table_name='observation')
op.drop_table('observation')
op.drop_index(op.f('ix_condition_era_person_id'), table_name='condition_era')
op.drop_index(op.f('ix_condition_era_condition_era_id'), table_name='condition_era')
op.drop_table('condition_era')
op.drop_index(op.f('ix_observation_period_person_id_observation_period_start_date'), table_name='observation_period')
op.drop_index(op.f('ix_observation_period_person_id'), table_name='observation_period')
op.drop_index(op.f('ix_observation_period_observation_period_id'), table_name='observation_period')
op.drop_table('observation_period')
op.drop_index(op.f('ix_drug_era_person_id'), table_name='drug_era')
op.drop_index(op.f('ix_drug_era_drug_era_id'), table_name='drug_era')
op.drop_table('drug_era')
op.drop_index(op.f('ix_payer_plan_period_person_id'), table_name='payer_plan_period')
op.drop_index(op.f('ix_payer_plan_period_payer_plan_period_id'), table_name='payer_plan_period')
op.drop_table('payer_plan_period')
op.drop_index(op.f('ix_visit_occurrence_visit_occurrence_id'), table_name='visit_occurrence')
op.drop_index(op.f('ix_visit_occurrence_person_id_visit_start_date'), table_name='visit_occurrence')
op.drop_index(op.f('ix_visit_occurrence_person_id'), table_name='visit_occurrence')
op.drop_table('visit_occurrence')
op.drop_index(op.f('ix_death_person_id_death_type_concept_id_cause_of_death_concept_id'), table_name='death')
op.drop_index(op.f('ix_death_person_id'), table_name='death')
op.drop_table('death')
op.drop_index(op.f('ix_person_provider_id'), table_name='person')
op.drop_index(op.f('ix_person_person_id'), table_name='person')
op.drop_index(op.f('ix_person_location_id'), table_name='person')
op.drop_index(op.f('ix_person_care_site_id'), table_name='person')
op.drop_table('person')
op.drop_index(op.f('ix_provider_provider_id'), table_name='provider')
op.drop_index(op.f('ix_provider_care_site_id'), table_name='provider')
op.drop_table('provider')
op.drop_index(op.f('ix_care_site_organization_id'), table_name='care_site')
op.drop_index(op.f('ix_care_site_location_id'), table_name='care_site')
op.drop_index(op.f('ix_care_site_care_site_id'), table_name='care_site')
op.drop_table('care_site')
op.drop_index(op.f('ix_organization_organization_source_value_place_of_service_source_value'), table_name='organization')
op.drop_index(op.f('ix_organization_organization_id'), table_name='organization')
op.drop_index(op.f('ix_organization_location_id'), table_name='organization')
op.drop_table('organization')
op.drop_index(op.f('ix_cohort_cohort_id'), table_name='cohort')
op.drop_table('cohort')
op.drop_index(op.f('ix_location_location_id'), table_name='location')
op.drop_table('location')
### end Alembic commands ###
| PEDSnet/pedsnetcdms | pedsnetcdms/pedsnetcdm/alembic/versions/cfbd6d35cab_initial.py | Python | bsd-2-clause | 30,989 | 0.012101 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.exceptions import ServiceRequestError
from azure.identity._internal.msal_client import MsalClient
import pytest
from helpers import mock, mock_response, validating_transport, Request
def test_retries_requests():
"""The client should retry token requests"""
message = "can't connect"
transport = mock.Mock(send=mock.Mock(side_effect=ServiceRequestError(message)))
client = MsalClient(transport=transport)
with pytest.raises(ServiceRequestError, match=message):
client.post("https://localhost")
assert transport.send.call_count > 1
transport.send.reset_mock()
with pytest.raises(ServiceRequestError, match=message):
client.get("https://localhost")
assert transport.send.call_count > 1
def test_get_error_response():
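    """get_error_response should return the transport response that carried a given
    error payload, and only the most recently seen error should stay retrievable."""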
first_result = {"error": "first"}
first_response = mock_response(401, json_payload=first_result)
second_result = {"error": "second"}
second_response = mock_response(401, json_payload=second_result)
transport = validating_transport(
requests=[Request(url="https://localhost")] * 2, responses=[first_response, second_response]
)
client = MsalClient(transport=transport)
for result in (first_result, second_result):
assert not client.get_error_response(result)
client.get("https://localhost")
response = client.get_error_response(first_result)
assert response is first_response
client.post("https://localhost")
response = client.get_error_response(second_result)
assert response is second_response
assert not client.get_error_response(first_result)
| Azure/azure-sdk-for-python | sdk/identity/azure-identity/tests/test_msal_client.py | Python | mit | 1,769 | 0.001131 |
from pattern import Pattern
import copy
import numpy as np
import random
import collections
#from scipy.signal import convolve2d
import time
from collections import deque
class SpiralOutFast(Pattern):
def __init__(self):
self.register_param("r_leak", 0, 3, 1.2)
self.register_param("g_leak", 0, 3, 1.7)
self.register_param("b_leak", 0, 3, 2)
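        # *_leak is how many seconds of audio history get spread along the spiral for
        # that colour channel; a larger value maps a longer slice of history onto the pixels.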
        self.register_param("speed", 0, 1, 0)
        # Initialise time and color history
self.t = [0,0]
self.r = [0,0]
self.g = [0,0]
self.b = [0,0]
self.buff_len = 1500
self.start_time = np.float(time.time())
def on_pattern_select(self, octopus):
self.pixels = octopus.pixels_spiral()
self.previous_time = np.float16(time.time())
def next_frame(self, octopus, data):
current_time = time.time() - self.start_time
self.previous_time = current_time
scale = float(255)
self.t.append(current_time)
self.r.append(scale*np.mean([data.eq[0], data.eq[1]]))
self.g.append(scale*np.mean([data.eq[2], data.eq[3]]))
self.b.append(scale*np.mean([data.eq[4], data.eq[5], data.eq[6]]))
if len(self.t) > self.buff_len:
del self.t[0]
del self.r[0]
del self.g[0]
del self.b[0]
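        # Sample each channel's history at evenly spaced instants reaching back *_leak
        # seconds, one sample per pixel: the first pixel shows the current level, later
        # pixels show progressively older levels, so the signal appears to travel along
        # the spiral as time advances.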
domain_r = np.linspace(current_time, current_time - self.r_leak, len(self.pixels))
domain_g = np.linspace(current_time, current_time - self.g_leak, len(self.pixels))
domain_b = np.linspace(current_time, current_time - self.b_leak, len(self.pixels))
r = np.interp(domain_r, self.t, self.r)
g = np.interp(domain_g, self.t, self.g)
b = np.interp(domain_b, self.t, self.b)
for i in range(len(self.pixels)):
self.pixels[i].color = (r[i], g[i], b[i])
| TheGentlemanOctopus/thegentlemanoctopus | octopus_code/core/octopus/patterns/spiralOutFast.py | Python | gpl-3.0 | 1,846 | 0.008667 |
# -*- coding: utf-8 -*-
import math
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line
from kivy.vector import Vector
from kivy.properties import StringProperty, DictProperty, BooleanProperty, BoundedNumericProperty, ListProperty
class Painter(Widget):
tools = DictProperty({'arrow': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'line': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'freeline': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'eraser': {'thickness': 0.4}
})
current_tool = StringProperty('arrow')
thickness = BoundedNumericProperty(1, min=0.5, max=10, errorvalue=0.5)
color = ListProperty((1, 1, 1, 1))
locked = BooleanProperty(False)
def on_thickness(self, instance, value):
self.tools[self.current_tool]['thickness'] = value
def on_color(self, instance, value):
self.tools[self.current_tool]['color'] = value
def on_current_tool(self, instance, value):
self.color = self.tools[value]['color']
self.thickness = self.tools[value]['thickness']
def on_touch_down(self, touch):
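        # Kivy's per-touch dict (touch.ud) holds this stroke's state: the Line being
        # drawn and, for arrows, the head Line plus the press position, so the
        # move/up handlers can keep extending the same canvas instructions.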
if not self.locked and self.collide_point(*touch.pos):
touch.grab(self)
with self.canvas:
Color(*self.color, mode='rgba')
touch.ud['line'] = Line(points=(touch.x, touch.y), width=self.thickness, cap='round', joint='miter')
if self.current_tool == 'arrow':
touch.ud['arrowhead'] = Line(width=self.thickness, cap='square', joint='miter')
touch.ud['initial_pos'] = touch.pos
else:
return False
return super(Painter, self).on_touch_down(touch)
def on_touch_move(self, touch):
if not self.locked and self.collide_point(*touch.pos):
try:
if self.current_tool == 'freeline':
touch.ud['line'].points += [touch.x, touch.y]
else:
touch.ud['line'].points = [touch.ox, touch.oy, touch.x, touch.y]
except KeyError:
pass
else:
return False
return super(Painter, self).on_touch_move(touch)
def arrowhead(self, start, end):
'''
start : list of points (x, y) for the start of the arrow.
end : list of points (x, y) for the end of the arrow.
return : list of points for each line forming the arrow head.
'''
# TODO: Adjust arrowhead size according to line thickness.
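        # h and w are the arrowhead's length and half-width in pixels; U is the unit
        # vector along the shaft and V its perpendicular, so v1 and v2 end up on either
        # side of the shaft, h pixels back from the tip B.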
A = Vector(start)
B = Vector(end)
h = 10 * math.sqrt(3)
w = 10
U = (B - A) / Vector(B - A).length()
V = Vector(-U.y, U.x)
v1 = B - h * U + w * V
v2 = B - h * U - w * V
return (v1, v2)
def on_touch_up(self, touch):
if not self.locked and touch.grab_current == self and \
self.collide_point(*touch.pos) and self.current_tool == 'arrow':
try:
arrowhead = self.arrowhead(touch.ud['initial_pos'], touch.pos)
except KeyError:
pass
except ZeroDivisionError:
pass
else:
touch.ud['arrowhead'].points += arrowhead[0]
touch.ud['arrowhead'].points += (touch.x, touch.y)
touch.ud['arrowhead'].points += arrowhead[1]
touch.ungrab(self)
else:
return False
return super(Painter, self).on_touch_up(touch)
def on_size(self, *kwargs):
# TODO: Update every drawing according to size.
self.canvas.clear()
| octogene/hadaly | hadaly/painter.py | Python | gpl-3.0 | 3,680 | 0.00163 |