repo_name: stringlengths 5-100
path: stringlengths 4-231
language: stringclasses 1 value
license: stringclasses 15 values
size: int64 6-947k
score: float64 0-0.34
prefix: stringlengths 0-8.16k
middle: stringlengths 3-512
suffix: stringlengths 0-8.17k

antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_7/ar_/test_artificial_1024_RelativeDifference_PolyTrend_7__20.py | Python | bsd-3-clause | 274 | 0.083942
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);

AlexandreProenca/django-url-filter | tests/filtersets/test_base.py | Python | mit | 7,675 | 0.000782
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import pytest
from django import forms
from django.http import QueryDict
from test_project.one_to_one.models import Restaurant, Waiter
from url_filter.backends.django import DjangoFilterBackend
from url_filter.filters import Filter
from url_filter.filtersets.base import FilterSet, StrictMode
from url_filter.utils import FilterSpec
class TestFilterSet(object):
def test_init(self):
fs = FilterSet(
data='some data',
queryset='queryset',
context={'context': 'here'},
strict_mode=StrictMode.fail,
)
assert fs.data == 'some data'
assert fs.queryset == 'queryset'
assert fs.context == {'context': 'here'}
assert fs.strict_mode == StrictMode.fail
def test_get_filters(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
filters = TestFilterSet().get_filters()
assert isinstance(filters, dict)
assert list(filters.keys()) == ['foo']
assert isinstance(filters['foo'], Filter)
assert filters['foo'].parent is None
def test_filters(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
fs = TestFilterSet()
filters = fs.filters
assert isinstance(filters, dict)
assert list(filters.keys()) == ['foo']
assert isinstance(filters['foo'], Filter)
assert filters['foo'].parent is fs
assert filters['foo'].name == 'foo'
def test_default_filter_no_default(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField())
assert TestFilterSet().default_filter is None
def test_default_filter(self):
class TestFilterSet(FilterSet):
foo = Filter(form_field=forms.CharField(), is_default=True)
bar = Filter(form_field=forms.CharField())
default = TestFilterSet().default_filter
assert isinstance(default, Filter)
assert default.name == 'foo'
def test_validate_key(self):
assert FilterSet().validate_key('foo') is None
assert FilterSet().validate_key('foo__bar') is None
assert FilterSet().validate_key('foo__bar!') is None
with pytest.raises(forms.ValidationError):
FilterSet().validate_key('f!oo')
def test_get_filter_backend(self):
backend = FilterSet().get_filter_backend()
assert isinstance(backend, DjangoFilterBackend)
def test_filter_no_queryset(self):
fs = FilterSet()
with pytest.raises(AssertionError):
fs.filter()
def test_filter_data_not_querydict(self):
fs = FilterSet(queryset=[])
with pytest.raises(AssertionError):
fs.filter()
def test_get_specs(self):
class BarFilterSet(FilterSet):
other = Filter(source='stuff',
form_field=forms.CharField(),
default_lookup='contains')
thing = Filter(form_field=forms.IntegerField(min_value=0, max_value=15))
class FooFilterSet(FilterSet):
field = Filter(form_field=forms.CharField())
bar = BarFilterSet()
def _test(data, expected, **kwargs):
fs = FooFilterSet(
data=QueryDict(data),
queryset=[],
**kwargs
)
assert set(fs.get_specs()) == set(expected)
_test('field=earth&bar__other=mars', [
FilterSpec(['field'], 'exact', 'earth', False),
FilterSpec(['bar', 'stuff'], 'contains', 'mars', False),
])
_test('field!=earth&bar__other=mars', [
FilterSpec(['field'], 'exact', 'earth', True),
FilterSpec(['bar', 'stuff'], 'contains', 'mars', False),
])
_test('field__in=earth,pluto&bar__other__icontains!=mars', [
FilterSpec(['field'], 'in', ['earth', 'pluto'], False),
FilterSpec(['bar', 'stuff'], 'icontains', 'mars', True),
])
_test('fields__in=earth,pluto&bar__other__icontains!=mars', [
FilterSpec(['bar', 'stuff'], 'icontains', 'mars', True),
])
_test('field__in=earth,pluto&bar__ot!her__icontains!=mars', [
FilterSpec(['field'], 'in', ['earth', 'pluto'], False),
])
_test('bar__thing=5', [
FilterSpec(['bar', 'thing'], 'exact', 5, False),
])
_test('bar__thing__in=5,10,15', [
FilterSpec(['bar', 'thing'], 'in', [5, 10, 15], False),
])
_test('bar__thing__range=5,10', [
FilterSpec(['bar', 'thing'], 'range', [5, 10], False),
])
_test('bar=5', [])
_test('bar__thing__range=5,10,15', [])
_test('bar__thing=100', [])
_test('bar__thing__in=100,5', [])
with pytest.raises(forms.ValidationError):
_test('bar__thing__in=100,5', [], strict_mode=StrictMode.fail)
def test_filter_one_to_one(self, one_to_one):
class PlaceFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
name = Filter(form_field=forms.CharField(max_length=50))
address = Filter(form_field=forms.CharField(max_length=80))
class RestaurantFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
place = PlaceFilterSet()
serves_hot_dogs = Filter(form_field=forms.BooleanField(required=False))
serves_pizza = Filter(form_field=forms.BooleanField(required=False))
class WaiterFilterSet(FilterSet):
pk = Filter(form_field=forms.IntegerField(min_value=0), is_default=True)
restaurant = RestaurantFilterSet()
name = Filter(form_field=forms.CharField(max_length=50))
def _test(fs, data, qs, expected, count):
_fs = fs(
data=QueryDict(data),
queryset=qs,
)
filtered = _fs.filter()
assert filtered.count() == count
assert set(filtered) == set(expected)
_test(
RestaurantFilterSet,
'place__name__startswith=Demon',
Restaurant.objects.all(),
Restaurant.objects.filter(place__name__startswith='Demon'),
1
)
_test(
RestaurantFilterSet,
'place__address__contains!=Ashland',
Restaurant.objects.all(),
Restaurant.objects.exclude(place__address__contains='Ashland'),
1
)
_test(
WaiterFilterSet,
'restaurant__place__pk=1',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place=1),
2
)
_test(
WaiterFilterSet,
'restaurant__place=1',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place=1),
2
)
_test(
WaiterFilterSet,
'restaurant__place__name__startswith=Demon',
Waiter.objects.all(),
Waiter.objects.filter(restaurant__place__name__startswith="Demon"),
2
)
_test(
WaiterFilterSet,
('restaurant__place__name__startswith=Demon'
'&name__icontains!=jon'),
Waiter.objects.all(),
(Waiter.objects
.filter(restaurant__place__name__startswith="Demon")
.exclude(name__icontains='jon')),
1
)
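A minimal usage sketch of the FilterSet API exercised by the tests above; it reuses the Restaurant fixture model from test_project and the startswith query shown in test_filter_one_to_one, so everything here is taken from the sample rather than from separate library documentation.

# Sketch: declaring a nested FilterSet and filtering a queryset from a query string.
from django import forms
from django.http import QueryDict
from test_project.one_to_one.models import Restaurant
from url_filter.filters import Filter
from url_filter.filtersets.base import FilterSet

class PlaceFilterSet(FilterSet):
    name = Filter(form_field=forms.CharField(max_length=50))
    address = Filter(form_field=forms.CharField(max_length=80))

class RestaurantFilterSet(FilterSet):
    place = PlaceFilterSet()
    serves_pizza = Filter(form_field=forms.BooleanField(required=False))

fs = RestaurantFilterSet(
    data=QueryDict('place__name__startswith=Demon'),
    queryset=Restaurant.objects.all(),
)
filtered = fs.filter()  # behaves like Restaurant.objects.filter(place__name__startswith='Demon')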

klahnakoski/esReplicate | pyLibrary/queries/namespace/typed.py | Python | mpl-2.0 | 4,356 | 0.002525
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Mapping
from mo_logs import Log
from mo_dots import set_default, wrap, split_field, join_field, concat_field
from mo_math import Math
from pyLibrary.queries.domains import is_keyword
from pyLibrary.queries.expressions import Expression
from pyLibrary.queries.namespace import convert_list, Namespace
from pyLibrary.queries.query import QueryOp
from mo_times.dates import Date
class Typed(Namespace):
"""
NOTE: USING THE ".$value" SUFFIX IS DEPRECATED: CURRENT VERSIONS OF ES ARE STRONGLY TYPED, LEAVING NO
CASE WHERE A GENERAL "value" IS USEFUL. WE WOULD LIKE TO MOVE TO ".$number", ".$string", ETC. FOR
EACH TYPE, LIKE WE DO WITH DATABASES
"""
def __init__(self):
self.converter_map = {
"and": self._convert_many,
"or": self._convert_many,
"not": self.convert,
"missing": self.convert,
"exists": self.convert
}
def convert(self, expr):
"""
ADD THE ".$value" SUFFIX TO ALL VARIABLES
"""
if isinstance(expr, Expression):
vars_ = expr.vars()
rename = {v: concat_field(v, "$value") for v in vars_}
return expr.map(rename)
if expr is True or expr == None or expr is False:
return expr
elif Math.is_number(expr):
return expr
elif expr == ".":
return "."
elif is_keyword(expr):
#TODO: LOOKUP SCHEMA AND ADD ALL COLUMNS WITH THIS PREFIX
return expr + ".$value"
elif isinstance(expr, basestring):
Log.error("{{name|quote}} is not a valid variable name", name=expr)
elif isinstance(expr, Date):
return expr
elif isinstance(expr, QueryOp):
return self._convert_query(expr)
elif isinstance(expr, Mapping):
if expr["from"]:
return self._convert_query(expr)
elif len(expr) >= 2:
#ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
return wrap({name: self.convert(value) for name, value in expr.items()})
else:
# ASSUME SINGLE-CLAUSE EXPRESSION
k, v = expr.items()[0]
return self.converter_map.get(k, self._convert_bop)(k, v)
elif isinstance(expr, (list, set, tuple)):
return wrap([self.convert(value) for value in expr])
def _convert_query(self, query):
output = QueryOp("from", None)
output.select = self._convert_clause(query.select)
output.where = self.convert(query.where)
output.frum = self._convert_from(query.frum)
output.edges = self._convert_clause(query.edges)
output.groupby = self._convert_clause(query.groupby)
output.window = convert_list(self._convert_window, query.window)
output.having = convert_list(self._convert_having, query.having)
output.sort = self._convert_clause(query.sort)
output.limit = query.limit
output.format = query.format
return output
def _convert_clause(self, clause):
"""
JSON QUERY EXPRESSIONS HAVE MANY CLAUSES WITH SIMILAR COLUMN DECLARATIONS
"""
if clause == None:
return None
elif isinstance(clause, Mapping):
return set_default({"value": self.convert(clause["value"])}, clause)
else:
return [set_default({"value": self.convert(c.value)}, c) for c in clause]
def _convert_from(self, frum):
return frum
def _convert_having(self, having):
raise NotImplementedError()
def _convert_window(self, window):
raise NotImplementedError()
def _convert_many(self, k, v):
return {k: map(self.convert, v)}
def _convert_bop(self, op, term):
if isinstance(term, list):
return {op: map(self.convert, term)}
return {op: {var: val for var, val in term.items()}}
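A standalone sketch of the renaming step in Typed.convert above: build the map that sends every variable name to its ".$value"-suffixed form. concat_field from mo_dots is assumed here to join with a dot, which is the behavior the "expr + '.$value'" branch suggests.

# Illustration only: the rename map Typed.convert hands to Expression.map().
def add_value_suffix(variable_names):
    return {v: v + ".$value" for v in variable_names}

# add_value_suffix(["build.date", "run.duration"])
# -> {"build.date": "build.date.$value", "run.duration": "run.duration.$value"}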

mdejongh/ModelComparison | lib/ModelComparison/ModelComparisonClient.py | Python | mit | 7,099 | 0.001127
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
# This is bandaid helper function until we get a full
# KBase python auth client released
auth = _base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
status = ret.status_code
if status >= 200 and status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination for user %s' % (user_id))
else:
raise Exception(ret.text)
return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if _os.path.exists(file):
try:
with open(file) as authrc:
rawdata = _json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=_os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in ('user_id', 'token',
'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class ModelComparison(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in _os.environ:
self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_context:
arg_hash['context'] = json_rpc_context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
json_header = None
if _CT in ret.headers:
json_header = ret.headers[_CT]
if _CT in ret.headers and ret.headers[_CT] == _AJ:
err = _json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
ret.encoding = 'utf-8'
resp = _json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def compare_models(self, params, json_rpc_context = None):
if json_rpc_context and type(json_rpc_context) is not dict:
raise ValueError('Method compare_models: argument json_rpc_context is not type dict as required.')
resp = self._call('ModelComparison.compare_models',
[params], json_rpc_context)
return resp[0]
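A hedged usage sketch for the autogenerated client above; the service URL and the keys inside params are placeholders (the real keys come from the KBase type spec, which is not part of this file).

# Hypothetical call pattern for the ModelComparison client.
client = ModelComparison('https://kbase.example.org/services/model_comparison',
                         token='KBASE_AUTH_TOKEN_PLACEHOLDER')
result = client.compare_models({
    'mc_name': 'my_comparison',                # placeholder key
    'models': ['ws1/model_a', 'ws1/model_b'],  # placeholder key
})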

beav/pulp | server/test/unit/test_migration_0005.py | Python | gpl-2.0 | 2,511 | 0.001195
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from base import PulpServerTests
from pulp.server.db import connection
from pulp.server.db.migrate.models import MigrationModule
from pulp.plugins.types.database import TYPE_COLLECTION_PREFIX
ID = '_id'
LAST_UPDATED = '_last_updated'
MIGRATION = 'pulp.server.db.migrations.0005_unit_last_updated'
def test_collections(n=3):
names = []
for suffix in range(0, n):
name = TYPE_COLLECTION_PREFIX + str(suffix)
names.append(name)
return names
def test_units(n=10):
units = []
for unit_id in range(0, n):
unit = {ID: unit_id}
if unit_id % 2 == 0:
unit[LAST_UPDATED] = 1
units.append(unit)
return units
TEST_COLLECTIONS = test_collections()
TEST_UNITS = test_units()
class TestMigration_0005(PulpServerTests):
def setUp(self):
self.clean()
super(TestMigration_0005, self).setUp()
for collection in [connection.get_collection(n, True) for n in TEST_COLLECTIONS]:
for unit in TEST_UNITS:
collection.save(unit, safe=True)
def tearDown(self):
super(TestMigration_0005, self).tearDown()
self.clean()
def clean(self):
database = connection.get_database()
for name in [n for n in database.collection_names() if n in TEST_COLLECTIONS]:
database.drop_collection(name)
def test(self):
# migrate
module = MigrationModule(MIGRATION)._module
module.migrate()
# validation
for collection in [connection.get_collection(n) for n in TEST_COLLECTIONS]:
for unit in collection.find({}):
self.assertTrue(LAST_UPDATED in unit)
unit_id = unit[ID]
last_updated = unit[LAST_UPDATED]
if unit_id % 2 == 0:
self.assertEqual(last_updated, 1)
else:
self.assertTrue(isinstance(last_updated, float))

google-research/google-research | multiple_user_representations/models/parametric_attention_test.py | Python | apache-2.0 | 1,915 | 0.001567
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_attention."""
import numpy as np
import tensorflow as tf
from multiple_user_representations.models import parametric_attention
class ParametricAttentionTest(tf.test.TestCase):
def test_parametric_attention_model_with_single_representation(self):
model = parametric_attention.SimpleParametricAttention(
output_dimension=2,
input_embedding_dimension=2,
vocab_size=10,
num_representations=1,
max_sequence_size=20)
input_batch = tf.convert_to_tensor(
np.random.randint(low=0, high=10, size=(10, 20)))
output = model(input_batch)
self.assertIsInstance(model, tf.keras.Model)
self.assertSequenceEqual(output.numpy().shape, [10, 1, 2])
def test_parametric_attention_model_with_multiple_representations(self):
model = parametric_attention.SimpleParametricAttention(
output_dimension=2,
input_embedding_dimension=2,
vocab_size=10,
num_representations=3,
max_sequence_size=20)
input_batch = tf.convert_to_tensor(
np.random.randint(low=0, high=10, size=(10, 20)))
output = model(input_batch)
self.assertIsInstance(model, tf.keras.Model)
self.assertSequenceEqual(output.numpy().shape, [10, 3, 2])
if __name__ == '__main__':
tf.test.main()

kcsaff/maze-builder | maze_builder/lost_text/negative_status.py | Python | mit | 5,360 | 0
from maze_builder.sewer import MadLibs, Choice
from maze_builder.lost_text.text_decorations import fix_sentence
negative_status_sentence = MadLibs({
'{NEGATIVE_STATUS INTENSIFIED SENTENCE FIXED}': 1,
'{NEGATIVE_STATUS SENTENCE FIXED}': 1,
},
FIXED=fix_sentence,
SENTENCE={
'{}.': 30,
'{}!': 10,
'{} again?': 10,
'{} now.': 2,
'{} here.': 5,
'{} here!': 2,
'{}. Now I know what that means.': 1,
'{}: and not for the first time.': 1,
'{} -- and not for the first time.': 1,
'{}? Yes, always.': 1,
'I feel {}.': 10,
'I feel so {}.': 5,
'I feel so {}!': 5,
'I\'m {}.': 10,
'I\'m so {}.': 5,
'Will I always be so {}?': 1,
'Why am I so {}?': 1,
'No one knows how {} I am.': 1,
'Has anyone ever been so {} before?': 1,
'Has anyone ever been so {}?': 1,
'Has anyone ever felt so {}?': 1,
'I never want to feel this {} again.': 1,
'I hope I\'ll never be so {} again.': 1,
'I can\'t stand being so {}.': 1,
'I\'ve never been so {}.': 1,
'I\'ve never been so {} before.': 1,
'Before this trip, I\'d never been so {}.': 1,
'At home, no one is ever so {}.': 1,
'So {} a person can be.': 1,
'So {} a person can feel.': 1,
'We weren\'t meant to feel so {}.': 1,
'I never knew what it was like to be so {}.': 1,
'No one has ever been so {}.': 1,
'I could write a book about being so {}.': 1,
'Even in my dreams, I\'m {}.': 1,
'I\'m as {} as I\'ve ever been.': 1,
'Why does God allow us to be so {}?': 1,
'Would I have come this way, if I\'d known I\'d be so {}?': 1,
},
INTENSIFIED={
'awfully {}': 1,
'amazingly {}': 1,
'cursedly {}': 1,
'critically {}': 1,
'deathly {}': 1,
'meagerly {}': 0.2,
'super-{}': 1,
'devastatingly {}': 1,
'terribly {}': 1,
'dreadfully {}': 1,
'wickedly {}': 1,
'disgracefully {}': 1,
'completely {}': 1,
'reprehensibly {}': 1,
'unforgivably {}': 1,
'unpleasantly {}': 1,
'wretchedly {}': 1,
},
NEGATIVE_STATUS={ # Used as '{}' or 'I'm so {}...'
'hungry': 2,
'cold': 2,
'tired': 5,
'exhausted': 1,
'defeated': 1,
'worn out': 1,
'ravenous': 1,
'faint': 1,
'empty': 1,
'hollow': 1,
'insatiable': 1,
'famished': 1,
'unsatisfied': 1,
'beat': 1,
'annoyed': 1,
'bored': 2,
'distressed': 1,
'drained': 1,
'exasperated': 1,
'fatigued': 1,
'sleepy': 1,
'collapsing': 1,
'jaded': 1,
'overtaxed': 1,
'spent': 1,
'wasted': 1,
'worn': 1,
'burned out': 1,
'done for': 1,
'lost': 20,
'desolate': 1,
'lonesome': 1,
'alone': 1,
'spiritless': 1,
'sick and tired': 1,
'sick': 1,
'unenthusiastic': 1,
'unenergetic': 1,
'adrift': 1,
'disoriented': 5,
'astray': 1,
'off-course': 5,
'perplexed': 2,
'bewildered': 2,
'confused': 5,
'contrite': 1,
'unsettled': 1,
'puzzled': 5,
'ailing': 1,
'ill': 1,
'debilitated': 1,
'frail': 1,
'impaired': 1,
'nauseated': 2,
'bedridden': 1,
'not so hot': 1,
'under the weather': 1,
'run down': 1,
'unhealthy': 1,
'unwell': 1,
'weak': 1,
'laid-up': 1,
'rotten': 1,
'anemic': 1,
'feeble': 1,
'confused': 10,
'fragile': 1,
'hesitant': 2,
'powerless': 1,
'uncertain': 5,
'shaky': 1,
'sickly': 1,
'sluggish': 1,
'slow': 1,
'unsteady': 1,
'weakened': 1,
'wobbly': 1,
'puny': 1,
'out of gas': 1,
'irresolute': 1,
'spent': 1,
'infirm': 1,
'chilled': 1,
'frozen': 1,
'frigid': 1,
'raw': 1,
'numbed': 1,
'benumbed': 1,
'thirsty': 1,
'parched': 1,
'injured': 5,
'afraid': 5,
'terrified': 1,
'anxious': 1,
'apprehensive': 1,
'frightened': 1,
'nervous': 1,
'scared': 1,
'cowardly': 1,
'daunted': 1,
'discouraged': 1,
'disheartened': 1,
'dismayed': 1,
'distressed': 1,
'horrified': 1,
'panic-stricken': 1,
'petrified': 1,
'scared stiff': 1,
'scared to death': 1,
'terror-stricken': 1,
'humbled': 1,
'dead': 1,
'naked': 1,
'wild': 1,
'uncivilized': 1,
'scorched': 1,
'withered': 1,
'sunburned': 1,
'windburned': 1,
'frostbitten': 1,
'dehydrated': 1,
'shriveled': 1,
'dried up': 1,
'dried out': 1,
'smelly': 1,
'stinky': 1,
'noxious': 1,
'putrid': 1,
'revolting': 1,
'grody': 1,
'gross': 1,
'icky': 1,
}
)

thinkopensolutions/tkobr-addons | tko_partner_multiple_phones/__manifest__.py | Python | agpl-3.0 | 2,154 | 0.000464
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner multiple phones',
'version': '0.008',
'category': 'Customizations',
'sequence': 16,
'complexity': 'normal',
'description': '''== Partner multiple phones module ==\n\n
This module adds a tab to manage multiple phones for a partner.\n
The phones are searchable from the tree list view, as in the standard module.\n
This module doesn't break the phone functionality because it keeps the phone char field on the partner form.\n
''',
'author': 'ThinkOpen Solutions Brasil',
'website': 'http://www.tkobr.com',
'images': ['images/oerp61.jpeg',
],
'depends': [
'tko_contacts',
],
'data': [
'security/ir.model.access.csv',
'views/tko_partner_phones_view.xml',
'views/res_partner_view.xml',
],
'init': [],
'demo': [],
'update': [],
'test': [], # YAML files with tests
'installable': True,
'application': False,
# If it's True, the modules will be auto-installed when all dependencies
# are installed
'auto_install': False,
'certificate': '',
}

qtproject/pyside-pyside | tests/QtGui/qcolor_test.py | Python | lgpl-2.1 | 3,949 | 0.006584
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
import colorsys
import PySide2
from PySide2.QtCore import Qt
from PySide2.QtGui import QColor
class QColorGetTest(unittest.TestCase):
def setUp(self):
self.color = QColor(20, 40, 60, 80)
def testGetRgb(self):
self.assertEqual(self.color.getRgb(), (20, 40, 60, 80))
def testGetHslF(self):
hls = colorsys.rgb_to_hls(20.0/255, 40.0/255, 60.0/255)
hsla = hls[0], hls[2], hls[1], self.color.alphaF()
for x, y in zip(self.color.getHslF(), hsla): # Due to rounding problems
self.assertTrue(x - y < 1/100000.0)
def testGetHsv(self):
hsv = colorsys.rgb_to_hsv(20.0/255, 40.0/255, 60.0/255)
hsva = int(hsv[0]*360.0), int(hsv[1]*255), int(hsv[2]*256), self.color.alpha()
self.assertEqual(self.color.getHsv(), hsva)
def testGetCmyk(self): # not supported by colorsys
self.assertEqual(self.color.getCmyk(), (170, 85, 0, 195, 80))
def testGetCmykF(self): # not supported by colorsys
for x, y in zip(self.color.getCmykF(), (170/255.0, 85/255.0, 0, 195/255.0, 80/255.0)):
self.assertTrue(x - y < 1/10000.0)
class QColorQRgbConstructor(unittest.TestCase):
'''QColor(QRgb) constructor'''
# Affected by bug #170 - QColor(QVariant) coming before QColor(uint)
# in overload sorting
def testBasic(self):
'''QColor(QRgb)'''
color = QColor(255, 0, 0)
#QRgb format #AARRGGBB
rgb = 0x00FF0000
self.assertEqual(QColor(rgb), color)
class QColorEqualGlobalColor(unittest.TestCase):
def testEqualGlobalColor(self):
'''QColor == Qt::GlobalColor'''
self.assertEqual(QColor(255, 0, 0), Qt.red)
class QColorCopy(unittest.TestCase):
def testDeepCopy(self):
'''QColor deepcopy'''
from copy import deepcopy
original = QColor(0, 0, 255)
copy = deepcopy([original])[0]
self.assertTrue(original is not copy)
self.assertEqual(original, copy)
del original
self.assertEqual(copy, QColor(0, 0, 255))
def testEmptyCopy(self):
from copy import deepcopy
original = QColor()
copy = deepcopy([original])[0]
self.assertTrue(original is not copy)
self.assertEqual(original, copy)
del original
self.assertEqual(copy, QColor())
class QColorRepr(unittest.TestCase):
def testReprFunction(self):
c = QColor(100, 120, 200)
c2 = eval(c.__repr__())
self.assertEqual(c, c2)
def testStrFunction(self):
c = QColor('red')
c2 = eval(c.__str__())
self.assertEqual(c, c2)
if __name__ == '__main__':
unittest.main()
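A quick cross-check of the expected tuple in testGetCmyk, using the standard RGB-to-CMYK formulas scaled to 0-255 (an assumption about Qt's convention that the asserted value happens to match):

# For QColor(20, 40, 60, 80): the largest channel is 60.
r, g, b, a = 20, 40, 60, 80
k = 255 - max(r, g, b)                        # 195
c = 255 * (max(r, g, b) - r) // max(r, g, b)  # 170
m = 255 * (max(r, g, b) - g) // max(r, g, b)  # 85
y = 255 * (max(r, g, b) - b) // max(r, g, b)  # 0
# (c, m, y, k, a) == (170, 85, 0, 195, 80), matching the assertion above.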

ferram4/ProcEngines | GameData/ProcEngines/PropellantMixtures/config_parser.py | Python | mit | 928 | 0.003233
from os import walk
files = []
for (dirpath, dirnames, filenames) in walk('./data/'):
files.extend(filenames)
break
data = []
OFratio = None
for file in files:
t = []
with open('./data/' + file) as f:
for i, line in enumerate(f):
if i in [15, 24, 25, 29, 31, 34, 39]:
t.append(line.split())
OFratio = t[0][2]
Pc = t[1][1]
Tc = t[2][1]
Te = t[2][4]
Pe = t[1][4]
MW = t[5][4]
gamma = t[4][4]
Mach = t[6][5]
Cpc = t[3][3]
Cpe = t[3][6]
data.append([Pc, Tc, Te, Pe, MW, gamma, Mach, Cpc, Cpe])
if len(data) < 15:
print('[WRN] Less than 15 keys!')
block = ''.join(['MixtureRatioData\n{\n OFratio =', OFratio,
'\n PressureData\n {\n',
''.join([' key = {}, {}, {}, {}, {}, {}, {}, {}, {}\n'.format(*line) for line in data]),
' }\n}'])
with open('./data/results.txt', 'a') as f:
f.write(block)
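For reference, the block appended to ./data/results.txt by the join above has roughly this shape (the numbers are placeholders, and the exact spacing follows the literal strings in the script):

MixtureRatioData
{
 OFratio =2.30
 PressureData
 {
    key = 6000, 3500, 1200, 14.7, 22.1, 1.21, 3.2, 2.05, 1.88
    ...
 }
}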

Gurgel100/gcc | gcc/ada/doc/share/conf.py | Python | gpl-2.0 | 3,966 | 0.000756
# -*- coding: utf-8 -*-
# Style_Check:Python_Fragment (meaning no pyflakes check)
#
# GNAT build configuration file
import sys
import os
import time
import re
sys.path.append('.')
import ada_pygments
import latex_elements
# Some configuration values for the various documentation handled by
# this conf.py
DOCS = {
'gnat_rm': {
'title': u'GNAT Reference Manual'},
'gnat_ugn': {
'title': u'GNAT User\'s Guide for Native Platforms'}}
# Then retrieve the source directory
root_source_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
gnatvsn_spec = os.path.join(root_source_dir, '..', 'gnatvsn.ads')
basever = os.path.join(root_source_dir, '..', '..', 'BASE-VER')
texi_fsf = True # Set to False when FSF doc is switched to sphinx by default
with open(gnatvsn_spec, 'rb') as fd:
gnatvsn_content = fd.read()
def get_copyright():
return u'2008-%s, Free Software Foundation' % time.strftime('%Y')
def get_gnat_version():
m = re.search(r'Gnat_Static_Version_String : ' +
r'constant String := "([^\(\)]+)\(.*\)?";',
gnatvsn_content)
if m:
return m.group(1).strip()
else:
if texi_fsf and os.path.exists(basever):
return ''
try:
with open(basever, 'rb') as fd:
return fd.read()
except:
pass
print 'cannot find GNAT version in gnatvsn.ads or in ' + basever
sys.exit(1)
def get_gnat_build_type():
m = re.search(r'Build_Type : constant Gnat_Build_Type := (.+);',
gnatvsn_content)
if m:
return {'Gnatpro': 'PRO',
'FSF': 'FSF',
'GPL': 'GPL'}[m.group(1).strip()]
else:
print 'cannot compute GNAT build type'
sys.exit(1)
# First retrieve the name of the documentation we are building
doc_name = os.environ.get('DOC_NAME', None)
if doc_name is None:
print 'DOC_NAME environment variable should be set'
sys.exit(1)
if doc_name not in DOCS:
print '%s is not a valid documentation name' % doc_name
sys.exit(1)
# Exclude sources that are not part of the current documentation
exclude_patterns = []
for d in os.listdir(root_source_dir):
if d not in ('share', doc_name, doc_name + '.rst'):
exclude_patterns.append(d)
print 'ignoring %s' % d
if doc_name == 'gnat_rm':
exclude_patterns.append('share/gnat_project_manager.rst')
print 'ignoring share/gnat_project_manager.rst'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = doc_name
# General information about the project.
project = DOCS[doc_name]['title']
copyright = get_copyright()
version = get_gnat_version()
release = get_gnat_version()
pygments_style = 'sphinx'
tags.add(get_gnat_build_type())
html_theme = 'sphinxdoc'
if os.path.isfile('adacore_transparent.png'):
html_logo = 'adacore_transparent.png'
if os.path.isfile('favicon.ico'):
html_favicon = 'favicon.ico'
html_static_path = ['_static']
latex_additional_files = ['gnat.sty']
copyright_macros = {
'date': time.strftime("%b %d, %Y"),
'edition': 'GNAT %s Edition' % 'Pro' if get_gnat_build_type() == 'PRO' else 'GPL',
'name': u'GNU Ada',
'tool': u'GNAT',
'version': version}
latex_elements = {
'preamble': '\\usepackage{gnat}\n' +
latex_elements.TOC_DEPTH +
latex_elements.PAGE_BLANK +
latex_elements.TOC_CMD +
latex_elements.LATEX_HYPHEN +
latex_elements.doc_settings(DOCS[doc_name]['title'],
get_gnat_version()),
'tableofcontents': latex_elements.TOC % copyright_macros}
latex_documents = [
(master_doc, '%s.tex' % doc_name, project, u'AdaCore', 'manual')]
texinfo_documents = [
(master_doc, doc_name, project,
u'AdaCore', doc_name, doc_name, '')]
def setup(app):
app.add_lexer('ada', ada_pygments.AdaLexer())
app.add_lexer('gpr', ada_pygments.GNATProjectLexer())
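One note on the copyright_macros block above: because the string-formatting operator binds tighter than a conditional expression in Python, the 'edition' line is equivalent to the parenthesized form below, so the non-PRO branch yields the bare string 'GPL'.

# Equivalent parenthesization of the 'edition' entry (illustration only):
'edition': ('GNAT %s Edition' % 'Pro') if get_gnat_build_type() == 'PRO' else 'GPL',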

kalhartt/django-fluxbb | fluxbb/models/search_word.py | Python | gpl-2.0 | 481 | 0
from django.db import models
from fluxbb import FLUXBB_PREFIX
class SearchWord(models.Model):
"""
FluxBB Search Word
Fields on this model match exactly with those defined by fluxbb, see the
[fluxbb dbstructure](http://fluxbb.org/docs/v1.5/dbstructure#users).
"""
id = models.AutoField(primary_key=True)
word = models.CharField(max_length=20, default="")
class Meta:
app_label = 'fluxbb'
db_table = FLUXBB_PREFIX + 'search_words'

roving99/robot_pi | 0mq/wii_ir_test.py | Python | gpl-2.0 | 966 | 0.030021
#!/usr/bin/python
import smbus
import time
'''
retrieve data from wii ir camera.
x = 0-1023
y = 0-720
size = 1-15?
top right of scene = [0,0]
'''
def getBlob(n,list): # return x,y,size for blob n (0-3) from list
if len(list)<13:
return []
x = list[1+(n*3)]
y = list[2+(n*3)]
s = list[3+(n*3)]
x += (s&0x30)<<4
y += (s&0xC0)<<2
s = s&0x0F
return [x,y,s]
wiiAddr = 0x58
i2c = smbus.SMBus(1)
i2c.write_byte_data(wiiAddr, 0x30,0x01)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x30,0x08)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x06,0x90)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x08,0xC0)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x1A,0x40)
time.sleep(0.05)
i2c.write_byte_data(wiiAddr, 0x33,0x33)
time.sleep(0.05)
while 1:
data = i2c.read_i2c_block_data(wiiAddr, 0x36, 16)
print len(data), "\t", getBlob(0,data), "\t", getBlob(1,data), "\t", getBlob(2,data), "\t", getBlob(3,data)
time.sleep(0.5)
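A worked example of the bit-unpacking in getBlob above, with made-up raw bytes: the high bits of x and y travel inside the size byte, as the mask operations show.

# Suppose blob 0's three raw bytes are 0x23, 0x45, 0x7A:
#   x = 0x23 + ((0x7A & 0x30) << 4) = 35 + 768 = 803
#   y = 0x45 + ((0x7A & 0xC0) << 2) = 69 + 256 = 325
#   s = 0x7A & 0x0F = 10
# so getBlob(0, data) would return [803, 325, 10] for that blob.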

Distrotech/bzr | bzrlib/smart/message.py | Python | gpl-2.0 | 13,276 | 0
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import collections
from cStringIO import StringIO
from bzrlib import (
debug,
errors,
)
from bzrlib.trace import mutter
class MessageHandler(object):
"""Base class for handling messages received via the smart protocol.
As parts of a message are received, the corresponding PART_received method
will be called.
"""
def __init__(self):
self.headers = None
def headers_received(self, headers):
"""Called when message headers are received.
This default implementation just stores them in self.headers.
"""
self.headers = headers
def byte_part_received(self, byte):
"""Called when a 'byte' part is received.
Note that a 'byte' part is a message part consisting of exactly one
byte.
"""
raise NotImplementedError(self.byte_received)
def bytes_part_received(self, bytes):
"""Called when a 'bytes' part is received.
A 'bytes' message part can contain any number of bytes. It should not
be confused with a 'byte' part, which is always a single byte.
"""
raise NotImplementedError(self.bytes_received)
def structure_part_received(self, structure):
"""Called when a 'structure' part is received.
:param structure: some structured data, which will be some combination
of list, dict, int, and str objects.
"""
raise NotImplementedError(self.bytes_received)
def protocol_error(self, exception):
"""Called when there is a protocol decoding error.
The default implementation just re-raises the exception.
"""
raise
def end_received(self):
"""Called when the end of the message is received."""
# No-op by default.
pass
class ConventionalRequestHandler(MessageHandler):
"""A message handler for "conventional" requests.
"Conventional" is used in the sense described in
doc/developers/network-protocol.txt: a simple message with arguments and an
optional body.
Possible states:
* args: expecting args
* body: expecting body (terminated by receiving a post-body status)
* error: expecting post-body error
* end: expecting end of message
* nothing: finished
"""
def __init__(self, request_handler, responder):
MessageHandler.__init__(self)
self.request_handler = request_handler
self.responder = responder
self.expecting = 'args'
self._should_finish_body = False
self._response_sent = False
def protocol_error(self, exception):
if self.responder.response_sent:
# We can only send one response to a request, no matter how many
# errors happen while processing it.
return
self.responder.send_error(exception)
def byte_part_received(self, byte):
if self.expecting == 'body':
if byte == 'S':
# Success. Nothing more to come except the end of message.
self.expecting = 'end'
elif byte == 'E':
# Error. Expect an error structure.
self.expecting = 'error'
else:
raise errors.SmartProtocolError(
'Non-success status byte in request body: %r' % (byte,))
else:
raise errors.SmartProtocolError(
'Unexpected message part: byte(%r)' % (byte,))
def structure_part_received(self, structure):
if self.expecting == 'args':
self._args_received(structure)
elif self.expecting == 'error':
self._error_received(structure)
else:
raise errors.SmartProtocolError(
'Unexpected message part: structure(%r)' % (structure,))
def _args_received(self, args):
self.expecting = 'body'
self.request_handler.args_received(args)
if self.request_handler.finished_reading:
self._response_sent = True
self.responder.send_response(self.request_handler.response)
self.expecting = 'end'
def _error_received(self, error_args):
self.expecting = 'end'
self.request_handler.post_body_error_received(error_args)
def bytes_part_received(self, bytes):
if self.expecting == 'body':
self._should_finish_body = True
self.request_handler.accept_body(bytes)
else:
raise errors.SmartProtocolError(
'Unexpected message part: bytes(%r)' % (bytes,))
def end_received(self):
if self.expecting not in ['body', 'end']:
raise errors.SmartProtocolError(
'End of message received prematurely (while expecting %s)'
% (self.expecting,))
self.expecting = 'nothing'
self.request_handler.end_received()
if not self.request_handler.finished_reading:
raise errors.SmartProtocolError(
"Complete conventional request was received, but request "
"handler has not finished reading.")
if not self._response_sent:
self.responder.send_response(self.request_handler.response)
class ResponseHandler(object):
"""Abstract base class for an object that handles a smart response."""
def read_response_tuple(self, expect_body=False):
"""Reads and returns the response tuple for the current request.
:keyword expect_body: a boolean indicating if a body is expected in the
response. Some protocol versions needs this information to know
when a response is finished. If False, read_body_bytes should
*not* be called afterwards. Defaults to False.
:returns: tuple of response arguments.
"""
raise NotImplementedError(self.read_response_tuple)
def read_body_bytes(self, count=-1):
"""Read and return some bytes from the body.
:param count: if specified, read up to this many bytes. By default,
reads the entire body.
:returns: str of bytes from the response body.
"""
raise NotImplementedError(self.read_body_bytes)
def read_streamed_body(self):
"""Returns an iterable that reads and returns a series of body chunks.
"""
raise NotImplementedError(self.read_streamed_body)
def cancel_read_body(self):
"""Stop expecting a body for this response.
If expect_body was passed to read_response_tuple, this cancels that
expectation (and thus finishes reading the response, allowing a new
request to be issued). This is useful if a response turns out to be an
error rather than a normal result with a body.
"""
raise NotImplementedError(self.cancel_read_body)
class ConventionalResponseHandler(MessageHandler, ResponseHandler):
def __init__(self):
MessageHandler.__init__(self)
self.status = None
self.args = None
self._bytes_parts = collections.deque()
self._body_started = False
self._body_stream_status = None
self._body = None
self._body_error_args = None
self.finished_reading = False
def setProtoAndMediumRequest(self, protocol_decoder, medium_request):
self._protocol_decoder = protocol_decoder
self._medium_request = medium_request
def

Azure/azure-sdk-for-python | sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/aio/operations/_reference_data_sets_operations.py | Python | mit | 19,516 | 0.00497
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReferenceDataSetsOperations:
"""ReferenceDataSetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.timeseriesinsights.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_or_update(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
parameters: "_models.ReferenceDataSetCreateOrUpdateParameters",
**kwargs
) -> "_models.ReferenceDataSetResource":
"""Create or update a reference data set in the specified environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: Name of the reference data set.
:type reference_data_set_name: str
:param parameters: Parameters for creating a reference data set.
:type parameters: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetCreateOrUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetResource, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ReferenceDataSetCreateOrUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ReferenceDataSetResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}'} # type: ignore
async def get(
self,
resource_group_name: str,
environment_name: str,
reference_data_set_name: str,
**kwargs
) -> "_models.ReferenceDataSetResource":
"""Gets the reference data set with the specified name in the specified environment.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param environment_name: The name of the Time Series Insights environment associated with the
specified resource group.
:type environment_name: str
:param reference_data_set_name: The name of the Time Series Insights reference data set
associated with the specified environment.
:type reference_data_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReferenceDataSetResource, or the result of cls(response)
:rtype: ~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReferenceDataSetResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'environmentName': self._serialize.url("environment_name", environment_name, 'str'),
'referenceDataSetName': self._serialize.url("reference_data_set_name", reference_data_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._ser

dsoprea/LicensePrepend | setup.py | Python | gpl-2.0 | 692 | 0.001445
from setuptools import setup, find_packages
import os
description = "Make sure all source files have your standard licensing stub "\
"at the top."
long_description = ""
setup(name='plicense',
version='0.2.1',
description=description,
long_description=long_description,
classifiers=[],
keywords='license',
author='Dustin Oprea',
author_email='myselfasunder@gmail.com',
url='https://github.com/dsoprea/LicensePrepend',
license='GPL 2',
packages=find_packages(exclude=[]),
include_package_data=True,
zip_safe=True,
install_requires=[
'nose'
],
scripts=['scripts/plicense'],
)

cbecker/LightGBM | python-package/lightgbm/basic.py | Python | mit | 66,093 | 0.001407
# coding: utf-8
# pylint: disable = invalid-name, C0111, C0301
# pylint: disable = R0912, R0913, R0914, W0105, W0201, W0212
"""Wrapper c_api of LightGBM"""
from __future__ import absolute_import
import ctypes
import os
from tempfile import NamedTemporaryFile
import numpy as np
import scipy.sparse
from .compat import (DataFrame, Series, integer_types, json,
json_default_with_numpy, numeric_types, range_,
string_type)
from .libpath import find_lib_path
def _load_lib():
"""Load LightGBM Library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
raise Exception("cannot find LightGBM library")
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
return lib
_LIB = _load_lib()
class LightGBMError(Exception):
"""Error throwed by LightGBM"""
pass
def _safe_call(ret):
"""Check the return value of C API call
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError())
def is_numeric(obj):
"""Check is a number or not, include numpy number etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check is 1d numpy array"""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_1d_list(data):
"""Check is 1d list"""
return isinstance(data, list) and \
(not data or isinstance(data[0], numeric_types))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""convert to 1d numpy array"""
if is_numpy_1d_array(data):
if data.dtype == dtype:
return data
else:
return data.astype(dtype=dtype, copy=False)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, Series):
return data.values.astype(dtype)
else:
raise TypeError("Wrong type({}) for {}, should be list or numpy array".format(type(data).__name__, name))
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.fromiter(cptr, dtype=np.int32, count=length)
else:
raise RuntimeError('Expected int pointer')
def c_str(string):
"""Convert a python string to cstring."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a python array to c array."""
return (ctype * len(values))(*values)
def param_dict_to_str(data):
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
pairs.append(str(key) + '=' + ','.join(map(str, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
else:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs)
class _temp_file(object):
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
"""marco definition of data type in c_api of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matric is row major in python"""
C_API_IS_ROW_MAJOR = 1
"""marco definition of prediction type in c_api of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
"""data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
def c_float_array(data):
"""get pointer of float numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError("Expected np.float32 or np.float64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
def c_int_array(data):
"""get pointer of int numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'float16': 'float',
'float32': 'float', 'float64': 'float', 'bool': 'int'}
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, DataFrame):
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = data.select_dtypes(include=['category']).columns
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is pandas Index object
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes)
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto':
categorical_feature = list(cat_cols)
else:
categorical_feature = list(categorical_feature) + list(cat_cols)
if feature_name == 'au
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/toaiff.py
|
Python
|
mit
| 2,989 | 0.004015 |
"""Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format).
Input may be compressed.
Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
An exception is raised if the file is not of a recognized type.
Returned filename is either the input filename or a temporary filename;
in the latter case the caller must ensure that it is removed.
Other temporary files used are removed by the function.
"""
import os
import tempfile
import pipes
import sndhdr
__all__ = ["error", "toaiff"]
table = {}
t = pipes.Template()
t.append('sox -t au - -t aiff -r 8000 -', '--')
table['au'] = t
# XXX The following is actually sub-optimal.
# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
# XXX We must force the output sampling rate else the SGI won't play
# XXX files sampled at 5.5k or 7.333k; however this means that files
# XXX sampled at 11k are unnecessarily expanded.
# XXX Similar comments apply to some other file types.
t = pipes.Template()
t.append('sox -t hcom - -t aiff -r 22050 -', '--')
table['hcom'] = t
t = pipes.Template()
t.append('sox -t voc - -t aiff -r 11025 -', '--')
table['voc'] = t
t = pipes.Template()
t.append('sox -t wav - -t aiff -', '--')
table['wav'] = t
t = pipes.Template()
t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
table['8svx'] = t
t = pipes.Template()
t.append('sox -t sndt - -t aiff -r 16000 -', '--')
table['sndt'] = t
t = pipes.Template()
t.append('sox -t sndr - -t aiff -r 16000 -', '--')
table['sndr'] = t
uncompress = pipes.Template()
uncompress.append('uncompress', '--')
class error(Exception):
pass
def toaiff(filename):
temps = []
ret = None
try:
ret = _toaiff(filename, temps)
finally:
for temp in temps[:]:
if temp != ret:
try:
os.unlink(temp)
except os.error:
pass
temps.remove(temp)
return ret
def _toaiff(filename, temps):
if filename[-2:] == '.Z':
fname = tempfile.mktemp()
temps.append(fname)
sts = uncompress.copy(filename, fname)
if sts:
raise error, filename + ': uncompress failed'
else:
fname = filename
try:
ftype = sndhdr.whathdr(fname)
if ftype:
ftype = ftype[0] # All we're interested in
except IOError, msg:
if type(msg) == type(()) and len(msg) == 2 and \
type(msg[0]) == type(0) and type(msg[1]) == type(''):
msg = msg[1]
if type(msg) != type(''):
msg = `msg`
raise error, filename + ': ' + msg
if ftype == 'aiff':
return fname
if ftype is None or not table.has_key(ftype):
raise error, \
filename + ': unsupported audio file type ' + `ftype`
temp = tempfile.mktemp()
temps.append(temp)
sts = table[ftype].copy(fname, temp)
if sts:
raise error, filename + ': conversion to aiff failed'
return temp
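# Illustrative usage sketch (the input path is hypothetical): toaiff() returns
# either the original filename or a temporary AIFF file the caller must remove.
def _demo_toaiff():
    fname = toaiff('/tmp/example.voc')
    try:
        pass  # play or inspect the AIFF data here
    finally:
        if fname != '/tmp/example.voc':
            os.unlink(fname)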
|
ScottBuchanan/eden
|
modules/s3/s3query.py
|
Python
|
mit
| 82,174 | 0.001217 |
# -*- coding: utf-8 -*-
""" S3 Query Construction
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("FS",
"S3FieldSelector",
"S3Joins",
"S3ResourceField",
"S3ResourceQuery",
"S3URLQuery",
"S3URLQueryParser",
)
import datetime
import re
import sys
from gluon import current
from gluon.storage import Storage
from s3dal import Field, Row
from s3fields import S3RepresentLazy
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
ogetattr = object.__getattribute__
TEXTTYPES = ("string", "text")
# =============================================================================
class S3FieldSelector(object):
""" Helper class to construct a resource query """
LOWER = "lower"
UPPER = "upper"
OPERATORS = [LOWER, UPPER]
def __init__(self, name, type=None):
""" Constructor """
if not isinstance(name, basestring) or not name:
raise SyntaxError("name required")
self.name = str(name)
self.type = type
self.op = None
# -------------------------------------------------------------------------
def __lt__(self, value):
return S3ResourceQuery(S3ResourceQuery.LT, self, value)
# -------------------------------------------------------------------------
def __le__(self, value):
return S3ResourceQuery(S3ResourceQuery.LE, self, value)
# -------------------------------------------------------------------------
def __eq__(self, value):
return S3ResourceQuery(S3ResourceQuery.EQ, self, value)
# -------------------------------------------------------------------------
def __ne__(self, value):
return S3ResourceQuery(S3ResourceQuery.NE, self, value)
# -------------------------------------------------------------------------
def __ge__(self, value):
return S3ResourceQuery(S3ResourceQuery.GE, self, value)
# -------------------------------------------------------------------------
def __gt__(self, value):
return S3ResourceQuery(S3ResourceQuery.GT, self, value)
# -------------------------------------------------------------------------
def like(self, value):
return S3ResourceQuery(S3ResourceQuery.LIKE, self, value)
# -------------------------------------------------------------------------
def belongs(self, value):
return S3ResourceQuery(S3ResourceQuery.BELONGS, self, value)
# -------------------------------------------------------------------------
def contains(self, value):
return S3ResourceQuery(S3ResourceQuery.CONTAINS, self, value)
# -------------------------------------------------------------------------
def anyof(self, value):
return S3ResourceQuery(S3ResourceQuery.ANYOF, self, value)
# -------------------------------------------------------------------------
def typeof(self, value):
return S3ResourceQuery(S3ResourceQuery.TYPEOF, self, value)
# -------------------------------------------------------------------------
def lower(self):
self.op = self.LOWER
return self
# -------------------------------------------------------------------------
def upper(self):
self.op = self.UPPER
return self
# -------------------------------------------------------------------------
def expr(self, val):
if self.op and val is not None:
if self.op == self.LOWER and \
hasattr(val, "lower") and callable(val.lower) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.lower()
elif self.op == self.UPPER and \
hasattr(val, "upper") and callable(val.upper) and \
(not isinstance(val, Field) or val.type in TEXTTYPES):
return val.upper()
return val
# -------------------------------------------------------------------------
def represent(self, resource):
try:
rfield = S3ResourceField(resource, self.name)
except:
colname = None
else:
colname = rfield.colname
if colname:
if self.op is not None:
return "%s.%s()" % (colname, self.op)
else:
return colname
else:
return "(%s?)" % self.name
# -------------------------------------------------------------------------
@classmethod
def extract(cls, resource, row, field):
"""
Extract a value from a Row
@param resource: the resource
@param row: the Row
@param field: the field
@return: field if field is not a Field/S3FieldSelector instance,
the value from the row otherwise
"""
error = lambda fn: KeyError("Field not found: %s" % fn)
t = type(field)
if isinstance(field, Field):
colname = str(field)
tname, fname = colname.split(".", 1)
elif t is S3FieldSelector:
rfield = S3ResourceField(resource, field.name)
colname = rfield.colname
if not colname:
# unresolvable selector
raise error(field.name)
fname = rfield.fname
tname = rfield.tname
elif t is S3ResourceField:
colname = field.colname
if not colname:
# unresolved selector
return None
fname = field.fname
tname = field.tname
else:
return field
if type(row) is Row:
try:
if tname in row.__dict__:
                    value = ogetattr(ogetattr(row, tname), fname)
                else:
value = ogetattr(row, fname)
except:
try:
value = row[colname]
                except (KeyError, AttributeError):
                    raise error(colname)
elif fname in row:
value = row[fname]
elif colname in row:
value = row[colname]
elif tname is not None and \
tname in row and fname in row[tname]:
value = row[tname][fname]
else:
raise error(colname)
if callable(value):
# Lazy virtual field
try:
value = value()
except:
current.log.error(sys.exc_info()[1])
value = None
if hasattr(field, "expr"):
return field.expr(value)
return value
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Resolve this field against a resource
@param resource: the resource
"""
return S3ResourceField(resource, self.name)
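# Illustrative sketch (the field name and pattern are hypothetical): the
# overloaded operators above turn a selector into an S3ResourceQuery, here a
# case-insensitive LIKE on the "name" field.
def _demo_field_selector_query():
    return S3FieldSelector("name").lower().like("%hotel%")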
# =============================================================================
# Short name for the S3Fiel
|
danwent/Perspectives-Server
|
util/__init__.py
|
Python
|
gpl-3.0
| 156 | 0.00641 |
"""
A collection of utility and helper modules.
These differ from notary_utils in that they do
not depend on or need to connect to the notary database.
"""
|
deevarvar/myLab
|
book/tlpi_zhiye/utlib/ut_util.py
|
Python
|
mit
| 335 | 0.01194 |
__author__ = 'deevarvar'
import string
import random
import os
#generate a random string
def string_generator(size=6, chars=string.ascii_letters+string.digits):
return ''.join(random.choice(chars) for _ in range(size))
#emulate touch cmd
def touchFile(fname, time=None):
with open(fname, 'a'):
        os.utime(fname, time)
|
GiancarloF/raspymc_server
|
core/conf_manager.py
|
Python
|
gpl-3.0
| 4,528 | 0.023852 |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""
Raspymc is a multimedia centre exposed via a http server built with bottlepy
Copyright (C) 2013 Giancarlo Fringuello
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, inspect, ConfigParser, pickle
from utils import *
from logger import*
from track_obj import *
CNF_SERVER_PATH = sys.path[0]
CNF_FOLDER_PATH = ""
CNF_PLAYLIST_PATH = CNF_SERVER_PATH + "/config/playlist.pkl"
CNF_FOLDER_PATH = CNF_SERVER_PATH + "/config/"
CNF_CONFIG_FILE = CNF_FOLDER_PATH + "config.ini"
#
# Loads the saved playlist from file
def get_playlist():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()")
l_playlist = []
try:
with open(CNF_PLAYLIST_PATH, 'rb') as l_input:
l_playlist = pickle.load(l_input)
except:
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
return l_playlist
def store_playlist(p_list):
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()")
try:
with open(CNF_PLAYLIST_PATH, 'wb') as l_output:
pickle.dump(p_list, l_output, pickle.HIGHEST_PROTOCOL)
except:
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
#
# Loads the configuration from file
def get_folder_path():
log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()")
global CNF_FOLDER_PATH
global CNF_PLAYLIST_PATH
global SERVER_PATH
l_config_parser = ConfigParser.ConfigParser()
l_clean_configuration = False
if not os.path.isdir(CNF_FOLDER_PATH): # if config directory does not exist, create it
os.makedirs(CNF_FOLDER_PATH)
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", CNF_FOLDER_PATH + " did not exist, it has been created")
if os.path.isfile(CNF_CONFIG_FILE):
try:
l_config_parser.read(CNF_CONFIG_FILE)
if l_config_parser.has_section("PATH"):
if l_config_parser.has_option("PATH", "CNF_FOLDER_PATH"):
CNF_FOLDER_PATH = l_config_parser.get("PATH","CNF_FOLDER_PATH")
else:
l_clean_configuration = True
else:
# if section does not exist
l_clean_configuration = True
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "unable to load CNF_FOLDER_PATH, using home as default, new config.ini will be generated.")
except:
# if unable to read file (e.g. file damaged)
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "exception: unable to load CNF_FOLDER_PATH from " + CNF_CONFIG_FILE + ", using home path as default, new config.ini will be generated.")
else:
l_clean_configuration = True
log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "no configuration file found, new config.ini will be generated.")
if l_clean_configuration:
# cleanup config file
for l_section in l_config_parser.sections():
l_config_parser.remove_section(l_section)
l_config_parser.add_section("PATH")
l_config_parser.set("PATH", "CNF_FOLDER_PATH", os.path.expanduser("~"))
l_config_parser.write(file(CNF_CONFIG_FILE, 'w'))
if "" == CNF_FOLDER_PATH:
CNF_FOLDER_PATH = os.path.expanduser("~")
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_FOLDER_PATH = " + CNF_FOLDER_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_PLAYLIST_PATH = " + CNF_PLAYLIST_PATH)
log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_SERVER_PATH = " + CNF_SERVER_PATH)
return CNF_FOLDER_PATH
def get_server_path():
return SERVER_PATH
def get_playlist_path():
return CNF_PLAYLIST_PATH
|
usc-isi-i2/WEDC
|
spark_dependencies/python_lib/digSparkUtil/fileUtil.py
|
Python
|
apache-2.0
| 11,462 | 0.005496 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import json
import csv
import StringIO
import io
from dictUtil import as_dict, merge_dicts
import urllib
# import jq
from itertools import izip
from logUtil import logging
# for manifest introspection only
import inspect
from pyspark import SparkContext
class FileUtil(object):
def __init__(self, sparkContext):
self.sc = sparkContext
## Support for entries into manifest
# any entry created thus
# should have spark_context, name of caller, module of caller
# untested: do not use
def makeEntry(self, **kwargs):
entry = dict(**kwargs)
entry["spark_context"] = self.sc
op = kwargs.get("operation", None)
if not op:
try:
st = inspect.stack()
# stack exists
if len(st)>=2:
# look up one stack frame, retrieve the function name[3]
op = st[1][3]
# stack frame memory leak could be very bad, so be careful
del st
except:
pass
mdl = kwargs.get("module", None)
if not mdl:
try:
st = inspect.stack()
# stack exists
if len(st)>=2:
# look up one stack frame, retrieve the module it belongs to
mdl = inspect.getmodule(st[0]).__name__
# stack frame memory leak could be very bad, so be careful
del st
except:
pass
entry["module"] = mdl
return entry
## GENERIC
## Herein:
## file_format is in {text, sequence}
## data_type is in {csv, json, jsonlines(=keyless)}
def load_file(self, filename, file_format='sequence', data_type='json', **kwargs):
try:
handlerName = FileUtil.load_dispatch_table[(file_format, data_type)]
handler = getattr(self, handlerName)
rdd = handler(filename, **kwargs)
# TBD: return (rdd, manifestEntry)
# entry = self.makeEntry(input_filename=filename,
# input_file_format=file_format,
# input_data_type=data_type)
# return (rdd, entry)
#logging.info("Loaded {}/{} file {}: {} elements".format(file_format, data_type, filename, rdd.count()))
return rdd
except KeyError:
raise NotImplementedError("File_Format={}, data_type={}".format(file_format, data_type))
load_dispatch_table = {("sequence", "json"): "_load_sequence_json_file",
("sequence", "csv"): "_load_sequence_csv_file",
("text", "json"): "_load_text_json_file",
("text", "jsonlines"): "_load_text_jsonlines_file",
("text", "csv"): "_load_text_csv_file"}
def _load_sequence_json_file(self, filename, **kwargs):
rdd_input = self.sc.sequenceFile(filename)
rdd_json = rdd_input.mapValues(lambda x: json.loads(x))
return rdd_json
def _load_text_json_file(self, filename, separator='\t', **kwargs):
# rdd_input = self.sc.textFile(filename)
# rdd_json = rdd_input.map(lambda x: FileUtil.__parse_json_line(x, separator))
rdd_strings = self.sc.textFile(filename)
rdd_split = rdd_strings.map(lambda line: tuple(line.split(separator, 1)))
def tryJson(v):
try:
j = json.loads(v)
return j
except Exception as e:
print("failed [{}] on {}".format(str(e), v), file=sys.stderr)
rdd_json = rdd_split.mapValues(lambda v: tryJson(v))
return rdd_json
def _load_text_jsonlines_file(self, filename, keyPath='.uri', **kwargs):
rdd_strings = self.sc.textFile(filename)
def tryJson(line):
try:
obj = json.loads(line)
# We ignore all but the first occurrence of key
try:
# key = jq.jq(keyPath).transform(obj, multiple_output=False)
key = obj["uri"]
except:
key = None
if key:
# i.e., a paired RDD
return (key, obj)
else:
raise ValueError("No key (per {}) in line {}".format(keyPath, line))
except Exception as e:
print("failed [{}] on {}".format(str(e), line), file=sys.stderr)
rdd_json = rdd_strings.map(lambda line: tryJson(line))
return rdd_json
def _load_sequence_csv_file(self, filename, **kwargs):
"""Should emulate text/csv"""
raise NotImplementedError("File_Format=sequence, data_type=csv")
def _load_text_csv_file(self, filename, separator=',', **kwargs):
"""Return a pair RDD where key is taken from first column, remaining columns are named after their column id as string"""
rdd_input = self.sc.textFile(filename)
def load_csv_record(line):
input_stream = StringIO.StringIO(line)
reader = csv.reader(input_stream, delimiter=',')
# key in first column, remaining columns 1..n become dict key values
payload = reader.next()
key = payload[0]
rest = payload[1:]
# generate dict of "1": first value, "2": second value, ...
d = {}
for (cell,i) in izip(rest, range(1,1+len(rest))):
d[str(i)] = cell
# just in case, add "0": key
d["0"] = key
return (key, d)
rdd_parsed = rdd_input.map(load_csv_record)
return rdd_parsed
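    # Illustrative sketch of the record shape produced by load_csv_record above:
    # a hypothetical line "doc1,alice,42" becomes
    # ("doc1", {"0": "doc1", "1": "alice", "2": "42"}).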
## SAVE
def save_file(self, rdd, filename, file_format='sequence', data_type='json', **kwargs):
try:
            handlerName = FileUtil.save_dispatch_table[(file_format, data_type)]
handler = getattr(self, handlerName)
rdd = handler(rdd, filename, **kwargs)
# TBD: return (rdd, manifestEntry)
# entry = self.makeEntry(output_filename=filename,
# output_file_format=file_format,
# output_data_type=data_type)
# return (rdd, entry)
return rdd
        except KeyError:
            raise NotImplementedError("File_Format={}, data_type={}".format(file_format, data_type))
save_dispatch_table = {("sequence", "json"): "_save_sequence_json_file",
("sequence", "csv"): "_save_sequence_csv_file",
("text", "json"): "_save_text_json_file",
("text", "csv"): "_save_text_csv_file"}
def _save_sequence_json_file(self, rdd, filename, separator='\t', **kwargs):
# regardless of whatever it is, key is retained
rdd.mapValues(lambda x: json.dumps(x)).saveAsSequenceFile(filename)
return filename
def _save_text_json_file(self, rdd, filename, separator='\t', **kwargs):
rdd_json = rdd.map(lambda (k, v): FileUtil.__dump_as_json(k, v, separator))
# this saves the (<uri>, <serialized_json_string>) as as text repn
# perhaps a regular readable text file uri<separator>JSON will be more useful?
rdd_json.saveAsTextFile(filename)
return filename
def _save_text_csv_file(self, rdd, filename, separator='\t', encoding='utf-8', **kwargs):
with io.open(filename, 'wb', encoding=encoding) as f:
wrtr = csv.writer(f, delimiter=separator)
def save_csv_record(line):
wrtr.writerow(line)
rdd.foreach(save_csv_record)
return filename
def _save_sequence_csv_file(self, rdd, filename, separator='\t', **kwargs):
raise NotImplementedError("File_Format=sequence, data_type=csv")
## JSON
@staticmethod
def __parse_json_line(line, separator):
line = line.strip()
if len(line) > 0:
line_elem = line.split(separator, 2)
if len(line_ele
|
indie1982/osmc-fixes
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.updates/resources/lib/apt_cache_action.py
|
Python
|
gpl-2.0
| 11,354 | 0.035406 |
''' This script is run as root by the osmc update module. '''
import apt
import socket
import sys
from datetime import datetime
import json
import os
import time
import subprocess
import traceback
from CompLogger import comprehensive_logger as clog
t = datetime
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
try:
sys.stdout = Logger("/var/tmp/OSMC_python_apt_log.txt")
except:
pass
@clog(maxlength=1500)
def call_parent(raw_message, data={}):
address = '/var/tmp/osmc.settings.update.sockfile'
print '%s %s sending response' % (t.now(), 'apt_cache_action.py')
message = (raw_message, data)
message = json.dumps(message)
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(address)
sock.sendall(message)
sock.close()
except Exception as e:
return '%s %s failed to connect to parent - %s' % (t.now(), 'apt_cache_action.py', e)
return 'response sent'
class Main(object):
def __init__(self, action):
# with apt.apt_pkg.SystemLock():
# implements a lock on the package system, so that nothing else can alter packages
print '==================================================================='
print '%s %s running' % (t.now(), 'apt_cache_action.py')
self.error_package = ''
self.error_message = ''
self.heading = 'Updater'
self.action = action
self.cache = apt.Cache()
self.block_update_file = '/var/tmp/.suppress_osmc_update_checks'
self.action_to_method = {
'update' : self.update,
'update_manual' : self.update,
'commit' : self.commit,
'fetch' : self.fetch,
'action_list' : self.action_list,
}
try:
self.act()
except Exception as e:
print '%s %s exception occurred' % (t.now(), 'apt_cache_action.py')
print '%s %s exception value : %s' % (t.now(), 'apt_cache_action.py', e)
deets = 'Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc())
# send the error to the parent (parent will kill the progress bar)
call_parent('apt_error', {'error': self.error_message, 'package': self.error_package, 'exception': deets})
self.respond()
print '%s %s exiting' % (t.now(), 'apt_cache_action.py')
print '==================================================================='
def respond(self):
call_parent('apt_cache %s complete' % self.action)
def act(self):
action = self.action_to_method.get(self.action, False)
if action:
action()
else:
print 'Action not in action_to_method dict'
#@clog()
def action_list(self):
''' This method processes a list sent in argv[2], and either installs or remove packages.
The list is sent as a string:
install_packageid1|=|install_packageid2|=|removal_packageid3'''
self.heading = 'App Store'
action_string = sys.argv[2]
action_dict = self.parse_argv2(action_string)
self.update()
self.cache.open()
for pkg in self.cache:
# mark packages as install or remove
if pkg.shortname in action_dict['install']:
pkg.mark_install()
if pkg.shortname in action_dict['removal']:
pkg.mark_delete(purge=True)
# commit
self.commit_action()
if action_dict['removal']:
# if there were removals then remove the packages that arent needed any more
self.update()
self.cache.open()
removals = False
for pkg in self.cache:
if pkg.is_auto_removable:
pkg.mark_delete(purge=True)
removals = True
if removals:
# commit
self.commit_action()
# #@clog()
def parse_argv2(self, action_string):
install = []
removal = []
actions = action_string.split('|=|')
for action in actions:
if action.startswith('install_'):
install.append(action[len('install_'):])
elif action.startswith('removal_'):
removal.append(action[len('removal_'):])
return {'install': install, 'removal': removal}
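	# Illustrative sketch (package names are hypothetical): the argv[2] string
	# 'install_vlc|=|install_mpd|=|removal_some-addon' parses to
	# {'install': ['vlc', 'mpd'], 'removal': ['some-addon']}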
#@clog()
def update(self):
dprg = Download_Progress(partial_heading='Updating')
self.cache.update(fetch_progress=dprg, pulse_interval=1000)
# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
return '%s %s cache updated' % (t.now(), 'apt_cache_action.py')
#@clog()
def commit(self):
# check whether any packages are broken, if they are then the install needs to take place outside of Kodi
for pkg in self.cache:
if pkg.is_inst_broken or pkg.is_now_broken:
return "%s is BROKEN, cannot proceed with commit" % pkg.shortname
print '%s %s upgrading all packages' % (t.now(), 'apt_cache_action.py')
self.cache.upgrade(True)
print '%s %s committing cache' % (t.now(), 'apt_cache_action.py')
self.commit_action()
#@clog()
def commit_action(self):
dprg = Download_Progress()
iprg = Install_Progress(self)
self.cache.commit(fetch_progress=dprg, install_progress=iprg)
# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
# remove the file that blocks further update checks
try:
os.remove(self.block_update_file)
except:
return 'Failed to remove block_update_file'
return '%s %s cache committed' % (t.now(), 'apt_cache_action.py')
	#@clog()
def fetch(self):
self.cache.upgrade(True)
print '%s %s fetching all packages' % (t.now(), 'apt_cache_action.py')
dprg = Download_Progress()
self.cache.fetch_archives(progress=dprg)
		# call the parent and kill the pDialog
call_parent('progress_bar', {'kill': True})
return '%s %s all packages fetched' % (t.now(), 'apt_cache_action.py')
class Operation_Progress(apt.progress.base.OpProgress):
def __init__(self):
super(Operation_Progress, self).__init__()
def update(self):
call_parent('progress_bar', {'percent': self.percent, 'heading': self.op, 'message':self.sub_op,})
def done(self):
call_parent('progress_bar', {'kill': True})
class Install_Progress(apt.progress.base.InstallProgress):
def __init__(self, parent):
self.parent = parent
super(Install_Progress, self).__init__()
call_parent('progress_bar', {'percent': 0, 'heading': self.parent.heading, 'message':'Starting Installation'})
#@clog()
def error(self, pkg, errormsg):
print 'ERROR!!! \n%s\n' % errormsg
try:
pkgname = os.path.basename(pkg).split('_')
print 'Package affected!!! \n%s\n' % pkgname
self.parent.error_package = pkgname[0]
if len(pkgname) > 1:
self.parent.error_package += ' (' + pkgname[1] + ')'
except:
self.parent.error_package = '(unknown package)'
self.parent.error_message = errormsg
''' (Abstract) Called when a error is detected during the install. '''
# The following method should be overridden to implement progress reporting for dpkg-based runs
# i.e. calls to run() with a filename:
# def processing(self, pkg, stage):
# ''' This method is called just before a processing stage starts. The parameter pkg is the name of the
# package and the parameter stage is one of the stages listed in the dpkg manual under the
# status-fd option, i.e. "upgrade", "install" (both sent before unpacking), "configure", "trigproc",
# "remove", "purge". '''
# def dpkg_status_change(self, pkg, status):
# ''' This method is called whenever the dpkg status of the package changes. The parameter pkg is the
# name of the package and the parameter status is one of the status strings used in the status file
# (/var/lib/dpkg/status) and documented in dpkg(1). '''
# The following methods should be overridden to implement progress reporting for run() calls
# with an apt_pkg.PackageManager object as their parameter:
#@clog()
def status_change(self, pkg, percent, status):
''' This method implements progress reporting for package installation by APT and may be extended to
dpkg at a later time. This method takes two parameters: The parameter percent is a float value
describing the overall progress and the parameter status is a string describing the current status
in an human-readable manner. '
|
tjgillies/distributed-draw
|
entangled/kademlia/constants.py
|
Python
|
lgpl-3.0
| 1,820 | 0.006593 |
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
""" This module defines the charaterizing constants of the Kademlia network
C{checkRefreshInterval} and C{udpDatagramMaxSize} are implementation-specific
constants, and do not affect general Kademlia operation.
"""
######### KADEMLIA CONSTANTS ###########
#: Small number Representing the degree of parallelism in network calls
alpha = 3
#: Maximum number of contacts stored in a bucket; this should be an even number
k = 8
#: Timeout for network operations (in seconds)
rpcTimeout = 5
# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds)
iterativeLookupDelay = rpcTimeout / 2
#: If a k-bucket has not been used for this amount of time, refresh it (in seconds)
refreshTimeout = 3600 # 1 hour
#: The interval at which nodes replicate (republish/refresh) data they are holding
replicateInterval = refreshTimeout
# The time it takes for data to expire in the network; the original publisher of the data
# will also republish the data at this time if it is still valid
dataExpireTimeout = 86400 # 24 hours
######## IMPLEMENTATION-SPECIFIC CONSTANTS ###########
#: The interval in which the node should check its whether any buckets need refreshing,
#: or whether any data needs to be republished (in seconds)
checkRefreshInterval = refreshTimeout/5
#: Max size of a single UDP datagram, in bytes. If a message is larger than this, it will
#: be spread across several UDP packets.
udpDatagramMaxSize = 8192 # 8 KB
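# Illustrative note on the derived values above: with rpcTimeout = 5 and
# refreshTimeout = 3600, iterativeLookupDelay is 2 under Python 2 integer
# division (2.5 under Python 3) and checkRefreshInterval is 720 seconds.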
|
itJunky/web-tasker.py
|
db_repository/versions/028_migration.py
|
Python
|
gpl-2.0
| 1,041 | 0.000961 |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
task = Table('task', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_id', Integer, default=ColumnDefault(0)),
Column('body', String),
    Column('taskname', String(length=140)),
Column('timestamp', DateTime),
Column('user_id', Integer),
Column('project_id', Integer),
Column('status', String(length=10)),
Column('depth', Integer, default=ColumnDefault(0)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['task'].columns['parent_id'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['task'].columns['parent_id'].drop()
|
zappyk-github/zappyk-python
|
src/src_zappyk/developing/test-gui-Gtk.py
|
Python
|
gpl-2.0
| 4,640 | 0.009483 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'zappyk'
import sys, subprocess
from gi.repository import Gtk, Gio
from gi.repository import GLib
###############################################################################
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Hello World")
self.button = Gtk.Button(label="Click Here")
self.button.connect("clicked", self.on_button_clicked)
self.add(self.button)
def on_button_clicked(self, widget):
print("Hello World")
#______________________________________________________________________________
#
def test_1():
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
###############################################################################
class Handler():
def __init__(self, cmd):
self.cmd = CommandTextView(cmd)
def on_button1_clicked(self, widget):
self.cmd.run()
pass
def on_button2_clicked(self, widget):
pass
def on_textview1_add(self, widget):
widget.inset
pass
def on_window1_delete_event(self, *args):
Gtk.main_quit(*args)
###############################################################################
class CommandTextView(Gtk.TextView):
''' NICE TEXTVIEW THAT READS THE OUTPUT OF A COMMAND SYNCRONOUSLY '''
def __init__(self, command):
'''COMMAND : THE SHELL COMMAND TO SPAWN'''
super(CommandTextView, self).__init__()
self.command = command
def run(self):
''' RUNS THE PROCESS '''
proc = subprocess.Popen(self.command, stdout = subprocess.PIPE) # SPAWNING
GLib.io_add_watch(proc.stdout, # FILE DESCRIPTOR
GLib.IO_IN, # CONDITION
self.write_to_buffer) # CALLBACK
def write_to_buffer(self, fd, condition):
if condition == GLib.IO_IN: #IF THERE'S SOMETHING INTERESTING TO READ
#CZ#char = fd.read(1) # WE READ ONE BYTE PER TIME, TO AVOID BLOCKING
char = fd.read().decode("utf-8")
buff = self.get_buffer()
buff.insert_at_cursor(char) # WHEN RUNNING DON'T TOUCH THE TEXTVIEW!!
            return True # FUNDAMENTAL, OTHERWISE THE CALLBACK ISN'T RECALLED
else:
            return False # RAISED AN ERROR: EXIT AND I DON'T WANT TO SEE YOU ANYMORE
#______________________________________________________________________________
#
def test_2():
cmd = CommandTextView("find")
win = Gtk.Window()
win.connect("delete-event", lambda wid, event: Gtk.main_quit()) # DEFINING CALLBACKS WITH LAMBDAS
win.set_size_request(200,300)
win.add(cmd)
win.show_all()
cmd.run()
Gtk.main()
#______________________________________________________________________________
#
def test_3():
cmd = CommandTextView("find")
builder = Gtk.Builder()
builder.add_from_file("test-gui-Gtk.glade")
builder.connect_signals(Handler(cmd))
window = builder.get_object("window1")
window.show_all()
cmd.run()
Gtk.main()
###############################################################################
class HeaderBarWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Stack Demo")
#CZ#Gtk.Window.__init__(self, title="Stack Demo", type=Gtk.WINDOW_TOPLEVEL)
self.set_border_width(10)
self.set_default_size(400, 200)
#CZ#self.has_toplevel_focus()
#hb = Gtk.HeaderBar()
#hb.props.show_close_button = True
#hb.props.title = "HeaderBar example"
#self.set_titlebar(hb)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="mail-send-receive-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
#hb.pack_end(button)
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
Gtk.StyleContext.add_class(box.get_style_context(), "linked")
button = Gtk.Button()
button.add(Gtk.Arrow(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE))
box.add(button)
button = Gtk.Button()
button.add(Gtk.Arrow(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE))
box.add(button)
#hb.pack_start(box)
self.add(Gtk.TextView())
#______________________________________________________________________________
#
def test_4():
win = HeaderBarWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
#______________________________________________________________________________
#
if __name__ == '__main__':
test_4()
|
ulikoehler/ODBPy
|
ODBPy/SurfaceParser.py
|
Python
|
apache-2.0
| 1,978 | 0.004044 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ODB++ surface parser components
"""
import re
from collections import namedtuple
from .Decoder import DecoderOption
from .Treeifier import TreeifierRule
from .PolygonParser import Polygon
from .Structures import Polarity, polarity_map
from .Attributes import parse_attributes
__all__ = ["surface_decoder_options",
"SurfaceBeginTag", "surface_treeify_rules",
"surface_decoder_options",
"SurfaceEndTag", "Surface", "Polarity"]
Surface = namedtuple("Surface", ["polarity", "dcode", "polygons", "attributes"])
SurfaceBeginTag = namedtuple("SurfaceBeginTag", ["polarity", "dcode", "attributes"])
SurfaceEndTag = namedtuple("SurfaceEndTag", [])
# Surface syntax regular expressions
_surface_re = re.compile(r"^S\s+([PN])\s+(\d+)\s*(;\s*.+?)?$")
_surface_end_re = re.compile(r"^SE\s*$")
def _parse_surface_start(match):
"Parse a surface begin tag regex match"
    polarity, dcode, attributes = match.groups()
# Parse attribute string
attributes = parse_attributes(attributes[1:]) \
if attributes is not None else {}
return SurfaceBeginTag(polarity_map[polarity],
int(dcode), attributes)
def _parse_surface_end(match):
"Parse a surface end tag regex match"
return SurfaceEndTag()
surface_decoder_options = [
DecoderOption(_surface_re, _parse_surface_start),
DecoderOption(_surface_end_re, _parse_surface_end)
]
def _treeifier_process_surface(elems):
"""Treeifier processor function for surfaces."""
polygons = []
polarity, dcode, attributes = elems[0] # Poly begin tag
for elem in elems[1:]: # Iterate everything except the end tag
if isinstance(elem, Polygon):
polygons.append(elem)
# Build polygon structure
return Surface(polarity, dcode, polygons, attributes)
surface_treeify_rules = [
TreeifierRule(SurfaceBeginTag, SurfaceEndTag, _treeifier_process_surface),
]
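# Illustrative sketch: a minimal surface header line without an attribute
# string parses into a SurfaceBeginTag with positive polarity, dcode 0 and an
# empty attribute dict.
def _demo_parse_surface_begin():
    match = _surface_re.match("S P 0")
    return _parse_surface_start(match)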
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/security/cmd/hide/type_Result.py
|
Python
|
unlicense
| 1,899 | 0.002633 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
RESULT_TYPE_PROCESS_HIDE = 0
RESULT_TYPE_PROCESS_UNHIDE = 1
class Result:
def __init__(self):
self.__dict__['type'] = 0
self.__dict__['item'] = ''
        self.__dict__['metaData'] = ''
    def __getattr__(self, name):
if name == 'type':
return self.__dict__['type']
if name == 'item':
return self.__dict__['item']
if name == 'metaData':
return self.__dict__['metaData']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'type':
self.__dict__['type'] = value
elif name == 'item':
self.__dict__['item'] = value
elif name == 'metaData':
self.__dict__['metaData'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_TYPE, self.__dict__['type'])
submsg.AddStringUtf8(MSG_KEY_RESULT_ITEM, self.__dict__['item'])
submsg.AddStringUtf8(MSG_KEY_RESULT_METADATA, self.__dict__['metaData'])
mmsg.AddMessage(MSG_KEY_RESULT, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['type'] = submsg.FindU8(MSG_KEY_RESULT_TYPE)
self.__dict__['item'] = submsg.FindString(MSG_KEY_RESULT_ITEM)
self.__dict__['metaData'] = submsg.FindString(MSG_KEY_RESULT_METADATA)
|
mazulo/taskbuster-boilerplate
|
docs/conf.py
|
Python
|
mit
| 8,303 | 0.006263 |
# -*- coding: utf-8 -*-
#
# TaskBuster documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 16 10:01:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TaskBuster'
copyright = u'2015, Patrick Mazulo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaskBusterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TaskBuster.tex', u'TaskBuster Documentation',
u'Patrick Mazulo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taskbuster', u'TaskBuster Documentation',
[u'Patrick Mazulo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TaskBuster', u'TaskBuster Documentation',
u'Patrick Mazulo', 'TaskBuster', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo
|
ostree/plaso
|
plaso/formatters/plist.py
|
Python
|
apache-2.0
| 497 | 0.012072 |
# -*- coding: utf-8 -*-
"""The plist event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
class PlistFormatter(interface.ConditionalEventFormatter):
"""Formatter for a plist key event."""
DATA_TYPE = u'plist:key'
FORMAT_STRING_SEPARATOR = u''
FORMAT_STRING_PIECES = [
u'{root}/',
u'{key}',
u' {desc}']
SOURCE_LONG = u'Plist Entry'
SOURCE_SHORT = u'PLIST'
manager.FormattersManager.RegisterFormatter(PlistFormatter)
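# Illustrative sketch (hypothetical event values): with root='/DeviceCache',
# key='44:00:00:00:00:04' and desc='Paired:True Name:Headset', the conditional
# pieces above are joined with the empty separator into
# '/DeviceCache/44:00:00:00:00:04 Paired:True Name:Headset'.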
|
xjsender/haoide
|
salesforce/lib/diff.py
|
Python
|
mit
| 2,098 | 0.005243 |
import sublime, sublime_plugin
import difflib
import time
import datetime
import codecs
import os
def diff_changes(file_name, result):
try:
if "Body" in result:
server = result["Body"].splitlines()
elif "Markup" in result:
server = result["Markup"].splitline
|
s()
local = codecs.open(file_name, "r", "utf-8").read().splitlines()
except UnicodeDecodeError:
show_diff_panel("Diff only works with UTF-8 files")
return
    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
bdate_literal = result["LastModifiedDate"].split(".")[0]
server_date = datetime.datetime.strptime(bdate_literal, "%Y-%m-%dT%H:%M:%S")
local_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
diff = difflib.unified_diff(server, local, "Server", "Local ", server_date, local_date, lineterm='')
difftxt = u"\n".join(line for line in diff)
if difftxt == "":
show_diff_panel("There is no difference between %s and server" % os.path.basename(file_name))
return
show_diff_panel(difftxt)
def diff_files(file_name, other_file_name):
try:
this_file_content = codecs.open(file_name, "r", "utf-8").read().splitlines()
other_file_content = codecs.open(other_file_name, "r", "utf-8").read().splitlines()
except UnicodeDecodeError:
show_diff_panel("Diff only works with UTF-8 files")
return
diff = difflib.unified_diff(this_file_content, other_file_content, "Server", "Local ", "", "", lineterm='')
difftxt = u"\n".join(line for line in diff)
if difftxt == "":
show_diff_panel("There is no difference between %s and %s" % (
file_name,
other_file_name
))
return
show_diff_panel(difftxt)
def show_diff_panel(difftxt):
win = sublime.active_window()
v = win.create_output_panel('diff_with_server')
v.assign_syntax('Packages/Diff/Diff.tmLanguage')
v.run_command('append', {'characters': difftxt})
win.run_command("show_panel", {"panel": "output.diff_with_server"})
|
dubwub/F2016-UPE-AI
|
sample_AIs/darwinbot2.py
|
Python
|
mit
| 1,018 | 0.016699 |
# posting to: http://localhost:3000/api/articles/update/:articleid with title, content
# changes title, content
#
# id1: (darwinbot1 P@ssw0rd!! 57d748bc67d0eaf026dff431) <-- this will change with differing mongo instances
import time # for testing, this is not good
import requests # if not installed already, run python -m pip install requests OR pip install requests, whatever you normally do
r = requests.post('http://localhost:80/api/games/search', data={'devkey': "581ced5d7563227053011823", 'username': 'darwinbot2'}) # search for new game
json = r.json() # when request comes back, that means you've found a match! (validation if server goes down?)
print(json)
gameID = json['gameID']
playerID = json['playerID']
print(gameID)
print(playerID)
input = ' '
while input != '':
input = raw_input('input move: ')
r = requests.post('http://localhost:80/api/games/submit/' + gameID, data={'playerID': playerID, 'move': input, 'devkey': "581ced5d7563227053011823"}); # submit sample move
json = r.json()
print(json)
|
pythonvietnam/nms
|
apps/alarm/apps.py
|
Python
|
mit
| 85 | 0 |
from django.apps import AppConfig
class AlarmConfig(AppConfig):
name = 'alarm'
|
codenote/chromium-test
|
tools/telemetry/telemetry/core/chrome/inspector_backend.py
|
Python
|
bsd-3-clause
| 9,767 | 0.009727 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core.chrome import inspector_console
from telemetry.core.chrome import inspector_memory
from telemetry.core.chrome import inspector_page
from telemetry.core.chrome import inspector_runtime
from telemetry.core.chrome import inspector_timeline
from telemetry.core.chrome import png_bitmap
from telemetry.core.chrome import websocket
class InspectorException(Exception):
pass
class InspectorBackend(object):
def __init__(self, browser, browser_backend, debugger_url):
assert debugger_url
self._browser = browser
self._browser_backend = browser_backend
self._debugger_url = debugger_url
self._socket = None
self._domain_handlers = {}
self._cur_socket_timeout = 0
self._next_request_id = 0
self._console = inspector_console.InspectorConsole(self)
self._memory = inspector_memory.InspectorMemory(self)
self._page = inspector_page.InspectorPage(self)
self._runtime = inspector_runtime.InspectorRuntime(self)
self._timeline = inspector_timeline.InspectorTimeline(self)
def __del__(self):
self.Disconnect()
def _Connect(self):
if self._socket:
return
self._socket = websocket.create_connection(self._debugger_url)
self._cur_socket_timeout = 0
self._next_request_id = 0
def Disconnect(self):
for _, handlers in self._domain_handlers.items():
_, will_close_handler = handlers
will_close_handler()
self._domain_handlers = {}
if self._socket:
self._socket.close()
self._socket = None
# General public methods.
@property
def browser(self):
return self._browser
@property
def url(self):
self.Disconnect()
return self._browser_backend.tab_list_backend.GetTabUrl(self._debugger_url)
def Activate(self):
self._Connect()
self._browser_backend.tab_list_backend.ActivateTab(self._debugger_url)
def Close(self):
self.Disconnect()
self._browser_backend.tab_list_backend.CloseTab(self._debugger_url)
# Public methods implemented in JavaScript.
def WaitForDocumentReadyStateToBeComplete(self, timeout):
util.WaitFor(
lambda: self._runtime.Evaluate('document.readyState') == 'complete',
timeout)
def WaitForDocumentReadyStateToBeInteractiveOrBetter(
self, timeout):
def IsReadyStateInteractiveOrBetter():
rs = self._runtime.Evaluate('document.readyState')
return rs == 'complete' or rs == 'interactive'
util.WaitFor(IsReadyStateInteractiveOrBetter, timeout)
@property
def screenshot_supported(self):
    if self._runtime.Evaluate(
        'window.chrome.gpuBenchmarking === undefined'):
      return False
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking.beginWindowSnapshotPNG === undefined'):
return False
# TODO(dtu): Also check for Chrome branch number, because of a bug in
# beginWindowSnapshotPNG in older versions. crbug.com/171592
return True
def Screenshot(self, timeout):
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking === undefined'):
raise Exception("Browser was not started with --enable-gpu-benchmarking")
if self._runtime.Evaluate(
'window.chrome.gpuBenchmarking.beginWindowSnapshotPNG === undefined'):
raise Exception("Browser does not support window snapshot API.")
self._runtime.Evaluate("""
if(!window.__telemetry) {
window.__telemetry = {}
}
window.__telemetry.snapshotComplete = false;
window.__telemetry.snapshotData = null;
window.chrome.gpuBenchmarking.beginWindowSnapshotPNG(
function(snapshot) {
window.__telemetry.snapshotData = snapshot;
window.__telemetry.snapshotComplete = true;
}
);
""")
def IsSnapshotComplete():
return self._runtime.Evaluate('window.__telemetry.snapshotComplete')
util.WaitFor(IsSnapshotComplete, timeout)
snap = self._runtime.Evaluate("""
(function() {
var data = window.__telemetry.snapshotData;
delete window.__telemetry.snapshotComplete;
delete window.__telemetry.snapshotData;
return data;
})()
""")
if snap:
return png_bitmap.PngBitmap(snap['data'])
return None
# Console public methods.
@property
def message_output_stream(self): # pylint: disable=E0202
return self._console.message_output_stream
@message_output_stream.setter
def message_output_stream(self, stream): # pylint: disable=E0202
self._console.message_output_stream = stream
# Memory public methods.
def GetDOMStats(self, timeout):
dom_counters = self._memory.GetDOMCounters(timeout)
return {
'document_count': dom_counters['documents'],
'node_count': dom_counters['nodes'],
'event_listener_count': dom_counters['jsEventListeners']
}
# Page public methods.
def PerformActionAndWaitForNavigate(self, action_function, timeout):
self._page.PerformActionAndWaitForNavigate(action_function, timeout)
def Navigate(self, url, script_to_evaluate_on_commit, timeout):
self._page.Navigate(url, script_to_evaluate_on_commit, timeout)
def GetCookieByName(self, name, timeout):
return self._page.GetCookieByName(name, timeout)
# Runtime public methods.
def ExecuteJavaScript(self, expr, timeout):
self._runtime.Execute(expr, timeout)
def EvaluateJavaScript(self, expr, timeout):
return self._runtime.Evaluate(expr, timeout)
# Timeline public methods.
@property
def timeline_model(self):
return self._timeline.timeline_model
def StartTimelineRecording(self):
self._timeline.Start()
def StopTimelineRecording(self):
self._timeline.Stop()
# Methods used internally by other backends.
def DispatchNotifications(self, timeout=10):
self._Connect()
self._SetTimeout(timeout)
try:
data = self._socket.recv()
except (socket.error, websocket.WebSocketException):
if self._browser_backend.tab_list_backend.DoesDebuggerUrlExist(
self._debugger_url):
return
raise exceptions.TabCrashException()
res = json.loads(data)
logging.debug('got [%s]', data)
if 'method' in res:
self._HandleNotification(res)
def _HandleNotification(self, res):
if (res['method'] == 'Inspector.detached' and
res.get('params', {}).get('reason','') == 'replaced_with_devtools'):
self._WaitForInspectorToGoAwayAndReconnect()
return
mname = res['method']
dot_pos = mname.find('.')
domain_name = mname[:dot_pos]
if domain_name in self._domain_handlers:
try:
self._domain_handlers[domain_name][0](res)
except Exception:
import traceback
traceback.print_exc()
else:
logging.debug('Unhandled inspector message: %s', res)
def SendAndIgnoreResponse(self, req):
self._Connect()
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
self._socket.send(data)
logging.debug('sent [%s]', data)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _WaitForInspectorToGoAwayAndReconnect(self):
sys.stderr.write('The connection to Chrome was lost to the Inspector UI.\n')
sys.stderr.write('Telemetry is waiting for the inspector to be closed...\n')
self._socket.close()
self._socket = None
def IsBack():
return self._browser_backend.tab_list_backend.DoesDebuggerUrlExist(
self._debugger_url)
util.WaitFor(IsBack, 512, 0.5)
sys.stderr.write('\n')
sys.stderr.write('Inspector\'s UI closed. Telemetry will now resume.\n')
self._Connect()
def SyncRequest(self, req, timeout=10):
self._Connect()
# TODO(nduca): Listen to the timeout argument
# pylint: disable=W0613
self._SetTimeout(timeout)
self
|
danieldUKIM/controllers_dockers
|
rem_console/REM_console.py
|
Python
|
apache-2.0
| 2,275 | 0.037528 |
#!/usr/bin/python3
import rem_backend.query_data as qd
import rem_backend.propagation_model_estimation as pm
import threading
import _thread
__author__ = "Daniel Denkovski", "Valentin Rakovic"
__copyright__ = "Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia"
__version__ = "0.1.0"
__email__ = "{danield, valentin}@feit.ukim.edu.mk"
'''
REM console module
Showcases the REM backend capabilities of the extension
Used as console interface for users to interact with the platform
'''
def main():
run = 1;
while (run):
print("Please choose from the selection:")
print("1. WiFi device localization")
print("2. Duty cycle calculation")
		print("3. Path loss model estimation")
print("0. Quit")
choice = input(" >> ")
if (choice == '0'):
run = 0
elif (choice == '1'):
print("Loc:Enter the channel of interest")
chann = input(" >> ")
dev_list = qd.get_all_active_devices_on_channel(chann,1)
try:
print("Select the index of the device of interest")
ind = 1
for row in dev_list:
					print("{}. {}".format(ind,row[0]))
					ind += 1
devind = input(" >> ")
print(dev_list[int(devind)-1][0])
try:
location = qd.estimate_tx_location(str(dev_list[int(devind)-1][0]),10)
print("The location of devices {} is:".format(str(dev_list[int(devind)-1][0])))
print("x:{} y:{} z:{} Pt:{} dBm".format(location[0],location[1],location[2],location[3]))
except:
print("not sufficient data for modeling")
print("")
except:
print("no devices")
print("")
elif (choice == '2'):
print("DC:Enter the channel of interest")
chann = input(" >> ")
ux, ul, dx, dy = input("provide ux ul dx dl coordinates of interest: ").split(' ')
try:
val = qd.get_duty_cycle_by_area(chann,10,ux,ul,dx,dy)
dc = val[0][0]
print("Duty cycle value for channel={} is {}".format(chann,dc))
except:
print("not sufficient data for modeling")
print("")
elif (choice == '3'):
print("PL:Enter the channel of interest")
chann = input(" >> ")
try:
val = pm.get_chann_model(10,chann)
print(val)
except:
print("not sufficient data for modeling")
print("")
if __name__=="__main__":
main()
|
PetePriority/home-assistant
|
homeassistant/components/lutron/light.py
|
Python
|
apache-2.0
| 2,793 | 0 |
"""
Support for Lutron lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lutron/
"""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.lutron import (
LutronDevice, LUTRON_DEVICES, LUTRON_CONTROLLER)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron lights."""
devs = []
for (area_name, device) in hass.data[LUTRON_DEVICES]['light']:
dev = LutronLight(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
add_entities(devs, True)
def to_lutron_level(level):
"""Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
return float((level * 100) / 255)
def to_hass_level(level):
"""Convert the given Lutron (0.0-100.0) light level to HASS (0-255)."""
return int((level * 255) / 100)
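# Spot-check of the two helpers above (illustrative values worked out by hand;
# not part of the original component):
#   to_lutron_level(255) -> 100.0     to_lutron_level(128) -> ~50.2
#   to_hass_level(100.0) -> 255       to_hass_level(50.0)  -> 127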
class LutronLight(LutronDevice, Light):
"""Representation of a Lutron Light, including dimmable."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the light."""
self._prev_brightness = None
super().__init__(area_name, lutron_device, controller)
@property
    def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of the light."""
new_brightness = to_hass_level(self._lutron_device.last_level())
if new_brightness != 0:
self._prev_brightness = new_brightness
        return new_brightness
def turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs and self._lutron_device.is_dimmable:
brightness = kwargs[ATTR_BRIGHTNESS]
elif self._prev_brightness == 0:
brightness = 255 / 2
else:
brightness = self._prev_brightness
self._prev_brightness = brightness
self._lutron_device.level = to_lutron_level(brightness)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._lutron_device.level = 0
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = {'lutron_integration_id': self._lutron_device.id}
return attr
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_level() > 0
def update(self):
"""Call when forcing a refresh of the device."""
if self._prev_brightness is None:
self._prev_brightness = to_hass_level(self._lutron_device.level)
|
TacticAlpha/basic-lan-webserver
|
server/status.py
|
Python
|
agpl-3.0
| 938 | 0 |
import json as json_
# Template for code 200 requests so data can easily be added
def ok(d=None, *, json=True):
    code = {'code': 200, 'status': 'OK', 'data': d}
if json:
code = json_.dumps(code)
return code
# The 400 codes shouldn't require any special arguments.
def invalid_request(*, json=True):
code = {'code': 400, 'status': 'MALFORMED_REQUEST'}
if json:
code = json_.dumps(code)
return code
def unknown_request(*, json=True):
code = {'code': 400, 'status': 'UNKNOWN_REQUEST'}
if json:
code = json_.dumps(code)
return code
# You can assign the internal server error a number for debugging purposes.
def internal_server_error(n=None, *, json=True):
status_string = 'INTERNAL_SERVER_ERROR'
if n is not None:
status_string += '_{}'.format(n)
code = {'code': 500, 'status': status_string}
if json:
code = json_.dumps(code)
return code
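# Hedged usage sketch (added for illustration; the helpers above are the real
# API, the printed payloads are just examples):
if __name__ == '__main__':
    # JSON string with the payload nested under 'data'.
    print(ok({'uptime': 42}))
    # Plain dict, for callers that serialize later themselves.
    print(unknown_request(json=False))
    # Numbered internal error for easier debugging.
    print(internal_server_error(7))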
|
pepincho/Python101-and-Algo1-Courses
|
Programming-101-v3/week9/1-Money-In-The-Bank/start.py
|
Python
|
mit
| 465 | 0.002151 |
from bank_CI import BankCI
from bank_controller import BankController
from settings import DB_NAME, CREATE_TABLES, DROP_DATABASE
from sql_manager import BankDatabaseManager
def main():
manager = BankDatabaseManager.create_from_db_and_sql(DB_NAME, CREATE_TABLES, DROP_DATABASE, create_if_exists=False)
controller = BankController(manager)
command_interface = BankCI(controller)
command_interface.main_menu()
if __name__ == '__main__':
main()
|
duointeractive/python-bluefin
|
bluefin/__init__.py
|
Python
|
bsd-3-clause
| 31 | 0.032258 |
# Major, Minor
VERSION = (1, 4)
| |
internap/netman
|
netman/core/objects/backward_compatible_switch_operations.py
|
Python
|
apache-2.0
| 3,375 | 0.003556 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from netman.core.objects.interface_states import OFF, ON
class BackwardCompatibleSwitchOperations(object):
"""
    Deprecated methods
"""
def remove_access_vlan(self, interface_id):
warnings.warn("Deprecated, use unset_interface_access_vlan(interface_id) instead", DeprecationWarning)
return self.unset_interface_access_vlan(interface_id)
def configure_native_vlan(self, interface_id, vlan):
warnings.warn("Deprecated, use set_interface_native_vlan(interface_id, vlan) instead", DeprecationWarning)
return self.set_interface_native_vlan(interface_id, vlan)
def remove_native_vlan(self, interface_id):
warnings.warn("Deprecated, use unset_interface_native_vlan(interface_id) instead", DeprecationWarning)
return self.unset_interface_native_vlan(interface_id)
def remove_vlan_access_group(self, vlan_number, direction):
warnings.warn("Deprecated, use unset_vlan_access_group(vlan_number, direction) instead", DeprecationWarning)
return self.unset_vlan_access_group(vlan_number, direction)
def remove_vlan_vrf(self, vlan_number):
warnings.warn("Deprecated, use unset_vlan_vrf(vlan_number) instead", DeprecationWarning)
return self.unset_vlan_vrf(vlan_number)
def remove_interface_description(self, interface_id):
warnings.warn("Deprecated, use unset_interface_description(interface_id) instead", DeprecationWarning)
return self.unset_interface_description(interface_id)
def remove_bond_description(self, number):
warnings.warn("Deprecated, use unset_bond_description(number) instead", DeprecationWarning)
return self.unset_bond_description(number)
def configure_bond_native_vlan(self, number, vlan):
warnings.warn("Deprecated, use set_bond_native_vlan(number, vlan) instead", DeprecationWarning)
return self.set_bond_native_vlan(number, vlan)
def remove_bond_native_vlan(self, number):
warnings.warn("Deprecated, use unset_bond_native_vlan(number) instead", DeprecationWarning)
return self.unset_bond_native_vlan(number)
def enable_lldp(self, interface_id, enabled):
warnings.warn("Deprecated, use set_interface_lldp_state(interface_id, enabled) instead", DeprecationWarning)
return self.set_interface_lldp_state(interface_id, enabled)
def shutdown_interface(self, interface_id):
        warnings.warn("Deprecated, use set_interface_state(interface_id, state) instead", DeprecationWarning)
        return self.set_interface_state(interface_id, OFF)
def openup_interface(self, interface_id):
warnings.warn("Deprecated, use set_interface_state(interface_id, state) instead", DeprecationWarning)
return self.set_interface_state(interface_id, ON)
|
gguillen/galeranotify
|
galeranotify.py
|
Python
|
gpl-2.0
| 5,427 | 0.00387 |
#!/usr/bin/python
#
# Script to send email notifications when a change in Galera cluster membership
# occurs.
#
# Complies with http://www.codership.com/wiki/doku.php?id=notification_command
#
# Author: Gabe Guillen <gabeguillen@outlook.com>
# Version: 1.5
# Release: 3/5/2015
# Use at your own risk. No warranties expressed or implied.
#
import os
import sys
import getopt
import smtplib
try: from email.mime.text import MIMEText
except ImportError:
# Python 2.4 (CentOS 5.x)
from email.MIMEText import MIMEText
import socket
import email.utils
# Change this to some value if you don't want your server hostname to show in
# the notification emails
THIS_SERVER = socket.gethostname()
# Server hostname or IP address
SMTP_SERVER = 'YOUR_SMTP_HERE'
SMTP_PORT = 25
# Set to True if you need SMTP over SSL
SMTP_SSL = False
# Set to True if you need to authenticate to your SMTP server
SMTP_AUTH = False
# Fill in authorization information here if True above
SMTP_USERNAME = ''
SMTP_PASSWORD = ''
# Takes a single sender
MAIL_FROM = 'YOUR_EMAIL_HERE'
# Takes a list of recipients
MAIL_TO = ['SOME_OTHER_EMAIL_HERE']
# Need Date in Header for SMTP RFC Compliance
DATE = email.utils.formatdate()
# Edit below at your own risk
################################################################################
def main(argv):
str_status = ''
str_uuid = ''
str_primary = ''
str_members = ''
str_index = ''
message = ''
usage = "Usage: " + os.path.basename(sys.argv[0]) + " --status <status str>"
    usage += " --uuid <state UUID> --primary <yes/no> --members <comma-separated"
usage += " list of the component member UUIDs> --index <n>"
try:
opts, args = getopt.getopt(argv, "h", ["status=","uuid=",'primary=','members=','index='])
except getopt.GetoptError:
print usage
sys.exit(2)
if(len(opts) > 0):
message_obj = GaleraStatus(THIS_SERVER)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("--status"):
message_obj.set_status(arg)
elif opt in ("--uuid"):
message_obj.set_uuid(arg)
elif opt in ("--primary"):
message_obj.set_primary(arg)
elif opt in ("--members"):
message_obj.set_members(arg)
elif opt in ("--index"):
message_obj.set_index(arg)
try:
send_notification(MAIL_FROM, MAIL_TO, 'Galera Notification: ' + THIS_SERVER, DATE,
str(message_obj), SMTP_SERVER, SMTP_PORT, SMTP_SSL, SMTP_AUTH,
SMTP_USERNAME, SMTP_PASSWORD)
except Exception, e:
print "Unable to send notification: %s" % e
sys.exit(1)
else:
print usage
sys.exit(2)
sys.exit(0)
def send_notification(from_email, to_email, subject, date, message, smtp_server,
smtp_port, use_ssl, use_auth, smtp_user, smtp_pass):
msg = MIMEText(message)
msg['From'] = from_email
msg['To'] = ', '.join(to_email)
msg['Subject'] = subject
msg['Date'] = date
if(use_ssl):
mailer = smtplib.SMTP_SSL(smtp_server, smtp_port)
else:
mailer = smtplib.SMTP(smtp_server, smtp_port)
if(use_auth):
mailer.login(smtp_user, smtp_pass)
mailer.sendmail(from_email, to_email, msg.as_string())
mailer.close()
class GaleraStatus:
def __init__(self, server):
self._server = server
self._status = ""
self._uuid = ""
self._primary = ""
self._members = ""
self._index = ""
self._count = 0
def set_status(self, status):
self._status = status
self._count += 1
def set_uuid(self, uuid):
self._uuid = uuid
self._count += 1
def set_primary(self, primary):
self._primary = primary.capitalize()
self._count += 1
def set_members(self, members):
self._members = members.split(',')
self._count += 1
def set_index(self, index):
self._index = index
self._count += 1
def __str__(self):
message = "Galera running on " + self._server + " has reported the following"
message += " cluster membership change"
if(self._count > 1):
message += "s"
message += ":\n\n"
if(self._status):
message += "Status of this node: " + self._status + "\n\n"
if(self._uuid):
message += "Cluster state UUID: " + self._uuid + "\n\n"
if(self._primary):
message += "Current cluster component is primary: " + self._primary + "\n\n"
if(self._members):
message += "Current members of the component:\n"
if(self._index):
for i in range(len(self._members)):
if(i == int(self._index)):
message += "-> "
          else:
            message += "-- "
          message += self._members[i] + "\n"
else:
        message += "\n".join((" " + str(x)) for x in self._members)
message += "\n"
if(self._index):
message += "Index of this node in the member list: " + self._index + "\n"
return message
if __name__ == "__main__":
main(sys.argv[1:])
|
mr-uuid/snippets
|
python/classes/init_vs_call.py
|
Python
|
mit
| 530 | 0.001887 |
# making a class callable:
class a(object):
def __init__(self, a):
print "__init__"
self.a = a
def __call__(self, *args):
print "__call__"
self.a = args[0]
# Based on this code, when we call a, the __init__ function gets called. My guess
# is that we don't have an instance initialized, then when we have an instance,
# we can call it.
# Initializing an obj
apple = a("Hello")
|
# Calling an obj
apple("Hi")
# What will this do? :
# @a
# def fuu(*args):
# print args
# fuu("hi")
|
peterhinch/micropython-tft-gui
|
tft/driver/tft.py
|
Python
|
mit
| 32,591 | 0.010034 |
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Robert Hammelrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Some parts of the software are a port of code provided by Rinky-Dink Electronics, Henning Karlsen,
# with the following copyright notice:
#
## Copyright (C)2015 Rinky-Dink Electronics, Henning Karlsen. All right reserved
## This library is free software; you can redistribute it and/or
## modify it under the terms of the CC BY-NC-SA 3.0 license.
## Please see the included documents for further information.
#
# Class supporting TFT LC-displays with a parallel Interface
# First example: Controller SSD1963 with a 4.3" or 7" display
#
# The minimal connection is:
# X1..X8 for data, Y9 for /Reset, Y10 for /RD, Y11 for /WR and Y12 for /RS
# Then LED must be hard tied to Vcc and /CS to GND.
#
import pyb, stm
from uctypes import addressof
from tft.driver import TFT_io
import gc
# define constants
#
RESET = const(1 << 10) ## Y9
RD = const(1 << 11) ## Y10
WR = const(0x01) ## Y11
D_C = const(0x02) ## Y12
LED = const(1 << 8) ## Y3
POWER = const(1 << 9) ## Y4
## CS is not used and must be hard tied to GND
PORTRAIT = const(1)
LANDSCAPE = const(0)
class TFT:
def __init__(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
self.tft_init(controller, lcd_type, orientation, v_flip, h_flip)
def tft_init(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
#
        # For convenience, define X1..X8 and Y9..Y12 as output ports using the python functions.
        # X1..X8 will be redefined on the fly as Input by accessing the MODER control registers
        # when needed. Y9 is treated separately, since it is used for Reset, which is done at python level
        # since it needs long delays anyhow, 5 and 15 ms vs. 10 µs.
#
# Set TFT general defaults
self.controller = controller
self.lcd_type = lcd_type
self.orientation = orientation
self.v_flip = v_flip # flip vertical
self.h_flip = h_flip # flip horizontal
self.c_flip = 0 # flip blue/red
self.rc_flip = 0 # flip row/column
self.setColor((255, 255, 255)) # set FG color to white as can be.
self.setBGColor((0, 0, 0)) # set BG to black
self.bg_buf = bytearray()
#
self.pin_led = None # deferred init Flag
self.power_control = power_control
if self.power_control:
# special treat for Power Pin
self.pin_power = pyb.Pin("Y4", pyb.Pin.OUT_PP)
self.power(True) ## switch Power on
#
pyb.delay(10)
# this may have to be moved to the controller specific section
if orientation == PORTRAIT:
self.setXY = TFT_io.setXY_P
self.drawPixel = TFT_io.drawPixel_P
else:
self.setXY = TFT_io.setXY_L
self.drawPixel = TFT_io.drawPixel_L
self.swapbytes = TFT_io.swapbytes
self.swapcolors = TFT_io.swapcolors
# ----------
for pin_name in ["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8",
"Y10", "Y11", "Y12"]:
pin = pyb.Pin(pin_name, pyb.Pin.OUT_PP) # set as output
pin.value(1) ## set high as default
# special treat for Reset
self.pin_reset = pyb.Pin("Y9", pyb.Pin.OUT_PP)
# Reset the device
self.pin_reset.value(1) ## do a hard reset
pyb.delay(10)
self.pin_reset.value(0) ## Low
pyb.delay(20)
self.pin_reset.value(1) ## set high again
pyb.delay(20)
#
# Now initialiize the LCD
# This is for the SSD1963 controller and two specific LCDs. More may follow.
# Data taken from the SSD1963 data sheet, SSD1963 Application Note and the LCD Data sheets
#
if controller == "SSD1963": # 1st approach for 480 x 272
            TFT_io.tft_cmd_data(0xe2, bytearray(b'\x1d\x02\x54'), 3) # PLL multiplier, set PLL clock to 100M
                 # N=0x2D for 6.5MHz, 0x1D for 10MHz crystal
                 # PLLClock = Crystal * (Mult + 1) / (Div + 1)
                 # The intermediate value Crystal * (Mult + 1) must be between 250MHz and 750 MHz
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x01'), 1) # PLL Enable
pyb.delay(10)
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x03'), 1)
pyb.delay(10)
TFT_io.tft_cmd(0x01) # software reset
pyb.delay(10)
#
# Settings for the LCD
#
        # The LCDC_FPR depends on the PLL clock and the recommended LCD dot clock DCLK
#
# LCDC_FPR = (DCLK * 1048576 / PLLClock) - 1
#
# The other settings are less obvious, since the definitions of the SSD1963 data sheet and the
# LCD data sheets differ. So what' common, even if the names may differ:
        # HDP Horizontal Panel width (also called HDISP, Thd). The value stored in the register is HDP - 1
        # VDP Vertical Panel Width (also called VDISP, Tvd). The value stored in the register is VDP - 1
        # HT Total Horizontal Period, also called HP, th... The exact value does not matter
        # VT Total Vertical Period, also called VT, tv, .. The exact value does not matter
# HPW Width of the Horizontal sync pulse, also called HS, thpw.
# VPW Width of the Vertical sync pulse, also called VS, tvpw
# Front Porch (HFP and VFP) Time between the end of display data and the sync pulse
# Back Porch (HBP and VBP Time between the start of the sync pulse and the start of display data.
# HT = FP + HDP + BP and VT = VFP + VDP + VBP (sometimes plus sync pulse width)
        # Unfortunately, the controller does not use these front/back porch times, instead it uses a starting time
        # in the front porch area and defines (see also figures in chapter 13.3 of the SSD1963 data sheet)
        # HPS Time from that horiz. starting point to the start of the horizontal display area
# LPS Time from that horiz. starting point to the horizontal sync pulse
# VPS Time from the vert. starting point to the first line
# FPS Time from the vert. starting point to the vertical sync pulse
#
# So the following relations must be held:
#
# HT > HDP + HPS
# HPS >= HPW + LPS
# HPS = Back Porch - LPS, or HPS = Horizontal back Porch
# VT > VDP + VPS
# VPS >= VPW + FPS
# VPS = Back Porch - FPS, or VPS = Vertical back Porch
#
        # LPS or FPS may have a value of zero, since the length of the front porch is determined by the
        # other figures
        #
        # The best is to start with the recommendations of the LCD data sheet for the back porch, grab a
        # sync pulse width and then determine the others, such that they meet the relations. Typically, these
        # values allow for some ambiguity.
#
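        # Worked example of the LCDC_FPR formula above (illustration only, no
        # new register writes): with PLLClock = 100 MHz and a typical DCLK of
        # 9 MHz, LCDC_FPR = 9e6 * 1048576 / 100e6 - 1, which is roughly 94371.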
if lcd_type == "LB04301": # Size 480x272, 4.3", 24 Bit, 4.3"
#
# Value Min Typical Max
# DotClock 5 MHZ 9 MHz 12 MHz
# HT (Hor. Total 490 531 612
# HDP (Hor. Disp) 480
# HBP (back porch) 8 43
# HFP (Fr. porch) 2 8
# HPW (Hor. sync) 1
# VT (Vert. Total) 2
|
scrapycloud/scrapy-cluster
|
crawler/crawler/spiders/baidu.py
|
Python
|
mit
| 979 | 0.002043 |
# -*- coding: utf-8 -*-
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class BaiduSpider(RedisCrawlSpider):
"""Spider that reads urls from redis queue (myspider:start_urls)."""
name = 'baidu'
redis_key = 'crawler:start_keyword'
"""Spider that reads urls from redis queue when idle."""
rules = (
Rule(LinkExtractor("baidu.php"), callback='parse_page', follow=True),
)
def __init__(self, *args, **kwargs):
        # Dynamically define the allowed domains list.
domain = kwargs.pop('domain', '')
self.allowed_domains = filter(None, domain.split(','))
super(BaiduSpider, self).__init__(*args, **kwargs)
def parse_page(self, response):
data = {
'name': response.css('title::text').extract_first(),
'url': response.url,
}
import pprint
pprint.pprint(data)
return data
|
torypages/luigi
|
luigi/lock.py
|
Python
|
apache-2.0
| 3,102 | 0.001289 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Locking functionality when launching things from the command line.
Uses a pidfile.
This prevents multiple identical workflows from being launched simultaneously.
"""
from __future__ import print_function
import hashlib
import os
from luigi import six
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
cmd = 'ps -p %s -o command=' % (pid,)
with os.popen(cmd, 'r') as p:
return p.readline().strip()
def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
if my_pid is None:
my_pid = os.getpid()
my_cmd = getpcmd(my_pid)
if six.PY3:
cmd_hash = my_cmd.encode('utf8')
else:
cmd_hash = my_cmd
pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
def acquire_for(pid_dir, num_available=1):
"""
Makes sure the process is only run once at the same time with the same name.
    Notice that since we check the process name, different parameters to the same
command can spawn multiple processes at the same time, i.e. running
"/usr/bin/my_process" does not prevent anyone from launching
"/usr/bin/my_process --foo bar".
"""
my_pid, my_cmd, pid_file = get_info(pid_dir)
# Check if there is a pid file corresponding to this name
if not os.path.exists(pid_dir):
os.mkdir(pid_dir)
os.chmod(pid_dir, 0o777)
pids = set()
pid_cmds = {}
if os.path.exists(pid_file):
# There is such a file - read the pid and look up its process name
pids.update(filter(None, map(str.strip, open(pid_file))))
pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
if len(matching_pids) >= num_available:
# We are already running under a different pid
print('Pid(s)', ', '.join(matching_pids), 'already running')
return False
else:
# The pid belongs to something else, we could
pass
pid_cmds[str(my_pid)] = my_cmd
# Write pids
pids.add(str(my_pid))
with open(pid_file, 'w') as f:
f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
# Make the file writable by all
if os.name == 'nt':
pass
else:
s = os.stat(pid_file)
if os.getuid() == s.st_uid:
os.chmod(pid_file, s.st_mode | 0o777)
return True
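# Illustrative call pattern (sketch, not part of luigi itself; the directory
# name is made up):
#
#   if not acquire_for('/tmp/luigi-pidfiles'):
#       sys.exit(1)  # an identical command line is already running
#
# Because the pidfile name is the md5 of the wrapping command, invocations with
# different arguments use different pidfiles and do not block each other.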
|
j-martin/raspberry-gpio-zmq
|
raspzmq/alerts.py
|
Python
|
mit
| 3,371 | 0.00089 |
#!/usr/bin/env python
"""alerts.py Classes for sendings alerts
"""
__author__ = "Jean-Martin Archer"
__copyright__ = "Copyright 2013, MIT License."
import smtplib
from twilio.rest import TwilioRestClient
from vendors.pushbullet.pushbullet import PushBullet
import configuration
class Alerts(object):
    """docstring for alerts"""
def __init__(self, config_path='./config/'):
self.config = configuration.load(config_path)
self.register()
def register(self):
alerts = self.config['alerts']
alerts_list = []
        # Instantiate the configured alert backends from their config sections.
        if alerts['sms']['on']:
            alerts_list.append(AlertSMS(alerts['sms']))
        if alerts['pushbullet']['on']:
            alerts_list.append(AlertPushBullet(alerts['pushbullet']))
        if alerts['email']['on']:
            alerts_list.append(AlertEmail(alerts['email']))
self.alerts = alerts_list
def send(self, message):
for alert in self.alerts:
alert.send_notification(message)
class BasicAlert(object):
    """docstring for BasicAlert class. This is more an interface/contract
than anything else"""
def __init__(self, config):
self.config = config
self.setup()
def setup(self):
raise NotImplementedError
def send_notification(self, message):
raise NotImplementedError
class AlertEmail(BasicAlert):
    """docstring for AlertEmail"""
def setup(self):
        self.sender = self.config['email_sender']
        self.receivers = self.config['email_receivers']
        self.server = self.config['server']
    def send_notification(self, message):
email_body = """From: Alert <%s>
To: Alert <%s>
Subject: %s
This is a test e-mail message.
""" % (self.sender, self.receivers, message)
try:
smtpObj = smtplib.SMTP(self.server)
smtpObj.sendmail(self.sender, self.receivers, email_body)
print "Successfully sent AlertEmail"
        except smtplib.SMTPException:
print "Error: unable to send AlertEmail"
class AlertPushBullet(BasicAlert):
    """docstring for AlertPushBullet. Get your api key from
https://www.PushBullet.com/account
Use the pyPushBullet API to know which deviceID to use.
"""
def setup(self):
self.push = PushBullet(self.config['apikey'])
def send_notification(self, message):
for device in self.config['device']:
self.push.pushNote(device, message, message)
def get_device_id(self):
print self.push.getDevices()
class AlertSMS(BasicAlert):
    """docstring for AlertSMS, uses your twilio.com account."""
def setup(self):
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = self.config['twilio_sid']
auth_token = self.config['twilio_auth_token']
self.client = TwilioRestClient(account_sid, auth_token)
        self.create = self.client.sms.messages.create
def send_notification(self, message):
message = self.create(body=message,
to=self.config['to_number'],
from_=self.config["from_number"])
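# Rough usage sketch (assumes configuration.load() returns a dict with the
# 'alerts' section used in register(); not part of the original module):
#
#   notifier = Alerts(config_path='./config/')
#   notifier.send("Door sensor triggered")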
|
kslundberg/pants
|
src/python/pants/backend/codegen/tasks/protobuf_gen.py
|
Python
|
apache-2.0
| 13,284 | 0.008883 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import subprocess
from collections import OrderedDict
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.tasks.protobuf_parse import ProtobufParse
from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.binaries.binary_util import BinaryUtil
from pants.fs.archive import ZIP
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
class ProtobufGen(SimpleCodegenTask):
@classmethod
def global_subsystems(cls):
return super(ProtobufGen, cls).global_subsystems() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(ProtobufGen, cls).register_options(register)
# The protoc version and the plugin names are used as proxies for the identity of the protoc
# executable environment here. Although version is an obvious proxy for the protoc binary
# itself, plugin names are less so and plugin authors must include a version in the name for
# proper invalidation of protobuf products in the face of plugin modification that affects
# plugin outputs.
register('--version', advanced=True, fingerprint=True,
help='Version of protoc. Used to create the default --javadeps and as part of '
'the path to lookup the tool with --pants-support-baseurls and '
'--pants-bootstrapdir. When changing this parameter you may also need to '
'update --javadeps.',
default='2.4.1')
register('--plugins', advanced=True, fingerprint=True, action='append',
help='Names of protobuf plugins to invoke. Protoc will look for an executable '
'named protoc-gen-$NAME on PATH.',
default=[])
register('--extra_path', advanced=True, action='append',
help='Prepend this path onto PATH in the environment before executing protoc. '
'Intended to help protoc find its plugins.',
default=None)
register('--supportdir', advanced=True,
help='Path to use for the protoc binary. Used as part of the path to lookup the'
'tool under --pants-bootstrapdir.',
default='bin/protobuf')
register('--javadeps', advanced=True, action='append',
help='Dependencies to bootstrap this task for generating java code. When changing '
'this parameter you may also need to update --version.',
default=['3rdparty:protobuf-java'])
# TODO https://github.com/pantsbuild/pants/issues/604 prep start
@classmethod
def prepare(cls, options, round_manager):
super(ProtobufGen, cls).prepare(options, round_manager)
round_manager.require_data(JarImportProducts)
round_manager.require_data('deferred_sources')
# TODO https://github.com/pantsbuild/pants/issues/604 prep finish
def __init__(self, *args, **kwargs):
"""Generates Java files from .proto files using the Google protobuf compiler."""
super(ProtobufGen, self).__init__(*args, **kwargs)
self.plugins = self.get_options().plugins
self._extra_paths = self.get_options().extra_path
@memoized_property
def protobuf_binary(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_binary(self.get_options().supportdir,
self.get_options().version,
'protoc')
@property
def javadeps(self):
return self.resolve_deps(self.get_options().javadeps)
@property
def synthetic_target_type(self):
return JavaLibrary
def synthetic_target_extra_dependencies(self, target):
deps = OrderedSet()
if target.imported_jars:
      # We need to add in the proto imports jars.
      jars_address = Address(os.path.relpath(self.codegen_workdir(target), get_buildroot()),
target.id + '-rjars')
jars_target = self.context.add_new_target(jars_address,
JarLibrary,
jars=target.imported_jars,
derived_from=target)
deps.update([jars_target])
deps.update(self.javadeps)
return deps
def is_gentarget(self, target):
return isinstance(target, JavaProtobufLibrary)
@classmethod
def supported_strategy_types(cls):
return [cls.IsolatedCodegenStrategy, cls.ProtobufGlobalCodegenStrategy]
def sources_generated_by_target(self, target):
genfiles = []
for source in target.sources_relative_to_source_root():
path = os.path.join(target.target_base, source)
genfiles.extend(self.calculate_genfiles(path, source))
return genfiles
def execute_codegen(self, targets):
if not targets:
return
sources_by_base = self._calculate_sources(targets)
if self.codegen_strategy.name() == 'isolated':
sources = OrderedSet()
for target in targets:
sources.update(target.sources_relative_to_buildroot())
else:
sources = OrderedSet(itertools.chain.from_iterable(sources_by_base.values()))
if not self.validate_sources_present(sources, targets):
return
bases = OrderedSet(sources_by_base.keys())
bases.update(self._proto_path_imports(targets))
check_duplicate_conflicting_protos(self, sources_by_base, sources, self.context.log)
for target in targets:
# NB(gm): If the strategy is set to 'isolated', then 'targets' should contain only a single
# element, which means this simply sets the output directory depending on that element.
# If the strategy is set to 'global', the target passed in as a parameter here will be
# completely arbitrary, but that's OK because the codegen_workdir function completely
# ignores the target parameter when using a global strategy.
output_dir = self.codegen_workdir(target)
break
gen_flag = '--java_out'
safe_mkdir(output_dir)
gen = '{0}={1}'.format(gen_flag, output_dir)
args = [self.protobuf_binary, gen]
if self.plugins:
for plugin in self.plugins:
# TODO(Eric Ayers) Is it a good assumption that the generated source output dir is
# acceptable for all plugins?
args.append("--{0}_out={1}".format(plugin, output_dir))
for base in bases:
args.append('--proto_path={0}'.format(base))
args.extend(sources)
# Tack on extra path entries. These can be used to find protoc plugins
protoc_environ = os.environ.copy()
if self._extra_paths:
protoc_environ['PATH'] = os.pathsep.join(self._extra_paths
+ protoc_environ['PATH'].split(os.pathsep))
self.context.log.debug('Executing: {0}'.format('\\\n '.join(args)))
process = subprocess.Popen(args, env=protoc_environ)
result = process.wait()
if result != 0:
raise TaskError('{0} ... exited non-zero ({1})'.format(self.protobuf_binary, result))
def _calculate_sources(self, targets):
gentargets = OrderedSet()
def add_to_gentargets(target):
if self.is_gentarget(target):
gentargets.add(target)
self.context.build_graph.walk_transitive_dependency_graph(
[target.address for target in targets],
add_to_gentargets,
postorder=True)
sources_by_base = OrderedDict()
# TODO(Eric Ayers) Extr
|
ntt-sic/nova
|
nova/virt/configdrive.py
|
Python
|
apache-2.0
| 6,518 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
configdrive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
help='Config drive format. One of iso9660 (default) or vfat'),
cfg.StrOpt('config_drive_tempdir',
default=tempfile.tempdir,
help=('Where to put temporary files associated with '
'config drive creation')),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help='Name and optionally path of the tool used for '
'ISO image creation')
]
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
# TODO(mikal): I don't think I can use utils.tempdir here, because
# I need to have the directory last longer than the scope of this
# method call
self.tempdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_gen_')
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, path, data):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'w') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, value) in instance_md.metadata_for_config_drive():
self._add_file(path, value)
LOG.debug(_('Added %(filepath)s to config drive'),
{'filepath': path})
def _make_iso9660(self, path):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
self.tempdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
mounted = False
try:
mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
_out, err = utils.trycmd('mount', '-o',
'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path, mountdir,
run_as_root=True)
            if err:
                raise exception.ConfigDriveMountFailed(operation='mount',
                                                       error=err)
            mounted = True
            # NOTE(mikal): I can't just use shutil.copytree here, because the
# destination directory already exists. This is annoying.
for ent in os.listdir(self.tempdir):
shutil.copytree(os.path.join(self.tempdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
shutil.rmtree(mountdir)
def make_drive(self, path):
"""Make the config drive.
:param path: the path to place the config drive image at
:raises ProcessExecuteError if a helper process has failed.
"""
if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path)
elif CONF.config_drive_format == 'vfat':
self._make_vfat(path)
else:
raise exception.ConfigDriveUnknownFormat(
format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
fileutils.delete_if_exists(self.imagefile)
try:
shutil.rmtree(self.tempdir)
except OSError as e:
LOG.error(_('Could not remove tmpdir: %s'), str(e))
def required_by(instance):
return instance.get('config_drive') or CONF.force_config_drive
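# Typical call pattern (illustrative sketch; the path is hypothetical):
#
#   with ConfigDriveBuilder(instance_md=inst_md) as cdb:
#       cdb.make_drive('/var/lib/nova/instances/<uuid>/disk.config')
#
# The context manager only runs cleanup() when no exception escapes the block,
# so the temporary directory is left behind for inspection on failure.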
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/conn_string_info.py
|
Python
|
mit
| 1,434 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnStringInfo(Model):
"""Database connection string information.
:param name: Name of connection string.
:type name: str
:param connection_string: Connection string value.
:type connection_string: str
:param type: Type of database. Possible values include: 'MySql',
'SQLServer', 'SQLAzure', 'Custom', 'NotificationHub', 'ServiceBus',
'EventHub', 'ApiHub', 'DocDb', 'RedisCache'
:type type: str or :class:`ConnectionStringType
<azure.mgmt.web.models.ConnectionStringType>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'type': {'key': 'type', 'type': 'ConnectionStringType'},
}
def __init__(self, name=None, connection_string=None, type=None):
self.name = name
self.connection_string = connection_string
self.type = type
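# Minimal construction example (illustrative only; the values are made up):
#
#   info = ConnStringInfo(name='db', connection_string='Server=...;', type='SQLAzure')
#   info.type  # -> 'SQLAzure'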
|
aerler/HGS-Tools
|
Python/geospatial/xarray_tools.py
|
Python
|
gpl-3.0
| 40,502 | 0.015234 |
'''
Created on Feb. 23, 2019
Utility functions to extract data from xarray Dataset or DataArray classes.
@author: Andre R. Erler, GPL v3
'''
from warnings import warn
from datetime import datetime
import os
import numpy as np
import xarray as xr
import netCDF4 as nc
from dask.diagnostics import ProgressBar
# internal imports
from geospatial.netcdf_tools import getNCAtts, geospatial_netcdf_version, zlib_default # this import should be fine
## an important option I am relying on!
xr.set_options(keep_attrs=True)
# names of valid geographic/projected coordinates
default_x_coords = dict(geo=('lon','long','longitude',), proj=('x','easting','west_east') )
default_y_coords = dict(geo=('lat','latitude',), proj=('y','northing','south_north'))
default_lon_coords = default_x_coords['geo']; default_lat_coords = default_y_coords['geo']
## helper functions
def getAtts(xvar, lraise=True):
    ''' return dictionary of attributes from netCDF4 or xarray '''
if isinstance(xvar,(xr.DataArray,xr.Variable,xr.Dataset)):
atts = xvar.attrs.copy()
elif isinstance(xvar,(nc.Variable,nc.Dataset)):
atts = getNCAtts(xvar)
elif lraise:
raise TypeError(xvar)
return atts
## functions to interface with rasterio
def getGeoDims(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify geographic/projected dimensions by name '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
xlon,ylat = None,None # return None, if nothing is found
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dimensions:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dimensions:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif lraise: # optionally check input
raise TypeError("Can only infer coordinates from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
pass # return None,None
return xlon,ylat
def getGeoCoords(xvar, x_coords=None, y_coords=None, lraise=True, lvars=True):
''' helper function to extract geographic/projected coordinates from xarray'''
# find dim names
xlon_dim,ylat_dim = getGeoDims(xvar, x_coords=x_coords, y_coords=y_coords, lraise=lraise)
# find coordinates
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
if xlon_dim in xvar.coords:
xlon = xvar.coords[xlon_dim] if lvars else xlon_dim
        else: xlon = None
if ylat_dim in xvar.coords:
ylat = xvar.coords[ylat_dim] if lvars else ylat_dim
else: ylat = None
elif isinstance(xvar,nc.Variable) and lraise:
raise TypeError("Cannot infer coordinates from netCDF4 Variable - only Dataset!")
elif isinstance(xvar,nc.Dataset):
if xlon_dim in xvar.variables:
xlon = xvar.variables[xlon_dim] if lvars else xlon_dim
else: xlon = None
        if ylat_dim in xvar.variables:
ylat = xvar.variables[ylat_dim] if lvars else ylat_dim
else: ylat = None
# optionally raise error if no coordinates are found, otherwise just return None
if lraise and (xlon is None or ylat is None):
raise ValueError("No valid pair of geographic coodinates found:\n {}".format(xvar.dims))
# return a valid pair of geographic or projected coordinate axis
return xlon,ylat
def isGeoVar(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify variables that have geospatial coordinates (geographic or
projected), based on xarray or netCDF4 dimension names '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
xlon,ylat = False,False
for name in dims:
if name.lower() in x_coords[coord_type]:
xlon = True; break
for name in dims:
if name.lower() in y_coords[coord_type]:
ylat = True; break
if xlon and ylat: break
# if it has a valid pair of geographic or projected coordinate axis
return ( xlon and ylat )
def isGeoCRS(xvar, lat_coords=None, lon_coords=None, lraise=True):
''' helper function to determine if we have a simple geographic lat/lon CRS (based on xarray dimension names) '''
lat,lon = False,False
if lon_coords is None: lon_coords = default_x_coords['geo']
if lat_coords is None: lat_coords = default_y_coords['geo']
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4- not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# check dimension names
for name in dims:
if name.lower() in lon_coords:
lon = True; break
for name in dims:
if name.lower() in lat_coords:
lat = True; break
# it is a geographic coordinate system if both, lat & lon are present
return ( lat and lon )
def getTransform(xvar=None, x=None, y=None, lcheck=True):
''' generate an affine transformation from xarray coordinate axes '''
from rasterio.transform import Affine # to generate Affine transform
if isinstance(xvar,(xr.DataArray,xr.Dataset,nc.Dataset)):
x,y = getGeoCoords(xvar, lraise=True)
elif xvar is None and isinstance(x,(xr.DataArray,nc.Variable)) and isinstance(y,(xr.DataArray,nc.Variable)):
pass # x and y axes are supplied directly
elif xvar:
raise TypeError('Can only infer GeoTransform from xarray Dataset or DataArray or netCDF4 Dataset\n - not from {}.'.format(xvar))
# check X-axis
if isinstance(x,xr.DataArray): x = x.data
elif isinstance(x,nc.Variable): x = x[:]
if not isinstance(x,np.ndarray):
raise TypeError(x)
diff_x = np.diff(x); dx = diff_x.min()
if lcheck and not np.isclose(dx, diff_x.max(), rtol=1.e-2):
raise ValueError("X-axis is not regular: {} - {}".format(dx, diff_x.max()))
# check Y-axis
if isinstance(y,xr.DataArray): y = y.data
elif isinstance(y,nc.Variable): y = y[:]
if not isinstance(y,np.ndarray):
raise TypeError(y)
diff_y = np.diff(y); dy = diff_y.min()
if lcheck and not np.isclose(dy, diff_y.max(), rtol=1.e-2):
raise ValueError("Y-axis is not regular. {} - {}".format(dy, diff_y.max()))
# generate transform
return Affine.from_gdal(x[0]-dx/2.,dx,0.,y[0]-dy/2.,0.,dy), (len(x),len(y))
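# Illustrative call (sketch; the 1 km grid below is hypothetical):
#   x = xr.DataArray(np.arange(0., 5000., 1000.))
#   y = xr.DataArray(np.arange(0., 3000., 1000.))
#   transform, size = getTransform(x=x, y=y)  # Affine with dx = dy = 1000, size (5, 3)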
def readCFCRS(xds, grid_mapping=None, lraise=T
|
chrismattmann/drat
|
distribution/src/main/resources/bin/list-ghe-repos.py
|
Python
|
apache-2.0
| 1,171 | 0 |
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
bartTC/django-markup
|
django_markup/filter/markdown_filter.py
|
Python
|
bsd-3-clause
| 1,360 | 0 |
from django_markup.filter import MarkupFilter
class MarkdownMarkupFilter(MarkupFilter):
"""
Applies Markdown conversion to a string, and returns the HTML.
"""
title = 'Markdown'
kwargs = {'safe_mode': True}
def render(self, text, **kwargs):
if kwargs:
            self.kwargs.update(kwargs)
        from markdown import markdown
        text = markdown(text, **self.kwargs)
# Markdowns safe_mode is deprecated. We replace it with Bleach
# to keep it backwards compatible.
# https://python-markdown.github.io/change_log/release-2.6/#safe_mode-deprecated
if self.kwargs.get('safe_mode') is True:
from bleach import clean
# fmt: off
markdown_tags = [
"h1", "h2", "h3", "h4", "h5", "h6",
"b", "i", "strong", "em", "tt",
"p", "br",
"span", "div", "blockquote", "pre", "code", "hr",
"ul", "ol", "li", "dd", "dt",
"img",
"a",
"sub", "sup",
]
markdown_attrs = {
"*": ["id"],
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
# fmt: on
text = clean(text, markdown_tags, markdown_attrs)
return text
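# Hedged usage sketch (exact HTML depends on the installed markdown/bleach
# versions):
#
#   MarkdownMarkupFilter().render("**bold** <script>x</script>")
#   # -> roughly '<p><strong>bold</strong> &lt;script&gt;x&lt;/script&gt;</p>'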
|
JoseBlanca/franklin
|
franklin/backbone/mapping.py
|
Python
|
agpl-3.0
| 16,787 | 0.005957 |
'''
This module is part of ngs_backbone. This module provides mapping-related
analyses.
Created on 15/03/2010
@author: peio
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
import os, shutil
from gzip import GzipFile
from tempfile import NamedTemporaryFile
from franklin.backbone.analysis import (Analyzer, scrape_info_from_fname,
_LastAnalysisAnalyzer)
from franklin.mapping import map_reads
from franklin.utils.cmd_utils import call
from franklin.utils.misc_utils import (NamedTemporaryDir, VersionedPath,
rel_symlink)
from franklin.backbone.specifications import (BACKBONE_BASENAMES,
PLOT_FILE_FORMAT,
BACKBONE_DIRECTORIES)
from franklin.sam import (bam2sam, add_header_and_tags_to_sam, merge_sam,
sam2bam, sort_bam_sam, standardize_sam, realign_bam,
bam_distribs, create_bam_index, bam_general_stats)
class SetAssemblyAsReferenceAnalyzer(Analyzer):
'It sets the reference assembly as mapping reference'
def run(self):
'''It runs the analysis.'''
contigs_path = self._get_input_fpaths()['contigs']
contigs_ext = contigs_path.extension
reference_dir = self._create_output_dirs()['result']
reference_fpath = os.path.join(reference_dir,
BACKBONE_BASENAMES['mapping_reference'] + '.' + \
contigs_ext)
if os.path.exists(reference_fpath):
os.remove(reference_fpath)
        rel_symlink(contigs_path.last_version, reference_fpath)
def _get_basename(fpath):
'It returns the base name without path and extension'
return os.path.splitext(os.path.basename(fpath))[0]
class MappingAnalyzer(Analyzer):
'It performs the mapping of the sequences to the reference'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
project_settings = self._project_settings
settings = project_settings['Mappers']
tmp_dir = project_settings['General_settings']['tmpdir']
project_path = project_settings['General_settings']['project_path']
unmapped_fhand = None
if 'keep_unmapped_reads_in_bam' in settings:
if settings['keep_unmapped_reads_in_bam'] == False:
unmapped_fpath = os.path.join(project_path,
BACKBONE_DIRECTORIES['mappings'][0],
BACKBONE_BASENAMES['unmapped_list'])
unmapped_fhand = GzipFile(unmapped_fpath, 'w')
inputs = self._get_input_fpaths()
reads_fpaths = inputs['reads']
output_dir = self._create_output_dirs(timestamped=True)['result']
# define color and sequence references
reference_path = inputs['reference']
mapping_index_dir = inputs['mapping_index']
#print reference_path, mapping_index_dir
#memory for the java programs
java_mem = self._project_settings['Other_settings']['java_memory']
picard_path = self._project_settings['Other_settings']['picard_path']
for read_fpath in reads_fpaths:
mapping_parameters = {}
read_info = scrape_info_from_fname(read_fpath)
platform = read_info['pl']
#which maper are we using for this platform
mapper = settings['mapper_for_%s' % platform]
(reference_fpath,
color_space) = self._prepare_mapper_index(mapping_index_dir,
reference_path,
platform, mapper)
mapping_parameters['unmapped_fhand'] = unmapped_fhand
mapping_parameters['colorspace'] = color_space
out_bam_fpath = os.path.join(output_dir,
read_fpath.basename + '.bam')
if platform in ('454', 'sanger'):
mapping_parameters['reads_length'] = 'long'
else:
mapping_parameters['reads_length'] = 'short'
if not os.path.exists(out_bam_fpath):
mapping_parameters['threads'] = self.threads
mapping_parameters['java_conf'] = {'java_memory':java_mem,
'picard_path':picard_path}
mapping_parameters['tmp_dir'] = tmp_dir
map_reads(mapper,
reads_fpath=read_fpath.last_version,
reference_fpath=reference_fpath,
out_bam_fpath=out_bam_fpath,
parameters=mapping_parameters)
# Now we run the select _last mapping
self._spawn_analysis(DEFINITIONS['_select_last_mapping'],
silent=self._silent)
self._log({'analysis_finished':True})
def _prepare_mapper_index(self, mapping_index_dir, reference_path, platform,
mapper):
'It creates reference_fpath depending on the mapper and the platform'
kind = 'color' if platform == 'solid' else 'sequence'
color_space = True if kind == 'color' else False
mapping_index_dir = mapping_index_dir[0].original_path
index_dir = mapping_index_dir % (mapper, kind)
if not os.path.exists(index_dir):
os.mkdir(index_dir)
reference_fpath = reference_path.last_version
reference_fname = os.path.basename(reference_fpath)
index_fpath = os.path.join(index_dir, reference_fname)
if not os.path.exists(index_fpath):
rel_symlink(reference_fpath, index_fpath)
return index_fpath, color_space
class MergeBamAnalyzer(Analyzer):
'It performs the merge of various bams into only one'
def run(self):
'''It runs the analysis.'''
self._log({'analysis_started':True})
settings = self._project_settings
project_path = settings['General_settings']['project_path']
tmp_dir = settings['General_settings']['tmpdir']
inputs = self._get_input_fpaths()
bam_paths = inputs['bams']
reference_path = inputs['reference']
output_dir = self._create_output_dirs()['result']
merged_bam_path = VersionedPath(os.path.join(output_dir,
BACKBONE_BASENAMES['merged_bam']))
merged_bam_fpath = merged_bam_path.next_version
#Do we have to add the default qualities to the sam file?
#do we have characters different from ACTGN?
add_qualities = settings['Sam_processing']['add_default_qualities']
#memory for the java programs
java_mem = settings['Other_settings']['java_memory']
picard_path = settings['Other_settings']['picard_path']
if add_qualities:
default_sanger_quality = settings['Other_settings']['default_sanger_quality']
default_sanger_quality = int(default_sanger_quality)
else:
default_sanger_quality = None
temp_dir = NamedTemporaryDir()
for bam_path in bam_paths:
bam_basename = bam_path.basename
temp_sam = NamedTemporaryFile(prefix='%s.' % bam_basename,
suffix='.sam')
sam_fpath = os.path.join(temp_dir
|
mrexmelle/liblinesdk-py
|
setup.py
|
Python
|
mit
| 1,211 | 0.000826 |
import os
from setuptools import setup
from setuptools import find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='liblinesdk',
version='0.1.0',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='LINE API Python SDK.',
long_description=README,
url='https://www.example.com/',
author='LINE Corporation',
author_email='matthew.r.tanudjaja@linecorp.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
nuclear-wizard/moose
|
python/MooseDocs/extensions/floats.py
|
Python
|
lgpl-2.1
| 8,900 | 0.004494 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import uuid
import collections
import moosetree
import MooseDocs
from ..common import exceptions
from ..base import components, MarkdownReader, LatexRenderer, Extension
from ..tree import tokens, html, latex
from . import core
def make_extension(**kwargs):
    return FloatExtension(**kwargs)
Float = tokens.newToken('Float', img=False, bottom=False, command='figure')
FloatCaption = tokens.newToken('FloatCaption', key='', prefix='', number='?')
ModalLink = tokens.newToken('ModalLink', bookmark=True, bottom=False, close=True)
ModalLinkTitle = tokens.newToken('ModalLinkTitle')
ModalLinkContent = tokens.newToken('ModalLinkContent')
def create_float(parent, extension, reader, page, settings, bottom=False, img=False,
token_type=Float, **kwargs):
"""
Helper for optionally creating a float based on the existence of caption and/or id.
Inputs:
parent: The parent token that float should be placed
extension: The extension object (to extract 'prefix' from config items)
reader: The Reader object for tokenization of the heading
page: The Page object for passing to the tokenization routine
settings: The command settings to extract a local 'prefix'
bottom[True|False]: Set flag on the float for placing the caption at the bottom
img[True|False]: Set to True if the contents are an image (Materialize only)
token_type: The type of Token object to create; it should derive from Float
"""
cap, _ = _add_caption(None, extension, reader, page, settings)
if cap:
flt = token_type(parent, img=img, bottom=bottom, **kwargs)
cap.parent = flt
return flt
return parent
def caption_settings():
"""Return settings necessary for captions."""
settings = dict()
settings['caption'] = (None, "The caption text for the float object.")
settings['prefix'] = (None, "The numbered caption label to include prior to the caption text.")
return settings
def _add_caption(parent, extension, reader, page, settings):
"""Helper for adding captions to float tokens."""
cap = settings['caption']
key = settings['id']
prefix = settings.get('prefix')
if prefix is None:
prefix = extension.get('prefix', None)
if prefix is None:
msg = "The 'prefix' must be supplied via the settings or the extension configuration."
raise exceptions.MooseDocsException(msg)
caption = None
if key:
caption = FloatCaption(parent, key=key, prefix=prefix)
if cap:
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
elif cap:
caption = FloatCaption(parent)
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
return caption, prefix
def create_modal(parent, title=None, content=None, **kwargs):
"""
Create the necessary Modal tokens for creating modal windows with materialize.
"""
modal = ModalLink(parent.root, **kwargs)
if isinstance(title, str):
ModalLinkTitle(modal, string=title)
elif isinstance(title, tokens.Token):
title.parent = ModalLinkTitle(modal)
if isinstance(content, str):
ModalLinkContent(modal, string=content)
elif isinstance(content, tokens.Token):
content.parent = ModalLinkContent(modal)
return parent
def create_modal_link(parent, title=None, content=None, string=None, **kwargs):
"""
Create the necessary tokens to create a link to a modal window with materialize.
"""
kwargs.setdefault('bookmark', str(uuid.uuid4()))
link = core.Link(parent,
url='#{}'.format(kwargs['bookmark']),
class_='modal-trigger',
string=string)
create_modal(parent, title, content, **kwargs)
return link
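# Illustrative call site (hypothetical, not taken from this module): another
# extension could wrap inline text in a modal trigger with something like
#   create_modal_link(parent, title='Source listing', content=full_text,
#                     string='view full listing')
# which creates the core.Link trigger and the ModalLink tokens in one step.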
class FloatExtension(Extension):
"""
Provides ability to add caption float elements (e.g., figures, table, etc.). This is only a
base extension. It does not provide tables for example, just the tools to make floats
in a uniform manner.
"""
def extend(self, reader, renderer):
renderer.add('Float', RenderFloat())
renderer.add('FloatCaption', RenderFloatCaption())
renderer.add('ModalLink', RenderModalLink())
renderer.add('ModalLinkTitle', RenderModalLinkTitle())
renderer.add('ModalLinkContent', RenderModalLinkContent())
if isinstance(renderer, LatexRenderer):
renderer.addPackage('caption', labelsep='period')
def postTokenize(self, page, ast):
"""Set float number for each counter."""
counts = page.get('counts', collections.defaultdict(int))
for node in moosetree.iterate(ast, lambda n: n.name == 'FloatCaption'):
prefix = node.get('prefix', None)
if prefix is not None:
counts[prefix] += 1
node['number'] = counts[prefix]
key = node.get('key')
if key:
shortcut = core.Shortcut(ast.root, key=key, link='#{}'.format(key))
# TODO: This is a bit of a hack to get Figure~\ref{} etc. working in general
if isinstance(self.translator.renderer, LatexRenderer):
shortcut['prefix'] = prefix.title()
else:
tokens.String(shortcut, content='{} {}'.format(prefix.title(), node['number']))
page['counts'] = counts
class RenderFloat(components.RenderComponent):
def createHTML(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('moose-float-div')
if token['bottom']:
cap = token(0)
cap.parent = None # Guarantees that "cap" is removed from the current tree
cap.parent = token
return div
def createMaterialize(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('card moose-float')
content = html.Tag(div, 'div')
if token['img']:
content.addClass('card-image')
else:
content.addClass('card-content')
if token['bottom']:
cap = token(0)
cap.parent = None
cap.parent = token
return content
def createLatex(self, parent, token, page):
env = latex.Environment(parent, token['command'])
style = latex.parse_style(token)
width = style.get('width', None)
if width and token(0).name == 'Image':
token(0).set('style', 'width:{};'.format(width))
if style.get('text-align', None) == 'center':
latex.Command(env, 'centering')
return env
class RenderFloatCaption(components.RenderComponent):
def createHTML(self, parent, token, page):
caption = html.Tag(parent, 'p', class_="moose-caption")
prefix = token.get('prefix', None)
if prefix:
heading = html.Tag(caption, 'span', class_="moose-caption-heading")
html.String(heading, content="{} {}: ".format(prefix, token['number']))
return html.Tag(caption, 'span', class_="moose-caption-text")
def createLatex(self, parent, token, page):
caption = latex.Command(parent, 'caption')
if token['key']:
latex.Command(caption, 'label', string=token['key'], escape=True)
return caption
class RenderModalLink(core.RenderLink):
def createLatex(self, parent, token, page):
return None
def createHTML(self, parent, token, page):
return None
def createMaterialize(self, parent, token, page):
cls = "modal bottom-sheet" if token['bottom'] else "modal"
modal = html.Tag(parent, 'div', class_=cls, id_=token['bookmark'])
modal.addClass('moose-modal')
modal_content = html.Tag(modal, 'div', class_="modal-content")
if token['close']:
foote
|
openstack/congress
|
congress/db/db_ds_table_data.py
|
Python
|
apache-2.0
| 3,233 | 0 |
# Copyright (c) 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import sqlalchemy as sa
from sqlalchemy.orm import exc as db_exc
from congress.db import api as db
from congress.db import model_base
from congress.db import utils as db_utils
class DSTableData(model_base.BASE):
__tablename__ = 'dstabledata'
ds_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
tablename = sa.Column(sa.String(255), nullable=False, primary_key=True)
# choose long length compatible with MySQL, SQLite, Postgres
tabledata = sa.Column(sa.Text(), nullable=False)
@db_utils.retry_on_db_error
def store_ds_table_data(ds_id, tablename, tabledata, session=None):
session = session or db.get_session()
tabledata = _json_encode_table_data(tabledata)
with session.begin(subtransactions=True):
new_row = session.merge(DSTableData(
ds_id=ds_id,
tablename=tablename,
tabledata=tabledata))
return new_row
@db_utils.retry_on_db_error
def delete_ds_table_data(ds_id, tablename=None, session=None):
session = session or db.get_session()
if tablename is None:
return session.query(DSTableData).filter(
DSTableData.ds_id == ds_id).delete()
else:
return session.query(DSTableData).filter(
DSTableData.ds_id == ds_id,
DSTableData.tablename == tablename).delete()
@db_utils.retry_on_db_error
def get_ds_table_data(ds_id, tablename=None, session=None):
session = session or db.get_session()
try:
if tablename is None:
rows = session.query(DSTableData).filter(
DSTableData.ds_id == ds_id)
return_list = []
for row in rows:
return_list.append(
{'tablename': row.tablename,
'tabledata': _json_decode_table_data(row.tabledata)})
return return_list
else:
return _json_decode_table_data(session.query(DSTableData).filter(
DSTableData.ds_id == ds_id,
DSTableData.tablename == tablename).one().tabledata)
except db_exc.NoResultFound:
pass
def _json_encode_table_data(tabledata):
tabledata = list(tabledata)
for i in range(0, len(tabledata)):
tabledata[i] = list(tabledata[i])
return json.dumps(tabledata)
def _json_decode_table_data(json_tabledata):
tabledata = json.loads(json_tabledata)
for i in range(0, len(tabledata)):
tabledata[i] = tuple(tabledata[i])
return set(tabledata)
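# Minimal round-trip sketch (illustrative helper, not part of the original
# module): rows are persisted as a JSON list of lists and decoded back into
# the set of tuples the datasource layer works with.
def _example_roundtrip():
    rows = {(1, 'a'), (2, 'b')}
    encoded = _json_encode_table_data(rows)  # e.g. '[[1, "a"], [2, "b"]]'
    assert _json_decode_table_data(encoded) == rows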
|
eddie-dunn/swytcher
|
swytcher/swytcher.py
|
Python
|
mit
| 3,436 | 0 |
"""Automatic keyboard layout switcher"""
import functools
import logging
import subprocess
from typing import Iterable
from typing import Set
import xkbgroup
import swytcher.settings as settings
import swytcher.xwindow as xwindow
from swytcher.util import suppress_err
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Move this to swytcher.system
@suppress_err(FileNotFoundError, log)
def notify(title: str, msg: str = '') -> None: # pragma: no cover
"""Use notify-send (if available) to inform user of layout switch."""
if not settings.NOTIFY:
return
cmd = [
'notify-send',
'--urgency=low',
'--expire-time=2000',
title,
msg
]
subprocess.call(cmd)
def change_layout(xkb: xkbgroup.XKeyboard, layout: str) -> bool:
"""Set layout; returns True if layout was changed, False otherwise"""
if xkb.group_name == layout: # check against current layout
log.debug("%r is already the active layout", layout)
return False # don't change layout if it's already correct
log.info("setting layout %r", layout)
xkb.group_name = layout
notify("Changed layout", layout)
return True
def _match_substrings(name_list: Iterable[str],
substrings: Iterable[str]) -> set:
"""Substring filter match"""
found_matches = set()
for name in name_list:
for substring in substrings:
if substring in name:
log.debug("Substring filter match: %r in %r", substring, name)
found_matches.update([name])
return found_matches
def matches(name_list: Iterable[str], strings: Iterable[str],
substrings: Iterable[str]) -> Set[str]:
"""Returns True if any of the strings in the two filters `strings` and
`substrings` occur in `name_list`."""
matched = (set(strings) & set(name_list) or
_match_substrings(name_list, substrings or {}))
if matched:
log.debug('%r matched %r from %r or %r',
name_list, matched, strings, substrings)
return matched
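# Minimal sketch of how the two filters combine (hypothetical window names,
# not part of the original module): exact names win, and the substring filter
# is only consulted when no exact name matched.
def _matches_example():
    assert matches(['emacs', 'xterm'], strings={'emacs'}, substrings={'term'}) == {'emacs'}
    assert matches(['xterm'], strings={'emacs'}, substrings={'term'}) == {'xterm'}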
def change_callback(name_list, xkb, layouts: list) -> None:  # pragma: no cover
"""Event handler when active window is changed"""
# NOTE: These extracted variables should be removed later
primary_filter = layouts[0]['strings']
primary_substrings = layouts[0]['substrings']
primary = layouts[0]['name']
secondary_filter = layouts[1]['strings']
secondary_substrings = layouts[1]['substrings']
secondary = layouts[1]['name']
# matched_layout = match_layout(name_list, layouts)
# if matched_layout:
# change_layout(xkb, matched_layout)
# else:
# change_layout(xkb, last_remembered_layout_for_window)
if matches(name_list, secondary_filter, secondary_substrings):
change_layout(xkb, secondary)
elif matches(name_list, primary_filter, primary_substrings):
change_layout(xkb, primary)
else:
log.debug("%r: No match, using default layout", name_list)
change_layout(xkb, xkb.groups_names[0])
def main(args=None): # pragma: no cover
"""Main"""
if not args:
pass
xkb = xkbgroup.XKeyboard()
layouts = settings.setup_layouts(xkb, settings.CONFIG_INI)
log.info("Layouts configured by setxkbmap: %s", layouts)
partial_cb = functools.partial(change_callback, xkb=xkb, layouts=layouts)
xwindow.run(partial_cb)
|
d0ugal/readthedocs.org
|
readthedocs/projects/migrations/0028_add_version_default_privacy.py
|
Python
|
mit
| 9,508 | 0.007678 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.version_privacy_level'
db.add_column('projects_project', 'version_privacy_level',
self.gf('django.db.models.fields.CharField')(default='public', max_length=20),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.version_privacy_level'
db.delete_column('projects_project', 'version_privacy_level')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 17, 885486)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 23, 55, 17, 885212)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.mode
|
berquist/PyQuante
|
PyQuante/OEP.py
|
Python
|
bsd-3-clause
| 25,427 | 0.019664 |
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
    def iterate(self,**kwargs):
self.iter = 0
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nclosed):
for a in xrange(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
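        # What the nested loops above accumulate (a reading of the code, not an
        # authoritative derivation): for each potential-basis function g,
        #   bp[g] = sum over occupied i and virtual a of
        #           Fmo[i, a] * Gmo[i, a] / (orbe[i] - orbe[a])
        # i.e. occupied-virtual Fock elements weighted by the MO-transformed
        # three-center integrals and the orbital-energy gap.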
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**kwargs):
self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nalpha):
for a in xrange(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nbeta):
for a in xrange(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**kwargs):
return oep_hf(atoms,orbs,**
|
missionpinball/mpf-monitor
|
mpfmonitor/core/devices.py
|
Python
|
mit
| 13,716 | 0.000948 |
import logging
import time
import os
# will change these to specific imports once code is more final
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
BRUSH_WHITE = QBrush(QColor(255, 255, 255), Qt.SolidPattern)
BRUSH_GREEN = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
BRUSH_BLACK = QBrush(QColor(0, 0, 0), Qt.SolidPattern)
BRUSH_DARK_PURPLE = QBrush(QColor(128, 0, 255), Qt.SolidPattern)
class DeviceNode:
__slots__ = ["_callback", "_name", "_data", "_type", "_brush", "q_name", "q_state", "sub_properties",
"sub_properties_appended", "q_time_added", "log"]
def __init__(self):
self._callback = None
self._name = ""
self._data = {}
self._type = ""
self._brush = BRUSH_BLACK
self.q_name = QStandardItem()
self.q_state = QStandardItem()
self.sub_properties = {}
self.sub_properties_appended = False
self.q_time_added = QStandardItem()
self.q_time_added.setData(time.perf_counter(), Qt.DisplayRole)
self.q_name.setDragEnabled(True)
self.q_state.setData("", Qt.DisplayRole)
self.log = logging.getLogger('Device')
def setName(self, name):
self._name = name
self.q_name.setData(str(self._name), Qt.DisplayRole)
self.log = logging.getLogger('Device {}'.format(self._name))
self.q_state.emitDataChanged()
def setData(self, data):
"""Set data of device."""
if data == self._data:
# do nothing if data did not change
return
if not isinstance(data, dict):
data = {}
if self._callback:
self._callback()
self._data = data
state_str = str(list(self._data.values())[0])
if len(self._data) > 1:
state_str = state_str + " {…}"
self.q_state.setData(state_str, Qt.DisplayRole)
for row in self._data:
if not self.sub_properties_appended:
q_property = QStandardItem()
q_value = QStandardItem()
self.sub_properties.update({row: [q_property, q_value]})
self.q_name.appendRow(self.sub_properties.get(row))
self.sub_properties.get(row)[0].setData(str(row), Qt.DisplayRole)
self.sub_properties.get(row)[1].setData(str(self._data.get(row)), Qt.DisplayRole)
self.sub_properties_appended = True
self.q_state.emitDataChanged()
self._brush = self._calculate_colored_brush()
def setType(self, type):
self._type = type
self._brush = self._calculate_colored_brush()
self.q_state.emitDataChanged()
def get_row(self):
return [self.q_name, self.q_state, self.q_time_added]
def data(self):
return self._data
def type(self):
return self._type
def get_colored_brush(self) -> QBrush:
"""Return colored brush for device."""
return self._brush
def _calculate_color_gamma_correction(self, color):
"""Perform gamma correction.
Feel free to fiddle with these constants until it feels right
With gamma = 0.5 and constant a = 18, the top 54 values are lost,
but the bottom 25% feels much more normal.
"""
gamma = 0.5
a = 18
corrected = []
for value in color:
            if value < 0 or value > 255:
self.log.warning("Got value %s for brightness which outside the expected range", value)
value = 0
value = int(pow(value, gamma) * a)
if value > 255:
value = 255
corrected.append(value)
return corrected
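    # Worked example of the mapping above (illustrative numbers only): with
    # gamma = 0.5 and a = 18, a channel value of 64 becomes
    # int(64 ** 0.5 * 18) = 144, while 255 becomes int(255 ** 0.5 * 18) = 287
    # and is clamped back to 255, so only the very top of the range is lost.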
def _calculate_colored_brush(self):
        if self._type == 'light':
color = self.data()['color']
if color == [0, 0, 0]:
# shortcut for black
return BRUSH_BLACK
color = self._calculate_color_gamma_correction(color)
elif self._type == 'switch':
state = self.data()['state']
if state:
return BRUSH_GREEN
else:
return BRUSH_BLACK
elif self._type == 'diverter':
state = self.data()['active']
if state:
return BRUSH_DARK_PURPLE
else:
return BRUSH_BLACK
else:
# Get first parameter and draw as white if it evaluates True
state = bool(list(self.data().values())[0])
if state:
return BRUSH_WHITE
else:
return BRUSH_BLACK
return QBrush(QColor(*color), Qt.SolidPattern)
def set_change_callback(self, callback):
if self._callback:
# raise AssertionError("Can only have one callback")
old_callback = self._callback
self._callback = callback
return old_callback
else:
self._callback = callback
self.q_state.emitDataChanged()
class DeviceDelegate(QStyledItemDelegate):
def __init__(self):
self.size = None
super().__init__()
def paint(self, painter, view, index):
super().paint(painter, view, index)
color = None
state = None
balls = None
found = False
text = ''
# src_index = index.model().mapToSource(index)
# src_index_model = src_index.model()
# print(index.data())
# print(src_index_model.data())
data = []
try:
data = index.model().itemFromIndex(index).data()
# src_index = index.model().mapToSource(index)
# data = index.model().data(src_index)
except:
pass
num_circles = 1
# return
if index.column() == 0:
return
try:
if 'color' in data:
color = data['color']
found = True
except TypeError:
return
try:
if 'brightness' in data:
color = [data['brightness']]*3
found = True
except TypeError:
return
try:
if 'state' in data:
text = str(data['state'])
found = True
except TypeError:
return
try:
if 'complete' in data:
state = not data['complete']
found = True
except TypeError:
return
try:
if 'enabled' in data:
state = data['enabled']
found = True
except TypeError:
return
try:
if 'balls' in data:
balls = data['balls']
found = True
except TypeError:
return
try:
if 'balls_locked' in data:
balls = data['balls_locked']
found = True
except TypeError:
return
try:
if 'num_balls_requested' in data:
text += 'Requested: {} '.format(
data['num_balls_requested'])
found = True
except TypeError:
return
try:
if 'unexpected_balls' in data:
text += 'Unexpected: {} '.format(
data['unexpected_balls'])
found = True
except TypeError:
return
if not found:
return
text += " " + str(data)
painter.save()
painter.setRenderHint(QPainter.Antialiasing, True)
painter.setPen(QPen(QColor(100, 100, 100), 1, Qt.SolidLine))
if color:
painter.setBrush(QBrush(QColor(*color), Qt.SolidPattern))
elif state is True:
painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
elif state is False:
painter.setBrush(QBrush(QColor(255, 255, 255), Qt.SolidPattern))
elif isinstance(balls, int):
painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
num_circles = balls
x_offset = 0
for
|
gabrik/ingunict-bot
|
ingbot.py
|
Python
|
apache-2.0
| 9,550 | 0.030389 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ING UNI CT Telegram Bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
from utils import utility
from unidecode import unidecode
import json
# need to load configuration from file
ROOMS_FILE='utils/rooms.json'
COURSES_FILE='utils/courses.json'
PROFESSORS_FILE='utils/professors.json'
CLASSROOMS_FILE='utils/classrooms.json'
EXAMS_FILE='utils/exams.json'
## Other files
TOKEN_FILE='token.conf'
LOG_FILE='ingbot.log'
##global variables
rooms={}
courses={}
professors={}
classrooms={}
exams=[]
# loading token from file
tokenconf = open(TOKEN_FILE, 'r').read()
tokenconf = tokenconf.replace("\n", "")
TOKEN = tokenconf
# Enable logging
logging.basicConfig(filename=LOG_FILE,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
#define command handlers
def start_handler(bot, update):
newmsg = "Ing UniCT Telegram Bot\nLista Comandi:\n\t"\
"/orari <cld> <anno> Orario delle lezioni\n\t"\
"/esami <id cds> Elenco degli esami\n\t"\
"/corso <nome>\n\t/prof <cognome o nome> Informazioni sul professore\n\t"\
"/insegnamento <nome_insegnamento> Informazioni su un insegnamento\n\t"\
"/aula <numero> Indicazioni sull'ubicazione di un'aula\n\t"\
"/segreteria Informazioni sugli orari della segreteria studenti\n\t"\
"/cus Informazioni sul CUS"
newmsg += "\n\n\nATTENZIONE : Tutti i dati sono ricavati dal sito di Ingegneria,"\
" il bot non ha alcuna responsabilita' sulla corretteza di questi dati!!!\n"
developmode = '\n\n\n Il bot è in via di sviluppo se vuoi contribuire vai su:"\
" https://github.com/gabrik/ingunict-bot\nOppure contatta @Gakbri '
bot.sendMessage(update.message.chat_id, text=newmsg+developmode)
def help_handler(bot, update):
	start_handler(bot, update)
def schedule_handler(bot, update):
bot.sendMessage(update.message.chat_id, text='Orari temporaneamente non disponibili')
def professors_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)>=2:
professor_name = unidecode(" ".join(msg[1:]))
if len(professor_name)>3:
search_result = [professor for professor in professors if professor_name.upper() in professor['Nome'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d professori '\
'con la tua ricerca' % len(search_result))
descr=""
for p in search_result:
descr += "Nome: %s\nQualifica: %s\nDipartimento: %s\n" % (p['Nome'], p['Qualifica'], p['Dipartimento'])
descr+= "Indirizzo: %s\nEmail: %s\nTelefono: %s\n" % (p['Indirizzo'], p['Email'], p['Telefono'])
descr+= "Sito: %s\nSSD: %s\n\n" % (p['Sito'], p['SSD'])
bot.sendMessage(update.message.chat_id,text= descr)
else:
bot.sendMessage(update.message.chat_id, text='Professore non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire il professore su cui ottenere informazioni!\n/prof <nome cognome>")
def classroom_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
insegnamento_name=unidecode(" ".join(msg[1:]))
if len(insegnamento_name)>3:
			search_result=[insegnamento for insegnamento in classrooms if insegnamento_name.upper() in insegnamento['Nome'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d insegnamenti con la tua ricerca' % len(search_result))
descr=""
for m in search_result:
doc=''.join([docente+'\n' for docente in m['Docenti']])
descr += "Nome: %s\nSemestre
|
: %s\nCorso di Laurea: %s\n" % (m['Nome'], m['Semestre'], m['Corso di Laurea'])
descr+= "Anno: %s\nDocenti: %s\nSSD: %s\n" % (m['Anno'], doc, m['SSD'])
descr+= "CFU: %s\n\n" % (m['CFU'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text='Insegnamento non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire l'insegnamento su cui ottenere informazioni!\n/insegnamento <nome>")
def room_handler(bot, update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
key = msg[1].upper().strip()
if key in rooms:
aula = rooms[key]
bot.sendMessage(update.message.chat_id, text='Aula %s , Edificio %s, Piano %s' % (key, aula['Edificio'], aula['Piano']))
else:
bot.sendMessage(update.message.chat_id, text='Aula non trovata')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire l'aula su cui ottenere informazioni!\n/aula <nome>")
def courses_handler(bot,update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
nome_corso = unidecode(msg[1])
if len(nome_corso)>3:
search_result = [corso for corso in courses if nome_corso.upper() in corso['Denominazione'].upper()]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d corsi con la tua ricerca' % len(search_result))
descr=""
for corso in search_result:
descr+="Nome: %s\nID: %s\n" % (corso['Denominazione'], corso['ID'])
descr+="Codice: %s\nOrdinamento: %s\n Tipo: %s\n\n" % (corso['Codice'], corso['Ordinamento'], corso['Tipo'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text='Corso non trovato')
else:
bot.sendMessage(update.message.chat_id, text='Inserisci almeno 4 caratteri per la ricerca')
else:
bot.sendMessage(update.message.chat_id, text="Devi inserire il corso su cui ottenere informazioni!\n/corso <nome>")
def exams_handler(bot,update):
msg = update.message.text
msg = msg.split(' ')
if len(msg)==2:
cds_id = unidecode(msg[1])
search_result=[esame for esame in exams if cds_id==str(esame['CDS_ID'])]
if len(search_result)>0:
bot.sendMessage(update.message.chat_id, text='Sono stati trovati %d esami con la tua ricerca' % len(search_result))
for esame in search_result:
descr="Materia: %s\nData: %s\nOra: %s\n" % (esame['Insegnamento'], esame['Data'], esame['Ora'])
descr+='Aula: %s\n Scaglione: %s\nTipo: %s\nTipo Appello:%s\n\n' % (esame['Aula'], esame['Scaglione'], esame['Tipo Esame'], esame['Appello'])
bot.sendMessage(update.message.chat_id, text=descr)
else:
bot.sendMessage(update.message.chat_id, text="Corso non trovato verifica di aver inserito l'id corretto")
else:
bot.sendMessage(update.message.chat_id, text="Inserisci l'id del corso, lo puoi conoscere usando il comando corsi")
def secretary_handler(bot, update):
newmsg = "Carriera Studenti - Settore tecnico - scientifico\n\nVia S. Sofia, 64 - Edificio 11 C.U. 95135 Catania\n\nTel.:095-738 6104/2051"
newmsg+= "\n\n Orari\n\n"
newmsg+= "Lunedì 10.00 - 12.30\n"
newmsg= "Martedì 10.00 - 12.30 e 15.00 - 16.30\n"
newmsg+= "Mercoledì Chiusura\n"
newmsg+= "Giovedì 10.00 - 12.30 e 15.00 - 16.30\n"
newmsg+= "Venerdì 10.00 - 12.30\n"
newmsg+= "\n\n Telefonare solo nelle fasce orarie di apertura"
newmsg+= "\n\n Mail: settore.tecnicoscientifico@unict.it"
newmsg+= "\n\n Per ulteriori infomazioni : http://www.unict.it/content/coordinamento-settori-carriere-studenti"
bot.sendMessage(update.message.chat_id, text=newmsg)
def cus_handler(bot, update):
newmsg="CUS CATANIA:\n\nViale A. Doria n° 6 - 95125 Catania\n\ntel. 095336327- fax 095336478\n\n"\
"CUS Catania - info@cuscatania.it\n\n"\
"Segreteria studenti:\ntel. 095/336327 (int. 0) - segreteriastudenti@cuscatania.it "
bot.sendMessage(update.message.chat_id, text=newmsg)
def error_handler(bot, update, error):
logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
# loading data from files
logger.info('[LOADING] rooms from "%s"' % ROOMS_FILE)
global rooms
rooms = utility.load_rooms(ROOMS_FILE)
logger.info('[ DONE ] loading rooms')
logger.info('[LOADING] courses from "%s"' % COURSES_FILE)
global courses
courses = utility.load_courses(COURSES_FILE)
logger.info('[ DO
|
mhallin/knitty-gritty
|
setup.py
|
Python
|
bsd-3-clause
| 918 | 0 |
from setuptools import setup, find_packages
with open('README.rst') as f:
description = f.read()
setup(
name='knitty-gritty',
    version='0.0.2',
description='A tool for managing knitting machine patterns',
long_description=description,
url='https://github.com/mhallin/knitty-gritty',
author='Magnus Hallin',
author_email='mhallin@gmail.com',
license='BSD',
packages=find_packages(),
install_requires=[
        'click>=2.4,<2.5',
'Pillow>=2.5,<2.6',
'pyserial>=2.7,<2.8',
],
extras_require={
'dev': [
'flake8>=2.2,<2.3',
'mccabe>=0.2,<0.3',
'pep8>=1.5,<1.6',
'pip-tools>=0.3,<0.4',
'pyflakes>=0.8.1,<0.9',
'wheel>=0.24,<0.25',
],
},
entry_points={
'console_scripts': [
'knitty-gritty = knittygritty.main:cli'
],
},
)
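# With this configuration (a sketch of typical setuptools usage, not taken from
# the project's docs), the development extras install with `pip install -e ".[dev]"`
# and the entry point exposes the CLI as the `knitty-gritty` command.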
|
pombredanne/stockpile
|
stockpile/__about__.py
|
Python
|
bsd-2-clause
| 539 | 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
__all__ = [
"__title__", "__summary__", "__u
|
ri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "stock
|
pile"
__summary__ = "Generic file storage abstraction"
__uri__ = "https://github.com/dstufft/stockpile/"
__version__ = "0.1"
__author__ = "Donald Stufft"
__email__ = "donald.stufft@gmail.com"
__license__ = "Simplified BSD"
__copyright__ = "Copyright 2012 Donald Stufft"
|
zstackio/zstack-woodpecker
|
integrationtest/vm/ha/test_one_node_shutdown_with_scheduler.py
|
Python
|
apache-2.0
| 2,920 | 0.004452 |
'''
Integration Test for scheduler reboot VM in HA mode.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import test_stub
import time
import os
vm = None
node1_ip = None
node2_ip = None
def test():
global vm
global node1_ip
vm = test_stub.create_basic_vm()
vm.check()
start_date = int(time.time())
schd = vm_ops.reboot_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_reboot_vm_scheduler', start_date+60, 30)
node1_ip = os.environ.get('node1Ip')
node2_ip = os.environ.get('node2Ip')
test_util.test_logger("shutdown node: %s" % (node1_ip))
cmd = "init 0"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
test_util.test_logger("wait for 2 minutes to see if http api still works well")
time.sleep(180)
test_stub.exercise_connection(600)
time.sleep(180)
scheduler_execution_count = 0
for i in range(0, 30):
for j in range(0, 6):
if test_lib.lib_find_in_remote_management_server_log(node1_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count += 1
if test_lib.lib_find_in_remote_management_server_log(node2_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count -= 1
if abs(scheduler_execution_count) < 5:
test_util.test_fail('VM reboot scheduler is expected to executed for more than 5 times, while it only execute %s times' % (scheduler_execution_count))
schd_ops.delete_scheduler(schd.uuid)
vm.destroy()
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
test_util.test_pass('Scheduler Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global node1_ip
if vm:
try:
vm.destroy()
except:
pass
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
|
dendyyangky/sgeede_b2b
|
sgeede_internal_transfer/stock_internal_transfer.py
|
Python
|
unlicense
| 4,237 | 0.029974 |
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
class stock_internal_transfer(osv.osv):
_name = 'stock.internal.transfer'
_inherit = ['mail.thread', 'ir.needaction_mixin']
# def create(self, cr, uid, vals, context=None):
# data = super(stock_internal_transfer, self).create(cr, uid, vals, context=context)
# if self.pool.get('res.users').browse(cr,uid,uid).company_id.transit_location_id:
# raise osv.except_osv(_('Error!'), _('Please setup your stock transit location in Setting - Warehouse'))
# return data
def action_cancel(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'cancel'
})
return True
def action_draft(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'draft'
})
return True
def action_send(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'send'
})
return True
def action_receive(self, cr, uid, ids, context):
self.write(cr, uid, ids, {
'state' : 'done'
})
return True
def do_enter_wizard(self, cr, uid, ids, context):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
created_id = self.pool['wizard.stock.internal.transfer'].create(cr, uid, {'transfer_id': len(ids) and ids[0] or False}, context)
return self.pool['wizard.stock.internal.transfer'].wizard_view(cr, uid, created_id, context)
_columns = {
'name' : fields.char('Reference', track_visibility='onchange'),
'date' : fields.datetime('Date', track_visibility='onchange'),
		'source_warehouse_id' : fields.many2one('stock.warehouse', 'Source Warehouse', track_visibility='onchange'),
'dest_warehouse_id' : fields.many2one('stock.warehouse', 'Destination Warehouse', track_visibility='onchange'),
		'state' : fields.selection([('cancel', 'Cancel'), ('draft', 'Draft'), ('send', 'Send'), ('done', 'Done')], 'Status', track_visibility='onchange'),
'line_ids' : fields.one2many('stock.internal.transfer.line', 'transfer_id', 'Stock Internal Transfer Line'),
'picking_ids' : fields.one2many('stock.picking', 'transfer_id', 'Picking'),
'backorder_id' : fields.many2one('stock.internal.transfer', 'Backorder'),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.internal.transfer'),
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state' : lambda *a: 'draft',
}
class stock_internal_transfer_line(osv.osv):
_name = 'stock.internal.transfer.line'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def product_id_change(self, cr, uid, ids, product_id, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom_id': False,
}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uom_id': product_uom_id}
return result
_columns = {
'name' : fields.char('Reference', track_visibility='onchange'),
'product_id' : fields.many2one('product.product', 'Product', track_visibility='onchange'),
'product_qty' : fields.float('Quantity', track_visibility='onchange'),
'product_uom_id' : fields.many2one('product.uom', 'Unit of Measure', track_visibility='onchange'),
'state' : fields.selection([('cancel', 'Cancel'), ('draft', 'Draft'), ('send', 'Send'), ('done', 'Done')], 'Status', track_visibility='onchange'),
'transfer_id' : fields.many2one('stock.internal.transfer', 'Transfer', track_visibility='onchange'),
}
_defaults = {
'state' : lambda *a: 'draft',
'product_qty' : lambda *a: 1,
}
|
earwig/earwigbot
|
earwigbot/tasks/__init__.py
|
Python
|
mit
| 5,993 | 0.000167 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from earwigbot import exceptions
from earwigbot import wiki
__all__ = ["Task"]
class Task:
"""
**EarwigBot: Base Bot Task**
    This package provides the built-in wiki bot "tasks" that EarwigBot runs. Additional
tasks can be installed as plugins in the bot's working directory.
This class (import with ``from earwigbot.tasks import Task``) can be
subclassed to create custom bot tasks.
To run a task, use :py:meth:`bot.tasks.start(name, **kwargs)
<earwigbot.managers.TaskManager.start>`. ``**kwargs`` get passed to the
Task's :meth:`run` method.
"""
name = None
number = 0
def __init__(self, bot):
"""Constructor for new tasks.
This is called once immediately after the task class is loaded by
the task manager (in :py:meth:`tasks.load()
<earwigbot.managers._ResourceManager.load>`). Don't override this
        directly; if you do, remember to place ``super().__init__()`` first.
Use :py:meth:`setup` for typical task-init/setup needs.
"""
self.bot = bot
self.config = bot.config
self.logger = bot.tasks.logger.getChild(self.name)
number = self.config.tasks.get(self.name, {}).get("number")
if number is not None:
self.number = number
self.setup()
def __repr__(self):
"""Return the canonical string representation of the Task."""
res = "Task(name={0!r}, number={1!r}, bot={2!r})"
return res.format(self.name, self.number, self.bot)
def __str__(self):
"""Return a nice string representation of the Task."""
res = "<Task {0} ({1}) of {2}>"
return res.format(self.name, self.number, self.bot)
def setup(self):
"""Hook called immediately after the task is loaded.
Does nothing by default; feel free to override.
"""
pass
def run(self, **kwargs):
"""Main entry point to run a given task.
This is called directly by :py:meth:`tasks.start()
<earwigbot.managers.TaskManager.start>` and is the main way to make a
task do stuff. *kwargs* will be any keyword arguments passed to
:py:meth:`~earwigbot.managers.TaskManager.start`, which are entirely
optional.
"""
pass
def unload(self):
"""Hook called immediately before the task is unloaded.
Does nothing by default; feel free to override.
"""
pass
def make_summary(self, comment):
"""Make an edit summary by filling in variables in a config value.
:py:attr:`config.wiki["summary"] <earwigbot.config.BotConfig.wiki>` is
used, where ``$2`` is replaced by the main summary body, given by the
*comment* argument, and ``$1`` is replaced by the task number.
If the config value is not found, we'll just return *comment* as-is.
"""
try:
summary = self.bot.config.wiki["summary"]
except KeyError:
return comment
return summary.replace("$1", str(self.number)).replace("$2", comment)
def shutoff_enabled(self, site=None):
"""Return whether on-wiki shutoff is enabled for this task.
We check a certain page for certain content. This is determined by
our config file: :py:attr:`config.wiki["shutoff"]["page"]
<earwigbot.config.BotConfig.wiki>` is used as the title, with any
embedded ``$1`` replaced by our username and ``$2`` replaced by the
task number; and :py:attr:`config.wiki["shutoff"]["disabled"]
<earwigbot.config.BotConfig.wiki>` is used as the content.
If the page has that exact content or the page does not exist, then
shutoff is "disabled", meaning the bot is supposed to run normally, and
we return ``False``. If the page's content is something other than
what we expect, shutoff is enabled, and we return ``True``.
If a site is not provided, we'll try to use :py:attr:`self.site <site>`
if it's set. Otherwise, we'll use our default site.
"""
if not site:
if hasattr(self, "site"):
site = getattr(self, "site")
else:
site = self.bot.wiki.get_site()
try:
cfg = self.config.wiki["shutoff"]
except KeyError:
return False
title = cfg.get("page", "User:$1/Shutoff/Task $2")
username = site.get_user().name
title = title.replace("$1", username).replace("$2", str(self.number))
page = site.get_page(title)
try:
content = page.get()
except exceptions.PageNotFoundError:
return False
if content == cfg.get("disabled", "run"):
return False
self.logger.warn("Emergency task shutoff has been enabled!")
return True
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/test/regrtest_data/beyond_top/__init__.py
|
Python
|
apache-2.0
| 108 | 0.009259 |
from ... import Something
from . import data
try:
from ... import Lala
except ImportError:
pass
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/test/input/func_interfaces.py
|
Python
|
bsd-3-clause
| 1,802 | 0.010544 |
# pylint:disable-msg=R0201
"""docstring"""
__revision__ = ''
class Interface:
"""base class for interfaces"""
class IMachin(Interface):
"""docstring"""
def truc(self):
"""docstring"""
def troc(self, argument):
"""docstring"""
class Correct1:
"""docstring"""
    __implements__ = IMachin
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self, argument):
"""docstring"""
pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self, argument):
"""docstring"""
print argument
class MissingMethod:
"""docstring"""
    __implements__ = IMachin,
def __init__(self):
pass
def troc(self, argument):
"""docstring"""
print argument
def other(self):
"""docstring"""
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
def __init__(self):
pass
def truc(self):
"""docstring"""
pass
def troc(self):
"""docstring"""
pass
class InterfaceCantBeFound:
"""docstring"""
__implements__ = undefined
def __init__(self):
"""only to make pylint happier"""
def please(self):
"""public method 1/2"""
def besilent(self):
"""public method 2/2"""
class InterfaceCanNowBeFound:
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
def __init__(self):
"""only to make pylint happier"""
def please(self):
"""public method 1/2"""
def besilent(self):
"""public method 2/2"""
|
Petrole/MaturePyRobots
|
WebPyRobot/backend/migrations/0009_userprofile_level.py
|
Python
|
gpl-3.0
| 460 | 0 |
# -*- coding: utf-8 -*-
|
# Generated by Django 1.11.6 on 2017-11-15 14:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0008_auto_20171115_1443'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='level',
|
field=models.PositiveIntegerField(default=1),
),
]
|
vtemian/university_projects
|
practic_stage/hmw7/main.py
|
Python
|
apache-2.0
| 125 | 0 |
from office import Office
from bank import Bank
bank = Bank("Open Bank")
office = Office("Timisoara", bank)
office.open()
|
justinnoah/autobot
|
autobot/common.py
|
Python
|
apache-2.0
| 1,761 | 0.001136 |
#
# common.py
#
# Copyright (C) 2009 Justin Noah <justinnoah@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def get_resource(filename):
import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename))
|
BrainIntensive/OnlineBrainIntensive
|
resources/nipype/nipype/examples/dmri_preprocessing.py
|
Python
|
mit
| 5,464 | 0.000549 |
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
===================
dMRI: Preprocessing
===================
Introduction
============
This script, dmri_preprocessing.py, demonstrates how to prepare dMRI data
for tractography and connectivity analysis with nipype.
We perform this analysis using the FSL course data, which can be acquired from
here: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
Can be executed in command line using ``python dmri_preprocessing.py``
Import necessary modules from nipype.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as niu # utility
import nipype.algorithms.misc as misc
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces import fsl
from nipype.interfaces import ants
"""
Load specific nipype's workflows for preprocessing of dMRI data:
:class:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline`,
as data include a *b0* volume with reverse encoding direction
(*P>>>A*, or *y*), in contrast with the general acquisition encoding
that is *A>>>P* or *-y* (in RAS systems).
"""
from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline, remove_bias
"""
Map field names into individual subject runs
"""
info = dict(dwi=[['subject_id', 'dwidata']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']],
dwi_rev=[['subject_id', 'nodif_PA']])
infosource = pe.Node(interface=niu.IdentityInterface(fields=['subject_id']),
name="infosource")
# Set the subject 1 identifier in subject_list,
# we choose the preproc dataset as it contains uncorrected files.
subject_list = ['subj1_preproc']
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`~nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
outfields=list(info.keys())), name='datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fdt1')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz',
dwi_rev='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
An inputnode is used to pass the data obtained by the data grabber to the
actual processing functions
"""
inputnode = pe.Node(niu.IdentityInterface(fields=["dwi", "bvecs", "bvals",
"dwi_rev"]), name="inputnode")
"""
Setup for dMRI preprocessing
============================
In this section we initialize the appropriate workflow for preprocessing of
diffusion images.
Artifacts correction
--------------------
We will use the combination of ``topup`` and ``eddy`` as suggested by FSL.
In order to configure the susceptibility distortion correction (SDC), we first
write the specific parameters of our echo-planar imaging (EPI) images.
Particularly, we look into the ``acqparams.txt`` file of the selected subject
to gather the encoding direction, acceleration factor (in parallel sequences
it is > 1), and readout time or echospacing.
"""
epi_AP = {'echospacing': 66.5e-3, 'enc_dir': 'y-'}
epi_PA = {'echospacing': 66.5e-3, 'enc_dir': 'y'}
prep = all_fsl_pipeline(epi_params=epi_AP, altepi_params=epi_PA)
"""
Bias field correction
---------------------
Finally, we set up a node to correct for a single multiplicative bias field
from computed on the *b0* image, as suggested in [Jeurissen2014]_.
"""
bias = remove_bias()
"""
Connect nodes in workflow
=========================
We create a higher level workflow to connect the nodes. Please excuse the
author for writing the arguments of the ``connect`` function in a not-standard
style with readability aims.
"""
wf = pe.Workflow(name="dMRI_Preprocessing")
wf.base_dir = os.path.abspath('preprocessing_dmri_tutorial')
wf.connect([
    (infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, prep, [('dwi', 'inputnode.in_file'),
('dwi_rev', 'inputnode.alt_file'),
('bvals', 'inputnode.in_bval'),
('bvecs', 'inputnode.in_bvec')]),
(prep, bias, [('outputnode.out_file', 'inputnode.in_file'),
('outputnode.out_mask', 'inputnode.in_mask')]),
(datasource, bias, [('bvals', 'inputnode.in_bval')])
])
"""
Run the workflow as a command line executable
"""
if __name__ == '__main__':
wf.run()
wf.write_graph()
|
wakiyamap/electrum-mona
|
electrum_mona/gui/qt/update_checker.py
|
Python
|
mit
| 6,116 | 0.00327 |
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from distutils.version import LooseVersion
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton, QDialog)
from electrum_mona import version
from electrum_mona import constants
from electrum_mona import ecc
from electrum_mona.i18n import _
from electrum_mona.util import make_aiohttp_session
from electrum_mona.logging import Logger
from electrum_mona.network import Network
class UpdateCheck(QDialog, Logger):
url = "https://electrum-mona.org/version"
download_url = "https://electrum-mona.org"
VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
"MUJ1nBxpAzdGdNhTN1x3MCtyeBa4DbdqpK",
)
def __init__(self, *, latest_version=None):
QDialog.__init__(self)
self.setWindowTitle('Electrum - ' + _('Update Check'))
self.content = QVBoxLayout()
self.content.setContentsMargins(*[10]*4)
self.heading_label = QLabel()
self.content.addWidget(self.heading_label)
self.detail_label = QLabel()
self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
self.detail_label.setOpenExternalLinks(True)
self.content.addWidget(self.detail_label)
self.pb = QProgressBar()
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.content.addWidget(self.pb)
versions = QHBoxLayout()
versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
versions.addWidget(self.latest_version_label)
self.content.addLayout(versions)
self.update_view(latest_version)
self.update_check_thread = UpdateCheckThread()
self.update_check_thread.checked.connect(self.on_version_retrieved)
self.update_check_thread.failed.connect(self.on_retrieval_failed)
self.update_check_thread.start()
close_button = QPushButton(_("Close"))
close_button.clicked.connect(self.close)
self.content.addWidget(close_button)
self.setLayout(self.content)
self.show()
def on_version_retrieved(self, version):
self.update_view(version)
def on_retrieval_failed(self):
self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
self.pb.hide()
@staticmethod
def is_newer(latest_version):
return latest_version > LooseVersion(version.ELECTRUM_VERSION)
def update_view(self, latest_version=None):
if latest_version:
self.pb.hide()
self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
if self.is_newer(latest_version):
self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
self.detail_label.setText(_("You can download the new version from {}.").format(url))
else:
self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
self.detail_label.setText(_("You are already on the latest version of Electrum."))
else:
self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
self.detail_label.setText(_("Please wait while Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
checked = pyqtSignal(object)
failed = pyqtSignal()
def __init__(self):
QThread.__init__(self)
Logger.__init__(self)
self.network = Network.get_instance()
async def get_update_info(self):
# note: Use long timeout here as it is not critical that we get a response fast,
# and it's bad not to get an update notification just because we did not wait enough.
async with make_aiohttp_session(proxy=self.network.proxy, timeout=120) as session:
async with session.get(UpdateCheck.url) as result:
signed_version_dict = await result.json(content_type=None)
# example signed_version_dict:
# {
# "version": "3.9.9",
# "signatures": {
# "MRkEwoPcvSPaC5WNtQMa7NGPy2tBKbp3Bm": "H84UFTdaBswxTrNty0gLlWiQEQhJA2Se5xVdhR9zFirKYg966IXEkC7km6phIJq+2CT3KwvKuj8YKaSCy1fErwg="
# }
# }
version_num = signed_version_dict['version']
sigs = signed_version_dict['signatures']
for address, sig in sigs.items():
if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
continue
sig = base64.b64decode(sig)
msg = version_num.encode('utf-8')
if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
net=constants.BitcoinMainnet):
self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
break
else:
raise Exception('no valid signature for version announcement')
return LooseVersion(version_num.strip())
def run(self):
if not self.network:
self.failed.emit()
return
try:
update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), self.network.asyncio_loop).result()
except Exception as e:
self.logger.info(f"got exception: '{repr(e)}'")
self.failed.emit()
else:
self.checked.emit(update_info)
|
turbokongen/home-assistant
|
homeassistant/components/withings/common.py
|
Python
|
apache-2.0
| 34,960 | 0.00083 |
"""Common code for Withings."""
import asyncio
from dataclasses import dataclass
import datetime
from datetime import timedelta
from enum import Enum, IntEnum
import logging
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from aiohttp.web import Response
import requests
from withings_api import AbstractWithingsApi
from withings_api.common import (
AuthFailedException,
GetSleepSummaryField,
MeasureGroupAttribs,
MeasureType,
MeasureTypes,
NotifyAppli,
SleepGetSummaryResponse,
UnauthorizedException,
query_measure_groups,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_WEBHOOK_ID,
HTTP_UNAUTHORIZED,
MASS_KILOGRAMS,
PERCENTAGE,
SPEED_METERS_PER_SECOND,
TIME_SECONDS,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.config_entry_oauth2_flow import (
AUTH_CALLBACK_PATH,
AbstractOAuth2Implementation,
LocalOAuth2Implementation,
OAuth2Session,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.network import get_url
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import dt
from . import const
from .const import Measurement
_LOGGER = logging.getLogger(const.LOG_NAMESPACE)
NOT_AUTHENTICATED_ERROR = re.compile(
f"^{HTTP_UNAUTHORIZED},.*",
re.IGNORECASE,
)
DATA_UPDATED_SIGNAL = "withings_entity_state_updated"
MeasurementData = Dict[Measurement, Any]
class NotAuthenticatedError(HomeAssistantError):
"""Raise when not authenticated with the service."""
class ServiceError(HomeAssistantError):
"""Raise when the service has an error."""
class UpdateType(Enum):
"""Data update type."""
POLL = "poll"
WEBHOOK = "webhook"
@dataclass
class WithingsAttribute:
"""Immutable class for describing withings sensor data."""
measurement: Measurement
measute_type: Enum
friendly_name: str
unit_of_measurement: str
icon: Optional[str]
platform: str
enabled_by_default: bool
update_type: UpdateType
@dataclass
class WithingsData:
"""Represents value and meta-data from the withings service."""
attribute: WithingsAttribute
value: Any
@dataclass
class WebhookConfig:
"""Config for a webhook."""
id: str
url: str
enabled: bool
@dataclass
class StateData:
"""State data held by data manager for retrieval by entities."""
unique_id: str
state: Any
WITHINGS_ATTRIBUTES = [
WithingsAttribute(
Measurement.WEIGHT_KG,
MeasureType.WEIGHT,
"Weight",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_MASS_KG,
MeasureType.FAT_MASS_WEIGHT,
"Fat Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_FREE_MASS_KG,
MeasureType.FAT_FREE_MASS,
"Fat Free Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.MUSCLE_MASS_KG,
MeasureType.MUSCLE_MASS,
"Muscle Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BONE_MASS_KG,
MeasureType.BONE_MASS,
"Bone Mass",
MASS_KILOGRAMS,
"mdi:weight-kilogram",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEIGHT_M,
MeasureType.HEIGHT,
"Height",
const.UOM_LENGTH_M,
"mdi:ruler",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.TEMP_C,
MeasureType.TEMPERATURE,
"Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.BODY_TEMP_C,
MeasureType.BODY_TEMPERATURE,
"Body Temperature",
const.UOM_TEMP_C,
"mdi:thermometer
|
",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SKIN_TEMP_C,
|
MeasureType.SKIN_TEMPERATURE,
"Skin Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.FAT_RATIO_PCT,
MeasureType.FAT_RATIO,
"Fat Ratio",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.DIASTOLIC_MMHG,
MeasureType.DIASTOLIC_BLOOD_PRESSURE,
"Diastolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SYSTOLIC_MMGH,
MeasureType.SYSTOLIC_BLOOD_PRESSURE,
"Systolic Blood Pressure",
const.UOM_MMHG,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HEART_PULSE_BPM,
MeasureType.HEART_RATE,
"Heart Pulse",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SPO2_PCT,
MeasureType.SP02,
"SP02",
PERCENTAGE,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.HYDRATION,
MeasureType.HYDRATION,
"Hydration",
MASS_KILOGRAMS,
"mdi:water",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.PWV,
MeasureType.PULSE_WAVE_VELOCITY,
"Pulse Wave Velocity",
SPEED_METERS_PER_SECOND,
None,
SENSOR_DOMAIN,
True,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_BREATHING_DISTURBANCES_INTENSITY,
GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY,
"Breathing disturbances intensity",
"",
"",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_DEEP_DURATION_SECONDS,
GetSleepSummaryField.DEEP_SLEEP_DURATION,
"Deep sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOSLEEP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_SLEEP,
"Time to sleep",
TIME_SECONDS,
"mdi:sleep",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_TOWAKEUP_DURATION_SECONDS,
GetSleepSummaryField.DURATION_TO_WAKEUP,
"Time to wakeup",
TIME_SECONDS,
"mdi:sleep-off",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_AVERAGE,
GetSleepSummaryField.HR_AVERAGE,
"Average heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
SENSOR_DOMAIN,
False,
UpdateType.POLL,
),
WithingsAttribute(
Measurement.SLEEP_HEART_RATE_MAX,
GetSleepSummaryField.HR_MAX,
"Maximum heart rate",
const.UOM_BEATS_PER
|
wright-group/PyCMDS
|
pycmds/project/file_dialog_handler.py
|
Python
|
mit
| 4,481 | 0.000893 |
"""
QFileDialog objects can only be run in the main thread.
"""
### imports ###################################################################
import os
import time
from PySide2 import QtCore
from PySide2 import QtWidgets
from pycmds.project import project_globals as g
from pycmds.project import classes as pc
### FileDialog object #########################################################
directory_filepath = pc.Mutex()
open_filepath = pc.Mutex()
save_filepath = pc.Mutex()
class FileDialog(QtCore.QObject):
update_ui = QtCore.Signal()
queue_emptied = QtCore.Signal()
def __init__(self, enqueued_object, busy_object):
QtCore.QObject.__init__(self)
self.name = "file_dialog"
self.enqueued = enqueued_object
self.busy = busy_object
@QtCore.Slot(str, list)
def dequeue(self, method, inputs):
"""
Slot to accept enqueued commands from main thread.
Method passed as qstring, inputs as list of [args, kwargs].
Calls own method with arguments from inputs.
"""
self.update_ui.emit()
method = str(method) # method passed as qstring
args, kwargs = inputs
if g.debug.read():
print(self.name, " dequeue:", method, inputs, self.busy.read())
self.enqueued.pop()
getattr(self, method)(*args, **kwargs)
if not self.enqueued.read():
self.queue_emptied.emit()
self.check_busy()
def check_busy(self):
"""
decides if the hardware is done and handles writing of 'busy' to False
"""
# must always write busy whether answer is True or False
if self.enqueued.read():
time.sleep(0.1) # don't loop like crazy
self.busy.write(True)
else:
self.busy.write(False)
self.update_ui.emit()
def clean(self, out):
"""
takes the output and returns a string that has the properties I want
"""
out = str(out)
out = out.replace("/", os.sep)
return out
def getExistingDirectory(self, inputs=[]):
caption, directory, options = inputs
options = QtWidgets.QFileDialog.ShowDirsOnly
out = self.clean(
QtWidgets.QFileDialog.getExistingDirectory(
g.main_window.read(), caption, str(directory), options
)
)
directory_filepath.write(out)
def getOpenFileName(self, inputs=[]):
caption, directory, options = inputs
out = self.clean(
QtWidgets.QFileDialog.getOpenFileName(
g.main_window.read(), caption, str(directory), options
)[0]
)
open_filepath.write(out)
def getSaveFileName(self, inputs=[]):
caption, directory, savefilter, selectedfilter, options = inputs
out = self.clean(
QtWidgets.QFileDialog.getSaveFileName(
g.main_window.read(), caption, directory, savefilter, selectedfilter, options,
)[0]
)
save_filepath.write(out)
busy = pc.Busy()
enqueued = pc.Enqueued()
file_dialog = FileDialog(enqueued, busy)
q = pc.Q(enqueued, busy, file_dialog)
### thread-safe file dialog methods ###########################################
# the q method only works between different threads
# call directly if the calling object is in the main thread
def dir_dialog(caption, directory, options=None):
inputs = [caption, directory, options]
|
if QtCore.QThread.currentThread() == g.main_thread.read():
file_dialog.getExistingDirectory(inputs)
else:
q.push("getExistingDirectory", inputs)
while busy.read():
time.sleep(0.1)
return directory_filepath.read()
def open_dialog(caption, directory, options):
inputs = [caption, directory, options]
if QtCore.QThread.currentThread() == g.main_thread.read():
        file_dialog.getOpenFileName(inputs)
else:
q.push("getOpenFileName", inputs)
while busy.read():
time.sleep(0.1)
return open_filepath.read()
def save_dialog(caption, directory, savefilter, selectedfilter, options):
inputs = [caption, directory, savefilter, selectedfilter, options]
if QtCore.QThread.currentThread() == g.main_thread.read():
file_dialog.getSaveFileName(inputs)
else:
q.push("getSaveFileName", inputs)
while busy.read():
time.sleep(0.1)
return save_filepath.read()
|
knnniggett/weewx
|
bin/weewx/drivers/ws23xx.py
|
Python
|
gpl-3.0
| 77,294 | 0.004463 |
#!/usr/bin/env python
#
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Kenneth Lavrsen for the Open2300 implementation:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/WebHome
# description of the station communication interface:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSAPI
# memory map:
# http://www.lavrsen.dk/foswiki/bin/view/Open2300/OpenWSMemoryMap
#
# Thanks to Russell Stuart for the ws2300 python implementation:
# http://ace-host.stuart.id.au/russell/files/ws2300/
# and the map of the station memory:
# http://ace-host.stuart.id.au/russell/files/ws2300/memory_map_2300.txt
#
# This implementation copies directly from Russell Stuart's implementation,
# but only the parts required to read from and write to the weather station.
"""Classes and functions for interfacing with WS-23xx weather stations.
LaCrosse made a number of stations in the 23xx series, including:
WS-2300, WS-2308, WS-2310, WS-2315, WS-2317, WS-2357
The stations were also sold as the TFA Matrix and TechnoLine 2350.
The WWVB receiver is located in the console.
To synchronize the console and sensors, press and hold the PLUS key for 2
seconds. When console is not synchronized no data will be received.
To do a factory reset, press and hold PRESSURE and WIND for 5 seconds.
A single bucket tip is 0.0204 in (0.518 mm).
The station has 175 history records. That is just over 7 days of data with
the default history recording interval of 60 minutes.
The station supports both wireless and wired communication between the
sensors and a station console. Wired connection updates data every 8 seconds.
Wireless connection updates data in 16 to 128 second intervals, depending on
wind speed and rain activity.
The connection type can be one of 0=cable, 3=lost, 15=wireless
sensor update frequency:
32 seconds when wind speed > 22.36 mph (wireless)
128 seconds when wind speed < 22.36 mph (wireless)
10 minutes (wireless after 5 failed attempts)
8 seconds (wired)
console update frequency:
15 seconds (pressure/temperature)
20 seconds (humidity)
It is possible to increase the rate of wireless updates:
http://www.wxforum.net/index.php?topic=2196.0
Sensors are connected by unshielded phone cables. RF interference can cause
random spikes in data, with one symptom being values of 25.5 m/s or 91.8 km/h
for the wind speed. To reduce the number of spikes in data, replace with
shielded cables:
http://www.lavrsen.dk/sources/weather/windmod.htm
The station records wind speed and direction, but has no notion of gust.
The station calculates windchill and dewpoint.
The station has a serial connection to the computer.
This driver does not keep the serial port open for long periods. Instead, the
driver opens the serial port, reads data, then closes the port.
This driver polls the station. Use the polling_interval parameter to specify
how often to poll for data. If not specified, the polling interval will adapt
based on connection type and status.
USB-Serial Converters
With a USB-serial converter one can connect the station to a computer with
only USB ports, but not every converter will work properly. Perhaps the two
most common converters are based on the Prolific and FTDI chipsets. Many
people report better luck with the FTDI-based converters. Some converters
that use the Prolific chipset (PL2303) will work, but not all of them.
Known to work: ATEN UC-232A
Bounds checking
wind speed: 0-113 mph
wind direction: 0-360
humidity: 0-100
temperature: ok if not -22F and humidity is valid
dewpoint: ok if not -22F and humidity is valid
barometer: 25-35 inHg
rain rate: 0-10 in/hr
Discrepancies Between Implementations
As of December 2013, there are significant differences between the open2300,
wview, and ws2300 implementations. Current version numbers are as follows:
open2300 1.11
ws2300 1.8
wview 5.20.2
History Interval
The factory default is 60 minutes. The value stored in the console is one
less than the actual value (in minutes). So for the factory default of 60,
the console stores 59. The minimum interval is 1.
ws2300.py reports the actual value from the console, e.g., 59 when the
interval is 60. open2300 reports the interval, e.g., 60 when the interval
is 60. wview ignores the interval.
Detecting Bogus Sensor Values
wview queries the station 3 times for each sensor then accepts the value only
if the three values were close to each other.
open2300 sleeps 10 seconds if a wind measurement indicates invalid or overflow.
The ws2300.py implementation includes overflow and validity flags for values
from the wind sensors. It does not retry based on invalid or overflow.
Wind Speed
There is disagreement about how to calculate wind speed and how to determine
whether the wind speed is valid.
This driver introduces a WindConversion object that uses open2300/wview
decoding so that wind speeds match that of open2300/wview. ws2300 1.8
incorrectly uses bcd2num instead of bin2num. This bug is fixed in this driver.
The memory map indicates the following:
addr smpl description
0x527 0 Wind overflow flag: 0 = normal
0x528 0 Wind minimum code: 0=min, 1=--.-, 2=OFL
0x529 0 Windspeed: binary nibble 0 [m/s * 10]
0x52A 0 Windspeed: binary nibble 1 [m/s * 10]
0x52B 0 Windspeed: binary nibble 2 [m/s * 10]
0x52C 8 Wind Direction = nibble * 22.5 degrees
0x52D 8 Wind Direction 1 measurement ago
0x52E 9 Wind Direction 2 measurement ago
0x52F 8 Wind Direction 3 measurement ago
0x530 7 Wind Direction 4 measurement ago
0x531 7 Wind Direction 5 measurement ago
0x532 0
wview 5.20.2 implementation (wview apparently copied from open2300):
read 3 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
fail
} else {
dir = (x[2] >> 4) * 22.5
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0 * 2.23693629)
maxdir = dir
maxspeed = speed
}
open2300 1.10 implementation:
read 6 bytes starting at 0x527
0x527 x[0]
0x528 x[1]
0x529 x[2]
0x52a x[3]
0x52b x[4]
0x52c x[5]
if ((x[0] != 0x00) ||
((x[1] == 0xff) && (((x[2] & 0xf) == 0) || ((x[2] & 0xf) == 1)))) {
sleep 10
} else {
dir = x[2] >> 4
speed = ((((x[2] & 0xf) << 8) + (x[1])) / 10.0)
dir0 = (x[2] >> 4) * 22.5
dir1 = (x[3] & 0xf) * 22.5
dir2 = (x[3] >> 4) * 22.5
dir3 = (x[4] & 0xf) * 22.5
dir4 = (x[4] >> 4) * 22.5
dir5 = (x[5] & 0xf) * 22.5
}
ws2300.py 1.8 implementation:
read 1 nibble starting at 0x527
read 1 nibble starting at 0x528
read 4 nibble starting at 0x529
read 3 nibble starting at 0x529
read 1 nibble starting at 0x52c
read 1 nibble starting at 0x52d
read 1 nibble starting at 0x52e
read 1 nibble starting at 0x52f
read 1 nibble starting at 0x530
read 1 nibble starting at 0x531
0x527 overflow
0x528 validity
0x529 speed[0]
0x52a speed[1]
0x52b speed[2]
0x52c dir[0]
speed: ((x[2] * 100 + x[1] * 10 + x[0]) % 1000) / 10
velocity: (x[2] * 100 + x[1] * 10 + x[0]) / 10
dir = data[0] * 22.5
speed = (bcd2num(data) % 10**3 + 0) / 10**1
velocity = (bcd2num(data[:3])/10.0, bin2num(data[3:4]) * 22.5)
bcd2num([a,b,c]) -> c*100+b*10+a
"""
# TODO: use pyserial instead of LinuxSerialPort
# TODO: put the __enter__ and __exit__ scaffolding on serial port, not Station
# FIXME: unless we can get setTime to work, just ignore the console clock
# FIXME: detect bogus wind speed/direction
# i see these when the wind instrument is disconnected:
# ws 26.399999
# wsh 21
# w0 135
from __future__ import with_statement
import syslog
import time
import string
import fcntl
import os
import select
import struct
import termios
import tty
import weeutil.weeutil
import weewx
import weewx.drivers
import weewx.units
import weewx.wxformulas
DRIVER_NAME = 'WS23xx'
DRIVER_VERSION = '0.22'
def loader(config_dict, _):
return WS23xxDriver(config_dict=config_dict, **config_dict[DRIVER_NAME])
def configurator_loader(_):
return WS23xxConfigurator()
def confeditor_loader():
return WS23xxConfEditor()
DEFAULT_PORT = '/dev/ttyUSB0'
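# --- Added illustrative sketch (not part of the original weewx driver) ---
# The module docstring above compares how open2300/wview and ws2300.py decode
# the wind bytes read at 0x527.  The helper below restates the open2300/wview
# arithmetic in plain Python so the nibble handling is easier to follow; the
# function name and the 2.23693629 m/s -> mph factor come from that pseudo-code
# and are not referenced anywhere else in this file.
def _decode_wind_open2300_style(x):
    """Decode the 3 bytes read starting at 0x527; return (speed_mph, dir_degrees)
    or None when the reading is flagged invalid or overflowed."""
    if x[0] != 0x00 or (x[1] == 0xff and (x[2] & 0xf) in (0, 1)):
        return None                                  # overflow / invalid flags set
    direction = (x[2] >> 4) * 22.5                   # high nibble -> 16-point rose
    speed_ms = (((x[2] & 0xf) << 8) + x[1]) / 10.0   # low nibble + byte = m/s * 10
    return speed_ms * 2.23693629, direction          # m/s converted to mph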
def logmsg(dst, msg):
syslog.syslog(dst, 'ws2
|
abarisain/mopidy
|
mopidy/utils/path.py
|
Python
|
apache-2.0
| 5,006 | 0.0002 |
from __future__ import unicode_literals
import logging
import os
import string
import urllib
import urlparse
import glib
logger = logging.getLogger(__name__)
XDG_DIRS = {
'XDG_CACHE_DIR': glib.get_user_cache_dir(),
'XDG_CONFIG_DIR': glib.get_user_config_dir(),
'XDG_DATA_DIR': glib.get_user_data_dir(),
'XDG_MUSIC_DIR': glib.get_user_special_dir(glib.USER_DIRECTORY_MUSIC),
}
# XDG_MUSIC_DIR can be none, so filter out any bad data.
XDG_DIRS = dict((k, v) for k, v in XDG_DIRS.items() if v is not None)
def get_or_create_dir(dir_path):
if not isinstance(dir_path, bytes):
raise ValueError('Path is not a bytestring.')
dir_path = expand_path(dir_path)
if os.path.isfile(dir_path):
raise OSError(
'A file with the same name as the desired dir, '
'"%s", already exists.' % dir_path)
elif not os.path.isdir(dir_path):
logger.info('Creating dir %s', dir_path)
os.makedirs(dir_path, 0755)
return dir_path
def get_or_create_file(file_path, mkdir=True, content=None):
if not isinstance(file_path, bytes):
raise ValueError('Path is not a bytestring.')
file_path = expand_path(file_path)
if mkdir:
get_or_create_dir(os.path.dirname(file_path))
if not os.path.isfile(file_path):
logger.info('Creating file %s', file_path)
with open(file_path, 'w') as fh:
if content:
fh.write(content)
return file_path
def path_to_uri(path):
"""
Convert OS specific path to file:// URI.
Accepts either unicode strings or bytestrings. The encoding of any
bytestring will be maintained so that :func:`uri_to_path` can return the
same bytestring.
Returns a file:// URI as an unicode string.
"""
if isinstance(path, unicode):
path = path.encode('utf-8')
path = urllib.quote(path)
return urlparse.urlunsplit((b'file', b'', path, b'', b''))
def uri_to_path(uri):
"""
Convert an URI to a OS specific path.
Returns a bytestring, since the file path can contain chars with other
encoding than UTF-8.
If we had returned these paths as unicode strings, you wouldn't be able to
look up the matching dir or file on your file system because the exact path
would be lost by ignoring its encoding.
"""
if isinstance(uri, unicode):
uri = uri.encode('utf-8')
return urllib.unquote(urlparse.urlsplit(uri).path)
def split_path(path):
parts = []
while True:
path, part = os.path.split(path)
if part:
parts.insert(0, part)
if not path or path == b'/':
break
return parts
def expand_path(path):
# TODO: document as we want people to use this.
if not isinstance(path, bytes):
raise ValueError('Path is not a bytestring.')
try:
path = string.Template(path).substitute(XDG_DIRS)
except KeyError:
return None
path = os.path.expanduser(path)
path = os.path.abspath(path)
return path
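# Illustrative note (added): expand_path() substitutes the $XDG_* placeholders
# defined in XDG_DIRS above, then expands '~' and makes the path absolute.  The
# exact results depend on the local environment, so these are examples only:
#     expand_path(b'$XDG_MUSIC_DIR/albums')  ->  e.g. b'/home/alice/Music/albums'
#     expand_path(b'$XDG_UNKNOWN/x')         ->  None (unknown placeholder)
#     expand_path(b'~/foo')                  ->  absolute path in the user's home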
def find_files(path):
"""
Finds all files within a path.
Directories and files with names starting with ``.`` is ignored.
:returns: yields the full path to files as bytestrings
"""
if isinstance(path, unicode):
path = path.encode('utf-8')
if os.path.isfile(path):
return
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
for dirname in dirnames:
if dirname.startswith(b'.'):
                # Skip hidden dirs by modifying dirnames inplace
dirnames.remove(dirname)
for filename in filenames:
if filename.startswith(b'.'):
# Skip hidden files
continue
yield os.path.relpath(os.path.join(dirpath, filename), path)
def check_file_path_is_inside_base_dir(file_path, base_path):
assert not file_path.endswith(os.sep), (
        'File path %s cannot end with a path separator' % file_path)
# Expand symlinks
real_base_path = os.path.realpath(base_path)
real_file_path = os.path.realpath(file_path)
    # Use dir of file for prefix comparison, so we don't accept
# /tmp/foo.m3u as being inside /tmp/foo, simply because they have a
# common prefix, /tmp/foo, which matches the base path, /tmp/foo.
real_dir_path = os.path.dirname(real_file_path)
# Check if dir of file is the base path or a subdir
common_prefix = os.path.commonprefix([real_base_path, real_dir_path])
assert common_prefix == real_base_path, (
'File path %s must be in %s' % (real_file_path, real_base_path))
# FIXME replace with mock usage in tests.
class Mtime(object):
def __init__(self):
self.fake = None
def __call__(self, path):
if self.fake is not None:
return self.fake
return int(os.stat(path).st_mtime)
def set_fake_time(self, time):
self.fake = time
def undo_fake(self):
self.fake = None
mtime = Mtime()
|
ImpregnableProgrammer/Advent-of-Code
|
2019/Day_06.py
|
Python
|
gpl-3.0
| 1,071 | 0.006536 |
def First_Part(orbits):
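    # Build a child -> parent map, then for each object walk up the chain until
    # the root (COM, which has no parent entry) is reached; the checksum is the
    # total number of direct and indirect orbits counted along the way.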
Orbits = dict()
checksum = 0
for orbit in orbits:
od, og = orbit.split(')')
Orbits[og] = od
for og in Orbits.keys():
while 1:
try:
og = Orbits[og]
checksum += 1
except KeyError:
break
return checksum
def Second_Part(orbits):
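    # Breadth-first search over the orbit graph, starting from the object YOU
    # orbits, until SAN appears in the frontier; subtracting 1 converts the path
    # length into the number of orbital transfers between the two parent objects.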
Orbits = dict()
for orbit in orbits:
od, og = orbit.split(')')
Orbits[og] = od
oPast = ["YOU"]
oCurr = [Orbits["YOU"]]
oNext = list()
dist = 0
while "SAN" not in oCurr:
for o in oCurr:
oNext += ([Orbits[o]] if o != "COM" else []) + [i for i in Orbits.keys() if Orbits[i] == o and i not in oPast]
oCurr = oNext
oNext = list()
oPast += oCurr
dist += 1
return dist - 1
Orbits = '''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN'''.split('\n')
Orbits = open("Inputs/Day_06.txt", 'r').read().split('\n')[:-1]
print(First_Part(Orbits))
print(Second_Part(Orbits))
|
naoyeye/geeksoho
|
application.py
|
Python
|
mit
| 1,969 | 0.019457 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: J.Y Han
# start
# spawn-fcgi -d /users/hanjiyun/project/geeksoho -f /users/hanjiyun/project/geeksoho/application.py -a 127.0.0.1 -p 9001
#stop
# kill `pgrep -f "/users/hanjiyun/project/geeksoho/application.py"`
import os
import web
import rediswebpy
from web.contrib.template import render_jinja
import misc
db = web.database(dbn='mysql', db='geeksoho', user='geeksoho', passwd='geeksoho')
urls = (
'/', 'index',
'/test', 'test'
)
# controllers
# ===============
class index:
"""Home"""
def GET(self):
# return pjax('jobs.html')
jobsList = GetJobs()
        return render.jobs(jobsList=jobsList)
def POST(self):
data = web.input(title='', link='', company='', company_weibo='', company_website='', city='', salary='', intro='')
CreatNewJob(data)
raise web.seeother('/')
class test:
"""test"""
def GET(self):
# return pjax('test.html')
return render.test()
# models
# =============
def CreatNewJob(data):
db.insert(
|
'jobs',
title = data.title,
link = data.link,
company = data.company,
company_weibo = data.company_weibo,
company_website = data.company_website,
city = data.city,
salary = data.salary,
intro = data.intro)
def GetJobs():
return db.select('jobs', limit = 100, order='id DESC')
# globals = get_all_functions(misc)
app = web.application(urls, globals())
web.config.debug = True
cache = False
session = web.session.Session(app, rediswebpy.RedisStore(), initializer={'count': 0})
render = render_jinja(
    'templates', # set the template path.
    encoding = 'utf-8', # encoding.
)
myFilters = {'filter_tags': misc.filter_tags,}
render._lookup.filters.update(myFilters)
if __name__ == "__main__":
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
app.run()
|
jbms/ofxclient
|
ofxclient/config.py
|
Python
|
mit
| 9,882 | 0.000506 |
from __future__ import with_statement
from ofxclient.account import Account
from configparser import ConfigParser
import os
import os.path
try:
import keyring
KEYRING_AVAILABLE = True
except:
KEYRING_AVAILABLE = False
try:
DEFAULT_CONFIG = os.path.expanduser(os.path.join('~', 'ofxclient.ini'))
except:
DEFAULT_CONFIG = None
class SecurableConfigParser(ConfigParser):
""":py:class:`ConfigParser.ConfigParser` subclass that knows how to store
options marked as secure into the OS specific
keyring/keychain.
To mark an option as secure, the caller must call
'set_secure' at least one time for the particular
option and from then on it will be seen as secure
and will be stored / retrieved from the keychain.
Example::
from ofxclient.config import SecurableConfigParser
# password will not be saved in the config file
c = SecurableConfigParser()
c.add_section('Info')
c.set('Info','username','bill')
c.set_secure('Info','password','s3cre7')
with open('config.ini','w') as fp:
c.write(fp)
"""
_secure_placeholder = '%{secured}'
def __init__(self, keyring_name='ofxclient',
keyring_available=KEYRING_AVAILABLE, **kwargs):
ConfigParser.__init__(self, interpolation = None)
self.keyring_name = keyring_name
self.keyring_available = keyring_available
self._unsaved = {}
self.keyring_name = keyring_name
def is_secure_option(self, section, option):
"""Test an option to see if it is secured or not.
:param section: section id
:type section: string
:param option: option name
:type option: string
        :rtype: boolean
        ``True`` if the option is secured, ``False`` otherwise.
"""
if not self.has_section(section):
return False
if not self.has_option(section, option):
return False
if ConfigParser.get(self, section, option) == self._secure_placeholder:
return True
return False
def has_secure_option(self, section, option):
"""See is_secure_option"""
return self.is_secure_option(section, option)
def items(self, section):
"""Get all items for a section. Subclassed, to ensure secure
items come back with the unencrypted data.
:param section: section id
:type section: string
"""
items = []
for k, v in ConfigParser.items(self, section):
if self.is_secure_option(section, k):
v = self.get(section, k)
items.append((k,
|
v))
return items
def secure_items(self, section):
"""Like items() but only return secure items.
:param section: section id
:type section: string
"""
|
return [x
for x in self.items(section)
if self.is_secure_option(section, x[0])]
def set(self, section, option, value):
"""Set an option value. Knows how to set options properly marked
as secure."""
if self.is_secure_option(section, option):
self.set_secure(section, option, value)
else:
ConfigParser.set(self, section, option, value)
def set_secure(self, section, option, value):
"""Set an option and mark it as secure.
Any subsequent uses of 'set' or 'get' will also
now know that this option is secure as well.
"""
if self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('set', value)
value = self._secure_placeholder
ConfigParser.set(self, section, option, value)
def get(self, section, option, *args):
"""Get option value from section. If an option is secure,
populates the plain text."""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
if self._unsaved.get(s_option, [''])[0] == 'set':
return self._unsaved[s_option][1]
else:
return keyring.get_password(self.keyring_name, s_option)
return ConfigParser.get(self, section, option, *args)
def remove_option(self, section, option):
"""Removes the option from ConfigParser as well as
the secure storage backend
"""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('delete', None)
ConfigParser.remove_option(self, section, option)
def write(self, *args):
"""See ConfigParser.write(). Also writes secure items to keystore."""
ConfigParser.write(self, *args)
if self.keyring_available:
for key, thing in self._unsaved.items():
action = thing[0]
value = thing[1]
if action == 'set':
keyring.set_password(self.keyring_name, key, value)
elif action == 'delete':
try:
keyring.delete_password(self.keyring_name, key)
except:
pass
self._unsaved = {}
class OfxConfig(object):
"""Default config file handler for other tools to use.
This can read and write from the default config which is
$USERS_HOME/ofxclient.ini
:param file_name: absolute path to a config file (optional)
:type file_name: string or None
Example usage::
from ofxclient.config import OfxConfig
from ofxclient import Account
a = Account()
c = OfxConfig(file_name='/tmp/new.ini')
c.add_account(a)
c.save()
account_list = c.accounts()
one_account = c.account( a.local_id() )
"""
def __init__(self, file_name=None):
self.secured_field_names = [
'institution.username',
'institution.password'
]
f = file_name or DEFAULT_CONFIG
if f is None:
raise ValueError('file_name is required')
self._load(f)
def reload(self):
"""Reload the config file from disk"""
return self._load()
def accounts(self):
"""List of confgured :py:class:`ofxclient.Account` objects"""
return [self._section_to_account(s)
for s in self.parser.sections()]
def encrypted_accounts(self):
return [a
for a in self.accounts()
if self.is_encrypted_account(a.local_id())]
def unencrypted_accounts(self):
return [a
for a in self.accounts()
if not self.is_encrypted_account(a.local_id())]
def account(self, id):
"""Get :py:class:`ofxclient.Account` by section id"""
if self.parser.has_section(id):
return self._section_to_account(id)
return None
def add_account(self, account):
"""Add Account to config (does not save)"""
serialized = account.serialize()
section_items = flatten_dict(serialized)
section_id = section_items['local_id']
if not self.parser.has_section(section_id):
self.parser.add_section(section_id)
for key in sorted(section_items):
self.parser.set(section_id, key, section_items[key])
self.encrypt_account(id=section_id)
return self
def encrypt_account(self, id):
"""Make sure that certain fields are encrypted."""
for key in self.secured_field_names:
value = self.parser.get(id, key)
self.parser.set_secure(id, key, value)
return self
def is_encrypted_account(self, id):
"""Are all fields for the account id encrypted?"""
for key in self.secured_field_names:
if not self.parser.is_secure_option(id, key):
return False
return True
def remove_account(self, id):
"""Add Account from config (does not save)"""
if self.parser.has_section(id):
self.parser.remove_section(id)
return True
return False
def save(self):
|
sharadagarwal/autorest
|
AutoRest/Generators/Python/Azure.Python.Tests/AcceptanceTests/paging_tests.py
|
Python
|
mit
| 6,270 | 0.003191 |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from uuid import uuid4
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir, pardir))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrest"))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrestazure"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "Paging"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from msrestazure.azure_exceptions import CloudError
from msrest.authentication import BasicTokenAuthentication
from autorestpagingtestservice import AutoRestPagingTestService
from autorestpagingtestservice.models import PagingGetMultiplePagesWithOffsetOptions
class PagingTests(unittest.TestCase):
def setUp(self):
cred = BasicTokenAuthentication({"access_token" :str(uuid4())})
self.client = AutoRestPagingTestService(cred, base_url="http://localhost:3000")
self.client._client._adapter.add_hook("request", self.client._client._adapter._test_pipeline)
return super(PagingTests, self).setUp()
def test_paging_happy_path(self):
pages = self.client.paging.get_single_pages()
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 1)
self.assertEqual(items[0].properties.id, 1)
self.assertEqual(items[0].properties.name, "Product")
pages = self.client.paging.get_multiple_pages()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages.reset()
more_items = [i for i in pages]
eq = [e for e in items if e not in more_items]
self.assertEqual(len(eq), 0)
with self.assertRaises(GeneratorExit):
pages.next()
pages = self.client.paging.get_multiple_pages_retry_first()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages = self.client.paging.get_multiple_pages_retry_second()
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 10)
pages = self.client.paging.get_single_pages(raw=True)
items = [i for i in pages]
self.assertIsNone(pages.next_link)
self.assertEqual(len(items), 1)
self.assertEqual(items, pages.raw.output)
pages = self.client.paging.get_multiple_pages(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
self.assertIsNotNone(pages.raw.response)
options = PagingGetMultiplePagesWithOffsetOptions(100)
pages = self.client.paging.get_multiple_pages_with_offset(paging_get_multiple_pages_with_offset_options=options)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
self.assertEqual(items[-1].properties.id, 110)
pages = self.client.paging.get_multiple_pages_retry_first(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
pages = self.client.paging.get_multiple_pages_retry_second(raw=True)
self.assertIsNotNone(pages.next_link)
items = [i for i in pages]
self.assertEqual(len(items), 10)
def test_paging_sad_path(self):
pages = self.client.paging.get_single_pages_failure()
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure()
self.assertIsNotNone(pages.next_link)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure_uri()
with self.assertRaises(ValueError):
items = [i for i in pages]
pages = self.client.paging.get_single_pages_failure(raw=True)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure(raw=True)
self.assertIsNotNone(pages.next_link)
with self.assertRaises(CloudError):
items = [i for i in pages]
pages = self.client.paging.get_multiple_pages_failure_uri(raw=True)
with self.assertRaises(ValueError):
items = [i for i in pages]
if __name__ == '__main__':
unittest.main()
|
halcy/MastodonToTwitter
|
mtt/__init__.py
|
Python
|
mit
| 1,421 | 0 |
import os
from threading import RLock
from path import Path
import mtt.config as base_config # noqa
__all__ = ['config', 'lock']
class ConfigAccessor:
def __init__(self, configuration_items):
self.config = configuration_items
def update(self, other):
self.config.update(other.config if 'config' in other else other)
def __getattr__(self, item):
if item in self.config:
return self.config[item]
raise AttributeError(f'Unknown configuration option \'{item}\'')
def __getitem__(self, item):
try:
return self.config[item]
except KeyError:
raise KeyError(f'Unknown configuration option \'{item}\'')
def get_variables_in_module(module_name: str) -> ConfigAccessor:
module = globals().get(module_name, None)
module_type = type(os)
class_type = type(Path)
variables = {}
if module:
variables = {key: value for key, value in module.__dict__.items()
if not (key.startswith('__') or key.startswith('_'))
|
and not isinstance(value, module_type)
and not isinstance(value, class_type)}
return ConfigAccessor(variables)
config = get_variables_in_module('base_config')
try:
|
import mtt.user_config as user_config # noqa
config.update(get_variables_in_module('user_config'))
except ImportError:
pass
lock = RLock()
|
steveniemitz/scales
|
test/integration/thrift/test_mux.py
|
Python
|
mit
| 224 | 0.017857 |
import unittest
from integration.thrift.test_thrift import ThriftTestCase
from scales.thriftmux import ThriftMux
class ThriftMuxTestCase(ThriftTestCase):
BUILDER = ThriftMux
if __name__ == '__main__':
    unittest.main()
|
kgao/MediaDrop
|
mediacore/lib/auth/group_based_policy.py
|
Python
|
gpl-3.0
| 52 | 0 |
from mediadrop.lib.auth.group_based_policy import *
| |
coala/coala-bears
|
tests/python/requirements/PySafetyBearWithoutMockTest.py
|
Python
|
agpl-3.0
| 5,749 | 0 |
import os
from queue import Queue
from bears.python.requirements.PySafetyBear import PySafetyBear
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.testing.BearTestHelper import generate_skip_decorator
def get_testfile_path(name):
return os.path.join(os.path.dirname(__file__),
'PySafety_test_files',
name)
def load_testfile(name):
return open(get_testfile_path(name)).readlines()
@generate_skip_decorator(PySafetyBear)
class PySafetyBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = PySafetyBear(self.section, Queue())
def test_without_vulnerability(self):
self.check_validity(self.uut, ['lxml==3.6.0'])
def test_with_vulnerability(self):
self.check_invalidity(self.uut, ['bottle==0.10.1'])
def test_with_cve_vulnerability(self):
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
file_contents = [file_contents[0]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'bottle<0.12.10 is vulnerable '
'to CVE-2016-9964 and your project '
'is using 0.10.0.',
file=get_testfile_path(file_name),
line=1,
column=9,
end_line=1,
end_column=15,
severity=RESULT_SEVERITY.NORMAL,
additional_info='redirect() in bottle.py '
'in bottle 0.12.10 doesn\'t filter '
'a "\\r\\n" sequence, which leads '
'to a CRLF attack, as demonstrated '
'by a redirect("233\\r\\nSet-Cookie: '
'name=salt") call.'),
Result.from_values('PySafetyBear',
'bottle>=0.10,<0.10.12 is vulnerable to '
'CVE-2014-3137 and your project is '
'using 0.10.0.',
file=get_testfile_path(file_name),
line=1,
column=9,
end_line=1,
end_column=15,
severity=RESULT_SEVERITY.NORMAL,
additional_info='Bottle 0.10.x before 0.10.12,'
' 0.11.x before 0.11.7, and 0.12.x before'
' 0.12.6 does not properly limit content'
' types, which allows remote attackers to'
' bypass intended access restrictions via an'
' accepted Content-Type followed by a ;'
' (semi-colon) and a Content-Type that'
' would not be accepted, as demonstrated in'
' YouCo
|
mpleteM
|
e to execute arbitrary code.')],
filename=get_testfile_path(file_name))
def test_without_cve_vulnerability(self):
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
file_contents = [file_contents[1]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'locustio<0.7 is vulnerable to pyup.io-25878 '
'and your project is using 0.5.1.',
file=get_testfile_path(file_name),
line=1,
column=11,
end_line=1,
end_column=16,
severity=RESULT_SEVERITY.NORMAL,
additional_info='locustio before '
'0.7 uses pickle.',
)],
filename=get_testfile_path(file_name))
def test_with_cve_ignore(self):
self.section.append(Setting('cve_ignore', 'CVE-2016-9964, '
'CVE-2014-3137'))
file_name = 'requirement.txt'
file_contents = load_testfile(file_name)
# file_contents = [file_contents[0]]
self.check_results(
self.uut,
file_contents,
[Result.from_values('PySafetyBear',
'locustio<0.7 is vulnerable to pyup.io-25878 '
'and your project is using 0.5.1.',
file=get_testfile_path(file_name),
line=2,
column=11,
end_line=2,
end_column=16,
severity=RESULT_SEVERITY.NORMAL,
additional_info='locustio before '
'0.7 uses pickle.',
)],
filename=get_testfile_path(file_name))
def test_with_no_requirements(self):
self.check_validity(self.uut, [])
def test_with_no_pinned_requirements(self):
self.check_validity(self.uut, ['foo'])
|
TailorDev/pauling
|
api/models.py
|
Python
|
mit
| 2,935 | 0.001704 |
import datetime
import uuid
from flask import current_app as app
from flask import url_for
from database import db
from sqlalchemy import Column, DateTime, String, Text
from sqlalchemy.dialects.postgresql import UUID
class Poster(db.Model):
__tablename__ = 'posters'
id = Column(UUID(as_uuid=True), primary_key=True)
title = Column(String(400), nullable=False, default='Untitled')
authors = Column(Text)
abstract = Column(Text)
source_url = Column(String(400), nullable=False)
download_url = Column(String(400), nullable=False)
presented_at = Column(String(200))
    created_at = Column('create_date', DateTime, default=datetime.datetime.now)  # pass the callable so the default is evaluated per row, not at import time
id_admin = Column(UUID(as_uuid=True), unique=True, nullable=False)
email = Column(String(50))
def __init__(self, title, source_url, download_url, authors=None,
abstract=None, presented_at=None):
self.id = uuid.uuid4()
self.title = title
self.authors = authors
self.abstract = abstract
self.source_url = source_url
self.download_url = download_url
self.presented_at = presented_at
self.id_admin = uuid.uuid4()
def __repr__(self):
return '<User {}>'.format(str(self.id))
def serialize(self):
return {
'id': self.id,
'title': self.title,
'authors': self.authors,
'abstract': self.abstract,
'source_url': self.source_url,
'download_url': self.download_url,
'presented_at': self.presented_at,
'created_at': self.created_at.isoformat(),
'thumbnail_url': self.thumbnail_url(),
}
def public_url(self, absolute=False):
return url_for('get_poster', id=self.id, _external=absolute)
def admin_url(self, absolute=False):
return url_for('edit_poster', id_admin=self.id_admin, _external=absolute)
def qrco
|
de_svg_url(self, absolute=False):
return url_for('get_qrcode_svg', id=self.id, _external=absolute)
def qrcode_png_url(self, absolute=False):
return url_for('get_qrcode_png', id=self.id, _external=absolute)
def is_image(self):
return self.download_url.endswith('.png') or self.download_url.endswith('.jpg')
def viewable_download_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
if self.is_image() or self.download_url.startswith(cl
|
oudinary):
return self.download_url
return '{}/image/fetch/{}'.format(cloudinary, self.download_url)
def thumbnail_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
transformations = 'c_thumb,w_370,h_200,f_png'
if self.download_url.startswith(cloudinary):
return self.download_url.replace('/upload/', '/upload/{}/'.format(transformations))
return '{}/image/fetch/{}/{}'.format(cloudinary, transformations, self.download_url)
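    # Illustrative example (base URL assumed, not from the original config): with
    # CLOUDINARY_BASE_URL = 'https://res.cloudinary.com/demo' and
    # download_url = 'https://example.org/poster.pdf', thumbnail_url() returns
    # 'https://res.cloudinary.com/demo/image/fetch/c_thumb,w_370,h_200,f_png/https://example.org/poster.pdf'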
|
dyomas/pyhrol
|
examples/example_0050.py
|
Python
|
bsd-3-clause
| 1,983 | 0.00706 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, 2014, Pyhrol, pyhrol@rambler.ru
# GEO: N55.703431,E37.623324 .. N48.742359,E44.536997
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 4. Neither the name of the Pyhrol nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIAB
|
ILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WA
|
Y
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import example_0050
try:
example_0050.function_with_keywords()
except TypeError as ex:
print '***', ex
try:
example_0050.function_with_keywords(arg1 = 1)
except TypeError as ex:
print '***', ex
example_0050.function_with_keywords(counter = 1, description = "One")
example_0050.function_with_keywords(description = "Two", counter = 2)
|
Dennisparchkov/rumal
|
interface/management/commands/fdaemon.py
|
Python
|
gpl-2.0
| 10,312 | 0.001649 |
#!/usr/bin/env python
#
# fdaemon.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Author: Tarun Kumar <reach.tarun.here AT gmail.com>
# NOTE: THIS IS AN INITIAL RELEASE AND IS LIKELY TO BE UNSTABLE
import ConfigParser
import logging
import os
import time
from django.core.management.base import BaseCommand, CommandError
from interface.models import *
from django.core import serializers
import pymongo
import gridfs
from bson import ObjectId
from bson.json_util impo
|
rt loads,dumps
import json
from bson import json_util
from interface.producer import Producer
import pika
STATUS_NEW = 0 # identifies local status of task
STATUS_PROCESSING = 1
STATUS_FAILED = 2
STATUS_COMPLETED = 3
STATUS_TIMEOUT = 4
NEW_SCAN_TASK = 1 # identifies data being sent to back end
SEND_ANY = 'Any'
ANY_QUEUE = 'an
|
y_queue'
PRIVATE_QUEUE = 'private_queue'
RPC_PORT = 5672
config = ConfigParser.ConfigParser()
config.read(os.path.join(settings.BASE_DIR, "conf", "backend.conf"))
BACKEND_HOST = config.get('backend', 'host', 'localhost')
# mongodb connection settings
client = pymongo.MongoClient()
db = client.thug
dbfs = client.thugfs
fs = gridfs.GridFS(dbfs)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    active_scans = []  # List of started threads waiting for a result to be returned from backend or timeout
def fetch_new_tasks(self):
return Task.objects.filter(status__exact=STATUS_NEW).order_by('submitted_on')
def fetch_pending_tasks(self):
return Task.objects.filter(status__exact=STATUS_PROCESSING)
# Task.objects.filter(status__exact=STATUS_PROCESSING).update(status=STATUS_NEW)
def mark_as_running(self, task):
logger.debug("[{}] Marking task as running".format(task.id))
task.started_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_PROCESSING
task.save()
def mark_as_failed(self, task):
logger.debug("[{}] Marking task as failed".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_FAILED
task.save()
def mark_as_timeout(self, task):
logger.debug("[{}] Marking task timeout".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_TIMEOUT
task.save()
def mark_as_completed(self, task):
logger.debug("[{}] Marking task as completed".format(task.id))
task.completed_on = datetime.now(pytz.timezone(settings.TIME_ZONE))
task.status = STATUS_COMPLETED
task.save()
def renderTaskDetail(self, pkval):
return dumps(
loads(
serializers.serialize(
'json',
[Task.objects.get(pk=pkval), ]
)
)[0]
)
def post_new_task(self, task):
temp1 = loads(self.renderTaskDetail(task.id))
temp = temp1['fields']
backend = temp.pop("backend")
temp.pop("user")
temp.pop("sharing_model")
temp.pop("plugin_status")
temp.pop("sharing_groups")
temp.pop("star")
temp["frontend_id"] = temp1.pop("pk")
temp["task"] = NEW_SCAN_TASK
logger.debug("Posting task {}".format(temp["frontend_id"]))
if backend == SEND_ANY:
# start the thread to post the scan on any queue
scan = Producer(json.dumps(temp),
BACKEND_HOST,
RPC_PORT,
ANY_QUEUE,
temp["frontend_id"])
scan.start()
self.active_scans.append(scan)
self.mark_as_running(task)
else:
# start the thread to post the scan on private queue
scan = Producer(json.dumps(temp),
backend,
RPC_PORT,
PRIVATE_QUEUE,
temp["frontend_id"])
scan.start()
self.active_scans.append(scan)
self.mark_as_running(task)
def search_samples_dict_list(self, search_id,sample_dict):
# returns new gridfs sample_id
for x in sample_dict:
if x["_id"] == search_id:
return x["sample_id"]
def retrieve_save_document(self, response, files):
# now files for locations
for x in response["locations"]:
if x['content_id'] is not None:
dfile = [
item["data"] for item in files
if str(item["content_id"]) == x["content_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
                # now change id in response
x['location_id'] = new_fs_id
# now for samples
for x in response["samples"]:
dfile = [
item["data"] for item in files
if str(item["sample_id"]) == x["sample_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
            # now change id in response
x['sample_id'] = new_fs_id
# same for pcaps
for x in response["pcaps"]:
if x['content_id'] is not None:
dfile = [
item["data"] for item in files
if str(item["content_id"]) == x["content_id"]
][0]
new_fs_id = str(fs.put(dfile.encode('utf-8')))
                # now change id in response
x['content_id'] = new_fs_id
        # for vt, andro etc. point sample_id to gridfs id
# check for issues in this
for x in response["virustotal"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["honeyagent"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["androguard"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
for x in response["peepdf"]:
x['sample_id'] = self.search_samples_dict_list(x['sample_id'],
response["samples"])
# remove id from all samples and pcaps
for x in response["samples"]:
x.pop("_id")
response.pop("_id")
frontend_analysis_id = db.analysiscombo.insert(response)
return frontend_analysis_id
def process_response(self, task):
analysis = json.loads(task.response, object_hook=decoder)
if analysis["status"] is STATUS_COMPLETED:
logger.info("Task Completed")
analysis_response = analysis["data"]
files = json_util.loads(analysis["files"])
local_task = Task.objects.get(id=analysis_response["frontend_id"])
frontend_analysis_id = self.retrieve_save_document(analysis_response,
files)
local_task.object_id = frontend_analysis_id
local_task.save()
self.mark_as_completed(local_task)
self.active_scans.remove(task)
else:
logger.info("Task Failed")
local_scan = Task.objects.get(id=analysis["data"])
self.mark
|
hacklab-fi/hhlevents
|
hhlevents/apps/hhlregistrations/migrations/0005_auto_20150412_1806.py
|
Python
|
bsd-3-clause
| 592 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db impor
|
t models, migrations
class Migration(migrations.Migration):
dependencies = [
('hhlregistrations', '0004_auto_
|
20150411_1935'),
]
operations = [
migrations.AddField(
model_name='event',
name='payment_due',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='event',
name='require_registration',
field=models.BooleanField(default=False),
),
]
|
wozz/electrum-myr
|
lib/transaction.py
|
Python
|
gpl-3.0
| 30,650 | 0.006754 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error
import time
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import mmap
import random
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def map_file(self, file, start): # Initialize with bytes from file
self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
self.read_cursor = start
def seek_file(self, position):
self.read_cursor = position
def close_file(self):
self.input.close()
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
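    # Worked examples (illustrative, not from the original file), assuming the
    # little-endian packing used by _write_num below; bytes shown in hex:
    #   write_compact_size(100)    -> 64
    #   write_compact_size(1000)   -> fd e8 03
    #   write_compact_size(100000) -> fe a0 86 01 00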
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
|
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
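# Illustrative usage of Enumeration (not from the original file): names map to
# integers and back, e.g.
#   days = Enumeration("Days", ["Mon", ("Thu", 4)])
#   days.Mon == 0; days.Thu == 4; days.whatis(4) == 'Thu'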
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return
|
bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def parse_redeemScript(bytes):
dec = [ x for x in script_GetOp(bytes.decode('hex')) ]
# 2 of 2
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex') ]
return 2, pubkeys
# 2 of 3
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex'), dec[3][1].encode('hex') ]
return 2, pubkeys
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESE
|
Bleno/sisgestor-django
|
docente/apps.py
|
Python
|
mit
| 89 | 0 |
from djang
|
o.apps import AppConfig
class DocenteConfig(AppConfig):
nam
|
e = 'docente'
|
woddx/privacyidea
|
tests/base.py
|
Python
|
agpl-3.0
| 4,803 | 0.000625 |
import unittest
import json
from privacyidea.app import create_app
from privacyidea.models import db
from privacyidea.lib.resolver import (save_resolver)
from privacyidea.lib.realm import (set_realm)
from privacyidea.lib.user import User
from privacyidea.lib.auth import create_db_admin
from privacyidea.api.lib.postpolicy import DEFAULT_POLICY_TEMPLATE_URL
PWFILE = "tests/testdata/passwords"
class FakeFlaskG():
policy_object = None
class MyTestCase(unittest.TestCase):
resolvername1 = "resolver1"
resolvername2 = "Resolver2"
resolvername3 = "reso3"
realm1 = "realm1"
realm2 = "realm2"
serials = ["SE1", "SE2", "SE3"]
otpkey = "3132333435363738393031323334353637383930"
@classmethod
def setUpClass(cls):
cls.app = create_app('testing', "")
cls.app_context = cls.app.app_context()
cls.app_context.push()
db.create_all()
# Create an admin for tests.
create_db_admin(cls.app, "testadmin", "admin@test.tld", "testpw")
def setUp_user_realms(self):
# create user realm
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm1,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm1,
resolver=self.resolvername1)
user_str = "%s" % user
|
self.assertTrue(user_str == "<root.resolver1@realm1>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "%r" % user
expected = "User(login='root', realm='realm1', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
def setUp_user_realm2(self):
# create user realm
rid = save_resolver({"resolver": self.resolvername1,
"type": "passw
|
dresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm2,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm2,
resolver=self.resolvername1)
user_str = "%s" % user
self.assertTrue(user_str == "<root.resolver1@realm2>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "%r" % user
expected = "User(login='root', realm='realm2', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
@classmethod
def tearDownClass(cls):
db.session.remove()
db.drop_all()
cls.app_context.pop()
def setUp(self):
self.authenticate()
def authenticate(self):
with self.app.test_request_context('/auth',
data={"username": "testadmin",
"password": "testpw"},
method='POST'):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status"), res.data)
self.at = result.get("value").get("token")
def authenticate_selfserive_user(self):
with self.app.test_request_context('/auth',
method='POST',
data={"username":
"selfservice@realm1",
"password": "test"}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status"), res.data)
# In self.at_user we store the user token
self.at_user = result.get("value").get("token")
# check that this is a user
role = result.get("value").get("role")
self.assertTrue(role == "user", result)
self.assertEqual(result.get("value").get("realm"), "realm1")
# Test logout time
self.assertEqual(result.get("value").get("logout_time"), 120)
self.assertEqual(result.get("value").get("policy_template_url"),
DEFAULT_POLICY_TEMPLATE_URL)
|
geojames/Dart_EnvGIS
|
Week6-2_Matplotlib_Adv.py
|
Python
|
mit
| 3,114 | 0.008992 |
#------------------------------------------------------------------------------
__author__ = 'James T. Dietrich'
__contact__ = 'james.t.dietrich@dartmouth.edu'
__copyright__ = '(c) James Dietrich 2016'
__license__ = 'MIT'
__date__ = 'Wed Nov 16 11:33:39 2016'
__version__ = '1.0'
__status__ = "initial release"
__url__ = "https://github.com/geojames/..."
"""
Name: Week6-2_Matplotlib_Adv.py
Compatibility: Python 3.5
Description: Advanced Matplotlib examples - meshgrid surfaces, 3D subplots, formatted math text and twin y-axis plots
URL: https://github.com/geojames/...
Requires: libraries
Dev ToDo:
AUTHOR: James T. Dietrich
ORGANIZATION: Dartmouth College
Contact: james.t.dietrich@dartmout
|
h.edu
Copyright: (c) James Dietrich 2016
"""
#-
|
-----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# MESHGRID
#
# Meshgrid is a command/function that allows you to easily build X and Y
# grids from 1-D arrays/vectors which can be used to evaluate equations
# in 2D or 3D space
# different conventions for naming meshed variables
# x > xv
# x > xx
# x > xg
#
# meshgrid takes to 1-D arrays of X and Y coordinates and returns two X and Y
# "meshes" 2D arrays that cover the X and Y spaces
x = np.linspace(-10.,10.,30)
y = np.linspace(-10.,10.,30)
xg, yg = np.meshgrid(x,y)
r = np.sqrt((xg**2 + yg**2))
z = np.sin(r) * xg**2
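# Shape check (illustrative): with len(x) == len(y) == 30 above, xg, yg, r and
# z all come out with shape (30, 30), one value per (x, y) grid point.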
plt.pcolor(z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(xg, yg, z,rstride=1, cstride=1, cmap='coolwarm')
# 3D Subplots
fig = plt.figure(figsize=plt.figaspect(0.33333))
ax1 = fig.add_subplot(1, 3, 1)
ax1.pcolor(z, cmap = 'hot')
ax2 = fig.add_subplot(1, 3, 2, projection='3d')
ax2.plot_surface(xg, yg, z,rstride=1, cstride=1, cmap='hot')
ax3 = fig.add_subplot(1, 3, 3, projection='3d')
ax3.contour(xg,yg,z)
#%% Formatted text for plots
#
# Matplotlib
# http://matplotlib.org/users/mathtext.html#mathtext-tutorial
# it basically uses TeX syntax and formatting codes
def f(x,y):
return (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 -y**2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
xx,yy = np.meshgrid(x, y)
#plt.axes([0.025, 0.025, 0.95, 0.95])
plt.contourf(xx, yy, f(xx, yy), 8, alpha=.75, cmap=plt.cm.hot)
C = plt.contour(xx, yy, f(xx, yy), 8, colors='black', linewidth=0.5)
plt.clabel(C, inline=1, fontsize=10)
plt.text (-2.5,-2,r'$\frac{1-x}{2 + x^5 + y^3} \times e^{(-x^2 -y^2)}$',fontsize=20)
plt.xlabel(r'$\mathbf{Bold \ x}$ x', fontsize=20)
plt.ylabel(r'$\mathit{Y-Label}$', fontsize=20)
plt.title('Regular ' r'$\mathbf{Bold}$ $\mathit{and \ italic}$ words')
#%% Double Y Axis Plots (from the Matplotlib Gallery)
fig, ax1 = plt.subplots()
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('exp', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
s2 = np.sin(2*np.pi*t)
ax2.plot(t, s2, 'r.')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/vm/disk_encryption.py
|
Python
|
mit
| 27,499 | 0.004291 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import os
from knack.log import get_logger
from azure.cli.core.commands import LongRunningOperation
from azure.cli.command_modules.vm.custom import set_vm, _compute_client_factory, _is_linux_os
from azure.cli.command_modules.vm._vm_utils import get_key_vault_base_url, create_keyvault_data_plane_client
_DATA_VOLUME_TYPE = 'DATA'
_ALL_VOLUME_TYPE = 'ALL'
_STATUS_ENCRYPTED = 'Encrypted'
logger = get_logger(__name__)
vm_extension_info = {
'Linux': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryptionForLinux',
'version': '1.1',
'legacy_version': '0.1'
},
'Windows': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryption',
'version': '2.2',
'legacy_version': '1.1'
}
}
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
if not ade_ext_info:
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
if use_instance_view:
exts = vm.instance_view.extensions or []
r = next((e for e in exts if e.type and e.type.lower().startswith(ade_ext_info['publisher'].lower()) and
e.name.lower() == ade_ext_info['name'].lower()), None)
else:
exts = vm.resources or []
r = next((e for e in exts if (e.publisher.lower() == ade_ext_info['publisher'].lower() and
e.type_properties_type.lower() == ade_ext_info['name'].lower())), None)
return r
def _detect_ade_status(vm):
if vm.storage_profile.os_disk.encryption_settings:
return False, True
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
ade = _find_existing_ade(vm, ade_ext_info=ade_ext_info)
if ade is None:
return False, False
if ade.type_handler_version.split('.')[0] == ade_ext_info['legacy_version'].split('.')[0]:
return False, True
return True, False # we believe impossible to have both old & new ADE
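# Illustrative CLI invocation (resource names are placeholders, not from the
# original source); a command along these lines appears to reach the
# encrypt_vm handler below, supplying disk_encryption_keyvault and volume_type:
#   az vm encryption enable -g MyGroup -n MyVM \
#       --disk-encryption-keyvault MyVault --volume-type ALL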
def encrypt_vm(cmd, resource_group_name, vm_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
aad_client_id=None,
aad_client_secret=None, aad_client_cert_thumbprint=None,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
encrypt_format_all=False,
|
force=False):
from msrestazure.tools import parse_resource_id
from knack.util import CLIError
# pylint: disable=no-member
|
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
is_linux = _is_linux_os(vm)
backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
_, has_old_ade = _detect_ade_status(vm)
use_new_ade = not aad_client_id and not has_old_ade
extension = vm_extension_info['Linux' if is_linux else 'Windows']
if not use_new_ade and not aad_client_id:
raise CLIError('Please provide --aad-client-id')
# 1. First validate arguments
if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')
if volume_type is None:
if not is_linux:
volume_type = _ALL_VOLUME_TYPE
elif vm.storage_profile.data_disks:
raise CLIError('VM has data disks, please supply --volume-type')
else:
volume_type = 'OS'
# sequence_version should be unique
sequence_version = uuid.uuid4()
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(
cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vm, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key: # if key name and not key url
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
# The following logic was mostly ported from xplat-cli
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
'KeyEncryptionKeyURL': key_encryption_key,
'KeyEncryptionAlgorithm': key_encryption_algorithm,
'SequenceVersion': sequence_version,
}
if use_new_ade:
public_config.update({
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
})
else:
public_config.update({
'AADClientID': aad_client_id,
'AADClientCertThumbprint': aad_client_cert_thumbprint,
})
ade_legacy_private_config = {
'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
}
VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
'KeyVaultKeyReference', 'SubResource')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
type_properties_type=extension['name'],
protected_settings=None if use_new_ade else ade_legacy_private_config,
type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.begin_create_or_update(
resource_group_name, vm_name, extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
# verify the extension was ok
extension_result = compute_client.virtual_machine_extensions.get(
resource_group_name, vm_name, extension['name'], 'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError('Extension needed for disk encryption was not provisioned correctly')
if not use_new_ade:
if not (extension_result.instance_view.statuses and
extension_result.instance_view.statuses[0].message):
raise CLIError('Could not find url pointing to the secret for disk encryption')
# 3. update VM's storage profile with the secrets
status_url = extension_result.instance_view.statuses[0].message
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
secret_ref = KeyVaultSecretReference(secret_url=status_url,
source_vault=SubResource(id=disk_encryption_keyvault))
key_encryption_key_obj = None
if key_encr
|
anzenehansen/wedding-photos
|
plugins/bases/handlers.py
|
Python
|
mpl-2.0
| 4,489 | 0.008911 |
import tornado.web
import traceback
from plugins.bases.plugin import PluginBase
import os
import sys
import gdata
import gdata.youtube
import gdata.youtube.service
class HandlersBase(tornado.web.RequestHandler, PluginBase):
# Every handler must have a web path, override this in this fashion
WEB_PATH = r"/"
STORE_ATTRS = True
STORE_UNREF = True
# Specifies what JS and CSS files to load from templates/bootstrap/[css|js]
JS_FILES = []
CSS_FILES = []
# Used as a default for every page
PAGE_TITLE = "Home"
def initialize(self, **kwargs):
self.sysconf = kwargs.get("sysconf", None)
def get_template_path(self):
return "%s/templates" % os.path.dirname(os.path.realpath(sys.argv[0]))
# Initialize YouTube reference to perform actions
def yt_instance(self):
self.yt_service = gdata.youtube.service.YouTubeService()
self.yt_service.ssl = True
self.yt_service.developer_key = self.sysconf.devid
self.yt_service.client_id = self.sysconf.clientid
self.yt_service.email = self.sysconf.gmail
self.yt_service.password = self.sysconf.gpass
self.yt_service.source = self.sysconf.clientid
self.yt_service.ProgrammaticLogin()
# Simple class property to return the playlist URI
@property
def yt_plist_uri(self):
return "http://gdata.youtube.com/feeds/api/playlists/%s" % self.sysconf.playlist
# Return the data about the playlist
def yt_playlist(self):
return self.yt_service.GetYouTubePlaylistVideo
|
Feed(uri=self.yt_plist_uri)
# Get total number of videos in playlist
def yt_playlist_count(self):
plist = self.yt_playlist()
entry = []
for e in plist.entry:
entry.append(e)
return len(entry)
# Wrapper to get upload token for YouTube video post req
|
uest
def yt_uploadtoken(self, mg):
video_entry = gdata.youtube.YouTubeVideoEntry(media=mg)
response = self.yt_service.GetFormUploadToken(video_entry)
return (response[0], response[1])
# This defines various aspects of the video
def yt_mediagroup(self, title, desc):
return gdata.media.Group(
title = gdata.media.Title(text=title),
description = gdata.media.Description(description_type='plain', text=desc),
keywords=gdata.media.Keywords(text='amber, eric, wedding, 2013, october, 31, halloween'),
category=[gdata.media.Category(
text='People',
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label='People')],
player=None
)
# Adds a video to playlist
def yt_vid2pl(self, vidid, title, desc):
video_entry = self.yt_service.AddPlaylistVideoEntryToPlaylist(
self.yt_plist_uri, vidid, title, desc
)
if isinstance(video_entry, gdata.youtube.YouTubePlaylistVideoEntry):
return 1
return 0
"""
show
Wrapper around RequestHandler's render function to make rendering these templates easier/better.
This way the class just has to specify what special CSS and/or JavaScript files to load (see handlers/main),
and it is automatically passed to the template engine to parse and deal with.
Easier management and use IMO.
"""
def show(self, templ, **kwargs):
# What JavaScript files to load?
js = ["jquery", "bootstrap.min", "common", "jquery.prettyPhoto"]
js.extend(self.JS_FILES)
# CSS files we want for the particular page
css = ["common", "prettyPhoto"]
css.extend(self.CSS_FILES)
# We pass specifics to the page as well as any uniques via kwargs passed from here
self.render("%s.html" % templ,
js=js, css=css,
page_title=self.PAGE_TITLE,
plistid=self.sysconf.playlist,
**kwargs)
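    # Illustrative call from a handler subclass (template name assumed, not
    # from the original source):
    #   self.show("index", videos=video_list)
    # renders templates/index.html with the shared JS/CSS lists, the page
    # title and the playlist id, plus any extra keyword arguments.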
def write_error(self, status_code, **kwargs):
path = os.path.dirname(os.path.realpath(sys.argv[0]))
_,err,_ = kwargs['exc_info']
msg = "Unfortunately an error has occured. If you believe this is in error, please contact support.<br /><br />"
msg += "Error: %s" % (err)
self.show("%s/templates/message" % path, path=path, message=msg)
|
PEDSnet/pedsnetcdms
|
pedsnetcdms/pedsnetcdm/alembic/versions/cfbd6d35cab_initial.py
|
Python
|
bsd-2-clause
| 30,989 | 0.012101 |
"""Initial
Revision ID: cfbd6d35cab
Revises:
Create Date: 2015-03-04 04:13:56.547992
"""
# revision identifiers, used by Alembic.
revision = 'cfbd6d35cab'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('location',
sa.Column('city', sa.String(length=50), nullable=True),
sa.Column('zip', sa.String(length=9), nullable=True),
sa.Column('county', sa.String(length=50), nullable=True),
sa.Column('state', sa.String(length=2), nullable=True),
sa.Column('address_1', sa.String(length=100), nullable=True),
sa.Column('address_2', sa.String(length=100), nullable=True),
sa.Column('location_source_value', sa.String(length=300), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('location_id', name=op.f('pk_location'))
)
op.create_index(op.f('ix_location_location_id'), 'location', ['location_id'], unique=False, postgresql_ops={})
op.create_table('cohort',
sa.Column('cohort_end_date', sa.DateTime(), nullable=True),
sa.Column('cohort_id', sa.Integer(), nullable=False),
sa.Column('subject_id', sa.Integer(), nullable=False),
sa.Column('stop_reason', sa.String(length=100), nullable=True),
sa.Column('cohort_concept_id', sa.Integer(), nullable=False),
sa.Column('cohort_start_date', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('cohort_id', name=op.f('pk_cohort'))
)
op.create_index(op.f('ix_cohort_cohort_id'), 'cohort', ['cohort_id'], unique=False, postgresql_ops={})
op.create_table('organization',
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('place_of_service_concept_id', sa.Integer(), nullable=True),
sa.Column('place_of_service_source_value', sa.String(length=100), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('organization_source_value', sa.String(length=50), nullable=False),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_organization_location_id_location')),
sa.PrimaryKeyConstraint('organization_id', name=op.f('pk_organization')),
sa.UniqueConstraint('organization_source_value', name=op.f('uq_organization_organization_source_value'))
)
op.create_index(op.f('ix_organization_location_id'), 'organization', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_organization_organization_
|
id'), 'organization', ['organization_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_organization_organization_source_value_place_of_service_source_value'), 'organization', ['organization_source_value', 'place_of_service_source_value'], unique=False, postgresql_ops={u'place_of_service_source_value': u'varchar_pattern_ops', u'organization_source_valu
|
e': u'varchar_pattern_ops'})
op.create_table('care_site',
sa.Column('place_of_service_source_value', sa.String(length=100), nullable=True),
sa.Column('place_of_service_concept_id', sa.Integer(), nullable=True),
sa.Column('care_site_source_value', sa.String(length=100), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_care_site_location_id_location')),
sa.ForeignKeyConstraint(['organization_id'], [u'organization.organization_id'], name=op.f('fk_care_site_organization_id_organization')),
sa.PrimaryKeyConstraint('care_site_id', name=op.f('pk_care_site')),
sa.UniqueConstraint('organization_id', 'care_site_source_value', name=op.f('uq_care_site_organization_id_care_site_source_value'))
)
op.create_index(op.f('ix_care_site_care_site_id'), 'care_site', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_care_site_location_id'), 'care_site', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_care_site_organization_id'), 'care_site', ['organization_id'], unique=False, postgresql_ops={})
op.create_table('provider',
sa.Column('provider_id', sa.Integer(), nullable=False),
sa.Column('npi', sa.String(length=20), nullable=True),
sa.Column('specialty_concept_id', sa.Integer(), nullable=True),
sa.Column('provider_source_value', sa.String(length=100), nullable=False),
sa.Column('dea', sa.String(length=20), nullable=True),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('specialty_source_value', sa.String(length=300), nullable=True),
sa.ForeignKeyConstraint(['care_site_id'], [u'care_site.care_site_id'], name=op.f('fk_provider_care_site_id_care_site')),
sa.PrimaryKeyConstraint('provider_id', name=op.f('pk_provider'))
)
op.create_index(op.f('ix_provider_care_site_id'), 'provider', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_provider_provider_id'), 'provider', ['provider_id'], unique=False, postgresql_ops={})
op.create_table('person',
sa.Column('provider_id', sa.Integer(), nullable=True),
sa.Column('ethnicity_concept_id', sa.Integer(), nullable=True),
sa.Column('ethnicity_source_value', sa.String(length=50), nullable=True),
sa.Column('person_source_value', sa.String(length=100), nullable=False),
sa.Column('month_of_birth', sa.Numeric(precision=2, scale=0), nullable=True),
sa.Column('pn_time_of_birth', sa.DateTime(), nullable=True),
sa.Column('day_of_birth', sa.Numeric(precision=2, scale=0), nullable=True),
sa.Column('year_of_birth', sa.Numeric(precision=4, scale=0), nullable=False),
sa.Column('gender_source_value', sa.String(length=50), nullable=True),
sa.Column('race_source_value', sa.String(length=50), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('care_site_id', sa.Integer(), nullable=False),
sa.Column('gender_concept_id', sa.Integer(), nullable=False),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('race_concept_id', sa.Integer(), nullable=True),
sa.Column('pn_gestational_age', sa.Numeric(precision=4, scale=2), nullable=True),
sa.ForeignKeyConstraint(['care_site_id'], [u'care_site.care_site_id'], name=op.f('fk_person_care_site_id_care_site')),
sa.ForeignKeyConstraint(['location_id'], [u'location.location_id'], name=op.f('fk_person_location_id_location')),
sa.ForeignKeyConstraint(['provider_id'], [u'provider.provider_id'], name=op.f('fk_person_provider_id_provider')),
sa.PrimaryKeyConstraint('person_id', name=op.f('pk_person')),
sa.UniqueConstraint('person_source_value', name=op.f('uq_person_person_source_value'))
)
op.create_index(op.f('ix_person_care_site_id'), 'person', ['care_site_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_location_id'), 'person', ['location_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_person_id'), 'person', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_person_provider_id'), 'person', ['provider_id'], unique=False, postgresql_ops={})
op.create_table('death',
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('death_type_concept_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('death_date', sa.DateTime(), nullable=False),
sa.Column('cause_of_death_concept_id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('cause_of_death_source_value', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['person_id'], [u'person.person_id'], name=op.f('fk_death_person_id_person')),
sa.PrimaryKeyConstraint('person_id', 'death_type_concept_id', 'cause_of_death_concept_id', name=op.f('pk_death'))
)
op.create_index(op.f('ix_death_person_id'), 'death', ['person_id'], unique=False, postgresql_ops={})
op.create_index(op.f('ix_death_person_id_death_type_concept_id_cause_of_death_concept_id'), 'death', ['pers
|
Azure/azure-sdk-for-python
|
sdk/identity/azure-identity/tests/test_msal_client.py
|
Python
|
mit
| 1,769 | 0.001131 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.exceptions import ServiceRequestError
from azure.identity._internal.msal_client import MsalClient
import pytest
from helpers import mock, mock_response, validating_transport, Request
def test_retries_requests():
"""The client should retry token requests"""
message = "can't connect"
transport = mock.Mock(send=mock.Mock(side_effect=ServiceRequestError(message)))
client = MsalClient(transport=transport)
with pytest.raises(ServiceRequestError, match=message):
client.post("https://localhost")
assert transport.send.call_count > 1
transport.send.reset_mock()
with pytest.raises(ServiceRequestEr
|
ror, match=message):
client.get("https://localhost")
assert transport.send.call_count > 1
def test_get_error_response():
first_result = {"error": "first"}
first_response = mock_response(401, json_payload=first_result)
second_result = {"error": "second"}
second_response = mock_respon
|
se(401, json_payload=second_result)
transport = validating_transport(
requests=[Request(url="https://localhost")] * 2, responses=[first_response, second_response]
)
client = MsalClient(transport=transport)
for result in (first_result, second_result):
assert not client.get_error_response(result)
client.get("https://localhost")
response = client.get_error_response(first_result)
assert response is first_response
client.post("https://localhost")
response = client.get_error_response(second_result)
assert response is second_response
assert not client.get_error_response(first_result)
|
TheGentlemanOctopus/thegentlemanoctopus
|
octopus_code/core/octopus/patterns/spiralOutFast.py
|
Python
|
gpl-3.0
| 1,846 | 0.008667 |
from pattern import Pattern
import copy
import numpy as np
import random
import collections
#from scipy.signal import convolve2d
import time
from collections import deque
class SpiralOutFast(Pattern):
def __init__(self):
self.register_param("r_leak", 0, 3, 1.2)
self.register_param("g_leak", 0, 3, 1.7)
self.register_param("b_leak", 0, 3, 2)
self.register_param("speed", 0, 1 , 0)
#Initialise time and color history
self.t = [0,0]
self.r = [0,0]
self.g = [0,0]
self.b = [0,0]
self.buff_len = 1500
self.start_time = np.float(time.time())
def on_pattern_select(self, octopus):
self.pixels = octopus.pixels_spiral()
self.previous_time = np.float16(time.time())
def next_frame(self, octopus, data):
current_time = time.time() - self.start_time
self.previous_time = current_time
scale = float(255)
self.t.append(current_time)
self.r.append(scale*np.mean([data.eq[0], data.eq[1]]))
self.g.append(scale*np.mean([data.eq[2], data.eq[3]]))
self.b.append(scale*np.mean([data.eq[4], data.eq[5], data.eq[6]]))
if len(self.t) > self.buff_len:
del self.t[0]
del s
|
elf.r[0]
del self.g[0]
del self.b[0]
domain_r = np.linspace(current_time, current_time - self.r_leak, len(self.pixels))
domain_g = np.linspace(current_time, current_time - self.g_leak, len(self.pixels))
domain_b = np.linspace(current_time, current_time - self.b_leak, len(self.pixels))
r = np.interp(domain_r, self.t, self.r)
|
g = np.interp(domain_g, self.t, self.g)
b = np.interp(domain_b, self.t, self.b)
for i in range(len(self.pixels)):
self.pixels[i].color = (r[i], g[i], b[i])
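        # Illustrative mapping (values assumed): with r_leak = 1.2 and, say,
        # 100 spiral pixels, pixel 0 gets the red level at the current time
        # and the last pixel gets the level from 1.2 s earlier, so beats
        # appear to travel outward along the spiral.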
|
octogene/hadaly
|
hadaly/painter.py
|
Python
|
gpl-3.0
| 3,680 | 0.00163 |
# -*- coding: utf-8 -*-
import math
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line
from kivy.vector import Vector
from kivy.properties import StringProperty, DictProperty, BooleanProperty, BoundedNumericProperty, ListProperty
class Painter(Widget):
tools = DictProperty({'arrow': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'line': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'freeline': {'color': (1, 1, 1, 1), 'thickness': 0.5},
'eraser': {'thickness': 0.4}
})
current_tool = StringProperty('arrow')
thickness = BoundedNumericProperty(1, min=0.5, max=10, errorvalue=0.5)
color = ListProperty((1, 1, 1, 1))
locked = BooleanProperty(False)
def on_thickness(self, instance, value):
self.tools[self.current_tool]['thickness'] = value
def on_color(self, instance, value):
self.tools[self.current_tool]['color'] = value
def on_current_tool(self, instance, value):
self.color = self.tools[value]['color']
self.thickness = self.tools[value]['thickness']
def on_touch_down(self, touch):
if not self.locked and self.collide_point(*touch.pos):
touch.grab(self)
with self.canvas:
|
Color(*self.color, mode='rgba')
touch.ud['line'] = Line(points=(touch.x, touch.y), width=self.thickness, cap='round', joint='miter')
if self.current_tool == 'arrow':
touch.ud['arrowhead'] = Line(width=self.thickness, cap='square', joint='miter')
touch.ud['initial_pos'] = touch.pos
else:
return
|
False
return super(Painter, self).on_touch_down(touch)
def on_touch_move(self, touch):
if not self.locked and self.collide_point(*touch.pos):
try:
if self.current_tool == 'freeline':
touch.ud['line'].points += [touch.x, touch.y]
else:
touch.ud['line'].points = [touch.ox, touch.oy, touch.x, touch.y]
except KeyError:
pass
else:
return False
return super(Painter, self).on_touch_move(touch)
def arrowhead(self, start, end):
'''
start : list of points (x, y) for the start of the arrow.
end : list of points (x, y) for the end of the arrow.
return : list of points for each line forming the arrow head.
'''
# TODO: Adjust arrowhead size according to line thickness.
A = Vector(start)
B = Vector(end)
h = 10 * math.sqrt(3)
w = 10
U = (B - A) / Vector(B - A).length()
V = Vector(-U.y, U.x)
v1 = B - h * U + w * V
v2 = B - h * U - w * V
return (v1, v2)
def on_touch_up(self, touch):
if not self.locked and touch.grab_current == self and \
self.collide_point(*touch.pos) and self.current_tool == 'arrow':
try:
arrowhead = self.arrowhead(touch.ud['initial_pos'], touch.pos)
except KeyError:
pass
except ZeroDivisionError:
pass
else:
touch.ud['arrowhead'].points += arrowhead[0]
touch.ud['arrowhead'].points += (touch.x, touch.y)
touch.ud['arrowhead'].points += arrowhead[1]
touch.ungrab(self)
else:
return False
return super(Painter, self).on_touch_up(touch)
def on_size(self, *kwargs):
# TODO: Update every drawing according to size.
self.canvas.clear()
|