repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---

j-herrera/icarus | icarus_site/ISStrace/urls.py | Python | gpl-2.0 | 279 | 0.014337
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.map, name='map'),
url(r'^mapSim', views.mapSim, name='mapSim'),
url(r'^api/getPos', views.getPos, name='getPos'),
url(r'^api/getProjAndPos', views.getProjAndPos, name='getProjAndPos'),
]

scholer/nascent | nascent/graph_sim_nx/debug.py | Python | agpl-3.0 | 1,923 | 0.00832
# -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, rasmusscholer@gmail.com
##
## This file is part of Nascent.
##
## Nascent is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable=C0103,C0111,W0613
from __future__ import absolute_import, print_function, division
from pprint import pprint
do_print = False
def print_debug(*args, **kwargs):
""" Change module-level do_print variable to toggle behaviour. """
if 'origin' in kwargs:
del kwargs['origin']
if do_print:
print(*args, **kwargs)
def pprint_debug(*args, **kwargs):
if do_print:
pprint(*args, **kwargs)
def info_print(*args, **kwargs):
""" Will print the file and line before printing. Can be used to find spurrious print statements. """
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe().f_back)
print(frameinfo.filename, frameinfo.lineno)
pprint(*args, **kwargs)
def info_pprint(*args, **kwargs):
""" Will print the file and line before printing the variable. """
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe().f_back)
print(frameinfo.filename, frameinfo.lineno)
pprint(*args, **kwargs)
pprintd = pprint_debug
printd = print_debug
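A brief usage sketch of the helpers above; it assumes the module is importable as `nascent.graph_sim_nx.debug` (taken from this record's path), and shows that output stays suppressed until the module-level `do_print` flag is flipped:

```python
# Sketch only: import path assumed from this record's file path.
from nascent.graph_sim_nx import debug

debug.printd("complex formed")                # do_print is False by default, nothing prints
debug.do_print = True                         # toggle the module-level flag
debug.printd("complex formed")                # now printed
debug.pprintd({"strands": 4, "domains": 12})  # pretty-printed via pprint
debug.info_print({"T": 300})                  # reports the caller's file and line, then pretty-prints
```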

chickonice/AutonomousFlight | simulation/simulation_ws/build/rotors_simulator/rotors_joy_interface/catkin_generated/pkg.develspace.context.pc.py | Python | gpl-3.0 | 669 | 0.004484
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/src/rotors_simulator/rotors_joy_interface/include".split(';') if "/home/spacecat/AutonomousFlight/simulation/simulation_ws/src/rotors_simulator/rotors_joy_interface/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;geometry_msgs;mav_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rotors_joy_interface"
PROJECT_SPACE_DIR = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/devel"
PROJECT_VERSION = "1.0.0"

stephenlienharrell/roster-dns-management | roster-config-manager/setup.py | Python | bsd-3-clause | 3,685 | 0.005156
#!/usr/bin/env python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Setup script for roster config manager."""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
current_version = __version__
if( __version__.startswith('#') ):
current_version = '1000'
setup(name='RosterConfigManager',
version=current_version,
description='RosterConfigManager is a Bind9 config importer/exporter for '
'Roster',
long_description='Roster is DNS management software for use with Bind 9. '
'Roster is written in Python and uses a MySQL database '
'with an XML-RPC front-end. It contains a set of '
'command line user tools that connect to the XML-RPC '
'front-end. The config files for Bind are generated '
'from the MySQL database so a live MySQL database is '
'not needed.',
maintainer='Roster Development Team',
maintainer_email='roster-discussion@googlegroups.com',
url='http://code.google.com/p/roster-dns-management/',
packages=['roster_config_manager'],
license=__license__,
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python :: 2.5',
'Topic :: Internet :: Name Service (DNS)'],
install_requires = ['dnspython>=1.6.0', 'IPy>=0.62',
'iscpy>=1.0.5', 'fabric>=1.4.0',
'RosterCore>=%s' % current_version],
scripts = ['scripts/dnsconfigsync', 'scripts/dnszoneimporter',
'scripts/dnstreeexport', 'scripts/dnscheckconfig',
'scripts/dnsexportconfig', 'scripts/dnsrecover',
'scripts/dnszonecompare', 'scripts/dnsquerycheck',
'scripts/dnsservercheck', 'scripts/dnsversioncheck']
)

thonkify/thonkify | src/lib/telegram/contrib/botan.py | Python | mit | 1,522 | 0.001971
import logging
from future.moves.urllib.parse import quote
from future.moves.urllib.error import HTTPError, URLError
from future.moves.urllib.request import urlopen, Request
logging.getLogger(__name__).addHandler(logging.NullHandler())
class Botan(object):
"""This class helps to send incoming events to your botan analytics account.
See more: https://github.com/botanio/sdk#botan-sdk
"""
token = ''
url_template = 'https://api.botan.io/track?token={token}' \
'&uid={uid}&name={name}&src=python-telegram-bot'
def __init__(self, token):
self.token = token
self.logger = logging.getLogger(__name__)
def track(self, message, event_name='event'):
try:
uid = message.chat_id
except AttributeError:
self.logger.warn('No chat_id in message')
return False
data = message.to_json()
try:
url = self.url_template.format(
token=str(self.token), uid=str(uid), name=quote(event_name))
request = Request(
url, data=data.encode(), headers={'Content-Type': 'application/json'})
urlopen(request)
return True
except HTTPError as error:
self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
'utf-8'))
return False
except URLError as error:
self.logger.warn('Botan track error ' + str(error.reason))
return False
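A hypothetical usage sketch for the class above; the import path follows this record's file location, and the token, bot, and message objects are placeholders rather than values from the repository:

```python
# Assumes python-telegram-bot with the contrib module above; the token is a placeholder.
from telegram.contrib.botan import Botan

botan = Botan('BOTAN_TOKEN_PLACEHOLDER')

def on_message(bot, update):
    # Botan.track() reads message.chat_id and message.to_json(), so any
    # telegram.Message-like object works here.
    if botan.track(update.message, event_name='text_message'):
        print('event recorded')
```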

liuzhenyang14/Meeting | codex/baseerror.py | Python | gpl-3.0 | 629 | 0.00159
# -*- coding: utf-8 -*-
#
__author__ = "Epsirom"
class BaseError(Exception):
def __init__(self, code, msg):
super(BaseError, self).__init__(msg)
self.code = code
self.msg = msg
def __repr__(self):
return '[ERRCODE=%d] %s' % (self.code, self.msg)
class InputError(BaseError):
def __init__(self, msg):
super(InputError, self).__init__(1, msg)
class LogicError(BaseError):
def __init__(self, msg):
super(LogicError, self).__init__(2, msg)
class ValidateError(BaseError):
def __init__(self, msg):
super(ValidateError, self).__init__(3, msg)
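A small usage sketch for the error hierarchy above, with the import path assumed from this record's `codex/baseerror.py`:

```python
# Sketch: InputError carries code 1, LogicError 2, ValidateError 3, as defined above.
from codex.baseerror import InputError, ValidateError

def set_title(title):
    if not title:
        raise InputError('title must not be empty')
    if len(title) > 64:
        raise ValidateError('title too long')
    return title

try:
    set_title('')
except InputError as e:
    print(repr(e))  # -> [ERRCODE=1] title must not be empty
```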

pesaply/sarafu | smpp.py | Python | mit | 447 | 0.067114
#!/usr/bin/env python
import sys
import os
import socket
host = 'localhost'
port = 2275
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
# Unfinished request-validation sketch: accept requests matching
# "http keepalive == 300 tps", otherwise print "Not Valid Request".
# require Connection From Socket
# require version control

moagstar/python-uncompyle6 | test/simple_source/expression/03_map.py | Python | mit | 361 | 0
# Bug in 2.7 base64.py
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}

tsabata/PyDNB | pydnb/dnb.py | Python | mit | 6,733 | 0.001782
import time
import numpy as np
import pandas as pd
class DNB:
"""
Class representing Dynamic Naive Bayes.
"""
def __init__(self, debug=False):
self.states_prior = None
self.states_list = None
self.features = None
self.A = None
self.B = None
self.debug = debug
def _state_index(self, state):
return np.searchsorted(self.states_list, state)
def mle(self, df, state_col, features=None, avoid_zeros=False, fix_scales=False):
t = time.process_time()
""" Fitting dynamics in the DNB """
self._dynamic_mle(df[state_col], avoid_zeros)
""" Fitting observable variables """
self.B = {}
for st in self.states_list:
self._features_mle(df[df[state_col] == st].drop([state_col], axis=1), st, features)
if fix_scales:
self.fix_zero_scale()
if self.debug:
elapsed_time = time.process_time() - t
print("MLE finished in %d seconds." % elapsed_time)
return self
def _dynamic_mle(self, df, avoid_zeros):
states_vec = df.as_matrix()
self.states_list = np.unique(states_vec)
states_nr = len(self.states_list)
self.A = np.zeros((states_nr, states_nr))
if avoid_zeros:
self.A += 1
self.states_prior = np.zeros(states_nr)
self.states_prior[self._state_index(states_vec[0])] += 1
for i in range(1, len(states_vec)):
self.A[self._state_index(states_vec[i - 1]), self._state_index(states_vec[i])] += 1
self.states_prior[self._state_index(states_vec[i])] += 1
self.states_prior = self.states_prior / self.states_prior.sum()
for i in range(states_nr):
self.A[i] = self.A[i] / self.A[i].sum()
def _features_mle(self, df, state, features):
import scipy.stats as st
if features is None:
self.features = dict.fromkeys(list(df.columns.values), st.norm)
else:
self.features = features
for f, dist in self.features.items():
params = dist.fit(df[f])
arg = params[:-2]
loc = params[-2]
scale = params[-1]
if self.debug:
print("Distribution: %s, args: %s, loc: %s, scale: %s" % (str(dist), str(arg), str(loc), str(scale)))
self.B[(state, f)] = list(params)
def fix_zero_scale(self, new_scale=1, tolerance=0.000001):
for state in self.states_list:
for f, dist in self.features.items():
scale = self.B[(state, f)][-1]
if scale < tolerance:
if self.debug:
print("state: %s,feature: %s" % (str(state), str(f)))
self.B[(state, f)][-1] = new_scale
def prior_prob(self, state, log=False):
if log:
return np.log(self.states_prior[self._state_index(state)])
else:
return self.states_prior[self._state_index(state)]
def emission_prob(self, state, data, log=False):
prob = 1
if log:
prob = np.log(prob)
for f, dist in self.features.items():
arg = self.B[(state, f)][:-2]
loc = self.B[(state, f)][-2]
scale = self.B[(state, f)][-1]
if log:
prob += dist.logpdf(data[f], loc=loc, scale=scale, *arg)
else:
prob *= dist.pdf(data[f], loc=loc, scale=scale, *arg)
return prob
def transition_prob(self, state1, state2, log=False):
if log:
return np.log(self.A[self._state_index(state1), self._state_index(state2)])
else:
return self.A[self._state_index(state1), self._state_index(state2)]
def _forward(self, data, k=None, state=None):
alpha = np.zeros((len(self.states_list), len(data)))
""" alpha t=0 """
for st in self.states_list:
alpha[self._state_index(st)] = self.prior_prob(st, log=True) + self.emission_prob(st, data.iloc[0],
log=True)
for t in range(1, len(data)):
for st in self.states_list:
alpha[self._state_index(st)][t] = sum(
alpha[self._state_index(_st)][t - 1] + self.transition_prob(_st, st, log=True) for _st in
self.states_list) + self.emission_prob(st, data.iloc[t], log=True)
if state:
alpha = alpha[self._state_index(state), :]
if k:
alpha = alpha[:, k]
return alpha
def _backward(self, data, k=None, state=None):
beta = np.zeros((len(self.states_list), len(data)))
for t in range(len(data) - 1, 0, -1):
for st in self.states_list:
beta[self._state_index(st)][t] = sum(
self.transition_prob(st, _st, log=True) + self.emission_prob(_st, data.iloc[t + 1], log=True) +
beta[_st][t + 1] for _st
in self.states_list)
if state:
beta = beta[self._state_index(state), :]
if k:
beta = beta[:, k]
return beta
def sample(self, size, n=1):
sequences = []
for i in range(n):
Y, output = [], {}
state = self.states_list[np.random.choice(len(self.states_list), 1, p=self.states_prior)[0]]
for _ in range(size):
for f, dist in self.features.items():
arr = output.get(f, [])
arg = self.B[(state, f)][:-2]
loc = self.B[(state, f)][-2]
scale = self.B[(state, f)][-1]
arr.append(dist(loc=loc, scale=scale, *arg).rvs())
output[f] = arr
Y.append(state)
state = self.states_list[
np.random.choice(len(self.states_list), 1, p=self.A[self._state_index(state)])[0]]
df = pd.DataFrame({**{'state': Y}, **output})
sequences.append(df)
return sequences
def obs_seq_probability(self, data):
return sum(self._forward(data, k=len(data) - 1))
def seq_probability(self, data, path, log=True):
prob = 0
path = list(path)
prob += self.prior_prob(path[0], log=True)
prob += self.emission_prob(path[0], data.iloc[0], log=True)
for t in range(1, len(data)):
prob += self.transition_prob(path[t - 1], path[t], log=True)
prob += self.emission_prob(path[t], data.iloc[t], log=True)
if not log:
return np.exp(prob)
return prob
def viterbi(self, data):
pass
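For orientation, a usage sketch of the class above on made-up data; the column names are invented, the import path follows this record, and `DataFrame.as_matrix()` in `_dynamic_mle` implies an older pandas release:

```python
# Illustrative only: synthetic data and hypothetical column names.
import numpy as np
import pandas as pd
from pydnb.dnb import DNB

rng = np.random.RandomState(0)
df = pd.DataFrame({
    'state': rng.choice(['rest', 'active'], size=200),
    'temp': rng.normal(37.0, 0.5, size=200),
    'pulse': rng.normal(70.0, 5.0, size=200),
})
model = DNB(debug=True).mle(df, state_col='state', avoid_zeros=True)
print(model.prior_prob('rest'))                 # empirical prior of a state
print(model.transition_prob('rest', 'active'))  # row-normalised transition estimate
samples = model.sample(size=10, n=2)            # two simulated 10-step sequences
print(samples[0].head())
```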

lebabouin/CouchPotatoServer-develop | couchpotato/core/notifications/synoindex/main.py | Python | gpl-3.0 | 1,104 | 0.009964
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import os
import subprocess
log = CPLog(__name__)
class Synoindex(Notification):
index_path = '/usr/syno/bin/synoindex'
def __init__(self):
super(Synoindex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
command = [self.index_path, '-A', group.get('destination_dir')]
log.info('Executing synoindex command: %s ', command)
try:
p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
out = p.communicate()
log.info('Result from synoindex: %s', str(out))
return True
except OSError, e:
log.error('Unable to run synoindex: %s', e)
return False
def test(self, **kwargs):
return {
'success': os.path.isfile(self.index_path)
}

jangsutsr/tower-cli | tests/test_resources_job_template.py | Python | apache-2.0 | 8,499 | 0
# Copyright 2015, Ansible, Inc.
# Alan Rominger <arominger@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tower_cli
from tower_cli.api import client
from tests.compat import unittest, mock
from tower_cli.conf import settings
import click
import json
class TemplateTests(unittest.TestCase):
"""A set of tests for commands operating on the job template
"""
def setUp(self):
self.res = tower_cli.get_resource('job_template')
def test_create(self):
"""Establish that a job template can be created
"""
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 0, 'results': [],
'next': None, 'previous': None},
method='GET')
t.register_json(endpoint, {'changed': True, 'id': 42},
method='POST')
self.res.create(name='bar', job_type='run', inventory=1,
project=1, playbook='foobar.yml', credential=1)
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'POST')
self.assertEqual(len(t.requests), 2)
# Check that default job_type will get added when needed
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 0, 'results': [],
'next': None, 'previous': None},
method='GET')
t.register_json(endpoint, {'changed': True, 'id': 42},
method='POST')
self.res.create(name='bar', inventory=1, project=1,
playbook='foobar.yml', credential=1)
req_body = json.loads(t.requests[1].body)
self.assertIn('job_type', req_body)
self.assertEqual(req_body['job_type'], 'run')
def test_job_template_create_with_echo(self):
"""Establish that a job template can be created
"""
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 0, 'results': [],
'next': None, 'previous': None},
method='GET')
t.register_json(endpoint,
{'changed': True, 'id': 42,
'name': 'bar', 'inventory': 1, 'project': 1,
'playbook': 'foobar.yml', 'credential': 1},
method='POST')
self.res.create(name='bar', job_type='run', inventory=1,
project=1, playbook='foobar.yml', credential=1)
f = self.res.as_command()._echo_method(self.res.create)
with mock.patch.object(click, 'secho'):
with settings.runtime_values(format='human'):
f(name='bar', job_type='run', inventory=1,
project=1, playbook='foobar.yml', credential=1)
def test_create_w_extra_vars(self):
"""Establish that a job template can be created
and extra vars passed to it
"""
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 0, 'results': [],
'next': None, 'previous': None},
method='GET')
t.register_json(endpoint, {'changed': True, 'id': 42},
method='POST')
self.res.create(name='bar', job_type='run', inventory=1,
project=1, playbook='foobar.yml', credential=1,
extra_vars=['foo: bar'])
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'POST')
self.assertEqual(len(t.requests), 2)
def test_modify(self):
"""Establish that a job template can be modified
"""
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 1, 'results': [{'id': 1,
'name': 'bar'}], 'next': None, 'previous': None},
method='GET')
t.register_json('/job_templates/1/', {'name': 'bar', 'id': 1,
'job_type': 'run'},
method='PATCH')
self.res.modify(name='bar', playbook='foobared.yml')
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'PATCH')
self.assertEqual(len(t.requests), 2)
def test_modify_extra_vars(self):
"""Establish that a job template can be modified
"""
with client.test_mode as t:
endpoint = '/job_templates/'
t.register_json(endpoint, {'count': 1, 'results': [{'id': 1,
'name': 'bar'}], 'next': None, 'previous': None},
method='GET')
t.register_json('/job_templates/1/', {'name': 'bar', 'id': 1,
'job_type': 'run'},
method='PATCH')
self.res.modify(name='bar', extra_vars=["a: 5"])
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'PATCH')
self.assertEqual(len(t.requests), 2)
def test_associate_label(self):
"""Establish that the associate method makes the HTTP requests
that we expect.
"""
with client.test_mode as t:
t.register_json('/job_templates/42/labels/?id=84',
{'count': 0, 'results': []})
t.register_json('/job_templates/42/labels/', {}, method='POST')
self.res.associate_label(42, 84)
self.assertEqual(t.requests[1].body,
json.dumps({'associate': True, 'id': 84}))
def test_disassociate_label(self):
"""Establish that the disassociate method makes the HTTP requests
that we expect.
"""
with client.test_mode as t:
t.register_json('/job_templates/42/labels/?id=84',
{'count': 1, 'results': [{'id': 84}],
'next': None, 'previous': None})
t.register_json('/job_templates/42/labels/', {}, method='POST')
self.res.disassociate_label(42, 84)
self.assertEqual(t.requests[1].body,
json.dumps({'disassociate': True, 'id': 84}))
def test_associate_notification_template(self):
"""Establish that a job template should be able to associate itself
with an existing notification template.
"""
with client.test_mode as t:
t.register_json('/job_templates/5/notification_templates_any/'
'?id=3', {'count': 0, 'results': []})
t.register_json('/job_templates/5/notification_templates_any/',
{}, method='POST')
self.res.associate_notification_template(5, 3, 'any')
self.assertEqual(t.requests[1].body,
json.dumps({'associate': True, 'id': 3}))
def test_disassociate_notification_template(self):
"""Establish that a job template should be able to disassociate itself
from an associated notification template.
"""
with client.test_mode as t:
t.register_json('/job_templates/5/notification_templates_any/'
'?id=3', {'count': 1, 'results': [{'id': 3}

cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/modules/bl_i18n_utils/bl_extract_messages.py | Python | gpl-3.0 | 39,687 | 0.004009
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Populate a template file (POT format currently) from Blender RNA/py/C data.
# XXX: This script is meant to be used from inside Blender!
# You should not directly use this script, rather use update_msg.py!
import collections
import copy
import datetime
import os
import re
import sys
# XXX Relative import does not work here when used from Blender...
from bl_i18n_utils import settings as settings_i18n, utils
import bpy
##### Utils #####
# check for strings like "+%f°"
ignore_reg = re.compile(r"^(?:[-*.()/\\+%°0-9]|%d|%f|%s|%r|\s)*$")
filter_message = ignore_reg.match
def init_spell_check(settings, lang="en_US"):
try:
from bl_i18n_utils import utils_spell_check
return utils_spell_check.SpellChecker(settings, lang)
except Exception as e:
print("Failed to import utils_spell_check ({})".format(str(e)))
return None
def _gen_check_ctxt(settings):
return {
"multi_rnatip": set(),
"multi_lines": set(),
"py_in_rna": set(),
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set(),
"spell_checker": init_spell_check(settings),
"spell_errors": {},
}
def _diff_check_ctxt(check_ctxt, minus_check_ctxt):
"""Returns check_ctxt - minus_check_ctxt"""
for key in check_ctxt:
if isinstance(check_ctxt[key], set):
for warning in minus_check_ctxt[key]:
if warning in check_ctxt[key]:
check_ctxt[key].remove(warning)
elif isinstance(check_ctxt[key], dict):
for warning in minus_check_ctxt[key]:
if warning in check_ctxt[key]:
del check_ctxt[key][warning]
def _gen_reports(check_ctxt):
return {
"check_ctxt": check_ctxt,
"rna_structs": [],
"rna_structs_skipped": [],
"rna_props": [],
"rna_props_skipped": [],
"py_messages": [],
"py_messages_skipped": [],
"src_messages": [],
"src_messages_skipped": [],
"messages_skipped": set(),
}
def check(check_ctxt, msgs, key, msgsrc, settings):
"""
Performs a set of checks over the given key (context, message)...
"""
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
spell_checker = check_ctxt.get("spell_checker")
spell_errors = check_ctxt.get("spell_errors")
if multi_rnatip is not None:
if key in msgs and key not in multi_rnatip:
multi_rnatip.add(key)
if multi_lines is not None:
if '\n' in key[1]:
multi_lines.add(key)
if py_in_rna is not None:
if key in py_in_rna[1]:
py_in_rna[0].add(key)
if not_capitalized is not None:
if(key[1] not in settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED and
key[1][0].isalpha() and not key[1][0].isupper()):
not_capitalized.add(key)
if end_point is not None:
if (key[1].strip().endswith('.') and not key[1].strip().endswith('...') and
key[1] not in settings.WARN_MSGID_END_POINT_ALLOWED):
end_point.add(key)
if undoc_ops is not None:
if key[1] == settings.UNDOC_OPS_STR:
undoc_ops.add(key)
if spell_checker is not None and spell_errors is not None:
err = spell_checker.check(key[1])
if err:
spell_errors[key] = err
def print_info(reports, pot):
def _print(*args, **kwargs):
kwargs["file"] = sys.stderr
print(*args, **kwargs)
pot.update_info()
_print("{} RNA structs were processed (among which {} were skipped), containing {} RNA properties "
"(among which {} were skipped).".format(len(reports["rna_structs"]), len(reports["rna_structs_skipped"]),
len(reports["rna_props"]), len(reports["rna_props_skipped"])))
_print("{} messages were extracted from Python UI code (among which {} were skipped), and {} from C source code "
"(among which {} were skipped).".format(len(reports["py_messages"]), len(reports["py_messages_skipped"]),
len(reports["src_messages"]), len(reports["src_messages_skipped"])))
_print("{} messages were rejected.".format(len(reports["messages_skipped"])))
_print("\n")
_print("Current POT stats:")
pot.print_info(prefix="\t", output=_print)
_print("\n")
check_ctxt = reports["check_ctxt"]
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
spell_errors = check_ctxt.get("spell_errors")
# XXX Temp, no multi_rnatip nor py_in_rna, see below.
keys = multi_lines | not_capitalized | end_point | undoc_ops | spell_errors.keys()
if keys:
_print("WARNINGS:")
for key in keys:
if undoc_ops and key in undoc_ops:
_print("\tThe following operators are undocumented!")
else:
_print("\t“{}”|“{}”:".format(*key))
if multi_lines and key in multi_lines:
_print("\t\t-> newline in this message!")
if not_capitalized and key in not_capitalized:
_print("\t\t-> message not capitalized!")
if end_point and key in end_point:
_print("\t\t-> message with endpoint!")
# XXX Hide this one for now, too much false positives.
# if multi_rnatip and key in multi_rnatip:
# _print("\t\t-> tip used in several RNA items")
# if py_in_rna and key in py_in_rna:
# _print("\t\t-> RNA message also used in py UI code!")
if spell_errors and spell_errors.get(key):
lines = ["\t\t-> {}: misspelled, suggestions are ({})".format(w, "'" + "', '".join(errs) + "'")
for w, errs in spell_errors[key]]
_print("\n".join(lines))
_print("\t\t{}".format("\n\t\t".join(pot.msgs[key].sources)))
def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
if filter_message(msgid):
reports["messages_skipped"].add((msgid, msgsrc))
return
if not msgctxt:
# We do *not* want any "" context!
msgctxt = settings.DEFAULT_CONTEXT
# Always unescape keys!
msgctxt = utils.I18nMessage.do_unescape(msgctxt)
msgid = utils.I18nMessage.do_unescape(msgid)
key = (msgctxt, msgid)
check(check_ctxt, msgs, key, msgsrc, settings)
msgsrc = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + msgsrc
if key not in msgs:
msgs[key] = utils.I18nMessage([msgctxt], [msgid], [], [msgsrc], settings=settings)
else:
msgs[key].comment_lines.append(msgsrc)
##### RNA #####
def dump_rna_messages(msgs, reports, settings, verbose=False):
"""
Dump into messages dict all RNA-d

ctsit/research-subject-mapper | rsm/utils/redcap_transactions.py | Python | bsd-3-clause | 3,580 | 0.004469
import logging
import httplib
from urllib import urlencode
import os
import sys
from lxml import etree
# This addresses the issues with relative paths
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../../")
proj_root = os.path.abspath(goal_dir)+'/'
sys.path.insert(0, proj_root+'rsm')
class redcap_transactions:
"""A class for getting data from redcap instace"""
def __init__(self):
self.data = []
self.configuration_directory = ''
def init_redcap_interface(self,settings,logger):
'''This function initializes the variables required to get data from the redcap
interface. This reads the data from the settings.ini and fills the dict
with required properties.
Mohan'''
logger.info('Initializing redcap interface')
host = ''
path = ''
source_data_schema_file = ''
source_data_schema_file = self.configuration_directory + '/' + settings.source_data_schema_file
if not os.path.exists(source_data_schema_file):
raise Exception("Error: source_data_schema.xml file not found at\
"+ source_data_schema_file)
else:
source = open(source_data_schema_file, 'r')
source_data = etree.parse(source_data_schema_file)
redcap_uri = source_data.find('redcap_uri').text
token = source_data.find('apitoken').text
fields = ','.join(field.text for field in source_data.iter('field'))
if redcap_uri is None:
host = '127.0.0.1:8998'
path = '/redcap/api/'
if token is None:
token = '4CE405878D219CFA5D3ADF7F9AB4E8ED'
uri_list = redcap_uri.split('//')
http_str = ''
if uri_list[0] == 'https:':
is_secure = True
else:
is_secure = False
after_httpstr_list = uri_list[1].split('/', 1)
host = http_str + '//' + after_httpstr_list[0]
host = after_httpstr_list[0]
path = '/' + after_httpstr_list[1]
properties = {'host' : host, 'path' : path, "is_secure" : is_secure,
'token': token, "fields" : fields}
logger.info("redcap interface initialzed")
return properties
def get_data_from_redcap(self, properties,logger, format_param='xml',
type_param='flat', return_format='xml'):
'''This function gets data from redcap using POST method
for getting person index data formtype='Person_Index' must be passed as argument
for getting redcap data formtype='RedCap' must be passed as argument
'''
logger.info('getting data from redcap')
params = {}
params['token'] = properties['token']
params['content'] = 'record'
params['format'] = format_param
params['type'] = type_param
params['returnFormat'] = return_format
params['fields'] = properties['fields']
if properties['is_secure'] is True:
redcap_connection = httplib.HTTPSConnection(properties['host'])
else:
redcap_connection = httplib.HTTPConnection(properties['host'])
redcap_connection.request('POST', properties['path'], urlencode(params),
{'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'})
response_buffer = redcap_connection.getresponse()
returned = response_buffer.read()
logger.info('***********RESPONSE RECEIVED FROM REDCAP***********')
redcap_connection.close()
return returned

TaliesinSkye/evennia | wintersoasis-master/web/urls.py | Python | bsd-3-clause | 3,320 | 0.005422
#
# File that determines what each URL points to. This uses _Python_ regular
# expressions, not Perl's.
#
# See:
# http://diveintopython.org/regular_expressions/street_addresses.html#re.matching.2.3
#
from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib import admin
from django.views.generic import RedirectView
# Wiki imports
from wiki.urls import get_pattern as get_wiki_pattern
from django_notify.urls import get_pattern as get_notify_pattern
from djangobb_forum import settings as forum_settings
admin.autodiscover()
# Setup the root url tree from /
# AJAX stuff.
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
urlpatterns = patterns('',
# User Authentication
url(r'^login/', 'web.views.login', name="login"),
url(r'^logout/', 'django.contrib.auth.views.logout', name="logout"),
url(r'^accounts/login', 'views.login_gateway'),
# News stuff
#url(r'^news/', include('src.web.news.urls')),
# Page place-holder for things that aren't implemented yet.
url(r'^tbi/', 'game.gamesrc.oasis.web.website.views.to_be_implemented'),
# Admin interface
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url='/media/images/favicon.ico')),
# ajax stuff
url(r'^webclient/',include('game.gamesrc.oasis.web.webclient.urls', namespace="webclient")),
# Wiki
url(r'^notify/', get_notify_pattern()),
url(r'^wiki/', get_wiki_pattern()),
# Forum
(r'^forum/', include('bb_urls', namespace='djangobb')),
# Favicon
(r'^favicon\.ico$', RedirectView.as_view(url='/media/images/favicon.ico')),
# Registration stuff
url(r'^roster/', include('roster.urls', namespace='roster')),
# Character related stuff.
url(r'^character/', include('character.urls', namespace='character')),
# Mail stuff
url(r'^mail/', include('mail.urls', namespace='mail')),
# Search utilities
url(r'^search/', include('haystack.urls', namespace='search')),
# AJAX stuff
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
url(r'^selectable/', include('selectable.urls')),
# Ticket system
url(r'^tickets/', include('helpdesk.urls', namespace='helpdesk')),
url(r'^$', 'views.page_index', name='index'),
)
# 500 Errors:
handler500 = 'web.views.custom_500'
# This sets up the server if the user want to run the Django
# test server (this should normally not be needed).
if settings.SERVE_MEDIA:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^wiki/([^/]+/)*wiki/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT + '/wiki/'})
)
# PM Extension
if (forum_settings.PM_SUPPORT):
urlpatterns += patterns('',
(r'^mail/', include('mail_urls')),
)
if (settings.DEBUG):
urlpatterns += patterns('',
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL.lstrip('/'),
'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)

our-city-app/oca-backend | src/solutions/common/job/module_statistics.py | Python | apache-2.0 | 6,018 | 0.001329
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import datetime
import json
import logging
import time
import cloudstorage
from mapreduce import mapreduce_pipeline
from pipeline import pipeline
from pipeline.common import List
from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET
from rogerthat.dal.service import get_service_identities
from rogerthat.settings import get_server_settings
from rogerthat.utils import guid, log_offload
def start_job():
current_date = datetime.datetime.now()
key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid())
counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple()))
task = counter.start(idempotence_key=key, return_task=True)
task.add(queue_name=STATS_QUEUE)
redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id)
logging.info('ModuleStats pipeline url: %s', redirect_url)
return get_server_settings().baseUrl + redirect_url
def mapper(sln_settings):
# type: (SolutionSettings) -> GeneratorType
for service_identity in get_service_identities(sln_settings.service_user):
yield service_identity.app_id, str(sln_settings.modules)
def _combine(new_values, previous_combined_values):
# type: (list[list[str]], list[dict[str, int]]) -> dict[str, int]
combined = {}
for stats in previous_combined_values:
for module, count in stats.iteritems():
if module not in combined:
combined[module] = count
else:
combined[module] += count
for v in new_values:
# mapper returns a string
modules = eval(v) if isinstance(v, basestring) else v
for module in modules:
if module not in combined:
combined[module] = 1
else:
combined[module] += 1
return combined
def combiner(key, new_values, previously_combined_values):
# type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType
if DEBUG:
logging.debug('combiner %s new_values: %s', key, new_values)
logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values)
combined = _combine(new_values, previously_combined_values)
if DEBUG:
logging.debug('combiner %s combined: %s', key, combined)
yield combined
def reducer(app_id, values):
# type: (str, list[dict[str, int]]) -> GeneratorType
if DEBUG:
logging.info('reducer values: %s', values)
combined = _combine([], values)
json_line = json.dumps({'app_id': app_id, 'stats': combined})
if DEBUG:
logging.debug('reducer %s: %s', app_id, json_line)
yield '%s\n' % json_line
class ModuleStatsPipeline(pipeline.Pipeline):
def run(self, bucket_name, key, current_date):
# type: (str, str, long) -> GeneratorType
params = {
'mapper_spec': 'solutions.common.job.module_statistics.mapper',
'mapper_params': {
'bucket_name': bucket_name,
'entity_kind': 'solutions.common.models.SolutionSettings',
'filters': []
},
'combiner_spec': 'solutions.common.job.module_statistics.combiner',
'reducer_spec': 'solutions.common.job.module_statistics.reducer',
'reducer_params': {
'output_writer': {
'bucket_name': bucket_name
}
},
'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader',
'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter',
'shards': 2 if DEBUG else 10
}
output = yield mapreduce_pipeline.MapreducePipeline(key, **params)
process_output_pipeline = yield ProcessOutputPipeline(output, current_date)
with pipeline.After(process_output_pipeline):
yield CleanupGoogleCloudStorageFiles(output)
def finalized(self):
if self.was_aborted:
logging.error('%s was aborted', self, _suppress=False)
return
logging.info('%s was finished', self)
class ProcessOutputPipeline(pipeline.Pipeline):
def run(self, output, current_date):
results = []
for filename in output:
results.append((yield ProcessFilePipeline(filename)))
yield List(*results)
def finalized(self):
if DEBUG:
logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value)
_, timestamp = self.args
# list of dicts with key app_id, value dict of module, amount
outputs = self.outputs.default.value # type: list[dict[int, int]]
for output in outputs:
log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp)
class ProcessFilePipeline(pipeline.Pipeline):
def run(self, filename):
stats_per_app = {}
with cloudstorage.open(filename, "r") as f:
for json_line in f:
d = json.loads(json_line)
stats_per_app[d['app_id']] = d['stats']
if DEBUG:
logging.debug('ProcessFilePipeline: %s', stats_per_app)
return stats_per_app
class CleanupGoogleCloudStorageFiles(pipeline.Pipeline):
def run(self, output):
for filename in output:
cloudstorage.delete(filename)

irskep/clubsandwich | clubsandwich/blt/nice_terminal.py | Python | mit | 3,101 | 0.000322
"""
.. py:attribute:: terminal
Exactly like ``bearlibterminal.terminal``, but for any function that takes
arguments ``x, y``, ``dx, dy``, or ``x, y, width, height``, you can
instead pass a single argument of type :py:class:`Point` (for the first
two) or :py:class:`Rect` (for the last).
This makes interactions between :py:mod:`geom` and ``bearlibterminal``
much less verbose.
Example::
from clubsandwich.blt.nice_terminal import terminal
from clubsandwich.geom import Point
terminal.open()
a = Point(10, 10)
b = a + Point(1, 1)
terminal.put(a, 'a')
terminal.put(b, 'b')
terminal.refresh()
terminal.read()
terminal.close()
"""
from bearlibterminal import terminal as _terminal
from clubsandwich.geom import Point, Rect
class NiceTerminal:
def __getattr__(self, k):
return getattr(_terminal, k)
def clear_area(self, *args):
if args and isinstance(args[0], Rect):
return _terminal.clear_area(
args[0].origin.x, args[0].origin.y,
args[0].size.width, args[0].size.height)
else:
return _terminal.clear_area(*args)
def crop(self, *args):
if args and isinstance(args[0], Rect):
return _terminal.crop(
args[0].origin.x, args[0].origin.y,
args[0].size.width, args[0].size.height)
else:
return _terminal.crop(*args)
def print(self, *args):
if isinstance(args[0], Point):
return _terminal.print(args[0].x, args[0].y, *args[1:])
else:
return _terminal.print(*args)
def printf(self, *args):
if isinstance(args[0], Point):
return _terminal.printf(args[0].x, args[0].y, *args[1:])
else:
return _terminal.printf(*args)
def put(self, *args):
if isinstance(args[0], Point):
return _terminal.put(args[0].x, args[0].y, *args[1:])
else:
return _terminal.put(*args)
def pick(self, *args):
if isinstance(args[0], Point):
return _terminal.pick(args[0].x, args[0].y, *args[1:])
else:
return _terminal.pick(*args)
def pick_color(self, *args):
if isinstance(args[0], Point):
return _terminal.pick_color(args[0].x, args[0].y, *args[1:])
else:
return _terminal.pick_color(*args)
def pick_bkcolor(self, *args):
if isinstance(args[0], Point):
return _terminal.pick_bkcolor(args[0].x, args[0].y, *args[1:])
else:
return _terminal.pick_bkcolor(*args)
def put_ext(self, *args):
if isinstance(args[0], Point):
return _terminal.put_ext(args[0].x, args[0].y, args[1].x, args[1].y, *args[2:])
else:
return _terminal.put_ext(*args)
def read_str(self, *args):
if isinstance(args[0], Point):
return _terminal.read_str(args[0].x, args[0].y, *args[1:])
else:
return _terminal.read_str(*args)
terminal = NiceTerminal()
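A complementary sketch showing the `Rect`-accepting wrappers next to the `Point`-accepting ones; it assumes `clubsandwich` and `bearlibterminal` are installed and that `Rect` is constructed from an origin `Point` and a `Size`, inferred from the `.origin`/`.size` attribute access above:

```python
# Sketch: Rect(origin, size) construction is an assumption based on the attributes used above.
from clubsandwich.blt.nice_terminal import terminal
from clubsandwich.geom import Point, Rect, Size

terminal.open()
terminal.clear_area(Rect(Point(0, 0), Size(20, 5)))  # one Rect instead of x, y, w, h
terminal.put(Point(2, 2), '@')
terminal.refresh()
terminal.read()
terminal.close()
```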

kennedyshead/home-assistant | tests/components/threshold/test_binary_sensor.py | Python | apache-2.0 | 11,914 | 0.001007
"""The test for the threshold sensor platform."""
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, STATE_UNKNOWN, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
async def test_sensor_upper(hass):
"""Test if source is above threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "above"
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "upper"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 15)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
async def test_sensor_lower(hass):
"""Test if source is below threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 16)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "lower"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_hysteresis(hass):
"""Test if source is above threshold using hysteresis."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"hysteresis": "2.5",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 20)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 2.5
assert state.attributes.get("type") == "upper"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 17)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 18)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_in_range_no_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "in_range"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 9)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 21)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.state == "off"
async def test_sensor_in_range_with_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"hysteresis": "2",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16,
|
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "in_range"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]
|
)
assert state.attributes.get("hysteresis") == float(
config["binary_sensor"]["hysteresis"]
)
assert state.attributes.get("type") == "range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 8)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 7)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 22)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 23)
await hass.async_block_till_done()
state = hass.states

fgolemo/SocWeb-Reddit-Crawler | usersToFile.py | Python | mit | 195 | 0.005128
import cPickle as pickle
import os
users = list(pickle.load(file('users.pickle')))
pickle.dump(users, open("users.list.pickle.tmp", "wb"))
os.rename("users.list.pickle.tmp", "users.list.pickle")

mrknow/filmkodi | script.mrknow.urlresolver/lib/urlresolver9/plugins/ol_gmu.py | Python | apache-2.0 | 2,291 | 0.003492
# -*- coding: utf-8 -*-
"""
openload.io urlresolver plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib2
from HTMLParser import HTMLParser
from urlresolver9 import common
from urlresolver9.resolver import ResolverError
net = common.Net()
def get_media_url(url):
try:
HTTP_HEADER = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Referer': url} # 'Connection': 'keep-alive'
html = net.http_GET(url, headers=HTTP_HEADER).content
hiddenurl = HTMLParser().unescape(re.search('hiddenurl">(.+?)<\/span>', html, re.IGNORECASE).group(1))
s = []
for i in hiddenurl:
j = ord(i)
if (j >= 33 & j <= 126):
s.append(chr(33 + ((j + 14) % 94)))
else:
s.append(chr(j))
res = ''.join(s)
videoUrl = 'https://openload.co/stream/{0}?mime=true'.format(res)
dtext = videoUrl.replace('https', 'http')
headers = {'User-Agent': HTTP_HEADER['User-Agent']}
req = urllib2.Request(dtext, None, headers)
res = urllib2.urlopen(req)
videourl = res.geturl()
res.close()
return videourl
except Exception as e:
common.log_utils.log_debug('Exception during openload resolve parse: %s' % e)
raise
raise ResolverError('Unable to resolve openload.io link. Filelink not found.')
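The character remapping in `get_media_url` above is ROT47 over the printable ASCII range, since `chr(33 + ((j + 14) % 94))` equals `chr(33 + ((j - 33 + 47) % 94))`; note also that the original guard `j >= 33 & j <= 126` is parsed by Python as `j >= (33 & j) <= 126` because `&` binds tighter than the comparisons. A standalone sketch of the intended transform:

```python
# Standalone sketch of the ROT47-style remapping used by get_media_url above.
def rot47(text):
    out = []
    for ch in text:
        j = ord(ch)
        if 33 <= j <= 126:  # printable ASCII, space excluded
            out.append(chr(33 + ((j + 14) % 94)))
        else:
            out.append(ch)
    return ''.join(out)

print(rot47('Hello'))         # -> w6==@
print(rot47(rot47('Hello')))  # -> Hello (the mapping is its own inverse)
```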

tdfischer/organizer | events/models.py | Python | agpl-3.0 | 1,124 | 0.003559
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from crm.models import Person
from geocodable.models import LocationAlias
import uuid
class Event(models.Model):
name = models.CharField(max_length=200)
timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField()
attendees = models.ManyToManyField(Person, related_name='events', blank=True)
uid = models.CharField(max_length=200, blank=True)
location = models.ForeignKey(LocationAlias, default=None, blank=True,
null=True)
instance_id = models.CharField(max_length=200, blank=True)
@property
def geo(self):
return {'lat': self.lat, 'lng': self.lng}
@property
def lat(self):
if self.location is not None:
return self.location.lat
else:
return None
@property
def lng(self):
if self.location is not None:
return self.location.lng
else:
return None
def __unicode__(self):
return "%s (%s)"%(self.name, self.timestamp)

bundgus/python-playground | matplotlib-playground/examples/animation/double_pendulum_animated.py | Python | mit | 2,354 | 0.001274
# Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
G = 9.8 # acceleration due to gravity, in m/s^2
L1 = 1.0 # length of pendulum 1 in m
L2 = 1.0 # length of pendulum 2 in m
M1 = 1.0 # mass of pendulum 1 in kg
M2 = 1.0 # mass of pendulum 2 in kg
def derivs(state, t):
dydx = np.zeros_like(state)
dydx[0] = state[1]
del_ = state[2] - state[0]
den1 = (M1 + M2)*L1 - M2*L1*cos(del_)*cos(del_)
dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_) +
M2*G*sin(state[2])*cos(del_) +
M2*L2*state[3]*state[3]*sin(del_) -
(M1 + M2)*G*sin(state[0]))/den1
dydx[2] = state[3]
den2 = (L2/L1)*den1
dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_) +
(M1 + M2)*G*sin(state[0])*cos(del_) -
(M1 + M2)*L1*state[1]*state[1]*sin(del_) -
(M1 + M2)*G*sin(state[2]))/den2
return dydx
# create a time array from 0..100 sampled at 0.05 second steps
dt = 0.05
t = np.arange(0.0, 20, dt)
# th1 and th2 are the initial angles (degrees)
# w10 and w20 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
# initial state
state = np.radians([th1, w1, th2, w2])
# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)
x1 = L1*sin(y[:, 0])
y1 = -L1*cos(y[:, 0])
x2 = L2*sin(y[:, 2]) + x1
y2 = -L2*cos(y[:, 2]) + y1
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.set_aspect('equal')
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def animate(i):
thisx = [0, x1[i], x2[i]]
thisy = [0, y1[i], y2[i]]
line.set_data(thisx, thisy)
time_text.set_text(time_template % (i*dt))
return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
interval=25, blit=True, init_func=init)
#ani.save('double_pendulum.mp4', fps=15)
plt.show()

manojngb/Crazyfly_simple_lift | src/cfclient/ui/widgets/ai.py | Python | gpl-2.0 | 8,202 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Attitude indicator widget.
"""
import sys
from PyQt4 import QtGui, QtCore
__author__ = 'Bitcraze AB'
__all__ = ['AttitudeIndicator']
class AttitudeIndicator(QtGui.QWidget):
"""Widget for showing attitude"""
def __init__(self):
super(AttitudeIndicator, self).__init__()
self.roll = 0
self.pitch = 0
self.hover = False
self.hoverASL = 0.0
self.hoverTargetASL = 0.0
self.setMinimumSize(30, 30)
# self.setMaximumSize(240,240)
def setRoll(self, roll):
self.roll = roll
self.repaint()
def setPitch(self, pitch):
self.pitch = pitch
self.repaint()
def setHover(self, target):
self.hoverTargetASL = target
self.hover = target > 0
self.repaint()
def setBaro(self, asl):
self.hoverASL = asl
self.repaint()
def setRollPitch(self, roll, pitch):
self.roll = roll
self.pitch = pitch
self.repaint()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
size = self.size()
w = size.width()
h = size.height()
qp.translate(w / 2, h / 2)
qp.rotate(self.roll)
qp.translate(0, (self.pitch * h) / 50)
qp.translate(-w / 2, -h / 2)
qp.setRenderHint(qp.Antialiasing)
font = QtGui.QFont('Serif', 7, QtGui.QFont.Light)
qp.setFont(font)
# Draw the blue
qp.setPen(QtGui.QColor(0, 61, 144))
qp.setBrush(QtGui.QColor(0, 61, 144))
qp.drawRect(-w, h / 2, 3 * w, -3 * h)
# Draw the marron
qp.setPen(QtGui.QColor(59, 41, 39))
qp.setBrush(QtGui.QColor(59, 41, 39))
qp.drawRect(-w, h / 2, 3 * w, 3 * h)
pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 1.5,
QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(-w, h / 2, 3 * w, h / 2)
# Drawing pitch lines
for ofset in [-180, 0, 180]:
for i in range(-900, 900, 25):
pos = (((i / 10.0) + 25 + ofset) * h / 50.0)
if i % 100 == 0:
length = 0.35 * w
if i != 0:
if ofset == 0:
qp.drawText((w / 2) + (length / 2) + (w * 0.06),
pos, "{}".format(-i / 10))
qp.drawText((w / 2) - (length / 2) - (w * 0.08),
pos, "{}".format(-i / 10))
else:
qp.drawText((w / 2) + (length / 2) + (w * 0.06),
pos, "{}".format(i / 10))
                        qp.drawText((w / 2) - (length / 2) - (w * 0.08),
                                    pos, "{}".format(i / 10))
elif i % 50 == 0:
length = 0.2 * w
else:
length = 0.1 * w
qp.drawLine((w / 2) - (length / 2), pos,
(w / 2) + (length / 2), pos)
qp.setWorldMatrixEnabled(False)
pen = QtGui.QPen(QtGui.QColor(0, 0, 0), 2,
QtCore.Qt.SolidLine)
qp.setBrush(QtGui.QColor(0, 0, 0))
qp.setPen(pen)
qp.drawLine(0, h / 2, w, h / 2)
# Draw Hover vs Target
qp.setWorldMatrixEnabled(False)
pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 2,
QtCore.Qt.SolidLine)
qp.setBrush(QtGui.QColor(255, 255, 255))
qp.setPen(pen)
fh = max(7, h / 50)
font = QtGui.QFont('Sans', fh, QtGui.QFont.Light)
qp.setFont(font)
qp.resetTransform()
qp.translate(0, h / 2)
if not self.hover:
# asl
qp.drawText(w - fh * 10, fh / 2, str(round(self.hoverASL, 2)))
if self.hover:
# target asl (center)
qp.drawText(
w - fh * 10, fh / 2, str(round(self.hoverTargetASL, 2)))
diff = round(self.hoverASL - self.hoverTargetASL, 2)
pos_y = -h / 6 * diff
# cap to +- 2.8m
if diff < -2.8:
pos_y = -h / 6 * -2.8
elif diff > 2.8:
pos_y = -h / 6 * 2.8
else:
pos_y = -h / 6 * diff
# difference from target (moves up and down +- 2.8m)
qp.drawText(w - fh * 3.8, pos_y + fh / 2, str(diff))
# vertical line
qp.drawLine(w - fh * 4.5, 0, w - fh * 4.5, pos_y)
# left horizontal line
qp.drawLine(w - fh * 4.7, 0, w - fh * 4.5, 0)
# right horizontal line
qp.drawLine(w - fh * 4.2, pos_y, w - fh * 4.5, pos_y)
if __name__ == "__main__":
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def updatePitch(self, pitch):
self.wid.setPitch(pitch - 90)
def updateRoll(self, roll):
self.wid.setRoll((roll / 10.0) - 180.0)
def updateTarget(self, target):
self.wid.setHover(500 + target / 10.)
def updateBaro(self, asl):
self.wid.setBaro(500 + asl / 10.)
def initUI(self):
vbox = QtGui.QVBoxLayout()
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.setFocusPolicy(QtCore.Qt.NoFocus)
sld.setRange(0, 3600)
sld.setValue(1800)
vbox.addWidget(sld)
self.wid = AttitudeIndicator()
sld.valueChanged[int].connect(self.updateRoll)
vbox.addWidget(self.wid)
hbox = QtGui.QHBoxLayout()
hbox.addLayout(vbox)
sldPitch = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldPitch.setFocusPolicy(QtCore.Qt.NoFocus)
sldPitch.setRange(0, 180)
sldPitch.setValue(90)
sldPitch.valueChanged[int].connect(self.updatePitch)
hbox.addWidget(sldPitch)
sldASL = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldASL.setFocusPolicy(QtCore.Qt.NoFocus)
sldASL.setRange(-200, 200)
sldASL.setValue(0)
sldASL.valueChanged[int].connect(self.updateBaro)
sldT = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldT.setFocusPolicy(QtCore.Qt.NoFocus)
sldT.setRange(-200, 200)
sldT.setValue(0)
sldT.valueChanged[int].connect(self.updateTarget)
hbox.addWidget(sldT)
hbox.addWidget(sldASL)
self.setLayout(hbox)
self.setGeometry(50, 50, 510, 510)
self.setWindowTitle('Attitude Indicator')
self.show()
def changeValue(self, value):
self.c.updateBW.emit(value)
self.wid.repaint()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
    main()
|
ychaim/FPGA-Litecoin-Miner
|
experimental/LX150-EIGHT-B/ltcminer-test-dynclock.py
|
Python
|
gpl-3.0
| 8,949 | 0.027154 |
#!/usr/bin/env python
# by teknohog
# Python wrapper for Xilinx Serial Miner
# Host/user configuration is NOT USED in ltctestmode.py ...
# host = "localhost" # Stratum Proxy alternative
# user = "username.1" # Your worker goes here
# password = "password" # Worker password, NOT your account password
# http_port = "8332" # Getwork port (stratum)
# CONFIGURATION - CHANGE THIS (eg try COM1, COM2, COM3, COM4 etc)
serial_port = "COM4"
# serial_port = "/dev/ttyUSB0" # raspberry pi
# CONFIGURATION - how often to refresh work - reduced for test
askrate = 2
###############################################################################
from jsonrpc import ServiceProxy
from time import ctime, sleep, time
from serial import Serial
from threading import Thread, Event
from Queue import Queue
import sys
dynclock = 0
dynclock_hex = "0000"
def stats(count, starttime):
# BTC 2**32 hashes per share (difficulty 1)
# mhshare = 4294.967296
# LTC 2**32 / 2048 hashes per share (difficulty 32)
# khshare = 2097.152 # CHECK THIS !!
khshare = 65.536 * writer.diff
s = sum(count)
tdelta = time() - starttime
rate = s * khshare / tdelta
# This is only a rough estimate of the true hash rate,
# particularly when the number of events is low. However, since
# the events follow a Poisson distribution, we can estimate the
# standard deviation (sqrt(n) for n events). Thus we get some idea
# on how rough an estimate this is.
# s should always be positive when this function is called, but
# checking for robustness anyway
if s > 0:
stddev = rate / s**0.5
else:
stddev = 0
return "[%i accepted, %i failed, %.2f +/- %.2f khash/s]" % (count[0], count[1], rate, stddev)
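# Rough worked example (illustrative, not from the original source): with
# diff = 32 the constant above gives khshare = 65.536 * 32 = 2097.152 kH per
# share, so 10 accepted shares over 100 seconds would report about
# 209.7 +/- 66.3 khash/s (stddev = rate / sqrt(10)).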
class Reader(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
# flush the input buffer
ser.read(1000)
def run(self):
while True:
nonce = ser.read(4)
if len(nonce) == 4:
# Keep this order, because writer.block will be
# updated due to the golden event.
submitter = Submitter(writer.block, nonce)
submitter.start()
golden.set()
class Writer(Thread):
def __init__(self,dynclock_hex):
Thread.__init__(self)
# Keep something sensible available while waiting for the
# first getwork
self.block = "0" * 256
self.target = "f" * 56 + "ff070000" # diff=32 for testmode
self.diff = 32 # testmode
self.dynclock_hex = dynclock_hex
self.daemon = True
self.go = True
self.infile = open("../../scripts/test_data.txt","r")
self.nonce = 0
self.nonce_tested = 0
self.nonce_ok = 0
self.nonce_fail = 0
def run(self):
while self.go:
try:
# work = bitcoin.getwork()
# self.block = work['data']
# self.target = work['target']
print "Tested", self.nonce_tested, " passed", self.nonce_ok, " fail", self.nonce_fail, " unmatched", self.nonce_tested - self.nonce_ok - self.nonce_fail
                self.line = self.infile.readline()
if (len(self.line) != 257):
print "EOF on test data" # Or its an error, but let's not be worrysome
# quit() # Except it doesn't ...
                    self.go = False # Terminating threads is a bit tricksy
break
self.nonce_tested = self.nonce_tested + 1
self.block = self.line.rstrip()
# Hard-code a diff=32 target for test work
# Replace MSB 16 bits of target with clock (NB its reversed)
self.target = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff07" + self.dynclock_hex
self.dynclock_hex = "0000" # Once only
# print("block old " + self.block)
# We need to subtract a few from the nonces in order to match (why?)
nonce_bin = self.block.decode('hex')[79:75:-1]
self.nonce = int(nonce_bin.encode('hex'), 16)
# print "nonce old =", self.nonce
nonce_new = self.nonce - 50
if (nonce_new < 0):
nonce_new = 0
# print "nonce new =", nonce_new
nonce_hex = "{0:08x}".format(nonce_new)
# print "encoded = ", nonce_hex
nonce_hex_rev = nonce_hex[6:8]+nonce_hex[4:6]+nonce_hex[2:4]+nonce_hex[0:2]
# print "reversed = ", nonce_hex_rev
self.block = self.block[0:152]+nonce_hex_rev+self.block[160:]
# print("block new " + self.block)
except:
print("RPC getwork error")
# In this case, keep crunching with the old data. It will get
# stale at some point, but it's better than doing nothing.
# print("block " + self.block + " target " + self.target) # DEBUG
sdiff = self.target.decode('hex')[31:27:-1]
intTarget = int(sdiff.encode('hex'), 16)
if (intTarget < 1):
print "WARNING zero target", intTarget
print "target", self.target
print("sdiff", sdiff) # NB Need brackets here else prints binary
self.target = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000"
else:
newdiff = 65536 / intTarget
if (self.diff != newdiff):
print "New target diff =", newdiff
self.diff = newdiff
# print("Sending data to FPGA") # DEBUG
# for litecoin send 80 bytes of the 128 byte data plus 4 bytes of 32 byte target
payload = self.target.decode('hex')[31:27:-1] + self.block.decode('hex')[79::-1]
# TEST HASH, this should match on nonce 0000318f
# NB The pool will REJECT this share as it did not send the data...
# UNCOMMENT the following two lines for testing...
# test_payload ="000000014eb4577c82473a069ca0e95703254da62e94d1902ab6f0eae8b1e718565775af20c9ba6ced48fc9915ef01c54da2200090801b2d2afc406264d491c7dfc7b0b251e91f141b44717e00310000ff070000"
# payload = test_payload.decode('hex')[::-1]
# This is probably best commented out unless debugging ...
print("Test " + payload.encode('hex_codec')) # DEBUG
ser.write(payload)
result = golden.wait(askrate)
if result:
golden.clear()
class Submitter(Thread):
def __init__(self, block, nonce):
Thread.__init__(self)
self.block = block
self.nonce = nonce
def run(self):
# This thread will be created upon every submit, as they may
# come in sooner than the submits finish.
# print("Block found on " + ctime())
print("Share found on " + ctime() + " nonce " + self.nonce.encode('hex_codec'))
if (int(self.nonce.encode('hex_codec'),16) != writer.nonce):
print "... ERROR expected nonce", hex(writer.nonce)
writer.nonce_fail = writer.nonce_fail + 1
else:
print "... CORRECT"
writer.nonce_ok = writer.nonce_ok + 1
hrnonce = self.nonce[::-1].encode('hex')
data = self.block[:152] + hrnonce + self.block[160:]
try:
# result = bitcoin.getwork(data)
result = False
# print("Upstream result: " + str(result)) # Pointless in test mode
except:
print("RPC send error")
# a sensible boolean for stats
result = False
results_queue.put(result)
class Display_stats(Thread):
def __init__(self):
Thread.__init__(self)
self.count = [0, 0]
self.starttime = time()
self.daemon = True
print("Miner started on " + ctime())
def run(self):
while True:
result = results_queue.get()
if result:
self.count[0] += 1
else:
self.count[1] += 1
# print(stats(self.count, self.starttime)) # Pointless in test mode
results_queue.task_done()
# ======= main =======
# Process command line
if (len(sys.argv) > 2):
print "ERROR too many command line arguments"
print "usage:", sys.argv[0], "clockfreq"
quit()
if (len(sys.argv) == 1):
print "WARNING no clockfreq supplied, not setting freq"
else:
# TODO ought to check the value is a valid integer
try:
dynclock = int(sys.argv[1])
except:
print "ERROR parsing clock frequency on command line, needs to be an integer"
print "usage:", sys.argv[0], "clockfreq"
quit()
if (dynclock==0):
print "ERROR parsing clock frequency on command line, cannot be zero"
print "usage:", sys.argv[0], "clockfreq"
quit()
if (dynclock>254): # Its 254 since onescomplement(255) is zero, which is not allowed
print "ERROR parsing clock frequency on command line, max 254"
print "usage:", sys.argv[0], "clockfreq"
quit()
if (dynclock<25):
print "ERROR use at least 25 for clock (the DCM can lock up for low values)"
print "usage:", sys.argv[0], "clockfreq"
quit()
dynclock_hex = "{0:04x}".format((255-dynclock)*256+dynclock) # both value and ones-complement
|
antoinecarme/pyaf
|
tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_30/ar_/test_artificial_32_RelativeDifference_PolyTrend_30__20.py
|
Python
|
bsd-3-clause
| 273 | 0.084249 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
kylazhang/virt-test
|
libguestfs/tests/guestmount.py
|
Python
|
gpl-2.0
| 2,277 | 0 |
import logging
import os
from autotest.client.shared import error, utils
from virttest import data_dir, utils_test
def umount_fs(mountpoint):
if os.path.ismount(mountpoint):
result = utils.run("umount -l %s" % mountpoint, ignore_status=True)
if result.exit_status:
logging.debug("Umount %s failed", mountpoint)
return False
logging.debug("Umount %s successfully", mountpoint)
return True
def run_guestmount(test, params, env):
"""
Test libguestfs tool guestmount.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
if vm.is_alive():
vm.destroy()
# Create a file to vm with guestmount
content = "This is file for guestmount test."
path = params.get("gm_tempfile", "/home/gm_tmp")
mountpoint = os.path.join(data_dir.get_tmp_dir(), "mountpoint")
status_error = "yes" == params.get("status_error", "yes")
readonly = "no" == params.get("gm_readonly", "no")
special_mount = "yes" == params.get("gm_mount", "no")
    vt = utils_test.libguestfs.VirtTools(vm, params)
if special_mount:
# Get root filesystem before test
        params['libvirt_domain'] = params.get("main_vm")
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
roots, rootfs = gf.get_root()
gf.close_session()
if roots is False:
raise error.TestError("Can not get root filesystem "
"in guestfish before test")
logging.info("Root filesystem is:%s", rootfs)
params['special_mountpoints'] = [rootfs]
writes, writeo = vt.write_file_with_guestmount(mountpoint, path, content)
if umount_fs(mountpoint) is False:
logging.error("Umount vm's filesytem failed.")
if status_error:
if writes:
if readonly:
raise error.TestFail("Write file to readonly mounted "
"filesystem successfully.Not expected.")
else:
raise error.TestFail("Write file with guestmount "
"successfully.Not expected.")
else:
if not writes:
raise error.TestFail("Write file to mounted filesystem failed.")
|
johnbachman/indra
|
indra/pipeline/pipeline.py
|
Python
|
bsd-2-clause
| 16,636 | 0.00006 |
import json
import logging
import inspect
from .decorators import pipeline_functions, register_pipeline
from indra.statements import get_statement_by_name, Statement
logger = logging.getLogger(__name__)
class AssemblyPipeline():
"""An assembly pipeline that runs the specified steps on a given set of
statements.
Ways to initialize and run the pipeline (examples assume you have a list
of INDRA Statements stored in the `stmts` variable.)
>>> from indra.statements import *
>>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
>>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
>>> braf = Agent('BRAF')
>>> stmts = [Phosphorylation(map2k1, mapk1, 'T', '185'),
... Phosphorylation(braf, map2k1)]
1) Provide a JSON file containing the steps, then use the classmethod
`from_json_file`, and run it with the `run` method on a list of statements.
This option allows storing pipeline versions in a separate file and
reproducing the same results. All functions referenced in the JSON file
have to be registered with the @register_pipeline decorator.
>>> import os
    >>> path_this = os.path.dirname(os.path.abspath(__file__))
>>> filename = os.path.abspath(
... os.path.join(path_this, '..', 'tests', 'pipeline_test.json'))
>>> ap = AssemblyPipeline.from_json_file(filename)
>>> assembled_stmts = ap.run(stmts)
2) Initialize a pipeline with a list of steps and run it with the `run`
method on a list of statements. All functions referenced in steps have to
be registered with the @register_pipeline decorator.
>>> steps = [
... {"function": "filter_no_hypothesis"},
... {"function": "filter_grounded_only",
... "kwargs": {"score_threshold": 0.8}}
... ]
>>> ap = AssemblyPipeline(steps)
>>> assembled_stmts = ap.run(stmts)
3) Initialize an empty pipeline and append/insert the steps one by one.
Provide a function and its args and kwargs. For arguments that
require calling a different function, use the RunnableArgument class. All
functions referenced here have to be either imported and passed as function
objects or registered with the @register_pipeline decorator and passed as
function names (strings). The pipeline built this way can be optionally
saved into a JSON file. (Note that this example requires indra_world
to be installed.)
>>> from indra.tools.assemble_corpus import *
>>> from indra_world.ontology import load_world_ontology
>>> from indra_world.belief import get_eidos_scorer
>>> ap = AssemblyPipeline()
>>> ap.append(filter_no_hypothesis)
>>> ap.append(filter_grounded_only)
>>> ap.append(run_preassembly,
... belief_scorer=RunnableArgument(get_eidos_scorer),
... ontology=RunnableArgument(load_world_ontology))
>>> assembled_stmts = ap.run(stmts)
>>> ap.to_json_file('filename.json')
Parameters
----------
steps : list[dict]
A list of dictionaries representing steps in the pipeline. Each step
should have a 'function' key and, if appropriate, 'args' and 'kwargs'
keys. Arguments can be simple values (strings, integers, booleans,
lists, etc.) or can be functions themselves. In case an argument is a
function or a result of another function, it should also be
represented as a dictionary of a similar structure. If a function
itself is an argument (and not its result), the dictionary should
contain a key-value pair {'no_run': True}. If an argument is a type
of a statement, it should be represented as a dictionary {'stmt_type':
<name of a statement type>}.
"""
def __init__(self, steps=None):
# This import is here to avoid circular imports
# It is enough to import one function to get all registered functions
from indra.tools.assemble_corpus import filter_grounded_only
from indra.ontology.bio import bio_ontology
from indra.preassembler.grounding_mapper.gilda import ground_statements
from indra.preassembler.custom_preassembly import agent_grounding_matches
self.steps = steps if steps else []
@classmethod
def from_json_file(cls, filename):
"""Create an instance of AssemblyPipeline from a JSON file with
steps."""
with open(filename, 'r') as f:
steps = json.load(f)
ap = AssemblyPipeline(steps)
return ap
def to_json_file(self, filename):
"""Save AssemblyPipeline to a JSON file."""
with open(filename, 'w') as f:
json.dump(self.steps, f, indent=1)
def run(self, statements, **kwargs):
"""Run all steps of the pipeline.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to run the pipeline on.
**kwargs : kwargs
It is recommended to define all arguments for the steps functions
in the steps definition, but it is also possible to provide some
external objects (if it is not possible to provide them as a step
argument) as kwargs to the entire pipeline here. One should be
cautious to avoid kwargs name clashes between multiple functions
(this value will be provided to all functions that expect an
argument with the same name). To overwrite this value in other
functions, provide it explicitly in the corresponding steps kwargs.
Returns
-------
list[indra.statements.Statement]
The list of INDRA Statements resulting from running the pipeline
on the list of input Statements.
"""
logger.info('Running the pipeline')
for step in self.steps:
statements = self.run_function(step, statements, **kwargs)
return statements
def append(self, func, *args, **kwargs):
"""Append a step to the end of the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.append(new_step)
def insert(self, ix, func, *args, **kwargs):
"""Insert a step to any position in the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
            raise TypeError('Should be a function object or a string')
|
missionpinball/mpf
|
mpf/modes/carousel/code/carousel.py
|
Python
|
mit
| 6,517 | 0.003376 |
"""Mode which allows the player to select another mode to run."""
from mpf.core.utility_functions import Util
from mpf.core.mode import Mode
class Carousel(Mode):
"""Mode which allows the player to select another mode to run."""
__slots__ = ["_all_items", "_items", "_select_item_events", "_next_item_events",
"_previous_item_events", "_highlighted_item_index", "_done",
"_block_events", "_release_events", "_is_blocking"]
def __init__(self, *args, **kwargs):
"""Initialise carousel mode."""
self._all_items = None
self._items = None
self._select_item_events = None
self._next_item_events = None
self._previous_item_events = None
self._highlighted_item_index = None
self._block_events = None
self._release_events = None
self._is_blocking = None
self._done = None
super().__init__(*args, **kwargs)
def mode_init(self):
"""Initialise mode and read all settings from config."""
super().mode_init()
mode_settings = self.config.get("mode_settings", [])
self._all_items = []
for item in Util.string_to_event_list(mode_settings.get("selectable_items", "")):
placeholder = self.machine.placeholder_manager.parse_conditional_template(item)
# Only add a placeholder if there's a condition, otherwise just the string
self._all_items.append(placeholder if placeholder.condition else item)
self._select_item_events = Util.string_to_event_list(mode_settings.get("select_item_events", ""))
self._next_item_events = Util.string_to_event_list(mode_settings.get("next_item_events", ""))
self._previous_item_events = Util.string_to_event_list(mode_settings.get("previous_item_events", ""))
self._highlighted_item_index = 0
self._block_events = Util.string_to_event_list(mode_settings.get("block_events", ""))
self._release_events = Util.string_to_event_list(mode_settings.get("release_events", ""))
if not self._all_items:
raise AssertionError("Specify at least one item to select from")
def mode_start(self, **kwargs):
"""Start mode and let the player select."""
self._items = []
for item in self._all_items:
# All strings go in, but only conditional templates if they evaluate true
if isinstance(item, str):
self._items.append(item)
elif not item.condition or item.condition.evaluate({}):
self._items.append(item.name)
if not self._items:
            self.machine.events.post("{}_items_empty".format(self.name))
'''event (carousel_name)_items_empty
desc: A carousel's items are all conditional and all evaluated false.
If this event is posted, the carousel mode will not be started.
'''
self.stop()
return
super().mode_start(**kwargs)
self._done = False
self._is_blocking = False
self._register_handlers(self._next_item_events, self._next_item)
self._register_handlers(self._previous_item_events, self._previous_item)
self._register_handlers(self._select_item_events, self._select_item)
self._highlighted_item_index = 0
self._update_highlighted_item(None)
# If set to block next/prev on flipper cancel, set those event handlers
if self._block_events:
# This rudimentary implementation will block on any block_event
# and release on any release_event. If future requirements need to
# track *which* block_event blocked and *only* release on the
# corresponding release_event, additional work will be needed.
for event in self._block_events:
self.add_mode_event_handler(event, self._block_enable, priority=100)
for event in self._release_events:
self.add_mode_event_handler(event, self._block_disable, priority=100)
def _register_handlers(self, events, handler):
for event in events:
self.add_mode_event_handler(event, handler)
def _get_highlighted_item(self):
return self._get_available_items()[self._highlighted_item_index]
def _update_highlighted_item(self, direction):
self.debug_log("Highlighted item: " + self._get_highlighted_item())
self.machine.events.post("{}_{}_highlighted".format(self.name, self._get_highlighted_item()),
direction=direction)
'''event (carousel_name)_(item)_highlighted
desc: Player highlighted an item in a carousel. Mostly used to play shows or trigger slides.
args:
direction: The direction the carousel is moving. Either forwards or backwards. None on mode start.
'''
def _get_available_items(self):
# Return the default items
return self._items
def _next_item(self, **kwargs):
del kwargs
if self._done or self._is_blocking:
return
self._highlighted_item_index += 1
if self._highlighted_item_index >= len(self._get_available_items()):
self._highlighted_item_index = 0
self._update_highlighted_item("forwards")
def _previous_item(self, **kwargs):
del kwargs
if self._done or self._is_blocking:
return
self._highlighted_item_index -= 1
if self._highlighted_item_index < 0:
self._highlighted_item_index = len(self._get_available_items()) - 1
self._update_highlighted_item("backwards")
def _select_item(self, **kwargs):
del kwargs
if self._done:
return
self.debug_log("Selected mode: " + str(self._get_highlighted_item()))
self._done = True
self.machine.events.post("{}_{}_selected".format(self.name, self._get_highlighted_item()))
'''event (carousel_name)_(item)_selected
desc: Player selected an item in a carousel. Can be used to trigger modes. '''
self.machine.events.post("{}_item_selected".format(self.name))
'''event (carousel_name)_item_selected
desc: Player selected any item in a carousel. Used to stop mode. '''
def _block_enable(self, **kwargs):
del kwargs
self._is_blocking = True
def _block_disable(self, **kwargs):
del kwargs
self._is_blocking = False
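# A minimal, hypothetical mode config sketch (YAML keys taken from the
# mode_settings values read in mode_init above; event and item names are
# made up):
#
#   mode_settings:
#     selectable_items: mission_a, mission_b, mission_c
#     next_item_events: s_flipper_right_active
#     previous_item_events: s_flipper_left_active
#     select_item_events: s_start_active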
|
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/cc1/src/cm/utils/threads/image.py
|
Python
|
apache-2.0
| 8,639 | 0.002084 |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.cm.utils.threads.image
"""
import subprocess
import threading
import urllib2
from common.states import image_states
from common.hardware import disk_format_commands, disk_filesystems_reversed
import os
from cm.utils import log
import random
import hashlib
from cm.utils import message
class CreateImage(threading.Thread):
image = None
filesystem = None
def __init__(self, image, filesystem):
threading.Thread.__init__(self)
self.image = image
self.filesystem = filesystem
def run(self):
if os.path.exists(self.image.path):
self.image.state = image_states['failed']
self.image.save(update_fields=['state'])
log.error(self.image.user.id, "Destination image %d for user %d exists! Aborting creation" % (
self.image.id, self.image.user.id))
return
self.image.progress = 0
if self.format() == 'failed':
self.image.state = image_states['failed']
self.image.save(update_fields=['state'])
else:
self.image.progress = 100
self.image.state = image_states['ok']
self.image.save(update_fields=['state', 'progress'])
log.debug(self.image.user.id, 'stage [6/6] cleaning..')
try:
os.remove('%s' % os.path.join('/var/lib/cc1/images-tmp/', os.path.split(self.image.path)[1]))
except Exception, e:
log.error(self.image.user.id, 'error remove file: %s' % str(e))
def exec_cmd(self, args):
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retr_std = p.stdout.read()
ret = p.wait()
if ret:
retr_err = str(p.stderr.read())
log.error(self.image.user.id, retr_err)
log.error(self.image.user.id, retr_std)
return retr_err
def set_progress(self, prg):
self.image.progress = prg
self.image.save(update_fields=['progress'])
def format(self):
if not os.path.exists(os.path.dirname(self.image.path)):
os.makedirs(os.path.dirname(self.image.path))
format_cmd = disk_format_commands[disk_filesystems_reversed[self.filesystem]].split()
if format_cmd:
tmp_dir = '/var/lib/cc1/images-tmp/'
tmp_path = os.path.join(tmp_dir, os.path.split(self.image.path)[1])
if not os.path.exists(os.path.dirname(tmp_dir)):
os.makedirs(os.path.dirname(tmp_dir))
else:
tmp_path = str(self.image.path)
log.debug(self.image.user.id, 'stage [1/6] truncate partition file')
if self.exec_cmd(['truncate', '-s', '%dM' % self.image.size, '%s' % tmp_path]):
return 'failed'
self.set_progress(random.randint(0, 15))
if format_cmd:
format_cmd.append('%s' % tmp_path)
log.debug(self.image.user.id, 'stage [2/6] creating partition filesystem')
if self.exec_cmd(format_cmd):
return 'failed'
self.set_progress(random.randint(15, 50))
log.debug(self.image.user.id, 'stage [3/6] creating disk')
if self.exec_cmd(['/usr/bin/ddrescue', '-S', '-o', '1048576', '%s' % tmp_path, str(self.image.path)]):
return 'failed'
self.set_progress(random.randint(50, 80))
log.debug(self.image.user.id, 'stage [4/6] creating new partition table')
if self.exec_cmd(['/sbin/parted', '-s', str(self.image.path), 'mklabel', 'msdos']):
return 'failed'
self.set_progress(random.randint(80, 90))
log.debug(self.image.user.id, 'stage [5/6] adding partition')
if self.exec_cmd(['/sbin/parted', '-s', str(self.image.path), 'mkpart', 'primary', '1048576b', '100%']):
return 'failed'
self.set_progress(random.randint(90, 100))
        log.info(self.image.user.id, 'disk successfully formatted')
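# For reference (sketch only; paths and sizes are illustrative): the stages in
# format() above roughly correspond to this shell sequence, with the mkfs step
# coming from the configured disk_format_commands entry:
#   truncate -s <size>M <tmp_path>
#   <mkfs command> <tmp_path>
#   ddrescue -S -o 1048576 <tmp_path> <image_path>
#   parted -s <image_path> mklabel msdos
#   parted -s <image_path> mkpart primary 1048576b 100%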
class DownloadImage(threading.Thread):
image = None
url = None
size = 0
def __init__(self, image, url, size):
threading.Thread.__init__(self)
self.image = image
self.url = url
self.size = size
def run(self):
try:
if self.url.startswith('/'):
src_image = open(self.url, 'r')
else:
src_image = urllib2.urlopen(self.url)
except Exception, e:
log.exception(self.image.user.id, "Cannot open url %s: %s" % (self.url, str(e)))
self.image.state = image_states['failed']
return
if os.path.exists(self.image.path):
self.image.state = image_states['failed']
self.image.save(update_fields=['state'])
log.error(self.image.user.id, "Destination image %d for user %d exists! Aborting download" % (
self.image.id, self.image.user.id))
return
try:
dirpath = os.path.dirname(self.image.path)
if not os.path.exists(dirpath):
os.mkdir(dirpath)
dest_image = open(self.image.path, 'w')
downloaded_size = 0
md5sum = hashlib.md5()
while downloaded_size < self.size:
buff = src_image.read(1024 * 1024)
md5sum.update(buff)
downloaded_size += len(buff)
dest_image.write(buff)
progress = int(downloaded_size * 100 / self.size)
if progress != self.image.progress:
self.image.progress = progress
self.image.save(update_fields=['progress'])
dest_image.close()
log.info(self.image.user.id, 'md5 hash of image %d is %s' % (self.image.id, md5sum.hexdigest()))
self.image.state = image_states['ok']
self.image.size = downloaded_size / (1024 * 1024)
self.image.save(update_fields=['progress', 'state', 'size'])
message.info(self.image.user.id, 'image_downloaded',
{'name': self.image.name, 'md5sum': md5sum.hexdigest()})
except Exception, e:
log.exception(self.image.user.id, "Failed to download image: %s" % str(e))
self.image.state = image_states['failed']
self.image.save(update_fields=['state'])
class CopyImage(threading.Thread):
def __init__(self, src_image, dest_image):
threading.Thread.__init__(self)
self.src_image = src_image
self.dest_image = dest_image
def run(self):
copied = 0
prev_progress = 0
try:
size = os.path.getsize(self.src_image.path)
dirpath = os.path.dirname(self.dest_image.path)
if not os.path.exists(dirpath):
os.mkdir(dirpath)
src = open(self.src_image.path, "r")
dst = open(self.dest_image.path, "w")
while 1:
buff = src.read(1024 * 1024) # Should be less than MTU?
if len(buff) > 0 and copied <= size:
dst.write(buff)
copied = copied + len(buff)
else:
break
# Update image information:
progress = 100 * copied / size
if progress > prev_progress:
prev_progress = progress
self.dest_image.progress = progress
self.dest_image.save(update_fields=['progress'])
|
whiteear/newrelic-plugin-agent
|
newrelic_plugin_agent/publisher/__init__.py
|
Python
|
bsd-3-clause
| 918 | 0.005447 |
#
# Copyright 2015 chinaskycloud.com.cn
#
# Author: Chunyang Liu <liucy@chinaskycloud.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Publish plugins are responsible for publishing fetched metrics into
different monitoring systems.
"""
plugins = {
'file':'newrelic_plugin_agent.publisher.file.FilePublisher',
'ceilometer':'newrelic_plugin_agent.publisher.ceilometer.CeilometerPublisher'
}
import base
|
danielElopez/PI-Connector-for-UFL-Samples
|
COMPLETE_SOLUTIONS/North American General Bike Feed/putJSONdata_SF_Bikes_service.py
|
Python
|
apache-2.0
| 3,727 | 0.003756 |
""" putJSONdata.py
Copyright 2016 OSIsoft, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Call:
python putJSONdata.py rest-ufl rest-external
Parameters:
rest-ufl - The Address specified in the Data Source configuration
rest-external - A third party data source which returns JSON data when receving a get request from this URL.
Example:
python putJSONdata.py https://localhost:5460/connectordata/currency http://api.fixer.io/latest?base=USD
"""
import argparse
import getpass
import json
import sys
from functools import lru_cache
import requests
import time
# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Process arguments
parser = argparse.ArgumentParser()
parser.description = 'POST file contents to PI Connector for UFL'
parser.add_argument('restufl', help='The UFL rest endpoint address')
parser.add_argument('restexternal', help='The external data source rest end point')
args = parser.parse_args()
@lru_cache(maxsize=1)
def password():
return getpass.getpass()
@lru_cache(maxsize=1)
def username():
return getpass.getpass('Username: ')
s = requests.session()
# To hardcode the username and password, specify them below
# To use anonymous login, use: ("", "")
s.auth = ("pi", "pi")
def getData(url):
# Being very careful when checking for failure when accessing the external site
try:
response = requests.get(url=url)
if response.status_code != requests.codes.ok:
print("The url {0} did not return the expected value back.".format(response.url))
print("Response: {0} {1}".format(response.status_code, response.reason))
sys.exit(0)
try:
return json.dumps(response.json(), indent=4, sort_keys=True)
except ValueError as e:
print(e)
sys.exit(0)
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
print("Connection timed out")
sys.exit(0)
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
print("Too many redirects")
sys.exit(0)
except requests.exceptions.RequestException as e:
        print("There was an issue with requesting the data:")
print(e)
sys.exit(0)
data = getData(args.restexternal)
# remove verify=False if the certificate used is a trusted one
response = s.put(args.restufl, data=data, verify=False)
# If instead of using the put request, you need to use the post request
# use the function as listed below
# response = s.post(args.resturl + '/post', data=data, verify=False)
if response.status_code != 200:
print("Sending data to the UFL connect failed due to error {0} {1}".format(response.status_code, response.reason))
else:
print('The data was sent successfully over https.')
print('Check the PI Connectors event logs for any further information.')
print("SF Bike Data sent at :")
localtime = time.asctime( time.localtime(time.time()) )
print(localtime)
time.sleep(45)
|
tanglu-org/tdak
|
daklib/fstransactions.py
|
Python
|
gpl-2.0
| 6,500 | 0.001385 |
# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Transactions for filesystem actions
"""
import errno
import os
import shutil
class _FilesystemAction(object):
@property
def temporary_name(self):
raise NotImplementedError()
def check_for_temporary(self):
try:
if os.path.exists(self.temporary_name):
raise IOError("Temporary file '{0}' already exists.".format(self.temporary_name))
except NotImplementedError:
pass
class _FilesystemCopyAction(_FilesystemAction):
def __init__(self, source, destination, link=True, symlink=False, mode=None):
self.destination = destination
self.need_cleanup = False
dirmode = 0o2755
if mode is not None:
dirmode = 0o2700 | mode
# Allow +x for group and others if they have +r.
if dirmode & 0o0040:
dirmode = dirmode | 0o0010
if dirmode & 0o0004:
dirmode = dirmode | 0o0001
self.check_for_temporary()
destdir = os.path.dirname(self.destination)
if not os.path.exists(destdir):
os.makedirs(destdir, dirmode)
if symlink:
os.symlink(source, self.destination)
elif link:
try:
os.link(source, self.destination)
except OSError:
shutil.copy2(source, self.destination)
else:
shutil.copy2(source, self.destination)
self.need_cleanup = True
if mode is not None:
os.chmod(self.destination, mode)
@property
def temporary_name(self):
return self.destination
def commit(self):
pass
def rollback(self):
if self.need_cleanup:
os.unlink(self.destination)
self.need_cleanup = False
class _FilesystemUnlinkAction(_FilesystemAction):
def __init__(self, path):
self.path = path
self.need_cleanup = False
self.check_for_temporary()
os.rename(self.path, self.temporary_name)
self.need_cleanup = True
@property
def temporary_name(self):
return "{0}.dak-rm".format(self.path)
def commit(self):
if self.need_cleanup:
os.unlink(self.temporary_name)
self.need_cleanup = False
def rollback(self):
if self.need_cleanup:
os.rename(self.temporary_name, self.path)
self.need_cleanup = False
class _FilesystemCreateAction(_FilesystemAction):
def __init__(self, path):
self.path = path
self.need_cleanup = True
@property
def temporary_name(self):
return self.path
def commit(self):
pass
def rollback(self):
if self.need_cleanup:
os.unlink(self.path)
self.need_cleanup = False
class FilesystemTransaction(object):
"""transactions for filesystem actions"""
def __init__(self):
self.actions = []
def copy(self, source, destination, link=False, symlink=False, mode=None):
"""copy C{source} to C{destination}
@type source: str
@param source: source file
@type destination: str
@param destination: destination file
        @type link: bool
@param link: try hardlinking, falling back to copying
@type symlink: bool
@param symlink: create a symlink instead of copying
@type mode: int
@param mode: permissions to change C{destination} to
"""
        if isinstance(mode, str) or isinstance(mode, unicode):
mode = int(mode, 8)
self.actions.append(_FilesystemCopyAction(source, destination, link=link, symlink=symlink, mode=mode))
def move(self, source, destination, mode=None):
"""move C{source} to C{destination}
@type source: str
@param source: source file
@type destination: str
@param destination: destination file
@type mode: int
@param mode: permissions to change C{destination} to
"""
self.copy(source, destination, link=True, mode=mode)
self.unlink(source)
def unlink(self, path):
"""unlink C{path}
@type path: str
@param path: file to unlink
"""
self.actions.append(_FilesystemUnlinkAction(path))
def create(self, path, mode=None):
"""create C{filename} and return file handle
@type filename: str
@param filename: file to create
@type mode: int
@param mode: permissions for the new file
@return: file handle of the new file
"""
if isinstance(mode, str) or isinstance(mode, unicode):
mode = int(mode, 8)
destdir = os.path.dirname(path)
if not os.path.exists(destdir):
os.makedirs(destdir, 0o2775)
if os.path.exists(path):
raise IOError("File '{0}' already exists.".format(path))
fh = open(path, 'w')
self.actions.append(_FilesystemCreateAction(path))
if mode is not None:
os.chmod(path, mode)
return fh
def commit(self):
"""Commit all recorded actions."""
try:
for action in self.actions:
action.commit()
except:
self.rollback()
raise
finally:
self.actions = []
def rollback(self):
"""Undo all recorded actions."""
try:
for action in self.actions:
action.rollback()
finally:
self.actions = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None:
self.commit()
else:
self.rollback()
return None
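# Usage sketch (not part of the original module; paths are illustrative). The
# class is a context manager: actions are committed when the block exits
# cleanly and rolled back if an exception escapes.
#
#   with FilesystemTransaction() as fs:
#       fs.move('/srv/incoming/foo.deb', '/srv/pool/f/foo/foo.deb', mode=0o644)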
|
twerp/django-admin-flexselect-py3
|
flex_project/urls.py
|
Python
|
cc0-1.0
| 816 | 0 |
"""flex_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
url(r'^flexselect/', include('flexselect.urls')),
]
|
information-machine/information-machine-api-python
|
InformationMachineAPILib/Models/GetNutrientsWrapper.py
|
Python
|
mit
| 2,942 | 0.003059 |
# -*- coding: utf-8 -*-
"""
InformationMachineAPILib.Models.GetNutrientsWrapper
"""
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Models.NutrientInfo import NutrientInfo
from InformationMachineAPILib.Models.MetaBase import MetaBase
class GetNutrientsWrapper(object):
"""Implementation of the 'GetNutrientsWrapper' model.
TODO: type model description here.
Attributes:
result (list of NutrientInfo): TODO: type description here.
meta (MetaBase): TODO: type description here.
"""
def __init__(self,
**kwargs):
"""Constructor for the GetNutrientsWrapper class
Args:
**kwargs: Keyword Arguments in order to initialise the
object. Any of the attributes in this object are able to
be set through the **kwargs of the constructor. The values
that can be supplied and their types are as follows::
result -- list of NutrientInfo -- Sets the attribute result
meta -- MetaBase -- Sets the attribute meta
"""
# Set all of the parameters to their default values
self.result = None
self.meta = None
# Create a mapping from API property names to Model property names
replace_names = {
"result": "result",
"meta": "meta",
}
# Parse all of the Key-Value arguments
if kwargs is not None:
for key in kwargs:
# Only add arguments that are actually part of this object
if key in replace_names:
                    setattr(self, replace_names[key], kwargs[key])
# Other objects also need to be initialised properly
if "result" in kwargs:
# Parameter is an array, so we need to iterate through it
self.result = list()
for item in kwargs["result"]:
self.result.append(NutrientInfo(**item))
# Other objects also need to be initialised properly
if "meta" in kwargs:
self.meta = MetaBase(**kwargs["meta"])
def resolve_names(self):
"""Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
"""
# Create a mapping from Model property names to API property names
replace_names = {
"result": "result",
"meta": "meta",
}
retval = dict()
return APIHelper.resolve_names(self, replace_names, retval)
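# Usage sketch (assumes a decoded API response dict named `payload` containing
# "result" and "meta" keys, matching the constructor above):
#
#   wrapper = GetNutrientsWrapper(**payload)
#   request_body = wrapper.resolve_names()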
|
Juniper/ceilometer
|
ceilometer/event/storage/impl_elasticsearch.py
|
Python
|
apache-2.0
| 11,518 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import elasticsearch as es
from elasticsearch import helpers
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer import utils
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the event data into an ElasticSearch db.
Events in ElasticSearch are indexed by day and stored by event_type.
An example document::
{"_index":"events_2014-10-21",
"_type":"event_type0",
"_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
"_score":1.0,
"_source":{"timestamp": "2014-10-21T20:02:09.274797"
"traits": {"id4_0": "2014-10-21T20:02:09.274797",
"id3_0": 0.7510790937279408,
"id2_0": 5,
"id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
}
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
index_name = 'events'
# NOTE(gordc): mainly for testing, data is not searchable after write,
# it is only searchable after periodic refreshes.
_refresh_on_write = False
def __init__(self, url):
url_split = netutils.urlsplit(url)
self.conn = es.Elasticsearch(url_split.netloc)
def upgrade(self):
iclient = es.client.IndicesClient(self.conn)
ts_template = {
'template': '*',
'mappings': {'_default_':
{'_timestamp': {'enabled': True,
'store': True},
'properties': {'traits': {'type': 'nested'}}}}}
iclient.put_template(name='enable_timestamp', body=ts_template)
def record_events(self, events):
def _build_bulk_index(event_list):
for ev in event_list:
traits = {t.name: t.value for t in ev.traits}
yield {'_op_type': 'create',
'_index': '%s_%s' % (self.index_name,
ev.generated.date().isoformat()),
'_type': ev.event_type,
'_id': ev.message_id,
'_source': {'timestamp': ev.generated.isoformat(),
'traits': traits}}
problem_events = []
for ok, result in helpers.streaming_bulk(
self.conn, _build_bulk_index(events)):
if not ok:
__, result = result.popitem()
if result['status'] == 409:
problem_events.append((models.Event.DUPLICATE,
result['_id']))
else:
problem_events.append((models.Event.UNKNOWN_PROBLEM,
result['_id']))
if self._refresh_on_write:
self.conn.indices.refresh(index='%s_*' % self.index_name)
while self.conn.cluster.pending_tasks(local=True)['tasks']:
pass
return problem_events
def _make_dsl_from_filter(self, indices, ev_filter):
q_args = {}
filters = []
if ev_filter.start_timestamp:
filters.append({'range': {'timestamp':
{'ge': ev_filter.start_timestamp.isoformat()}}})
while indices[0] < (
'%s_%s' % (self.index_name,
ev_filter.start_timestamp.date().isoformat())):
del indices[0]
if ev_filter.end_timestamp:
filters.append({'range': {'timestamp':
{'le': ev_filter.end_timestamp.isoformat()}}})
while indices[-1] > (
'%s_%s' % (self.index_name,
ev_filter.end_timestamp.date().isoformat())):
del indices[-1]
q_args['index'] = indices
if ev_filter.event_type:
q_args['doc_type'] = ev_filter.event_type
if ev_filter.message_id:
filters.append({'term': {'_id': ev_filter.message_id}})
if ev_filter.traits_filter:
trait_filters = []
for t_filter in ev_filter.traits_filter:
value = None
for val_type in ['integer', 'string', 'float', 'datetime']:
if t_filter.get(val_type):
value = t_filter.get(val_type)
if isinstance(value, six.string_types):
value = value.lower()
elif isinstance(value, datetime.datetime):
value = value.isoformat()
break
if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
op = (t_filter.get('op').replace('ge', 'gte')
.replace('le', 'lte'))
trait_filters.append(
{'range': {t_filter['key']: {op: value}}})
else:
tf = {"query": {"query_string": {
"query": "%s: \"%s\"" % (t_filter['key'], value)}}}
if t_filter.get('op') == 'ne':
tf = {"not": tf}
trait_filters.append(tf)
filters.append(
{'nested': {'path': 'traits', 'query': {'filtered': {
'filter': {'bool': {'must': trait_filters}}}}}})
q_args['body'] = {'query': {'filtered':
{'filter': {'bool': {'must': filters}}}}}
return q_args
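    # Rough illustration (not from the source) of the DSL this builds for a
    # plain start/end timestamp filter:
    #   {'index': [...],
    #    'body': {'query': {'filtered': {'filter': {'bool': {'must': [
    #        {'range': {'timestamp': {'ge': '...'}}},
    #        {'range': {'timestamp': {'le': '...'}}}]}}}}}}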
def get_events(self, event_filter):
iclient = es.client.IndicesClient(self.conn)
indices = iclient.get_mapping('%s_*' % self.index_name).keys()
if indices:
filter_args = self._make_dsl_from_filter(indices, event_filter)
results = self.conn.search(fields=['_id', 'timestamp',
'_type', '_source'],
sort='timestamp:asc',
**filter_args)
trait_mappings = {}
for record in results['hits']['hits']:
trait_list = []
if not record['_type'] in trait_mappings:
trait_mappings[record['_type']] = list(
self.get_trait_types(record['_type']))
for key in record['_source']['traits'].keys():
value = record['_source']['traits'][key]
for t_map in trait_mappings[record['_type']]:
if t_map['name'] == key:
dtype = t_map['data_type']
break
trait_list.append(models.Trait(
name=key, dtype=dtype,
value=models.Trait.convert_value(dtype, value)))
gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
record['_source']['timestamp']))
yield models.Event(message_id=record['_id'],
event_type=record['_type'],
generated=gen_ts,
                                   traits=trait_list)
|
chippey/gaffer
|
python/GafferImageTest/ObjectToImageTest.py
|
Python
|
bsd-3-clause
| 3,067 | 0.025106 |
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class ObjectToImageTest( GafferImageTest.ImageTestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
	negFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checkerWithNegativeDataWindow.200x150.exr" )
def test( self ) :
i = IECore.Reader.create( self.fileName ).read()
n = GafferImage.ObjectToImage()
n["object"].setValue( i )
self.assertEqual( n["out"].image(), i )
def testImageWithANegativeDataWindow( self ) :
i = IECore.Reader.create( self.negFileName ).read()
n = GafferImage.ObjectToImage()
n["object"].setValue( i )
self.assertEqual( n["out"].image(), i )
def testHashVariesPerTileAndChannel( self ) :
n = GafferImage.ObjectToImage()
n["object"].setValue( IECore.Reader.create( self.fileName ).read() )
self.assertNotEqual(
n["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
n["out"].channelDataHash( "G", IECore.V2i( 0 ) )
)
self.assertNotEqual(
n["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
n["out"].channelDataHash( "R", IECore.V2i( GafferImage.ImagePlug.tileSize() ) )
)
if __name__ == "__main__":
unittest.main()
|
RobLoach/lutris
|
lutris/runners/__init__.py
|
Python
|
gpl-3.0
| 1,920 | 0 |
"""Generic runner functions."""
# from lutris.util.log import logger
__all__ = (
# Native
"linux", "steam", "browser", "web",
# Microsoft based
"wine", "winesteam", "dosbox",
# Multi-system
"mame", "mess", "mednafen", "scummvm", "residualvm", "libretro", "ags",
    # Commodore
"fsuae", "vice",
# Atari
"stella", "atari800", "hatari", "virtualjaguar",
# Nintendo
"snes9x", "mupen64plus", "dolphin", "desmume", "citra",
# Sony
"pcsxr", "ppsspp", "pcsx2",
# Sega
"osmose", "dgen", "reicast",
# Misc legacy systems
"frotz", "jzintv", "o2em", "zdoom"
)
class InvalidRunner(Exception):
    def __init__(self, message):
self.message = message
class RunnerInstallationError(Exception):
def __init__(self, message):
self.message = message
class NonInstallableRunnerError(Exception):
def __init__(self, message):
self.message = message
def get_runner_module(runner_name):
if runner_name not in __all__:
raise InvalidRunner("Invalid runner name '%s'" % runner_name)
return __import__('lutris.runners.%s' % runner_name,
globals(), locals(), [runner_name], 0)
def import_runner(runner_name):
"""Dynamically import a runner class."""
runner_module = get_runner_module(runner_name)
if not runner_module:
return
return getattr(runner_module, runner_name)
def import_task(runner, task):
"""Return a runner task."""
runner_module = get_runner_module(runner)
if not runner_module:
return
return getattr(runner_module, task)
def get_installed(sort=True):
"""Return a list of installed runners (class instances)."""
installed = []
for runner_name in __all__:
runner = import_runner(runner_name)()
if runner.is_installed():
installed.append(runner)
return sorted(installed) if sort else installed
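
# A minimal usage sketch (not part of the original module): load a runner class
# by name and check whether it is installed. "wine" is taken from __all__ above;
# any other listed runner name works the same way.
if __name__ == "__main__":
    wine_cls = import_runner("wine")
    print(wine_cls().is_installed())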
|
yewsiang/botmother
|
gunicorn.py
|
Python
|
agpl-3.0
| 417 | 0.002398 |
import os
|
def numCPUs():
if not hasattr(os, "sysconf"):
raise RuntimeError("No sysconf detected.")
return os.sysconf("SC_NPROCESSORS_ONLN")
bind = "0.0.0.0:8000"
# workers = numCPUs() * 2 + 1
workers = 1
backlog = 2048
worker_class = "sync"
# worker_class = "gevent"
debug = True
# daemon = True
# errorlog = 'gunicorn-error.log'
# accesslog = 'gunicorn-access.log'
# log-file= -
loglevel = 'info'
|
alex/changes
|
migrations/versions/2b8459f1e2d6_initial_schema.py
|
Python
|
apache-2.0
| 9,758 | 0.016807 |
"""Initial schema
Revision ID: 2b8459f1e2d6
Revises: None
Create Date: 2013-10-22 14:31:32.654367
"""
# revision identifiers, used by Alembic.
revision = '2b8459f1e2d6'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('repository',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('url', sa.String(length=200), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('url')
)
op.create_table('node',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('label', sa.String(length=128), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('author',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('name')
)
op.create_table('remoteentity',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('provider', sa.String(length=128), nullable=False),
sa.Column('remote_id', sa.String(length=128), nullable=False),
sa.Column('internal_id', sa.GUID(), nullable=False),
sa.Column('data', sa.JSONEncodedDict(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('provider','remote_id','type', name='remote_identifier')
)
op.create_table('project',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('slug', sa.String(length=64), nullable=False),
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('avg_build_time', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
op.create_table('revision',
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('sha', sa.String(length=40), nullable=False),
sa.Column('author_id', sa.GUID(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('parents', postgresql.ARRAY(sa.String(length=40)), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('repository_id', 'sha')
)
op.create_table('change',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('hash', sa.String(length=40), nullable=False),
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('author_id', sa.GUID(), nullable=True),
sa.Column('label', sa.String(length=128), nullable=False),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('hash')
)
op.create_table('patch',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('change_id', sa.GUID(), nullable=True),
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('parent_revision_sha', sa.String(length=40), nullable=False),
sa.Column('label', sa.String(length=64), nullable=False),
sa.Column('url', sa.String(length=200), nullable=True),
sa.Column('diff', sa.Text(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['change_id'], ['change.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('build',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('change_id', sa.GUID(), nullable=True),
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('parent_revision_sha', sa.String(length=40), nullable=True),
sa.Column('patch_id', sa.GUID(), nullable=True),
sa.Column('author_id', sa.GUID(), nullable=True),
sa.Column('label', sa.String(length=128), nullable=False),
sa.Column('status', sa.Enum(), nullable=False),
sa.Column('result', sa.Enum(), nullable=False),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('date_started', sa.DateTime(), nullable=True),
sa.Column('date_finished', sa.DateTime(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('data', sa.JSONEncodedDict(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['change_id'], ['change.id'], ),
sa.ForeignKeyConstraint(['patch_id'], ['patch.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('filecoverage',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('filename', sa.String(length=256), nullable=False),
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('data', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.PrimaryKeyConstraint('id', 'filename')
)
op.create_table('test',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('group_sha', sa.String(length=40), nullable=False),
sa.Column('label_sha', sa.String(length=40), nullable=False),
sa.Column('group', sa.Text(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('package', sa.Text(), nullable=True),
sa.Column('result', sa.Enum(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('build_id','group_sha','label_sha', name='_test_key')
)
op.create_table('phase',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('repository_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('label', sa.String(length=128), nullable=False),
sa.Column('status', sa.Enum(), nullable=False),
sa.Column('result', sa.Enum(), nullable=False),
sa.Column('date_started', sa.DateTime(), nullable=True),
sa.Column('date_finished', sa.DateTime(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
sa.PrimaryKeyConstraint('id')
|
JanFan/py-aho-corasick
|
py_aho_corasick/__init__.py
|
Python
|
mit
| 114 | 0 |
# -*- coding: utf-8 -*-
__author__ = """JanFan"""
__email__ = 'guangyizhang.jan@gmail.com'
__version__ = '0.1.0'
|
|
smashwilson/deconst-preparer-sphinx
|
deconstrst/builder.py
|
Python
|
apache-2.0
| 1,067 | 0 |
# -*- coding: utf-8 -*-
from sphinx.builders.html import JSONHTMLBuilder
from sphinx.util import jsonimpl
class DeconstJSONImpl:
"""
Enhance the default JSON encoder by adding additional keys.
"""
def dump(self, obj, fp, *args, **kwargs):
self._enhance(obj)
return jsonimpl.dump(obj, fp, *args, **kwargs)
def dumps(self, obj, *args, **kwargs):
self._enhance(obj)
return jsonimpl.dumps(obj, *args, **kwargs)
def load(self, *args, **kwargs):
return jsonimpl.load(*args, **kwargs)
def loads(self, *args, **kwargs):
        return jsonimpl.loads(*args, **kwargs)
def _enhance(self, obj):
"""
Add additional properties to "obj" to get them into the JSON.
"""
obj["hello"] = "Sup"
class DeconstJSONBuilder(JSONHTMLBuilder):
"""
Custom Sphinx builder that generates Deconst-compatible JSON documents.
"""
    implementation = DeconstJSONImpl()
name = 'deconst'
out_suffix = '.json'
def init(self):
JSONHTMLBuilder.init(self)
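
# A small sketch (not part of the original builder): serialising a plain dict
# through the custom implementation shows the injected "hello" key alongside
# the original data.
if __name__ == "__main__":
    print(DeconstJSONImpl().dumps({"title": "Example page"}))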
|
tdargent/TCXfilestodata
|
readTCXFiles.py
|
Python
|
mit
| 784 | 0.026786 |
# -*- coding: utf-8 -*-
import os,tcxparser
cd = os.path.dirname(os.path.abspath(__file__)) #use to browse tcx files at the correct location
file_list = os.listdir(cd) #list the files next to this script, matching how they are opened below
f=open("result.txt","a")
f.write("date (ISO UTC Format)"+'\t'+"Distance [m]"+'\t'+"
|
Duration [s]"+'\n')
for a in file_list:
if a == "result.txt":
print() #Quick and dirty...
elif a== "readTCXFiles.py":
print() #Quick and dirty...
else:
tcx=tcxparser.TCXParser(cd+'/'+a) #see https://github.com/vkurup/python-tcxparser/ for details
if tcx.activity_type == 'biking' : #To select only my biking session (could be change to 'hiking', 'running' etc.)
f.write(str(tcx.completed_at)+'\t'+str(tcx.distance)+'\t'+str(tcx.duration)+'\n')
f.close() #parentheses required; a bare "f.close" never actually closed the file
|
BenjaminSchubert/py_github_update_checker
|
setup.py
|
Python
|
mit
| 995 | 0 |
#!/usr/bin/python3
# -*- Coding : UTF-8 -*-
from os import path
import github_update_checker
from setuptools import setup, find_packages
file_path = path.abspath(path.dirname(__file__))
with open(path.join(file_path, "README.md"), encoding="UTF-8") as f:
long_description = f.read()
setup(
name="github_update_checker",
version=github_update_checker.__version__,
description="A simple update checker for github in python",
long_description=long_description,
url="https://github.com/Tellendil/py_github_update_checker",
author="Benjamin Schubert",
author_email="ben.c.schub
|
ert@gmail.com",
license="MIT",
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3'
],
keywords="update",
packages=find_packages()
)
|
Yubico/yubiadmin
|
yubiadmin/util/app.py
|
Python
|
bsd-2-clause
| 7,953 | 0.000251 |
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import re
from jinja2 import Environment, FileSystemLoader
from webob import exc, Response
from webob.dec import wsgify
__all__ = [
'App',
'CollectionApp',
'render',
'populate_forms',
]
cwd = os.path.dirname(__file__)
base_dir = os.path.abspath(os.path.join(cwd, os.pardir))
template_dir = os.path.join(base_dir, 'templates')
env = Environment(loader=FileSystemLoader(template_dir))
class TemplateBinding(object):
def __init__(self, template, **kwargs):
self.template = env.get_template('%s.html' % template)
self.data = kwargs
for (key, val) in kwargs.items():
if isinstance(val, TemplateBinding):
self.data.update(val.data)
def extend(self, sub_variable, sub_binding):
if hasattr(sub_binding, 'data'):
self.data.update(sub_binding.data)
sub_binding.data = self.data
self.data[sub_variable] = sub_binding
@property
def prerendered(self):
self._rendered = str(self)
return self
def __str__(self):
if hasattr(self, '_rendered'):
return self._rendered
return self.template.render(self.data)
@wsgify
def __call__(self, request):
return str(self)
def render(tmpl, **kwargs):
return TemplateBinding(tmpl, **kwargs)
def populate_forms(forms, data):
if not data:
for form in filter(lambda x: hasattr(x, 'load'), forms):
form.load()
else:
errors = False
for form in forms:
form.process(data)
errors = not form.validate() or errors
if not errors:
for form in filter(lambda x: hasattr(x, 'save'), forms):
form.save()
class App(object):
sections = []
priority = 50
@property
def name(self):
self.__class__.name = sys.modules[self.__module__].__file__ \
.split('/')[-1].rsplit('.', 1)[0]
return self.name
def __call__(self, request):
section_name = request.path_info_pop()
if not section_name:
return self.redirect('/%s/%s' % (self.name, self.sections[0]))
if not hasattr(self, section_name):
raise exc.HTTPNotFound
sections = [{
'name': section,
'title': (getattr(self, section).__doc__ or section.capitalize()
).strip(),
'active': section == section_name,
'advanced': bool(getattr(getattr(self, section), 'advanced',
False))
} for section in self.sections]
request.environ['yubiadmin.response'].extend('content', render(
'app_base',
name=self.name,
sections=sections,
title='YubiAdmin - %s - %s' % (self.name, section_name)
))
resp = getattr(self, section_name)(request)
if isinstance(resp, Response):
return resp
request.environ['yubiadmin.response'].extend('page', resp)
def redirect(self, url):
raise exc.HTTPSeeOther(location=url)
def render_forms(self, request, forms, template='form',
success_msg='Settings updated!', **kwargs):
alerts = []
if not request.params:
for form in filter(lambda x: hasattr(x, 'load'), forms):
form.load()
else:
errors = False
for form in forms:
form.process(request.params)
errors = not form.validate() or errors
if not errors:
                try:
if success_msg:
alerts = [{'type': 'success', 'title': success_msg}]
for form in filter(lambda x: hasattr(x, 'save'), forms):
form.save()
except Exception as e:
alerts = [{'type': 'error', 'title': 'Error:',
'message': str(e)}]
else:
                alerts = [{'type': 'error', 'title': 'Invalid data!'}]
return render(template, target=request.path, fieldsets=forms,
alerts=alerts, **kwargs)
ITEM_RANGE = re.compile(r'(\d+)-(\d+)')
class CollectionApp(App):
base_url = ''
caption = 'Items'
item_name = 'Items'
columns = []
template = 'table'
scripts = ['table']
selectable = True
max_limit = 100
def _size(self):
return len(self._get())
def _get(self, offset=0, limit=None):
return [{}]
def _labels(self, ids):
return [x['label'] for x in self._get() if x['id'] in ids]
def _delete(self, ids):
raise Exception('Not implemented!')
def __call__(self, request):
sub_cmd = request.path_info_pop()
if sub_cmd and not sub_cmd.startswith('_') and hasattr(self, sub_cmd):
return getattr(self, sub_cmd)(request)
else:
match = ITEM_RANGE.match(sub_cmd) if sub_cmd else None
if match:
offset = int(match.group(1)) - 1
limit = int(match.group(2)) - offset
return self.list(offset, limit)
else:
return self.list()
def list(self, offset=0, limit=10):
limit = min(self.max_limit, limit)
items = self._get(offset, limit)
total = self._size()
shown = (min(offset + 1, total), min(offset + limit, total))
if offset > 0:
st = max(0, offset - limit)
ed = st + limit
prev = '%s/%d-%d' % (self.base_url, st + 1, ed)
else:
prev = None
if total > shown[1]:
next = '%s/%d-%d' % (self.base_url, offset + limit + 1, shown[1]
+ limit)
else:
next = None
return render(
self.template, scripts=self.scripts, items=items, offset=offset,
limit=limit, total=total, shown='%d-%d' % shown, prev=prev,
next=next, base_url=self.base_url, caption=self.caption,
cols=self.columns, item_name=self.item_name,
selectable=self.selectable)
def delete(self, request):
ids = [x[5:] for x in request.params if request.params[x] == 'on']
labels = self._labels(ids)
return render('table_delete', ids=','.join(ids), labels=labels,
item_name=self.item_name, base_url=self.base_url)
def delete_confirm(self, request):
self._delete(request.params['delete'].split(','))
return self.redirect(self.base_url)
|
michaelbrooks/django-twitter-stream
|
twitter_stream/migrations/0002_auto__add_index_tweet_analyzed_by__add_index_tweet_created_at.py
|
Python
|
mit
| 6,130 | 0.007341 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Tweet', fields ['analyzed_by']
db.create_index(u'twitter_stream_tweet', ['analyzed_by'])
# Adding index on 'Tweet', fields ['created_at']
db.create_index(u'twitter_stream_tweet', ['created_at'])
def backwards(self, orm):
# Removing index on 'Tweet', fields ['created_at']
db.delete_index(u'twitter_stream_tweet', ['created_at'])
# Removing index on 'Tweet', fields ['analyzed_by']
db.delete_index(u'twitter_stream_tweet', ['analyzed_by'])
models = {
u'twitter_stream.apikey': {
'Meta': {'object_name': 'ApiKey'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'access_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'api_secret': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'twitter_stream.filterterm': {
'Meta': {'object_name': 'FilterTerm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'twitter_stream.streamprocess': {
'Meta': {'object_name': 'StreamProcess'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keys': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter_stream.ApiKey']", 'null': 'True'}),
'last_heartbeat': ('django.db.models.fields.DateTimeField', [], {}),
'process_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'WAITING'", 'max_length': '10'}),
'timeout_seconds': ('django.db.models.fields.PositiveIntegerField', [], {}),
'tweet_rate': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
u'twitter_stream.tweet': {
'Meta': {'object_name': 'Tweet'},
'analyzed_by': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'favorite_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'filter_level': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'retweet_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'retweeted_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'truncated': ('django.db.models.fields.BooleanField', [], {}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_followers_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_geo_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_utc_offset': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user_verified': ('django.db.models.fields.BooleanField', [], {})
}
}
complete_apps = ['twitter_stream']
|
dunkhong/grr
|
grr/client/grr_response_client/client_actions/file_finder_utils/subactions.py
|
Python
|
apache-2.0
| 4,871 | 0.006159 |
#!/usr/bin/env python
"""Implementation of client-side file-finder subactions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from future.utils import with_metaclass
from grr_response_client import client_utils
from grr_response_client import client_utils_common
from grr_response_client.client_actions.file_finder_utils import uploading
class Action(with_metaclass(abc.ABCMeta, object)):
"""An abstract class for subactions of the client-side file-finder.
Attributes:
flow: A parent flow action that spawned the subaction.
"""
def __init__(self, flow):
self.flow = flow
@abc.abstractmethod
def Execute(self, filepath, result):
"""Executes the action on a given path.
Concrete action implementations should return results by filling-in
appropriate fields of the result instance.
Args:
filepath: A path to the file on which the action is going to be performed.
result: An `FileFinderResult` instance to fill-in.
"""
pass
class StatAction(Action):
"""Implementation of the stat subaction.
This subaction just gathers basic metadata information about the specified
  file (such as size, modification time, extended attributes and flags).
Attributes:
    flow: A parent flow action that spawned the subaction.
opts: A `FileFinderStatActionOptions` instance.
"""
def __init__(self, flow, opts):
super(StatAction, self).__init__(flow)
self.opts = opts
def Execute(self, filepath, result):
    stat_cache = self.flow.stat_cache
stat = stat_cache.Get(filepath, follow_symlink=self.opts.resolve_links)
result.stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=self.opts.collect_ext_attrs)
class HashAction(Action):
"""Implementation of the hash subaction.
This subaction returns results of various hashing algorithms applied to the
specified file. Additionally it also gathers basic information about the
hashed file.
Attributes:
flow: A parent flow action that spawned the subaction.
opts: A `FileFinderHashActionOptions` instance.
"""
def __init__(self, flow, opts):
super(HashAction, self).__init__(flow)
self.opts = opts
def Execute(self, filepath, result):
stat = self.flow.stat_cache.Get(filepath, follow_symlink=True)
result.stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=self.opts.collect_ext_attrs)
if stat.IsDirectory():
return
policy = self.opts.oversized_file_policy
max_size = self.opts.max_size
if stat.GetSize() <= self.opts.max_size:
result.hash_entry = _HashEntry(stat, self.flow)
elif policy == self.opts.OversizedFilePolicy.HASH_TRUNCATED:
result.hash_entry = _HashEntry(stat, self.flow, max_size=max_size)
elif policy == self.opts.OversizedFilePolicy.SKIP:
return
else:
raise ValueError("Unknown oversized file policy: %s" % policy)
class DownloadAction(Action):
"""Implementation of the download subaction.
This subaction sends a specified file to the server and returns a handle to
its stored version. Additionally it also gathers basic metadata about the
file.
Attributes:
flow: A parent flow action that spawned the subaction.
opts: A `FileFinderDownloadActionOptions` instance.
"""
def __init__(self, flow, opts):
super(DownloadAction, self).__init__(flow)
self.opts = opts
def Execute(self, filepath, result):
stat = self.flow.stat_cache.Get(filepath, follow_symlink=True)
result.stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=self.opts.collect_ext_attrs)
if stat.IsDirectory():
return
policy = self.opts.oversized_file_policy
max_size = self.opts.max_size
if stat.GetSize() <= max_size:
result.transferred_file = self._UploadFilePath(filepath)
elif policy == self.opts.OversizedFilePolicy.DOWNLOAD_TRUNCATED:
result.transferred_file = self._UploadFilePath(filepath, truncate=True)
elif policy == self.opts.OversizedFilePolicy.HASH_TRUNCATED:
result.hash_entry = _HashEntry(stat, self.flow, max_size=max_size)
elif policy == self.opts.OversizedFilePolicy.SKIP:
return
else:
raise ValueError("Unknown oversized file policy: %s" % policy)
def _UploadFilePath(self, filepath, truncate=False):
max_size = self.opts.max_size
chunk_size = self.opts.chunk_size
uploader = uploading.TransferStoreUploader(self.flow, chunk_size=chunk_size)
return uploader.UploadFilePath(filepath, amount=max_size)
def _HashEntry(stat, flow, max_size=None):
hasher = client_utils_common.MultiHasher(progress=flow.Progress)
try:
hasher.HashFilePath(stat.GetPath(), max_size or stat.GetSize())
return hasher.GetHashObject()
except IOError:
return None
|
NetApp/cinder
|
cinder/volume/drivers/vmware/datastore.py
|
Python
|
apache-2.0
| 12,036 | 0 |
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes and utility methods for datastore selection.
"""
import random
from oslo_log import log as logging
from oslo_vmware import pbm
from oslo_vmware import vim_util
from cinder.i18n import _LE
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
class DatastoreType(object):
"""Supported datastore types."""
NFS = "nfs"
VMFS = "vmfs"
VSAN = "vsan"
VVOL = "vvol"
_ALL_TYPES = {NFS, VMFS, VSAN, VVOL}
@staticmethod
def get_all_types():
return DatastoreType._ALL_TYPES
class DatastoreSelector(object):
"""Class for selecting datastores which satisfy input requirements."""
HARD_AFFINITY_DS_TYPE = "hardAffinityDatastoreTypes"
HARD_ANTI_AFFINITY_DS = "hardAntiAffinityDatastores"
SIZE_BYTES = "sizeBytes"
PROFILE_NAME = "storageProfileName"
# TODO(vbala) Remove dependency on volumeops.
def __init__(self, vops, session, max_objects):
self._vops = vops
self._session = session
self._max_objects = max_objects
def get_profile_id(self, profile_name):
"""Get vCenter profile ID for the given profile name.
:param profile_name: profile name
:return: vCenter profile ID
:raises: ProfileNotFoundException
"""
profile_id = pbm.get_profile_id_by_name(self._session, profile_name)
if profile_id is None:
LOG.error(_LE("Storage profile: %s cannot be found in vCenter."),
profile_name)
raise vmdk_exceptions.ProfileNotFoundException(
storage_profile=profile_name)
LOG.debug("Storage profile: %(name)s resolved to vCenter profile ID: "
"%(id)s.",
{'name': profile_name,
'id': profile_id})
return profile_id
def _filter_by_profile(self, datastores, profile_id):
"""Filter out input datastores that do not match the given profile."""
cf = self._session.pbm.client.factory
hubs = pbm.convert_datastores_to_hubs(cf, datastores)
hubs = pbm.filter_hubs_by_profile(self._session, hubs, profile_id)
hub_ids = [hub.hubId for hub in hubs]
return {k: v for k, v in datastores.items() if k.value in hub_ids}
def _filter_datastores(self,
datastores,
size_bytes,
profile_id,
hard_anti_affinity_ds,
hard_affinity_ds_types,
valid_host_refs=None):
if not datastores:
return
def _is_valid_ds_type(summary):
ds_type = summary.type.lower()
return (ds_type in DatastoreType.get_all_types() and
(hard_affinity_ds_types is None or
ds_type in hard_affinity_ds_types))
def _is_ds_usable(summary):
return summary.accessible and not self._vops._in_maintenance(
summary)
valid_host_refs = valid_host_refs or []
valid_hosts = [host_ref.value for host_ref in valid_host_refs]
def _is_ds_accessible_to_valid_host(host_mounts):
for host_mount in host_mounts:
if host_mount.key.value in valid_hosts:
return True
def _is_ds_valid(ds_ref, ds_props):
summary = ds_props.get('summary')
host_mounts = ds_props.get('host')
if (summary is None or host_mounts is None):
return False
if (hard_anti_affinity_ds and
ds_ref.value in hard_anti_affinity_ds):
return False
if summary.freeSpace < size_bytes:
return False
if (valid_hosts and
not _is_ds_accessible_to_valid_host(host_mounts)):
return False
return _is_valid_ds_type(summary) and _is_ds_usable(summary)
datastores = {k: v for k, v in datastores.items()
if _is_ds_valid(k, v)}
if datastores and profile_id:
datastores = self._filter_by_profile(datastores, profile_id)
return datastores
def _get_object_properties(self, obj_content):
props = {}
if hasattr(obj_content, 'propSet'):
prop_set = obj_content.propSet
if prop_set:
props = {prop.name: prop.val for prop in prop_set}
return props
def _get_datastores(self):
datastores = {}
retrieve_result = self._session.invoke_api(
vim_util,
'get_objects',
self._session.vim,
'Datastore',
self._max_objects,
properties_to_collect=['host', 'summary'])
while retrieve_result:
            if retrieve_result.objects:
for obj_content in retrieve_result.objects:
props = self._get_object_properties(obj_content)
if ('host' in props and
hasattr(props['host'], 'DatastoreHostMount')):
props['host'] = props['host'].DatastoreHostMount
datastores[obj_content.obj] = props
            retrieve_result = self._session.invoke_api(vim_util,
'continue_retrieval',
self._session.vim,
retrieve_result)
return datastores
def _get_host_properties(self, host_ref):
retrieve_result = self._session.invoke_api(vim_util,
'get_object_properties',
self._session.vim,
host_ref,
['runtime', 'parent'])
if retrieve_result:
return self._get_object_properties(retrieve_result[0])
def _get_resource_pool(self, cluster_ref):
return self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster_ref,
'resourcePool')
def _select_best_datastore(self, datastores, valid_host_refs=None):
if not datastores:
return
def _sort_key(ds_props):
host = ds_props.get('host')
summary = ds_props.get('summary')
space_utilization = (1.0 -
(summary.freeSpace / float(summary.capacity)))
return (-len(host), space_utilization)
host_prop_map = {}
def _is_host_usable(host_ref):
props = host_prop_map.get(host_ref.value)
if props is None:
props = self._get_host_properties(host_ref)
host_prop_map[host_ref.value] = props
runtime = props.get('runtime')
parent = props.get('parent')
if runtime and parent:
return (runtime.connectionState == 'connected' and
not runtime.inMaintenanceMode)
else:
return False
valid_host_refs = valid_host_refs or []
valid_hosts = [host_ref.value for host_ref in valid_host_refs]
def _select_host(host_mounts):
random.shuffle(host_mounts)
|
AlxMar/conduction-cylinder
|
genpics.py
|
Python
|
gpl-3.0
| 2,141 | 0.01915 |
### Generating picture frames to a "pic" folder
#from pylab import *
import numpy as np
from time import sleep
import matplotlib
matplotlib.use("Agg")
from matplotlib import rc
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
import matplotlib.pyplot as plt
#matplotlib.use("Agg")
#import matplotlib.animation as animation
from boutdata import collect
from boututils import DataFile
pathname = "data3"
T = np.squeeze(collect("T", path=pathname, xguards=False))
time = np.squeeze(collect("t_array", path=pathname, xguards=False))
#dx = collect(("dx", path="data")
#dz = collect("dz", path="data")
print T.shape
#dx = dx.squeeze()
#print dx
#time = collect("t", path="data")
#create 5000 Random points distributed within the circle radius 100
max_r = 1.8
min_r = 0.05
max_theta = 2.0 * np.pi
#number_points = 5000
#points = np.random.rand(number_points,2)*[max_r,max_theta]
#Some function to generate values for these points,
#this could be values = np.random.rand(number_points)
#values = points[:,0] * np.sin(points[:,1])* np.cos(points[:,1])
#now we create a grid of values, interpolated from our random sample above
theta = np.linspace(0.0, max_theta, 128)
r = np.linspace(min_r, max_r, 64)
#grid_r, grid_theta = np.meshgrid(r, theta)
#data = griddata(points, values, (grid_r, grid_theta), method='cubic',fill_value=0)
#Create a polar projection
fig = plt.figure()
ax = fig.add_subplot(111, projection='polar')
ax.set_axis_bgcolor('black')
ax.set_xticklabels([''])
ax.set_yticklabels([''])
#plt.show()
cax = ax.pcolormesh(theta,r, T[0,:,:])
fig.patch.set_facecolor('black')
color_bar = plt.colorbar(cax, orientation='horizontal')
cbytick_obj = plt.getp(color_bar.ax.axes, 'xticklabels')
plt.setp(cbytick_obj, color = 'white')
#cax = ax.pcolormesh(theta,r, T[i,:,0,:])
for i in range(len(time)):
#Color bar
txt = ax.text(0.9*np.pi,3., r'$t = ' + str(time[i]) + r'$', color='white', fontsize=16)
#Plottingi
cax.set_array(T[i,:-1,:-1].ravel())
fig.savefig("pic/tempBR%0.5i.png" %i, facecolor=fig.get_facecolor(), edgecolor='none')
    txt.remove()
#fig.clear()
|
FirstDraftGIS/firstdraft
|
projfd/appfd/models/base.py
|
Python
|
apache-2.0
| 940 | 0.006383 |
#-*- coding: utf-8 -*-
from django.contrib.gis.db.models import DateTimeField, Model
class Base(Model):
created = DateTimeField(auto_now_add=True, null=True)
modified = DateTimeField(auto_now=True, null=True)
class Meta:
abstract = True
    def __str__(self):
try:
for propname in ["name", "key", "token", "text"]:
if hasattr(self, propname):
text = getattr(self, propname).encode("utf-8")
|
if len(text) > 20:
return text[:20] + "..."
else:
return text
else:
return str(self.id)
except:
return str(self.id)
def update(self, d):
save = False
for k,v in d.items():
if getattr(self, k) != v:
save = True
setattr(self,k,v)
if save:
self.save()
|
GoogleCloudPlatform/datacatalog-connectors
|
google-datacatalog-connectors-commons/src/google/datacatalog_connectors/commons/monitoring/__init__.py
|
Python
|
apache-2.0
| 744 | 0 |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .metrics_processor import MetricsProcessor
from .monitoring_facade import MonitoringFacade
__all__ = ('MonitoringFacade', 'MetricsProcessor')
|
RaJiska/Warband-PW-Punishments-Manager
|
scripts/process_map_icons.py
|
Python
|
gpl-3.0
| 796 | 0.020101 |
import process_operations as po
|
import module_map_icons
def process_entry(processor, txt_file, entry, index):
entry_len = len(entry)
output_list = ["%s %d %s %f %d " % (entry[0], entry[1], entry[2], entry[3], processor.process_id(entry[4], "snd"))]
triggers = []
if entry_len >= 8:
output_list.append("%f %f %f " % entry[5:8])
if entry_len > 8:
triggers = entry[8]
else:
output_list.append("0 0 0 ")
if entry_len > 5:
triggers = entry[5]
    output_list.extend(processor.process_triggers(triggers, entry[0]))
output_list.append("\r\n\r\n")
txt_file.write("".join(output_list))
export = po.make_export(data=module_map_icons.map_icons, data_name="map_icons", tag="icon",
header_format="map_icons_file version 1\r\n%d\r\n", process_entry=process_entry)
|
bqbn/addons-server
|
src/olympia/lib/safe_xml.py
|
Python
|
bsd-3-clause
| 774 | 0.002584 |
"""
Monkey patch and defuse all stdlib xml packages and lxml.
"""
import sys
patched_modules = (
'lxml',
'ElementTree',
'minidom',
'pulldom',
'sax',
'expatbuilder',
'expatreader',
'xmlrpc',
)
if any(module in sys.modules for module in patched_modules):
existing_modules = [(module, module in sys.modules) for module in patched_modules]
raise ImportError(
        'this monkey patch was not applied early enough. {0}'.format(existing_modules)
)
from defusedxml import defuse_stdlib # noqa
defuse_stdlib()
import lxml # noqa
import lxml.etree # noqa
from xml.sax.handler import ( # noqa
feature_external_ges,
feature_external_pes,
)
from olympia.lib import safe_lxml_etree # noqa
lxml.etree = safe_lxml_etree
|
qedsoftware/commcare-hq
|
corehq/pillows/groups_to_user.py
|
Python
|
bsd-3-clause
| 4,115 | 0.002916 |
from collections import namedtuple
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from corehq.apps.change_feed.document_types import GROUP
from corehq.apps.groups.models import Group
from corehq.elastic import stream_es_query, get_es_new, ES_META
from corehq.pillows.mappings.user_mapping import USER_INDEX, USER_INDEX_INFO
from pillowtop.checkpoints.manager import PillowCheckpointEventHandler, get_checkpoint_for_elasticsearch_pillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import PillowProcessor
from pillowtop.reindexer.change_providers.couch import CouchViewChangeProvider
from pillowtop.reindexer.reindexer import PillowChangeProviderReindexer
class GroupsToUsersProcessor(PillowProcessor):
def __init__(self):
self._es = get_es_new()
def process_change(self, pillow_instance, change):
if change.deleted:
remove_group_from_users(change.get_document(), self._es)
else:
update_es_user_with_groups(change.get_document(), self._es)
def get_group_to_user_pillow(pillow_id='GroupToUserPillow'):
assert pillow_id == 'GroupToUserPillow', 'Pillow ID is not allowed to change'
checkpoint = get_checkpoint_for_elasticsearch_pillow(pillow_id, USER_INDEX_INFO)
processor = GroupsToUsersProcessor()
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topics=[GROUP], group_id='groups-to-users'),
processor=processor,
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100,
),
)
def remove_group_from_users(group_doc, es_client):
if group_doc is None:
return
for user_source in stream_user_sources(group_doc.get("users", [])):
made_changes = False
if group_doc["name"] in user_source.group_names:
user_source.group_names.remove(group_doc["name"])
made_changes = True
if group_doc["_id"] in user_source.group_ids:
user_source.group_ids.remove(group_doc["_id"])
made_changes = True
if made_changes:
doc = {"__group_ids": list(user_source.group_ids), "__group_names": list(user_source.group_names)}
es_client.update(USER_INDEX, ES_META['users'].type, user_source.user_id, body={"doc": doc})
def update_es_user_with_groups(group_doc, es_client=None):
if not es_client:
es_client = get_es_new()
for user_source in stream_user_sources(group_doc.get("users", [])):
if group_doc["name"] not in user_source.group_names or group_doc["_id"] not in user_source.group_ids:
user_source.group_ids.add(group_doc["_id"])
user_source.group_names.add(group_doc["name"])
doc = {"__group_ids": list(user_source.group_ids), "__group_names": list(user_source.group_names)}
es_client.update(USER_INDEX, ES_META['users'].type, user_source.user_id, body={"doc": doc})
UserSource = namedtuple('UserSource', ['user_id', 'group_ids', 'group_names'])
def stream_user_sources(user_ids):
q = {"filter": {"and": [{"terms": {"_id": user_ids}}]}}
for result in stream_es_query(es_index='users', q=q, fields=["__group_ids", "__group_names"]):
group_ids = result.get('fields', {}).get("__group_ids", [])
group_ids = set(group_ids) if isinstance(group_ids, list) else {group_ids}
group_names = result.get('fields', {}).get("__group_names", [])
group_names = set(group_names) if isinstance(group_names, list) else {group_names}
yield UserSource(result['_id'], group_ids, group_names)
def get_groups_to_user_reindexer():
return PillowChangeProviderReindexer(
pillow=get_group_to_user_pillow(),
change_provider=CouchViewChangeProvider(
couch_db=Group.get_db(),
view_name=
|
'all_docs/by_doc_type',
view_kwargs={
'startkey': ['Group'],
'endkey': ['Group', {}],
|
'include_docs': True,
}
),
)
|
opennode/nodeconductor-openstack
|
setup.py
|
Python
|
mit
| 1,602 | 0.001873 |
#!/usr/bin/env python
from setuptools import setup, find_packages
tests_requires = [
'ddt>=1.0.0'
]
dev_requires = [
'Sphinx==1.2.2',
]
install_requires = [
'pbr!=2.1.0',
'Babel!=2.4.0,>=2.3.4',
'cmd2<0.9.0', # TODO: Drop restriction after Waldur is migrated to Python 3.
'iptools>=0.6.1',
'waldur-core>=0.161.1',
'python-ceilometerclient>=2.9.0',
'python-cinderclient>=3.1.0',
'python-glanceclient>=2.8.0',
'python-keystoneclient>=3.13.0',
'python-neutronclient>=6.5.0',
'python-novaclient>=9.1.0',
]
setup(
name='waldur-openstack',
version='0.43.4',
author='OpenNode Team',
author_email='info@opennodecloud.com',
url='http://waldur.com',
description='Waldur plugin for managing OpenStack resources.',
long_description=open('README.rst').read(),
license='MIT',
package_dir={'': 'src'},
packages=find_packages('src'),
install_requires=install_requires,
zip_safe=False,
    extras_require={
        'dev': dev_requires,
'tests': tests_requires,
},
entry_points={
'waldur_extensions': (
'openstack = waldur_openstack.openstack.extension:OpenStackExtension',
'openstack_tenant = waldur_openstack.openstack_tenant.extension:OpenStackTenantExtension',
),
},
include_package_data=True,
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
)
|
JPWKU/unix-agent
|
src/dcm/agent/tests/unit/test_cloudmetadata.py
|
Python
|
apache-2.0
| 9,676 | 0 |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
import uuid
import mock
import dcm.agent.exceptions as exceptions
import dcm.agent.tests.utils.general as test_utils
import dcm.agent.cloudmetadata as cm
class TestCloudMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.CloudMetaData(self.conf)
def test_base_instance_id(self):
instance_id = self.cm_obj.get_instance_id()
self.assertIsNone(instance_id)
def test_base_is_effective(self):
v = self.cm_obj.is_effective_cloud()
self.assertFalse(v)
def test_base_startup(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_startup_script)
    def test_base_get_cloud_type(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_cloud_type)
    def test_env_injected_id_no_env(self):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertIsNone(injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
self.conf.get_secure_dir.return_value = tmp_dir
with mock.patch.dict('os.environ',
{cm.ENV_INJECTED_ID_KEY: fake_id}):
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
self.assertTrue(os.path.exists(id_file))
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env_file_exists(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
with open(id_file, "w") as fptr:
fptr.write(fake_id)
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_ipv4_address(self):
addr = self.cm_obj.get_ipv4_addresses()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
def test_handshake_address(self):
addr = self.cm_obj.get_handshake_ip_address()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
class TestUnknownMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.UnknownMetaData(conf)
def test_effective_cloud(self):
self.assertTrue(self.cm_obj.is_effective_cloud())
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.UNKNOWN)
class TestAWSMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.AWSMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.Amazon)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_startup(self, md_server_data):
startup_data = "some date"
md_server_data.return_value = startup_data
sd = self.cm_obj.get_startup_script()
self.assertEqual(startup_data, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id(self, md_server_data):
fake_id = "somedata"
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertEqual(fake_id, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id_none(self, md_server_data):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
fake_id = None
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertIsNone(sd)
finally:
shutil.rmtree(tmp_dir)
class TestCloudStackMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.CloudStackMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.CloudStack)
class TestJoyentMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.JoyentMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Joyent)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_cached_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_try_both_locations(self, runcmd):
runcmd.return_value = ("", "error", 1)
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
self.conf.system_sudo = "sudo"
x = self.cm_obj.get_injected_id()
call1 = mock.call(
self.conf,
["sudo", "/usr/sbin/mdata-get", "es:dmcm-launch-id"])
call2 = mock.call(
self.conf,
["sudo", "/lib/smartdc/mdata-get", "es:dmcm-launch-id"])
self.assertEqual(runcmd.call_args_list, [call1, call2])
self.assertEqual(runcmd.call_count, 2)
self.assertIsNone(x)
finally:
shutil.rmtree(tmp_dir)
class TestGCEMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.GCEMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Google)
class TestAzureMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.AzureMetaData(conf)
def test_cloud_type(self):
self.assertEqual
|
kvesteri/postgresql-audit
|
postgresql_audit/flask.py
|
Python
|
bsd-2-clause
| 2,063 | 0 |
from __future__ import absolute_import
from contextlib import contextmanager
from copy import copy
from flask import g, request
from flask.globals import _app_ctx_stack, _request_ctx_stack
from .base import VersioningManager as BaseVersioningManager
class VersioningManager(BaseVersioningManager):
_actor_cls = 'User'
def get_transaction_values(self):
values = copy(self.values)
if context_available() and hasattr(g, 'activity_values'):
values.update(g.activity_values)
if (
'client_addr' not in values and
self.default_client_addr is not None
):
values['client_addr'] = self.default_client_addr
if (
'actor_id' not in values and
self.default_actor_id is not None
):
            values['actor_id'] = self.default_actor_id
return values
@property
def default_actor_id(self):
from flask_login import current_user
# Return None if we are outside of request context.
if not context_available():
|
return
try:
return current_user.id
except AttributeError:
return
@property
def default_client_addr(self):
# Return None if we are outside of request context.
if not context_available():
return
return request.remote_addr or None
def context_available():
return (
_app_ctx_stack.top is not None and
_request_ctx_stack.top is not None
)
def merge_dicts(a, b):
c = copy(a)
c.update(b)
return c
@contextmanager
def activity_values(**values):
if not context_available():
return
if hasattr(g, 'activity_values'):
previous_value = g.activity_values
values = merge_dicts(previous_value, values)
else:
previous_value = None
g.activity_values = values
yield
if previous_value is None:
del g.activity_values
else:
g.activity_values = previous_value
versioning_manager = VersioningManager()
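
# A small sketch (not part of the library): merge_dicts returns a new dict and
# leaves its first argument untouched, which is what activity_values relies on
# when values from nested "with activity_values(...)" blocks are layered and
# later restored.
if __name__ == '__main__':
    base = {'client_addr': '10.0.0.1'}
    print(merge_dicts(base, {'actor_id': 42}))  # copy carries both keys
    print(base)                                 # original left unchanged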
|
joergsimon/gesture-analysis
|
visualise/Stem.py
|
Python
|
apache-2.0
| 664 | 0.004518 |
from pylab import *
def stem_user(user, channel_list, start_index, stop_index):
data = user.windowData
for channel in channel_list:
stem_pandas_column(data, channel, start_index, stop_index)
def stem_pandas_column(dataframe, columname, start_index, stop_index):
column = dataframe[columname]
stem_channel(column.values, start_index, stop_index)
def stem_channel(channel, start_index, stop_index):
values = channel[start_index:stop_index]
markerline, stemlines, baseline = stem(range(start_index,stop_index), values, '-.')
|
setp(markerline, 'markerfacecolor', 'b')
setp(baseline, 'color','r', 'linewidth', 2)
show()
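
# Minimal usage sketch (not part of the original module; assumes an interactive
# matplotlib backend): stem-plot samples 10..50 of a synthetic signal.
if __name__ == '__main__':
    import numpy as np
    stem_channel(np.sin(np.arange(100) / 5.0), 10, 50)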
|
lowRISC/opentitan
|
util/reggen/gen_rtl.py
|
Python
|
apache-2.0
| 5,763 | 0 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Generate SystemVerilog designs from IpBlock object"""
import logging as log
import os
from typing import Dict, Optional, Tuple
from mako import exceptions # type: ignore
from mako.template import Template # type: ignore
from pkg_resources import resource_filename
from .ip_block import IpBlock
from .lib import check_int
from .multi_register import MultiRegister
from .reg_base import RegBase
from .register import Register
def escape_name(name: str) -> str:
return name.lower().replace(' ', '_')
def make_box_quote(msg: str, indent: str = ' ') -> str:
hr = indent + ('/' * (len(msg) + 6))
middle = indent + '// ' + msg + ' //'
return '\n'.join([hr, middle, hr])
def _get_awparam_name(iface_name: Optional[str]) -> str:
return (iface_name or 'Iface').capitalize() + 'Aw'
def get_addr_widths(block: IpBlock) -> Dict[Optional[str], Tuple[str, int]]:
'''Return the address widths for the device interfaces
Returns a dictionary keyed by interface name whose values are pairs:
(paramname, width) where paramname is IfaceAw for an unnamed interface and
FooAw for an interface called foo. This is constructed in the same order as
block.reg_blocks.
If there is a single device interface and that interface is unnamed, use
the more general parameter name "BlockAw".
'''
assert block.reg_blocks
if len(block.reg_blocks) == 1 and None in block.reg_blocks:
return {None: ('BlockAw', block.reg_blocks[None].get_addr_width())}
return {name: (_get_awparam_name(name), rb.get_addr_width())
for name, rb in block.reg_blocks.items()}
def get_type_name_pfx(block: IpBlock, iface_name: Optional[str]) -> str:
return block.name.lower() + ('' if iface_name is None
else '_{}'.format(iface_name.lower()))
def get_r0(reg: RegBase) -> Register:
|
'''Get a Register representing an entry in the RegBase'''
if isinstance(reg, Register):
return reg
else:
assert isinstance(reg, MultiRegister)
return reg.reg
def get_iface_tx_type(block: IpBlock,
iface_name: Optional[str],
                      hw2reg: bool) -> str:
|
x2x = 'hw2reg' if hw2reg else 'reg2hw'
pfx = get_type_name_pfx(block, iface_name)
return '_'.join([pfx, x2x, 't'])
def get_reg_tx_type(block: IpBlock, reg: RegBase, hw2reg: bool) -> str:
'''Get the name of the hw2reg or reg2hw type for reg'''
if isinstance(reg, Register):
r0 = reg
type_suff = 'reg_t'
else:
assert isinstance(reg, MultiRegister)
r0 = reg.reg
type_suff = 'mreg_t'
x2x = 'hw2reg' if hw2reg else 'reg2hw'
return '_'.join([block.name.lower(),
x2x,
r0.name.lower(),
type_suff])
def gen_rtl(block: IpBlock, outdir: str) -> int:
# Read Register templates
reg_top_tpl = Template(
filename=resource_filename('reggen', 'reg_top.sv.tpl'))
reg_pkg_tpl = Template(
filename=resource_filename('reggen', 'reg_pkg.sv.tpl'))
# Generate <block>_reg_pkg.sv
#
# This defines the various types used to interface between the *_reg_top
# module(s) and the block itself.
reg_pkg_path = os.path.join(outdir, block.name.lower() + "_reg_pkg.sv")
with open(reg_pkg_path, 'w', encoding='UTF-8') as fout:
try:
fout.write(reg_pkg_tpl.render(block=block))
except: # noqa F722 for template Exception handling
log.error(exceptions.text_error_template().render())
return 1
# Generate the register block implementation(s). For a device interface
# with no name we generate the register module "<block>_reg_top" (writing
# to <block>_reg_top.sv). In any other case, we also need the interface
# name, giving <block>_<ifname>_reg_top.
lblock = block.name.lower()
for if_name, rb in block.reg_blocks.items():
if if_name is None:
mod_base = lblock
else:
mod_base = lblock + '_' + if_name.lower()
mod_name = mod_base + '_reg_top'
reg_top_path = os.path.join(outdir, mod_name + '.sv')
with open(reg_top_path, 'w', encoding='UTF-8') as fout:
try:
fout.write(reg_top_tpl.render(block=block,
mod_base=mod_base,
mod_name=mod_name,
if_name=if_name,
rb=rb))
except: # noqa F722 for template Exception handling
log.error(exceptions.text_error_template().render())
return 1
return 0
def render_param(dst_type: str, value: str) -> str:
'''Render a parameter value as used for the destination type
The value is itself a string but we have already checked that if dst_type
happens to be "int" or "int unsigned" then it can be parsed as an integer.
If dst_type is "int unsigned" and the value is larger than 2^31 then
explicitly generate a 32-bit hex value. This allows 32-bit literals whose
top bits are set (which can't be written as bare integers in SystemVerilog
without warnings, because those are interpreted as ints).
'''
if dst_type == 'int unsigned':
# This shouldn't fail because we've already checked it in
# _parse_parameter in params.py
int_val = check_int(value, "integer parameter")
if int_val >= (1 << 31):
return "32'h{:08x}".format(int_val)
return value
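# Worked examples (illustrative, not in the original source):
#   render_param('int', '5')                    -> '5'
#   render_param('int unsigned', '4294901760')  -> "32'hffff0000"  (value >= 2**31)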
|
DeanSherwin/django-dynamic-scraper
|
dynamic_scraper/management/commands/check_last_checker_deletes.py
|
Python
|
bsd-3-clause
| 3,890 | 0.007969 |
#Stage 2 Update (Python 3)
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import datetime
from optparse import make_option
from django.conf import settings
from django.core.mail import mail_admins
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from dynamic_scraper.models import Scraper
class Command(BaseCommand):
help = 'Checks last checker deletes of a scraper being older than <last_checker_delete_alert_period> period provided in admin form'
def add_arguments(self, parser):
parser.add_argument('--only-active', type=bool, default=False, help="Run checker delete checks only for active scrapers, default=False")
parser.add_argument('--send-admin-mail', type=bool, default=False, help="Send report mail to Django admins if last deletes are too old, default=False")
parser.add_argument('--with-next-alert', type=bool, default=False, help="Only run for scrapers with past next alert timestamp/update timestamp afterwards, default=False")
def handle(self, *args, **options):
mail_to_admins = False
msg = ''
only_active = options['only_active']
send_admin_mail = options['send_admin_mail']
        with_next_alert = options['with_next_alert']
|
if with_next_alert:
scrapers = Scraper.objects.filter(next_last_checker_delete_alert__lte=datetime.datetime.now())
print("{num} scraper(s) with future next alert timestamp found in DB...\n".format(num=len(scrapers)))
else:
scrapers = Scraper.objects.all()
print("{num
|
} scraper(s) found in DB...\n".format(num=len(scrapers)))
for s in scrapers:
if not (only_active and s.status != 'A'):
td = s.get_last_checker_delete_alert_period_timedelta()
if td:
period = s.last_checker_delete_alert_period
s_str = "SCRAPER: {scraper}\nID:{id}, Status:{status}, Alert Period:{period}".format(
scraper=str(s), id=s.pk, status=s.get_status_display(), period=period)
print(s_str)
if with_next_alert:
s.next_last_checker_delete_alert = datetime.datetime.now() + td
s.save()
if not s.last_checker_delete or \
(s.last_checker_delete < (datetime.datetime.now() - td)):
if s.last_checker_delete:
error_str = "Last checker delete older than alert period ({date_str})!".format(
date_str=s.last_checker_delete.strftime('%Y-%m-%d %H:%m'),)
else:
error_str = "Last checker delete not available!"
print(error_str)
msg += s_str + '\n' + error_str + '\n\n'
mail_to_admins = True
else:
print("OK")
print()
else:
print("Ommitting scraper {scraper}, no (valid) time period set.\n".format(scraper=str(s)))
else:
print("Ommitting scraper {scraper}, not active.\n".format(scraper=str(s)))
if send_admin_mail and mail_to_admins:
print("Send mail to admins...")
if 'django.contrib.sites' in settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
subject = Site.objects.get_current().name
else:
subject = 'DDS Scraper Site'
subject += " - Last checker delete check for scraper(s) failed"
mail_admins(subject, msg)
|
chemlab/chemlab
|
chemlab/core/serialization.py
|
Python
|
gpl-3.0
| 2,379 | 0.003363 |
from __future__ import division, print_function
import json
from collections import Iterable, OrderedDict, namedtuple
import numpy as np
from six import string_types
def isnamedtuple(obj):
"""Heuristic check if an object is a namedtuple."""
return isinstance(obj, tuple) \
and hasattr(obj, "_fields") \
and hasattr(obj, "_asdict") \
and callable(obj._asdict)
def serialize(data):
if data is None or isinstance(data, (bool, int, float, str, string_types)):
return data
if isinstance(data, list):
return [serialize(val) for val in data]
if isinstance(data, OrderedDict):
return {"py/collections.OrderedDict":
[[serialize(k), serialize(v)] for k, v in data.items()]}
if isnamedtuple(data):
return {"py/collections.namedtuple": {
"type": type(data).__name__,
|
"fields": list(data._fields),
"values": [serialize(getattr(data, f)) for f in data._fields]}}
if isinstance(data, dict):
if all(isinstance(k, str) for k in data):
return {k: serialize(v) for k, v in data.items()}
return {"py/dict": [[serialize(k), serialize(v)] for k, v in data.items()]}
if isinstance(data, tuple):
return {"py/tuple": [serialize(val) for val in data]}
if isinstance(data, set):
|
return {"py/set": [serialize(val) for val in data]}
if isinstance(data, np.ndarray):
return {"py/numpy.ndarray": {
"values": data.tolist(),
"dtype": str(data.dtype)}}
raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
if "py/dict" in dct:
return dict(dct["py/dict"])
if "py/tuple" in dct:
return tuple(dct["py/tuple"])
if "py/set" in dct:
return set(dct["py/set"])
if "py/collections.namedtuple" in dct:
data = dct["py/collections.namedtuple"]
return namedtuple(data["type"], data["fields"])(*data["values"])
if "py/numpy.ndarray" in dct:
data = dct["py/numpy.ndarray"]
return np.array(data["values"], dtype=data["dtype"])
if "py/collections.OrderedDict" in dct:
return OrderedDict(dct["py/collections.OrderedDict"])
return dct
def data_to_json(data):
return json.dumps(serialize(data))
def json_to_data(s):
return json.loads(s, object_hook=restore)
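# Illustrative round trip (usage assumed, not part of the original module):
#   Point = namedtuple('Point', ['x', 'y'])
#   s = data_to_json({'origin': Point(0, 1), 'grid': np.zeros(2)})
#   json_to_data(s)  # -> {'origin': Point(x=0, y=1), 'grid': array([0., 0.])}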
|
michelp/xodb
|
xodb/xaql.py
|
Python
|
mit
| 1,060 | 0.001887 |
import pyparsing
"""
The query language that won't die.
Syntax:
Typical search engine query language, terms with boolean operators
and parenthesized grouping:
(term AND (term OR term OR ...) AND NOT term ...)
In its simplest case, xaql searches for a list of terms:
term term term ...
This expands to '(term AND term AND term AND ...)'
Terms can be prefixed. The prefix and the value are separated by
colons. Values that contain spaces must be double quoted.
term color:blue name:"My Lagoon"
Functions:
|
Functions provide features that take query input from the user and
do some transformation on the query itself. Functions begin with
a star, then a name, then a pair of parenthesis that contain the
query input. The syntax of the input is up to the function:
      $xp(...) -- Pass the input string directly into the Xapian
|
query parser.
$rel(...)
$now
Prefix Modifiers:
These are pre-prefixes that transform the following term.
published-before:now
"""
|
cychenyin/sfproxy
|
py/timeutils.py
|
Python
|
apache-2.0
| 1,010 | 0.02521 |
# coding: utf-8
import time, datetime
# The accepted argument may be: 1. a timestamp string, 2. a datetime, or 3. None
def unixtime_fromtimestamp(dt = None):
if dt and isinstance(dt, datetime.datetime) : return int(time.mktime(dt.timetuple()))
elif dt and isinstance(dt, basestring) : return int(time.mktime(time.strptime(dt, '%Y-%m-%d %H:%M:%S')))
else: return int(time.mktime(datetime.datetime.now().timetuple()))
# value is the unix timestamp passed in (an integer), e.g. 1332888820
def timestamp_fromunixtime(value=None, format=None):
    return time.strftime(format if format else '%Y-%m-%d %H:%M:%S', time.localtime(value if value else unixtime_fromtimestamp()))
|
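# Worked example (illustrative): timestamp_fromunixtime(1332888820) formats the moment
# 2012-03-27 22:53:40 UTC; the exact string returned depends on the local timezone.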
def timestamp_fromunixtime2(value=None, format=None):
    return time.strftime(format if format else '%Y%m%d_%H%M%S', time.localtime(value if value else unixtime_fromtimestamp()))
|
if __name__ == "__main__":
# print unixtime_fromtimestamp()
print unixtime_fromtimestamp(datetime.datetime.today())
|
zdomjus60/astrometry
|
vsop87c/neptune.py
|
Python
|
cc0-1.0
| 232,946 | 0.003778 |
import math
class Neptune:
"""
NEPTUNE - VSOP87 Series Version C
HELIOCENTRIC DYNAMICAL ECLIPTIC AND EQUINOX OF THE DATE
Rectangular (X,Y,Z) Coordinates in AU (Astronomical Units)
Series Validity Span: 4000 BC < Date < 8000 AD
Theoretical accuracy over span: +-1 arc sec
R*R = X*X + Y*Y + Z*Z
t = (JD - 2451545) / 365250
C++ Programming Language
VSOP87 Functions Source Code
Generated By The VSOP87 Source Code Generator Tool
(c) Jay Tanner 2015
Ref:
Planetary Theories in Rectangular and Spherical Variables
VSOP87 Solutions
Pierre Bretagnon, Gerard Francou
Journal of Astronomy & Astrophysics
vol. 202, p309-p315
1988
Source code provided under the provisions of the
GNU General Public License (GPL), version 3.
http://www.gnu.org/licenses/gpl.html
"""
def __init__(self, t):
self.t = t
def calculate(self):
# Neptune_X0 (t) // 821 terms of order 0
X0 = 0
X0 += 30.05973100580 * math.cos(5.31188633083 + 38.3768531213 * self.t)
X0 += 0.40567587218 * math.cos(3.98149970131 + 0.2438174835 * self.t)
X0 += 0.13506026414 * math.cos(3.50055820972 + 76.50988875911 * self.t)
X0 += 0.15716341901 * math.cos(0.11310077968 + 36.892380413 * self.t)
X0 += 0.14935642614 * math.cos(1.08477702063 + 39.86132582961 * self.t)
X0 += 0.02590782232 * math.cos(1.99609768221 + 1.7282901918 * self.t)
X0 += 0.01073890204 * math.cos(5.38477153556 + 75.0254160508 * self.t)
X0 += 0.00816388197 * math.cos(0.78185518038 + 3.21276290011 * self.t)
X0 += 0.00702768075 * math.cos(1.45363642119 + 35.40790770471 * self.t)
X0 += 0.00687594822 * math.cos(0.72075739344 + 37.88921815429 * self.t)
X0 += 0.00565555652 * math.cos(5.98943773879 + 41.3457985379 * self.t)
X0 += 0.00495650075 * math.cos(0.59957534348 + 529.9347825781 * self.t)
X0 += 0.00306025380 * math.cos(0.39916788140 + 73.5409433425 * self.t)
X0 += 0.00272446904 * math.cos(0.87404115637 + 213.5429129215 * self.t)
X0 += 0.00135892298 * math.cos(5.54654979922 + 77.9943614674 * self.t)
X0 += 0.00122117697 * math.cos(1.30863876781 + 34.9202727377 * self.t)
X0 += 0.00090968285 * math.cos(1.68886748674 + 114.6429243969 * self.t)
X0 += 0.00068915400 * math.cos(5.83470374400 + 4.6972356084 * self.t)
X0 += 0.00040370680 * math.cos(2.66129691063 + 33.9234349964 * self.t)
X0 += 0.00028891307 * math.cos(4.78947715515 + 42.83027124621 * self.t)
X0 += 0.00029247752 * math.cos(1.62319522731 + 72.05647063421 * self.t)
X0 += 0.00025576289 * math.cos(1.48342967006 + 71.5688356672 * self.t)
X0 += 0.00020517968 * math.cos(2.55621077117 + 33.43580002939 * self.t)
X0 += 0.00012614154 * math.cos(3.56929744338 + 113.15845168861 * self.t)
X0 += 0.00012788929 * math.cos(2.73769634046 + 111.67397898031 * self.t)
X0 += 0.00012013477 * math.cos(0.94915799508 + 1059.6257476727 * self.t)
X0 += 0.00009854638 * math.cos(0.25713641240 + 36.404745446 * self.t)
X0 += 0.00008385825 * math.cos(1.65242210861 + 108.2173985967 * self.t)
X0 += 0.00007577585 * math.cos(0.09970777629 + 426.8420083595 * self.t)
X0 += 0.00006452053 * math.cos(4.62556526073 + 6.1817083167 * self.t)
X0 += 0.00006551074 * math.cos(1.91884050790 + 1.24065522479 * self.t)
X0 += 0.00004652534 * math.cos(0.10344003066 + 37.8555882595 * self.t)
X0 += 0.00004732958 * math.cos(4.09711900918 + 79.47883417571 * self.t)
X0 += 0.00004557247 * math.cos(1.09712661798 + 38.89811798311 * self.t)
X0 += 0.00004322550 * math.cos(2.37744779374 + 38.32866901151 * self.t)
X0 += 0.00004315539 * math.cos(5.10473140788 + 38.4250372311 * self.t)
X0 += 0.00004089036 * math.cos(1.99429063701 + 37.4136452748 * self.t)
X0 += 0.00004248658 * math.cos(5.63379709294 + 28.81562556571 * self.t)
X0 += 0.00004622142 * math.cos(2.73995451568 + 70.08436295889 * self.t)
X0 += 0.00003926447 * math.cos(5.48975060892 + 39.34006096781 * self.t)
X0 += 0.00003148422 * math.cos(5.18755364576 + 76.0222537921 * self.t)
X0 += 0.00003940981 * math.cos(2.29766376691 + 98.6561710411 * self.t)
X0 += 0.00003323363 * math.cos(4.68776245279 + 4.4366031775 * self.t)
X0 += 0.00003282964 * math.cos(2.81551282614 + 39.3736908626 * self.t)
X0 += 0.00003110464 * math.cos(1.84416897204 + 47.9380806769 * self.t)
X0 += 0.00002927062 * math.cos(2.83767313961 + 70.5719979259 * self.t)
X0 += 0.00002748919 * math.cos(3.86990252936 + 32.4389622881 * self.t)
X0 += 0.00003316668 * math.cos(1.82194084200 + 144.8659615262 * self.t)
X0 += 0.00002822405 * math.cos(3.78131048254 + 31.9513273211 * self.t)
X0 += 0.00002695972 * math.cos(3.85276301548 + 110.189506272 * self.t)
X0 += 0.00002522990 * math.cos(4.66308619966 + 311.9552664791 * self.t)
X0 += 0.00001888129 * math.cos(3.20464683230 + 35.9291725665 * self.t)
X0 += 0.00001648229 * math.cos(4.07040254381 + 30.300098274 * self.t)
X0 += 0.00001826545 * math.cos(3.58021128918 + 44.31474395451 * self.t)
X0 += 0.00001956241 * math.cos(4.14516146871 + 206.42936592071 * self.t)
X0 += 0.00001681257 * math.cos(4.27560127770 + 40.8245336761 * self.t)
X0 += 0.00001533383 * math.cos(1.17732213608 + 38.26497853671 * self.t)
X0 += 0.00001893076 * math.cos(0.75017402977 + 220.6564599223 * self.t)
X0 += 0.00001527526 * math.cos(0.02173638301 + 38.4887277059 * self.t)
X0 += 0.00002085691 * math.cos(1.56948272604 + 149.8070146181 * self.t)
X0 += 0.00002070612 * math.cos(2.82581806721 + 136.78920667889 * self.t)
X0 += 0.00001535699 * math.cos(0.61413315675 + 73.0533083755 * self.t)
X0 += 0.00001667976 * math.cos(2.91712458990 + 106.73292588839 * self.t)
X0 += 0.00001289620 * math.cos(3.39708861100 + 46.4536079686 * self.t)
X0 += 0.00001559811 * math.cos(0.55870841967 + 38.11622069041 * self.t)
X0 += 0.00001545705 * math.cos(0.64028776037 + 38.6374855522 * self.t)
X0 += 0.00001435033 * math.cos(0.72855949679 + 522.8212355773 * self.t)
X0 += 0.00001406206 * math.cos(3.61717027558 + 537.0483295789 * self.t)
X0 += 0.00001256446 * math.cos(2.70907758736 + 34.1840674273 * self.t)
X0 += 0.00001387973 * math.cos(3.71843398082 + 116.12739710521 * self.t)
X0 += 0.00001457739 * math.cos(1.98981635014 + 181.5145244557 * self.t)
X0 += 0.00001228429 * math.cos(2.78646343835 + 72.31710306511 * self.t)
X0 += 0.00001140665 * math.cos(3.96643713353 + 7.83293736379 * self.t)
X0 += 0.00001080801 * math.cos(4.75483465055 + 42.5696388153 * self.t)
X0 += 0.00001201409 * math.cos(0.74547986507 + 2.7251279331 * self.t)
X0 += 0.00001228671 * math.cos(2.65249731727 + 148.32254190981 * self.t)
X0 += 0.00000722014 * math.cos(6.16806714444 + 152.77596003471 * self.t)
X0 += 0.00000608545 * math.cos(4.49536985567 + 35.4560918145 * self.t)
X0 += 0.00000722865 * math.cos(3.09340262825 + 143.38148881789 * self.t)
X0 += 0.00000632820 * math.cos(3.41702130042 + 7.66618102501 * self.t)
X0 += 0.00000642369 * math.cos(3.97490787694 + 68.5998902506 * self.t)
X0 += 0.00000553789 * math.cos(2.98606728111 + 41.2976144281 * self.t)
X0 += 0.00000682276 * math.cos(2.15806346682 + 218.1630873852 * self.t)
X0 += 0.00000463186 * math.cos(2.74420554348 + 31.7845709823 * self.t)
X0 += 0.00000521560 * math.cos(0.34813640632 + 0.719390363 * self.t)
        X0 += 0.00000437892 * math.cos(1.29807722623 + 1589.3167127673 * self.t)
|
X0 += 0.00000398091 * math.cos(5.50783691510 + 6.3484646555 * self.t)
|
X0 += 0.00000384065 * math.cos(4.72632236146 + 44.96913526031 * self.t)
X0 += 0.00000395583 * math.cos(5.05527677390 + 108.70503356371 * self.t)
X0 += 0.00000327446 * math.cos(
|
tomkentpayne/knitify
|
source/knitify.py
|
Python
|
gpl-2.0
| 676 | 0.008889 |
''' Generate a pdf knitting pattern + chart from a bitmap image '''
__author__ = 'Thomas Payne'
__email__ = 'tomkentpayne@hotmail.com'
|
__copyright__ = 'Copyright © 2015 Thomas Payne'
|
__licence__ = 'GPL v2'
from argparse import ArgumentParser
from pattern import Pattern
def parse_sys_args():
''' Parse arguments for bitmap path '''
parser = ArgumentParser(description='Generate knitting pattern from a bitmap image')
parser.add_argument('bitmap' ,action='store')
return parser.parse_args()
if __name__ == '__main__':
args = parse_sys_args()
bitmap_path = args.bitmap
pattern = Pattern()
#Print arg for debug purposes
print(bitmap_path)
|
adafruit/Adafruit_Legolas
|
Adafruit_Legolas/commands/__init__.py
|
Python
|
mit
| 1,864 | 0.002682 |
# Commands submodule definition.
#
# Import all python files in the directory to simplify adding commands.
# Just drop a new command .py file in the directory and it will be picked up
# automatically.
#
# Author: Tony DiCola
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
# SOFTWARE.
import os
# Import all python files in the commands directory by setting them to the __all__
# global which tells python the modules to load. Grabs a list of all files in
# the directory and filters down to just the names (without .py extensions) of
# python files that don't start with '__' (which are module metadata that should
# be ignored.
__all__ = map(lambda x: x[:-3],
              filter(lambda x: not x.startswith('__') and x.lower().endswith('.py'),
|
os.listdir(__path__[0])))
|
shyamalschandra/scikit-learn
|
sklearn/linear_model/_bayes.py
|
Python
|
bsd-3-clause
| 26,416 | 0.000492 |
"""
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from ._base import LinearModel, _rescale_data
from ..base import RegressorMixin
from ._base import _deprecate_normalize
from ..utils.extmath import fast_logdet
from scipy.linalg import pinvh
from ..utils.validation import _check_sample_weight
###############################################################################
# BayesianRidge regression
class BayesianRidge(RegressorMixin, LinearModel):
"""Bayesian ridge regression.
Fit a Bayesian ridge model. See the Notes section for details on this
implementation and the optimization of the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations. Should be greater than or equal to 1.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
alpha_init : float, default=None
Initial value for alpha (precision of the noise).
If not set, alpha_init is 1/Var(y).
.. versionadded:: 0.22
lambda_init : float, default=None
Initial value for lambda (precision of the weights).
If not set, lambda_init is 1.
.. versionadded:: 0.22
compute_score : bool, default=False
If True, compute the log marginal likelihood at each iteration of the
optimization.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model.
The intercept is not treated as a probabilistic parameter
|
and thus has no associated variance. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
|
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated precision of the noise.
lambda_ : float
Estimated precision of the weights.
sigma_ : array-like of shape (n_features, n_features)
Estimated variance-covariance matrix of the weights
scores_ : array-like of shape (n_iter_+1,)
If computed_score is True, value of the log marginal likelihood (to be
maximized) at each iteration of the optimization. The array starts
with the value of the log marginal likelihood obtained for the initial
values of alpha and lambda and ends with the value obtained for the
estimated alpha and lambda.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
X_offset_ : float
If `normalize=True`, offset subtracted for centering data to a
zero mean.
X_scale_ : float
If `normalize=True`, parameter used to scale data to a unit
standard deviation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
ARDRegression : Bayesian ARD regression.
Notes
-----
There exist several strategies to perform Bayesian ridge regression. This
implementation is based on the algorithm described in Appendix A of
(Tipping, 2001) where updates of the regularization parameters are done as
suggested in (MacKay, 1992). Note that according to A New
View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
update rules do not guarantee that the marginal likelihood is increasing
between two consecutive iterations of the optimization.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
Journal of Machine Learning Research, Vol. 1, 2001.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
BayesianRidge()
>>> clf.predict([[1, 1]])
array([1.])
"""
def __init__(
self,
*,
n_iter=300,
tol=1.0e-3,
alpha_1=1.0e-6,
alpha_2=1.0e-6,
lambda_1=1.0e-6,
lambda_2=1.0e-6,
alpha_init=None,
lambda_init=None,
compute_score=False,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
verbose=False,
):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.alpha_init = alpha_init
self.lambda_init = lambda_init
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : object
Returns the instance itself.
"""
self._normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
if self.n_iter < 1:
raise ValueError(
"n_iter should be greater than or equal to 1. Got {!r}.".format(
self.n_iter
)
)
X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X,
y,
self.fit_intercept,
self._normalize,
self.copy_X,
sample_weight=sample_weight,
)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_
|
stonebig/bokeh
|
bokeh/colors/named.py
|
Python
|
bsd-3-clause
| 13,025 | 0.015432 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the standard 147 CSS (X11) named colors.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from .util import NamedColor
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
aliceblue = NamedColor("aliceblue", 240, 248, 255)
antiquewhite = NamedColor("antiquewhite", 250, 235, 215)
aqua = NamedColor("aqua", 0, 255, 255)
aquamarine = NamedColor("aquamarine", 127, 255, 212)
azure = NamedColor("azure", 240, 255, 255)
beige = NamedColor("beige", 245, 245, 220)
bisque = NamedColor("bisque", 255, 228, 196)
black = NamedColor("black", 0, 0, 0 )
blanchedalmond = NamedColor("blanchedalmond", 255, 235, 205)
blue = NamedColor("blue", 0, 0, 255)
blueviolet = NamedColor("blueviolet", 138, 43, 226)
brown = NamedColor("brown", 165, 42, 42 )
burlywood = NamedColor("burlywood", 222, 184, 135)
cadetblue = NamedColor("cadetblue", 95, 158, 160)
chartreuse = NamedColor("chartreuse", 127, 255, 0 )
chocolate = NamedColor("chocolate", 210, 105, 30 )
coral = NamedColor("coral", 255, 127, 80 )
cornflowerblue = NamedColor("cornflowerblue", 100, 149, 237)
cornsilk = NamedColor("cornsilk", 255, 248, 220)
crimson = NamedColor("crimson", 220, 20, 60 )
cyan = NamedColor("cyan", 0, 255, 255)
darkblue = NamedColor("darkblue", 0, 0, 139)
darkcyan = NamedColor("darkcyan", 0, 139, 139)
darkgoldenrod = NamedColor("darkgoldenrod", 184, 134, 11 )
darkgray = NamedColor("darkgray", 169, 169, 169)
darkgreen = NamedColor("darkgreen", 0, 100, 0 )
darkgrey = NamedColor("darkgrey", 169, 169, 169)
darkkhaki = NamedColor("darkkhaki", 189, 183, 107)
darkmagenta = NamedColor("darkmagenta", 139, 0, 139)
darkolivegreen = NamedColor("darkolivegreen", 85, 107, 47 )
darkorange = NamedColor("darkorange", 255, 140, 0 )
darkorchid = NamedColor("darkorchid", 153, 50, 204)
darkred = NamedColor("darkred", 139, 0, 0 )
darksalmon = NamedColor("darksalmon", 233, 150, 122)
darkseagreen = NamedColor("darkseagreen", 143, 188, 143)
darkslateblue = NamedColor("darkslateblue", 72, 61, 139)
darkslategray = NamedColor("darkslategray", 47, 79, 79 )
darkslategrey = NamedColor("darkslategrey", 47, 79, 79 )
darkturquoise = NamedColor("darkturquoise", 0, 206, 209)
darkviolet = NamedColor("darkviolet", 148, 0, 211)
deeppink = NamedColor("deeppink", 255, 20, 147)
deepskyblue = NamedColor("deepskyblue", 0, 191, 255)
dimgray = NamedColor("dimgray", 105, 105, 105)
dimgrey = NamedColor("dimgrey", 105, 105, 105)
dodgerblue = NamedColor("dodgerblue", 30, 144, 255)
firebrick = NamedColor("firebrick", 178, 34, 34 )
floralwhite = NamedColor("floralwhite", 255, 250, 240)
forestgreen = NamedColor("forestgreen", 34, 139, 34 )
fuchsia = NamedColor("fuchsia", 255, 0, 255)
gainsboro = NamedColor("gainsboro", 220, 220, 220)
ghostwhite = NamedColor("ghostwhite", 248, 248, 255)
gold = NamedColor("gold", 255, 215, 0 )
goldenrod = NamedColor("goldenrod", 218, 165, 32 )
gray = NamedColor("gray", 128, 128, 128)
green = NamedColor("green", 0, 128, 0 )
greenyellow = NamedColor("greenyellow", 173, 255, 47 )
grey = NamedColor("grey", 128, 128, 128)
honeydew = NamedColor("honeydew", 240, 255, 240)
hotpink = NamedColor("hotpink", 255, 105, 180)
indianred = NamedColor("indianred", 205, 92, 92 )
indigo = NamedColor("indigo", 75, 0, 130)
ivory = NamedColor("ivory", 255, 255, 240)
khaki = NamedColor("khaki", 240, 230, 140)
lavender = NamedColor("lavender", 230, 230, 250)
lavenderblush = NamedColor("lavenderblush", 255, 240, 245)
lawngreen = NamedColor("lawngreen", 124, 252, 0 )
lemonchiffon = NamedColor("lemonchiffon", 255, 250, 205)
lightblue = NamedColor("lightblue", 173, 216, 230)
lightcoral = NamedColor("lightcoral", 240, 128, 128)
lightcyan = NamedColor("lightcyan", 224, 255, 255)
lightgoldenrodyellow = NamedColor("lightgoldenrodyellow", 250, 250, 210)
lightgray = NamedColor("lightgray", 211, 211, 211)
lightgreen = NamedColor("lightgreen", 144, 238, 144)
|
lightgrey = NamedColor("lightgrey", 211, 211, 211)
lightpink = NamedColor("lightpink", 255, 182, 193)
|
lightsalmon = NamedColor("lightsalmon", 255, 160, 122)
lightseagreen = NamedColor("lightseagreen", 32, 178, 170)
lightskyblue = NamedColor("lightskyblue", 135, 206, 250)
lightslategray = NamedColor("lightslategray", 119, 136, 153)
lightslategrey = NamedColor("lightslategrey", 119, 136, 153)
lightsteelblue = NamedColor("lightsteelblue", 176, 196, 222)
lightyellow = NamedColor("lightyellow", 255, 255, 224)
lime = NamedColor("lime", 0, 255, 0 )
limegreen = NamedColor("limegreen", 50, 205, 50 )
linen = NamedColor("linen", 250, 240, 230)
magenta = NamedColor("magenta", 255, 0, 255)
maroon = NamedColor("maroon", 128, 0, 0 )
mediumaquamarine = NamedColor("mediumaquamarine", 102, 205, 170)
mediumblue = NamedColor("mediumblue", 0, 0, 205)
mediumorchid = NamedColor("mediumorchid", 186, 85, 211)
mediumpurple = NamedColor("mediumpurple", 147, 112, 219)
mediumseagreen = NamedColor("mediumseagreen", 60, 179, 113)
mediumslateblue = NamedColor("mediumslate
|
nidhididi/CloudBot
|
plugins/attacks.py
|
Python
|
gpl-3.0
| 3,577 | 0.00643 |
import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
@hook.on_start()
def load_attacks(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global larts, insults, flirts, kills, slaps, moms
with codecs.open(os.path.join(bot.data_dir, "larts.json"), encoding="utf-8") as f:
larts = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "flirts.json"), encoding="utf-8") as f:
flirts = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "moms.json"), encoding="utf-8") as f:
moms = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "kills.json"), encoding="utf-8") as f:
kills = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "slaps.json"), encoding="utf-8") as f:
slaps = json.load(f)
def is_self(conn, target):
"""
:type conn: cloudbot.client.Client
:type target: str
"""
if re.search("(^..?.?.?self|{})".format(re.escape(conn.nick.lower())), target.lower()):
return True
else:
return False
def get_attack_string(text, conn, nick, notice, attack_json, message):
"""
:type text: str
:type conn: cloudbot.client.Client
:type nick: str
"""
target = text.strip()
if " " in target:
notice("Invalid username!")
return None
# if the user is trying to make the bot target itself, target them
if is_self(conn, target):
target = nick
permission_manager = conn.permissions
if permission_manager.has_perm_nick(target, "unattackable"):
generator = textgen.TextGenerator(flirts["templates"], flirts["parts"], variables={"user": target})
message(generator.generate_string())
return None
else:
generator = textgen.TextGenerator(attack_json["templates"], attack_json["parts"], variables={"user": target})
return generator.generate_string()
@asyncio.coroutine
@hook.command()
def lart(text, conn, nick, notice, action, message):
"""<user> - LARTs <user>
:type text: str
:type conn: cloudbot.client.Client
:type nick: str
"""
phrase = get_attack_string(text, conn, nick, notice, larts, message)
if phrase is not None:
action(phrase)
@asyncio.coroutine
@hook.command()
def flirt(text, conn, nick, notice, action, message):
"""<user> - flirts with <user>
:type text: str
:type conn: cloudbot.client.Client
|
:type nick: str
"""
phrase = get_attack_string(text, conn, nick, notice, flirts, message)
if phrase is not None:
message(phrase)
@asyncio.coroutine
|
@hook.command()
def kill(text, conn, nick, notice, action, message):
"""<user> - kills <user>
:type text: str
:type conn: cloudbot.client.Client
:type nick: str
"""
phrase = get_attack_string(text, conn, nick, notice, kills, message)
if phrase is not None:
action(phrase)
@hook.command
def slap(text, nick, conn, notice, action, message):
"""slap <user> -- Makes the bot slap <user>."""
phrase = get_attack_string(text, conn, nick, notice, slaps, message)
if phrase is not None:
action(phrase)
@asyncio.coroutine
@hook.command()
def insult(text, conn, nick, notice, action, message):
"""<user> - insults <user>
:type text: str
:type conn: cloudbot.client.Client
:type nick: str
"""
phrase = get_attack_string(text, conn, nick, notice, moms, message)
if phrase is not None:
message(phrase)
|
rvanlaar/tactic-client
|
test/pipeline_test.py
|
Python
|
epl-1.0
| 3,879 | 0.006961 |
#!/usr/bin/python
###########################################################
#
# Copyright (c) 2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import unittest, sys
# import the client lib
sys.path.insert( 0, ".." )
from tactic_client_lib.interpreter import *
class PipelineTest(unittest.TestCase):
def test_all(my):
my.test_handler = 'tactic_client_lib.test.TestHandler'
my.pipeline_xml = '''
<pipeline>
<process name='model'>
<action class='%s'>
<test>pig</test>
<test2>cow</test2>
</action>
</process>
<process name='texture'>
<action class='%s'/>
</process>
<process name='rig'>
<action class='tactic_client_lib.test.TestNextProcessHandler'/>
</process>
<process name='extra'/>
<process name='extra2'/>
<connect from='model' to='texture'/>
<connect from='model' to='rig'/>
<connect from='rig' to='publish'/>
</pipeline>
''' % (my.test_handler, my.test_handler)
my.pipeline = Pipeline(my.pipeline_xml)
my._test_pipeline()
my._test_interpreter()
def _test_pipeline(my):
# get the output names
output_names = my.pipeline.get_output_process_names('model')
my.assertEquals( ['texture', 'rig'], output_names )
# get the input names
input_names = my.pipeline.get_input_process_names('texture')
my.assertEquals( ['model'], input_names)
# get the handler class of model
handler_class = my.pipeline.get_handler_class('model')
my.assertEquals( my.test_handler, handler_class)
# small test running through pipeline
process = my.pipeline.get_first_process_name()
my.assertEquals( 'model', process)
def _test_interpreter(my):
# create a package to be delivered to each handler
package = {
            'company': 'Acme',
|
'city': 'Toronto',
'context': 'whatever'
}
# use client api
        from tactic_client_lib import TacticServerStub
|
server = TacticServerStub()
interpreter = PipelineInterpreter(my.pipeline_xml)
interpreter.set_server(server)
interpreter.set_package(package)
interpreter.execute()
# introspect the interpreter to see if everything ran well
handlers = interpreter.get_handlers()
process_names = [x.get_process_name() for x in handlers]
expected = ['model', 'texture', 'rig', 'extra1', 'extra2']
my.assertEquals( expected, process_names )
# make sure all the handlers completed
my.assertEquals( 5, len(handlers) )
for handler in handlers:
my.assertEquals( "complete", handler.get_status() )
# check that the package is delivered to the input
my.assertEquals("Acme", handler.get_input_value('company') )
my.assertEquals("Toronto", handler.get_input_value('city') )
process_name = handler.get_process_name()
if process_name == 'model':
my.assertEquals("Acme", handler.company)
my.assertEquals("pig", handler.get_option_value('test') )
my.assertEquals("cow", handler.get_option_value('test2') )
            # ensure input settings propagate
if process_name == 'extra1':
my.assertEquals("test.txt", handler.get_output_value('file'))
my.assertEquals("Acme", handler.get_package_value('company'))
if __name__ == "__main__":
unittest.main()
|
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
|
mail/formspree-master/formspree/users/helpers.py
|
Python
|
mit
| 231 | 0.008658 |
from werkzeug.security import generate_password_hash, check_password_hash
|
def hash_pwd(password):
    return generate_password_hash(password)
|
def check_password(hashed, password):
return check_password_hash(hashed, password)
|
maphy-psd/python-webuntis
|
tests/utils/test_remote.py
|
Python
|
bsd-3-clause
| 1,228 | 0 |
import webuntis
import mock
from webuntis.utils.third_party import json
from .. import WebUntisTestCase, BytesIO
class BasicUsage(WebUntisTestCase):
def test_parse_result(self):
x = webuntis.utils.remote._parse_result
a = {'id': 2}
b = {'id': 3}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'Request ID', x, a, b)
a = b = {'id': 2}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'no information', x, a, b)
a = {'id': 2}
|
b = {'id': 2, 'result': 'YESSIR'}
assert x(a, b) == 'YESSIR'
def test_parse_error_code(self):
x = webuntis.utils.remote._parse_error_code
a = b = {}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'no information', x, a, b)
b = {'error': {'code': 0, 'message': 'hello world'}}
self.assertRaisesRegex(webuntis.errors.RemoteError,
                               'hello world', x, a, b)
|
for code, exc in webuntis.utils.remote._errorcodes.items():
self.assertRaises(exc, x, a, {
'error': {'code': code, 'message': 'hello'}
})
|
DanialLiu/SkiaWin32Port
|
third_party/externals/gyp/pylib/gyp/generator/ninja.py
|
Python
|
bsd-3-clause
| 74,180 | 0.006794 |
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSVersion
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
|
    # some reason. Octal-encode to work around that.
|
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
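# Illustrative (not from the original source): Define('V=1.2', 'linux') returns '-DV=1.2';
# values containing characters outside the whitelisted set are shell-quoted first.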
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, flavor, toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja
|
cnewcome/sos
|
sos/plugins/zfs.py
|
Python
|
gpl-2.0
| 1,124 | 0 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, UbuntuPlugin, DebianPlugin
class Zfs(Plugin, UbuntuPlugin, DebianPlugin):
"""ZFS filesystem
"""
plugin_name = 'zfs'
profiles = ('storage',)
    packages = ('zfsutils-linux',)
|
def setup(self):
self.add_cmd_output([
"zfs get all",
"zfs list -t all -o space",
"zpool list",
"zpool status -x"
])
# vim: set et ts=4 sw=4 :
|
|
dimagi/commcare-android
|
images/dpi_manager.py
|
Python
|
apache-2.0
| 4,399 | 0.000227 |
import os
from PIL import Image
import numpy
from utils import ensure_dir
DRAWABLE = 'drawable'
class ImageTypeException(Exception):
pass
class Density(object):
LDPI = 'ldpi'
MDPI = 'mdpi'
HDPI = 'hdpi'
XHDPI = 'xhdpi'
XXHDPI = 'xxhdpi'
XXXHDPI = 'xxxhdpi'
RATIOS = {
LDPI: 3,
MDPI: 4,
HDPI: 6,
XHDPI: 8,
XXHDPI: 12,
XXXHDPI: 16,
}
ORDER = [LDPI, MDPI, HDPI, XHDPI, XXHDPI, XXXHDPI]
class ImageType(object):
PNG = 'png'
PNG_9_BIT = '9.png'
SVG = 'svg'
    # haven't fleshed out SVG support yet, or scaling 9-bits
SUPPORTED_TYPES = [PNG]
@classmethod
def is_supported_type(cls, image_type):
return image_type in cls.SUPPORTED_TYPES
class ImageSpec(object):
def __init__(self, src):
self.filename = src['filename']
self.source_dpi = src['source_dpi']
self.other_scaling = src.get('other_scaling', {})
self.excluded_dpis = src.get('excluded_dpis', [])
# Determine Image Type by filename
extension = self.filename.split('.', 1)[1]
if not ImageType.is_supported_type(extension):
raise ImageTypeException(
'The image type %(ext)s is not yet supported.' % {
'ext': extension,
})
class DPIManager(object):
def __init__(self, spec_src, source_folder, target_folder):
"""The DPIManager handles all the scaling of an image according to its
spec and ImageType.
:param spec_src:
:param source_folder:
:return:
"""
self.source_folder = source_folder
self.target_folder = target_folder
self.spec = ImageSpec(spec_src)
src_dpi_index = Density.ORDER.index(self.spec.source_dpi) + 1
target_dpis = set(Density.ORDER[:src_dpi_index])
self.target_dpis = list(target_dpis.difference(self.spec.excluded_dpis))
self.scaling_ratios = self.get_scaling_ratios()
def get_scaling_ratios(self):
src_scale = Density.RATIOS[self.spec.source_dpi]
scaling = {}
for dpi in self.target_dpis:
scaling[dpi] = Density.RATIOS[dpi] / float(src_scale)
return scaling
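    # Illustrative (values assumed): with source_dpi='xhdpi' (ratio 8) and targets
    # ['mdpi', 'hdpi'], get_scaling_ratios() returns {'mdpi': 0.5, 'hdpi': 0.75}.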
def update_resources(self):
src_path = os.path.join(self.source_folder, self.spec.filename)
src_img = Image.open(src_path)
# Premult alpha resizing, to avoid halo effect
# http://stackoverflow.com/questions/9142825/transparent-png-resizing-with-python-image-library-
|
and-the-halo-effect
premult = numpy.fromstring(src_img.tobytes(), dtype=numpy.uint8)
alphaLayer = premult[3::4] / 255.0
premult[::4] *= alphaLayer
premult[1::4] *= alphaLayer
|
premult[2::4] *= alphaLayer
src_img = Image.frombytes("RGBA", src_img.size, premult.tobytes())
# save original image to drawables
default_dir = os.path.join(self.target_folder, DRAWABLE)
ensure_dir(default_dir)
default_path = os.path.join(default_dir, self.spec.filename)
src_img.save(default_path)
print "save to", default_path
src_width, src_height = src_img.size
for dpi in self.target_dpis:
ratio = self.scaling_ratios[dpi]
dpi_width = int(round(src_width * ratio, 0))
dpi_height = int(round(src_height * ratio, 0))
print "scale image %(from_dims)s --> %(to_dims)s" % {
'from_dims': "%d x %d" % (src_width, src_height),
'to_dims': "%d x %d" % (dpi_width, dpi_height),
}
dpi_dir = os.path.join(
self.target_folder, '%s-%s' % (DRAWABLE, dpi)
)
ensure_dir(dpi_dir)
dpi_path = os.path.join(dpi_dir, self.spec.filename)
src_img.resize((dpi_width, dpi_height), Image.ANTIALIAS).save(
dpi_path
)
print "save to", dpi_path
for label, size in self.spec.other_scaling.items():
scale_dir = os.path.join(
self.target_folder, '%s-%s' % (DRAWABLE, label)
)
ensure_dir(scale_dir)
scale_path = os.path.join(scale_dir, self.spec.filename)
src_img.resize((size[0], size[1]), Image.ANTIALIAS).save(
scale_path
)
print "save to", scale_path
|
strawlab/pyopy
|
pyopy/hctsa/hctsa_data.py
|
Python
|
bsd-3-clause
| 540 | 0 |
# coding=utf-8
|
"""HCTSA test time series."""
import os.path as op
import numpy as np
from pyopy.hctsa.hctsa_config import HCTSA_TESTTS_DIR
def hctsa_sine():
return np.loadtxt(op.join(HCTSA_TESTTS_DIR, 'SY_sine.dat'))
def hctsa_noise():
return np.loadtxt(op.join(HCTSA_TESTTS_DIR, 'SY_noise.dat'))
def hctsa_noisysinusoid():
return np.loadtxt(op.join(HCTSA_TESTTS_DIR, 'SY_noisysinusoid.dat'))
HCTSA_TEST_TIME_SERIES = (
('sine', hctsa_sine),
('noise', hctsa_noise),
    ('noisysinusoid', hctsa_noisysinusoid),
|
)
|
vjpai/grpc
|
tools/run_tests/sanity/check_port_platform.py
|
Python
|
apache-2.0
| 2,915 | 0.000686 |
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
|
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
|
def check_port_platform_inclusion(directory_root):
bad_files = []
for root, dirs, files in os.walk(directory_root):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[1] not in ['.c', '.cc', '.h']:
continue
if path in [
os.path.join('include', 'grpc', 'support',
'port_platform.h'),
os.path.join('include', 'grpc', 'impl', 'codegen',
'port_platform.h'),
]:
continue
if filename.endswith('.pb.h') or filename.endswith('.pb.c'):
continue
# Skip check for upb generated code.
if (filename.endswith('.upb.h') or filename.endswith('.upb.c') or
filename.endswith('.upbdefs.h') or
filename.endswith('.upbdefs.c')):
continue
with open(path) as f:
all_lines_in_file = f.readlines()
for index, l in enumerate(all_lines_in_file):
if '#include' in l:
if l not in [
'#include <grpc/support/port_platform.h>\n',
'#include <grpc/impl/codegen/port_platform.h>\n'
]:
bad_files.append(path)
elif all_lines_in_file[index + 1] != '\n':
                        # Require a blank line after including port_platform.h in
                        # order to prevent the formatter from reordering its
                        # inclusion order upon future changes.
bad_files.append(path)
break
return bad_files
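# A conforming file therefore starts like this (illustrative):
#
#   #include <grpc/support/port_platform.h>
#
#   #include <any/other/header.h>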
all_bad_files = []
all_bad_files += check_port_platform_inclusion(os.path.join('src', 'core'))
all_bad_files += check_port_platform_inclusion(os.path.join('include', 'grpc'))
if len(all_bad_files) > 0:
for f in all_bad_files:
print((('port_platform.h is not the first included header or there '
'is not a blank line following its inclusion in %s') % f))
sys.exit(1)
|
sublee/lets
|
lets/alarm.py
|
Python
|
bsd-3-clause
| 3,197 | 0.000313 |
# -*- coding: utf-8 -*-
"""
lets.alarm
~~~~~~~~~~
An event which is awoken up based on time.
:copyright: (c) 2013-2018 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
import operator
import time as time_
from gevent import get_hub, Timeout
from gevent.event import Event
__all__ = ['Alarm', 'Earliest', 'Latest']
class _Alarm(object):
"""A :class:`gevent.event.AsyncResult`-like class to wait until the final
set time.
"""
__slots__ = ('time', 'value', 'timer', 'event')
#: Implement it to decide to reschedule the awaking time. It's a function
#: which takes 2 time arguments. The first argument is new time to set,
#: and the second argument is the previously accepted time. Both arguments
#: are never ``None``.
accept = NotImplemented
def __init__(self):
self.time = self.value = self.timer = None
self.event = Event()
def set(self, time, value=None):
"""Sets the time to awake up. If the time is not accepted, will be
ignored and it returns ``False``. Otherwise, returns ``True``.
"""
if time is None:
raise TypeError('use clear() instead of setting none time')
elif self.time is not None and not self.accept(time, self.time):
# Not accepted.
return False
self._reset(time, value)
delay = time - time_.time()
if delay > 0:
# Set timer to wake up.
self.timer = get_hub().loop.timer(delay)
self.timer.start(self.event.set)
else:
# Wake up immediately.
self.event.set()
return True
def when(self):
"""When it will be awoken or ``None``."""
return self.time
def ready(self):
"""Whether it has been awoken."""
return self.event.ready()
def wait(self, timeout=None):
"""Waits until the awaking time. It returns the time."""
if self.event.wait(timeout):
return self.time
def get(self, block=True, timeout=None):
"""Waits
|
until and gets the awaking time and the value."""
if not block and not self.ready():
raise Timeout
if self.event.wait(timeout):
return self.time, self.value
raise Timeout(timeout)
def clear(self):
"""Discards the schedule for awaking."""
self.event.clear()
self._reset(None, None)
def _reset(self, time, value):
self.time = time
self.value = value
if self.timer is not None:
self.timer.stop()
def __nonzero__(self):
return self.time is not None
class Alarm(_Alarm):
"""An alarm which accepts any time. Its awaking time will always be reset.
"""
accept = lambda x, y: True
class Earliest(_Alarm):
"""An alarm which accepts only the earliest time among many times that've
been set. Earlier time means that smaller timestamp.
"""
accept = operator.lt
class Latest(_Alarm):
"""An alarm which accepts only the latest time among many times that've
been set. Later time means that bigger timestamp.
"""
accept = operator.gt
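# Usage sketch (illustrative, not part of the original module): a Latest alarm
# keeps accepting later wake-up times; get() then blocks the calling greenlet
# until that final time and returns the (time, value) pair that won.
if __name__ == '__main__':
    alarm = Latest()
    now = time_.time()
    alarm.set(now + 0.1, 'first')
    alarm.set(now + 0.3, 'second')  # later, so it replaces the schedule
    woke_at, value = alarm.get()    # blocks about 0.3 s under gevent
    print(value)                    # -> 'second'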
|
pblottiere/QGIS
|
tests/src/python/test_qgsdefaultvalue.py
|
Python
|
gpl-2.0
| 1,300 | 0 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDefaultValue.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '26.9.2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsDefaultValue)
from qgis.testing import unittest
class TestQgsRasterColorRampShader(unittest.TestCase):
def testValid(self):
self.assertFalse(QgsDefaultValue())
self.assertTrue(QgsDefaultValue('test'))
self.assertTrue(QgsDefaultValue('abc', True))
self.assertTrue(QgsDefaultValue('abc', False))
def setGetExpression(self):
value = QgsDefaultValue('abc', False)
self.assertEqual(value.expression(), 'abc')
value.setExpression('def')
self.assertEqual(value.expression(), 'def')
def setGetApplyOnUpdate(self):
value = QgsDefaultValue('abc', False)
self.assertEqual(value.applyOnUpdate(), False)
value.setApplyOnUpdate(True)
        self.assertEqual(value.applyOnUpdate(), True)
if __name__ == '__main__':
unittest.main()
|
icyflame/batman
|
scripts/noreferences.py
|
Python
|
mit
| 25,133 | 0.000331 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script adds a missing references section to pages.
It goes over multiple pages, searches for pages where <references />
is missing although a <ref> tag is present, and in that case adds a new
references section.
These command line parameters can be used to specify which pages to work on:
¶ms;
-xml Retrieve information from a local XML dump (pages-articles
or pages-meta-current, see https://download.wikimedia.org).
Argument can also be given as "-xml:filename".
-namespace:n Number or name of namespace to process. The parameter can be
used multiple times. It works in combination with all other
parameters, except for the -start parameter. If you e.g.
want to iterate over all categories starting at M, use
-start:Category:M.
-always Don't prompt you for each replacement.
-quiet Use this option to get less output
If neither a page title nor a page generator is given, it takes all pages from
the default maintenance category.
It is strongly recommended not to run this script over the entire article
namespace (using the -start) parameter, as that would consume too much
bandwidth. Instead, use the -xml parameter, or use another way to generate
a list of affected articles
"""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
import pywikibot
from pywikibot import i18n, pagegenerators, textlib, Bot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
# References sections are usually placed before further reading / external
# link sections. This dictionary defines these sections, sorted by priority.
# For example, on an English wiki, the script would place the "References"
# section in front of the "Further reading" section, if that existed.
# Otherwise, it would try to put it in front of the "External links" section,
# or if that fails, the "See also" section, etc.
placeBeforeSections = {
'ar': [ # no explicit policy on where to put the references
u'وصلات خارجية',
u'انظر أيضا',
u'ملاحظات'
],
'ca': [
u'Bibliografia',
u'Bibliografia complementària',
u'Vegeu també',
u'Enllaços externs',
u'Enllaços',
],
'cs': [
u'Externí odkazy',
u'Poznámky',
],
'da': [ # no explicit policy on where to put the references
u'Eksterne links'
],
'de': [ # no explicit policy on where to put the references
u'Literatur',
u'Weblinks',
u'Siehe auch',
u'Weblink', # bad, but common singular form of Weblinks
],
'dsb': [
u'Nožki',
],
'en': [ # no explicit policy on where to put the references
u'Further reading',
u'External links',
u'See also',
u'Notes'
],
'ru': [
u'Ссылки',
u'Литература',
],
'eo': [
u'Eksteraj ligiloj',
u'Ekstera ligilo',
u'Eksteraj ligoj',
u'Ekstera ligo',
u'Rete'
],
'es': [
u'Enlaces externos',
u'Véase también',
u'Notas',
],
'fa': [
u'پیوند به بیرون',
u'پانویس',
u'جستارهای وابسته'
],
'fi': [
u'Kirjallisuutta',
u'Aiheesta muualla',
u'Ulkoiset linkit',
u'Linkkejä',
],
'fr': [
u'Liens externes',
u'Voir aussi',
u'Notes'
],
    'he': [
u'ראו גם',
u'לקריאה נוספת',
u'קישורים חיצוניים',
u'הערות שוליים',
],
'hsb': [
u'Nóžki',
],
'hu': [
u'Külső hivatkozások',
u'Lásd még',
],
'it': [
u'Bibliografia',
u'Voci correlate',
u'Altri progetti',
u'Collegamenti esterni',
u'Vedi anche',
],
'ja': [
u'関連項目',
u'参考文献',
u'外部リンク',
],
    'ko': [ # no explicit policy on where to put the references
u'외부 링크',
u'외부링크',
u'바깥 고리',
u'바깥고리',
u'바깥 링크',
u'바깥링크'
u'외부 고리',
u'외부고리'
],
'lt': [ # no explicit policy on where to put the references
u'Nuorodos'
],
'nl': [ # no explicit policy on where to put the references
u'Literatuur',
u'Zie ook',
u'Externe verwijzingen',
u'Externe verwijzing',
],
'pdc': [
u'Beweisunge',
u'Quelle unn Literatur',
u'Gwelle',
u'Gwuelle',
u'Auswenniche Gleecher',
u'Gewebbgleecher',
u'Guckt mol aa',
u'Seh aa',
],
'pl': [
u'Źródła',
u'Bibliografia',
u'Zobacz też',
u'Linki zewnętrzne',
],
'pt': [
u'Ligações externas',
u'Veja também',
u'Ver também',
u'Notas',
],
'sk': [
u'Pozri aj',
],
'szl': [
u'Przipisy',
u'Připisy',
],
'th': [
u'อ่านเพิ่มเติม',
u'แหล่งข้อมูลอื่น',
u'ดูเพิ่ม',
u'หมายเหตุ',
],
'zh': [
u'外部链接',
u'外部連结',
u'外部連結',
u'外部连接',
],
}
# Titles of sections where a reference tag would fit into.
# The first title should be the preferred one: It's the one that
# will be used when a new section has to be created.
referencesSections = {
'ar': [ # not sure about which ones are preferred.
u'مراجع',
u'المراجع',
u'مصادر',
u'المصادر',
u'مراجع ومصادر',
u'مصادر ومراجع',
u'المراجع والمصادر',
u'المصادر والمراجع',
],
'ca': [
u'Referències',
],
'cs': [
u'Reference',
u'Poznámky',
],
'da': [
u'Noter',
],
'de': [ # see [[de:WP:REF]]
u'Einzelnachweise',
u'Anmerkungen',
u'Belege',
u'Endnoten',
u'Fußnoten',
u'Fuß-/Endnoten',
u'Quellen',
u'Quellenangaben',
],
'dsb': [
u'Nožki',
],
'en': [ # not sure about which ones are preferred.
u'References',
u'Footnotes',
u'Notes',
],
'ru': [
u'Примечания',
u'Сноски',
u'Источники',
],
'eo': [
u'Referencoj',
],
'es': [
u'Referencias',
u'Notas',
],
'fa': [
u'منابع',
u'منبع'
],
'fi': [
u'Lähteet',
u'Viitteet',
],
'fr': [ # [[fr:Aide:Note]]
u'Notes et références',
u'Références',
u'References',
u'Notes'
],
'he': [
u'הערות שוליים',
],
'hsb': [
u'Nóžki',
],
'hu': [
u'Források és jegyzetek',
u'Források',
u'Jegyzetek',
u'Hivatkozások',
u'Megjegyzések',
],
'is': [
u'Heimildir',
u'Tilvísanir',
],
'it': [
u'Note',
u'Riferimenti',
],
'ja': [
u'脚注',
u'脚注欄',
u'脚注・出典',
u'出典',
u'注釈',
u'註',
],
'ko': [
u'주석',
u'각주'
u'주석 및 참고 자료'
u'주석 및 참고자료',
u'주석 및 참고 출처'
],
'lt': [ # not sure about which ones are preferred.
u'Šaltiniai',
u'Literatūra',
],
'nl': [ # not sure about which ones are preferred.
u'Voetnoten',
u'Voetnoot',
u'Referenties',
u'Noten',
u'Bronvermelding',
],
'pdc': [
u'Aamarrickunge',
],
'pl': [
u'Przypisy',
u'Uwagi',
],
'pt': [
u'Referências',
],
'sk': [
u'Referencie',
],
'szl': [
u'Przipisy',
u'Připisy',
],
'th': [
u'อ้างอิง',
u'เชิงอรรถ',
u'หมายเหตุ',
]
|
kshedstrom/pyroms
|
examples/cobalt-preproc/Clim_bio/make_clim_file_bio_addons.py
|
Python
|
bsd-3-clause
| 4,460 | 0.01009 |
import subprocess
import os
import sys
import commands
import numpy as np
import pyroms
import pyroms_toolbox
from remap_bio_woa import remap_bio_woa
from remap_bio_glodap import remap_bio_glodap
data_dir_woa = '/archive/u1/uaf/kate/COBALT/'
data_dir_glodap = '/archive/u1/uaf/kate/COBALT/'
dst_dir='./'
src_grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/kate/COBALT/GFDL_CM2.1_grid.nc', name='ESM2M_NWGOA3')
dst_grd = pyroms.grid.get_ROMS_grid('NWGOA3')
# define all tracer stuff
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz']
tracer_longname = ['Alkalinity', 'Detrital CaCO3', 'Detrital CaCO3', 'Dissolved Inorganic Carbon', 'Dissolved Iron', 'Detrital Iron', 'Diazotroph Iron', 'Large Phytoplankton Iron', 'Small Phytoplankton Iron', 'labile DON', 'labile DOP', 'Lithogenic Aluminosilicate', 'lithdet', 'bacterial', 'ndet', 'Diazotroph Nitrogen', 'Large Phytoplankton Nitrogen', 'Small Phytoplankton Nitrogen', 'Ammonia', 'Nitrate', 'Oxygen', 'Detrital Phosphorus', 'Phosphate', 'Semi-Refractory DON', 'Semi-Refractory DOP', 'Semilabile DON', 'Semilabile DOP', 'Detrital Silicon', 'Large Phytoplankton Silicon', 'Silicate', 'Small Zooplankton Nitrogen', 'Medium-sized zooplankton Nitrogen', 'large Zooplankton Nitrogen']
tracer_units = ['mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'g/kg', 'g/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg']
#------- WOA13 ---------------------------------
id_tracer_update_woa = [19,20,22,29]
list_tracer_update_woa = []
tracer_longname_update_woa = []
tracer_units_update_woa = []
for idtra in id_tracer_update_woa:
print list_tracer[idtra]
for idtra in id_tracer_update_woa:
# add to tracer update
list_tracer_update_woa.append(list_tracer[idtra])
tracer_longname_update_woa.append(tracer_longname[idtra])
tracer_units_update_woa.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_woa)):
ctra = list_tracer_update_woa[ktr]
if ctra == 'sio4':
ctra = 'si'
mydict = {'tracer':list_tracer_update_woa[ktr],'longname':tracer_longname_update_woa[ktr],'units':tracer_units_update_woa[ktr],'file':data_dir_woa + ctra + '_WOA13-CM2.1_monthly.nc', \
'frame':mm}
remap_bio_woa(mydict, src_grd, dst_grd, dst_dir=dst_dir)
        out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_woa[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
#--------- GLODAP -------------------------------
id_tracer_update_glodap = [0,3]
list_tracer_update_glodap = []
tracer_longname_update_glodap = []
tracer_units_update_glodap = []
for idtra in id_tracer_update_glodap:
print list_tracer[idtra]
for idtra in id_tracer_update_glodap:
# add to tracer update
list_tracer_update_glodap.append(list_tracer[idtra])
tracer_longname_update_glodap.append(tracer_longname[idtra])
tracer_units_update_glodap.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_glodap)):
ctra = list_tracer_update_glodap[ktr]
mydict = {'tracer':list_tracer_update_glodap[ktr],'longname':tracer_longname_update_glodap[ktr],'units':tracer_units_update_glodap[ktr],'file':data_dir_glodap + ctra + '_GLODAP-ESM2M_annual.nc', \
'frame':mm}
remap_bio_glodap(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_glodap[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
|
jiivan/python-oauth2
|
oauth2/__init__.py
|
Python
|
mit
| 25,600 | 0.003984 |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
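# Usage sketch (illustrative; the key/secret values are placeholders): a
# consumer is just its registered credentials, and str() serializes them in
# urlencoded form.
#   consumer = Consumer(key='my-app-key', secret='my-app-secret')
#   print str(consumer)  # oauth_consumer_secret=...&oauth_consumer_key=...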
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized b
|
dokipen/trac
|
tracopt/perm/authz_policy.py
|
Python
|
bsd-3-clause
| 8,731 | 0.002978 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# Copyright (C) 2007 Alec Thomas <alec@swapoff.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Alec Thomas <alec@swapoff.org>
from fnmatch import fnmatch
from itertools import groupby
import os
from trac.core import *
from trac.config import Option
from trac.perm import PermissionSystem, IPermissionPolicy
ConfigObj = None
try:
from configobj import ConfigObj
except ImportError:
pass
class AuthzPolicy(Component):
"""Permission policy using an authz-like configuration file.
Refer to SVN documentation for syntax of the authz file. Groups are
supported.
As the fine-grained permissions brought by this permission policy are
often used in complement of the other pemission policies (like the
`DefaultPermissionPolicy`), there's no need to redefine all the
permissions here. Only additional rights or restrictions should be added.
=== Installation ===
Note that this plugin requires the `configobj` package:
http://www.voidspace.org.uk/python/configobj.html
You should be able to install it by doing a simple `easy_install configobj`
Enabling this policy requires listing it in `trac.ini:
{{{
[trac]
permission_policies = AuthzPolicy, DefaultPermissionPolicy
[authz_policy]
authz_file = conf/authzpolicy.conf
}}}
This means that the `AuthzPolicy` permissions will be checked first, and
only if no rule is found will the `DefaultPermissionPolicy` be used.
=== Configuration ===
The `authzpolicy.conf` file is a `.ini` style configuration file.
- Each section of the config is a glob pattern used to match against a
Trac resource descriptor. These descriptors are in the form:
{{{
<realm>:<id>@<version>[/<realm>:<id>@<version> ...]
}}}
Resources are ordered left to right, from parent to child. If any
component is inapplicable, `*` is substituted. If the version pattern is
not specified explicitely, all versions (`@*`) is added implicitly
Example: Match the WikiStart page
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart]
}}}
Example: Match the attachment `wiki:WikiStart@117/attachment/FOO.JPG@*`
on WikiStart
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart@*/attachment/*]
[wiki:WikiStart@117/attachment/FOO.JPG]
}}}
- Sections are checked against the current Trac resource '''IN ORDER''' of
appearance in the configuration file. '''ORDER IS CRITICAL'''.
- Once a section matches, the current username is matched, '''IN ORDER''',
against the keys of the section. If a key is prefixed with a `@`, it is
treated as a group. If a key is prefixed with a `!`, the permission is
denied rather than granted. The username will match any of 'anonymous',
'authenticated', <username> or '*', using normal Trac permission rules.
Example configuration:
{{{
[groups]
administrators = athomas
[*/attachment:*]
* = WIKI_VIEW, TICKET_VIEW
[wiki:WikiStart@*]
@administrators = WIKI_ADMIN
anonymous = WIKI_VIEW
* = WIKI_VIEW
# Deny access to page templates
[wiki:PageTemplates/*]
* =
# Match everything else
[*]
@administrators = TRAC_ADMIN
anonymous = BROWSER_VIEW, CHANGESET_VIEW, FILE_VIEW, LOG_VIEW,
MILESTONE_VIEW, POLL_VIEW, REPORT_SQL_VIEW, REPORT_VIEW, ROADMAP_VIEW,
SEARCH_VIEW, TICKET_CREATE, TICKET_MODIFY, TICKET_VIEW, TIMELINE_VIEW,
WIKI_CREATE, WIKI_MODIFY, WIKI_VIEW
# Give authenticated users some extra permissions
authenticated = REPO_SEARCH, XML_RPC
}}}
"""
implements(IPermissionPolicy)
authz_file = Option('authz_policy', 'authz_file', None,
'Location of authz policy configuration file.')
authz = None
authz_mtime = None
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
if ConfigObj is None:
self.log.error('configobj package not found')
return None
if self.authz_file and not self.authz_mtime or \
os.path.getmtime(self.get_authz_file()) > self.authz_mtime:
self.parse_authz()
resource_key = self.normalise_resource(resource)
self.log.debug('Checking %s on %s', action, resource_key)
permissions = self.authz_permissions(resource_key, username)
if permissions is None:
return None # no match, can't decide
elif permissions == ['']:
return False # all actions are denied
# FIXME: expand all permissions once for all
ps = PermissionSystem(self.env)
for deny, perms in groupby(permissions,
key=lambda p: p.startswith('!')):
if deny and action in ps.expand_actions([p[1:] for p in perms]):
return False # action is explicitly denied
elif action in ps.expand_actions(perms):
return True # action is explicitly granted
return None # no match for action, can't decide
# Internal methods
def get_authz_file(self):
f = self.authz_file
return os.path.isabs(f) and f or os.path.join(self.env.path, f)
def parse_authz(self):
self.env.log.debug('Parsing authz security policy %s' %
self.get_authz_file())
self.authz = ConfigObj(self.get_authz_file())
self.groups_by_user = {}
for group, users in self.authz.get('groups', {}).iteritems():
if isinstance(users, basestring):
users = [users]
for user in users:
self.groups_by_user.setdefault(user, set()).add('@' + group)
self.authz_mtime = os.path.getmtime(self.get_authz_file())
def normalise_resource(self, resource):
def flatten(resource):
if not resource or not (resource.realm or resource.id):
return []
# XXX Due to the mixed functionality in resource we can end up with
# ticket, ticket:1, ticket:1@10. This code naively collapses all
# subsets of the parent resource into one. eg. ticket:1@10
parent = resource.parent
while parent and (resource.realm == parent.realm or \
(resource.realm == parent.realm and resource.id == parent.id)):
parent = parent.parent
            if parent:
parent = flatten(parent)
else:
parent = []
return parent + ['%s:%s@%s' % (resource.realm or '*',
resource.id or '*',
resource.version or '*')]
return '/'.join(flatten(resource))
    def authz_permissions(self, resource_key, username):
# TODO: Handle permission negation in sections. eg. "if in this
# ticket, remove TICKET_MODIFY"
valid_users = ['*', 'anonymous']
if username and username != 'anonymous':
valid_users = ['*', 'authenticated', username]
for resource_section in [a for a in self.authz.sections
if a != 'groups']:
resource_glob = resource_section
if '@' not in resource_glob:
resource_glob += '@*'
if fnmatch(resource_key, resource_glob):
section = self.authz[resource_sect
|
v-legoff/croissant
|
croissant/output/__init__.py
|
Python
|
bsd-3-clause
| 1,636 | 0 |
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package containing the different outputs.
Each output type is defined inside a module.
"""
|
kwailamchan/programming-languages
|
python/django/elf/elf/src/event/forms.py
|
Python
|
mit
| 332 | 0.006024 |
from django import forms
from event.models import Event
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = [
'date', 'starttime', 'endtime',
'event', 'location', 'organization',
'speakers',
'rating', 'feedback'
]
|
lkarsten/pyobd
|
network_codes.py
|
Python
|
gpl-2.0
| 18,096 | 0.066589 |
ucodes = {
"U0001" : "High Speed CAN Communication Bus" ,
"U0002" : "High Speed CAN Communication Bus (Performance)" ,
"U0003" : "High Speed CAN Communication Bus (Open)" ,
"U0004" : "High Speed CAN Communication Bus (Low)" ,
"U0005" : "High Speed CAN Communication Bus (High)" ,
"U0006" : "High Speed CAN Communication Bus (Open)" ,
"U0007" : "High Speed CAN Communication Bus (Low)" ,
"U0008" : "High Speed CAN Communication Bus (High)" ,
"U0009" : "High Speed CAN Communication Bus (shorted to Bus)" ,
"U0010" : "Medium Speed CAN Communication Bus" ,
"U0011" : "Medium Speed CAN Communication Bus (Performance)" ,
"U0012" : "Medium Speed CAN Communication Bus (Open)" ,
"U0013" : "Medium Speed CAN Communication Bus (Low)" ,
"U0014" : "Medium Speed CAN Communication Bus (High)" ,
"U0015" : "Medium Speed CAN Communication Bus (Open)" ,
"U0016" : "Medium Speed CAN Communication Bus (Low)" ,
"U0017" : "Medium Speed CAN Communication Bus (High)" ,
"U0018" : "Medium Speed CAN Communication Bus (shorted to Bus)" ,
"U0019" : "Low Speed CAN Communication Bus" ,
"U0020" : "Low Speed CAN Communication Bus (Performance)" ,
"U0021" : "Low Speed CAN Communication Bus (Open)" ,
"U0022" : "Low Speed CAN Communication Bus (Low)" ,
"U0023" : "Low Speed CAN Comm
|
unication Bus (High)" ,
"U0024" : "Low Speed CAN Communication Bus (Open)" ,
"U0025" : "Low Speed CAN Communication Bus (Low)" ,
"U0026" : "Low Speed CAN Communication Bu
|
s (High)" ,
"U0027" : "Low Speed CAN Communication Bus (shorted to Bus)" ,
"U0028" : "Vehicle Communication Bus A" ,
"U0029" : "Vehicle Communication Bus A (Performance)" ,
"U0030" : "Vehicle Communication Bus A (Open)" ,
"U0031" : "Vehicle Communication Bus A (Low)" ,
"U0032" : "Vehicle Communication Bus A (High)" ,
"U0033" : "Vehicle Communication Bus A (Open)" ,
"U0034" : "Vehicle Communication Bus A (Low)" ,
"U0035" : "Vehicle Communication Bus A (High)" ,
"U0036" : "Vehicle Communication Bus A (shorted to Bus A)" ,
"U0037" : "Vehicle Communication Bus B" ,
"U0038" : "Vehicle Communication Bus B (Performance)" ,
"U0039" : "Vehicle Communication Bus B (Open)" ,
"U0040" : "Vehicle Communication Bus B (Low)" ,
"U0041" : "Vehicle Communication Bus B (High)" ,
"U0042" : "Vehicle Communication Bus B (Open)" ,
"U0043" : "Vehicle Communication Bus B (Low)" ,
"U0044" : "Vehicle Communication Bus B (High)" ,
"U0045" : "Vehicle Communication Bus B (shorted to Bus B)" ,
"U0046" : "Vehicle Communication Bus C" ,
"U0047" : "Vehicle Communication Bus C (Performance)" ,
"U0048" : "Vehicle Communication Bus C (Open)" ,
"U0049" : "Vehicle Communication Bus C (Low)" ,
"U0050" : "Vehicle Communication Bus C (High)" ,
"U0051" : "Vehicle Communication Bus C (Open)" ,
"U0052" : "Vehicle Communication Bus C (Low)" ,
"U0053" : "Vehicle Communication Bus C (High)" ,
"U0054" : "Vehicle Communication Bus C (shorted to Bus C)" ,
"U0055" : "Vehicle Communication Bus D" ,
"U0056" : "Vehicle Communication Bus D (Performance)" ,
"U0057" : "Vehicle Communication Bus D (Open)" ,
"U0058" : "Vehicle Communication Bus D (Low)" ,
"U0059" : "Vehicle Communication Bus D (High)" ,
"U0060" : "Vehicle Communication Bus D (Open)" ,
"U0061" : "Vehicle Communication Bus D (Low)" ,
"U0062" : "Vehicle Communication Bus D (High)" ,
"U0063" : "Vehicle Communication Bus D (shorted to Bus D)" ,
"U0064" : "Vehicle Communication Bus E" ,
"U0065" : "Vehicle Communication Bus E (Performance)" ,
"U0066" : "Vehicle Communication Bus E (Open)" ,
"U0067" : "Vehicle Communication Bus E (Low)" ,
"U0068" : "Vehicle Communication Bus E (High)" ,
"U0069" : "Vehicle Communication Bus E (Open)" ,
"U0070" : "Vehicle Communication Bus E (Low)" ,
"U0071" : "Vehicle Communication Bus E (High)" ,
"U0072" : "Vehicle Communication Bus E (shorted to Bus E)" ,
"U0073" : "Control Module Communication Bus Off" ,
"U0074" : "Reserved by J2012" ,
"U0075" : "Reserved by J2012" ,
"U0076" : "Reserved by J2012" ,
"U0077" : "Reserved by J2012" ,
"U0078" : "Reserved by J2012" ,
"U0079" : "Reserved by J2012" ,
"U0080" : "Reserved by J2012" ,
"U0081" : "Reserved by J2012" ,
"U0082" : "Reserved by J2012" ,
"U0083" : "Reserved by J2012" ,
"U0084" : "Reserved by J2012" ,
"U0085" : "Reserved by J2012" ,
"U0086" : "Reserved by J2012" ,
"U0087" : "Reserved by J2012" ,
"U0088" : "Reserved by J2012" ,
"U0089" : "Reserved by J2012" ,
"U0090" : "Reserved by J2012" ,
"U0091" : "Reserved by J2012" ,
"U0092" : "Reserved by J2012" ,
"U0093" : "Reserved by J2012" ,
"U0094" : "Reserved by J2012" ,
"U0095" : "Reserved by J2012" ,
"U0096" : "Reserved by J2012" ,
"U0097" : "Reserved by J2012" ,
"U0098" : "Reserved by J2012" ,
"U0099" : "Reserved by J2012" ,
"U0100" : "Lost Communication With ECM/PCM A" ,
"U0101" : "Lost Communication with TCM" ,
"U0102" : "Lost Communication with Transfer Case Control Module" ,
"U0103" : "Lost Communication With Gear Shift Module" ,
"U0104" : "Lost Communication With Cruise Control Module" ,
"U0105" : "Lost Communication With Fuel Injector Control Module" ,
"U0106" : "Lost Communication With Glow Plug Control Module" ,
"U0107" : "Lost Communication With Throttle Actuator Control Module" ,
"U0108" : "Lost Communication With Alternative Fuel Control Module" ,
"U0109" : "Lost Communication With Fuel Pump Control Module" ,
"U0110" : "Lost Communication With Drive Motor Control Module" ,
"U0111" : "Lost Communication With Battery Energy Control Module 'A'" ,
"U0112" : "Lost Communication With Battery Energy Control Module 'B'" ,
"U0113" : "Lost Communication With Emissions Critical Control Information" ,
"U0114" : "Lost Communication With Four-Wheel Drive Clutch Control Module" ,
"U0115" : "Lost Communication With ECM/PCM B" ,
"U0116" : "Reserved by J2012" ,
"U0117" : "Reserved by J2012" ,
"U0118" : "Reserved by J2012" ,
"U0119" : "Reserved by J2012" ,
"U0120" : "Reserved by J2012" ,
"U0121" : "Lost Communication With Anti-Lock Brake System (ABS) Control Module" ,
"U0122" : "Lost Communication With Vehicle Dynamics Control Module" ,
"U0123" : "Lost Communication With Yaw Rate Sensor Module" ,
"U0124" : "Lost Communication With Lateral Acceleration Sensor Module" ,
"U0125" : "Lost Communication With Multi-axis Acceleration Sensor Module" ,
"U0126" : "Lost Communication With Steering Angle Sensor Module" ,
"U0127" : "Lost Communication With Tire Pressure Monitor Module" ,
"U0128" : "Lost Communication With Park Brake Control Module" ,
"U0129" : "Lost Communication With Brake System Control Module" ,
"U0130" : "Lost Communication With Steering Effort Control Module" ,
"U0131" : "Lost Communication With Power Steering Control Module" ,
"U0132" : "Lost Communication With Ride Level Control Module" ,
"U0133" : "Reserved by J2012" ,
"U0134" : "Reserved by J2012" ,
"U0135" : "Reserved by J2012" ,
"U0136" : "Reserved by J2012" ,
"U0137" : "Reserved by J2012" ,
"U0138" : "Reserved by J2012" ,
"U0139" : "Reserved by J2012" ,
"U0140" : "Lost Communication With Body Control Module" ,
"U0141" : "Lost Communication With Body Control Module 'A'" ,
"U0142" : "Lost Communication With Body Control Module 'B'" ,
"U0143" : "Lost Communication With Body Control Module 'C'" ,
"U0144" : "Lost Communication With Body Control Module 'D'" ,
"U0145" : "Lost Communication With Body Control Module 'E'" ,
"U0146" : "Lost Communication With Gateway 'A'" ,
"U0147" : "Lost Communication With Gateway 'B'" ,
"U0148" : "Lost Communication With Gateway 'C'" ,
"U0149" : "Lost Communication With Gateway 'D'" ,
"U0150" : "Lost Communication With Gateway 'E'" ,
"U0151" : "Lost Communication With Restraints Control Module" ,
"U0152" : "Lost Communication With Side Restraints Control Module Left" ,
"U0153" : "Lost Communication With Side Restraints Control Module Right" ,
"U0154" : "Lost Communication With Restraints Occupant Sensing Control Module" ,
"U0155" : "Lost Communication With Instrument Panel Cluster (IPC) Control Module" ,
|
tlevine/vlermv
|
vlermv/__init__.py
|
Python
|
agpl-3.0
| 144 | 0 |
from ._fs import Vlermv
from ._s3 import S3Vlermv
from . import serializers, transformers
# For backwards compatibility
cache = Vlermv.memoize
|
FrankNagel/qlc
|
src/webapp/quanthistling/quanthistling/lib/base.py
|
Python
|
gpl-3.0
| 1,177 | 0.004248 |
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
from quanthistling.model.meta import Session
#from quanthistling.lib.helpers import History
from pylons import request, response, session, tmpl_context as c, url
class BaseController(WSGIController):
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# WSGIController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
try:
return WSGIController.__call__(self, environ, start_response)
finally:
            Session.remove()
# def __after__(self, action):
# if 'history' in session:
# c.history = session['history']
# print "History in session"
# else:
# c.history = History(20)
#
#
# if hasattr(c, 'heading'):
# c.history.add(c.heading, url)
#
# session['history'] = c.history
# session.save()
#
|
babykick/gain
|
tests/test_parser.py
|
Python
|
gpl-3.0
| 757 | 0.002642 |
from gain import Css, Item, Parser, Xpath
def test_parse():
html = '<title class="username">tom</title><div class="karma">15</div>'
class User(Item):
username = Xpath('//title')
karma = Css('.karma')
parser = Parser(html, User)
    user = parser.parse_item(html)
assert user.results == {
'username': 'tom',
'karma': '15'
}
def test_parse_urls():
html = ('<a href="item?id=14447885">64comments</a>'
'<a href="item?id=14447886">64comments</a>')
class User(Item):
username = Xpath('//title')
karma = Css('.karma')
parser = Parser('item\?id=\d+', User)
parser.parse_urls(html, 'https://blog.scrapinghub.com')
    assert parser.pre_parse_urls.qsize() == 2
|
NIASC/VirusMeta
|
seq_corret_module/seg_trim.py
|
Python
|
gpl-3.0
| 963 | 0.030114 |
import os
import sys
from Bio import SeqIO
with open(sys.argv[1], "rU") as handle, open(sys.argv[2], "r") as segments, open(sys.argv[3],"w") as out_handle:
"""
This function takes fasta file as first argument.
As second argument it takes tab delimited file containing
queryid and start and end postion of segment of interest.
Then trims unaligned parts of the sequence and outputs in out in new fasta file
"""
#first create dictionarry of QI and start postioions
QI_pos_dict = dict()
for lines in segments:
line = lines.strip().split()
QI_pos_dict[line[0]] = (line[1],line[2])
#loop through the fasta file and extarct segments
for record in SeqIO.parse(handle , "fasta"):
        for QI in QI_pos_dict:
if record.id == QI:
                out_handle.write(">%s\n%s\n" % (record.id,record.seq[ int(QI_pos_dict[QI][0]) : int(QI_pos_dict[QI][1])] ))
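# Usage sketch (illustrative; the file names are placeholders): the script is
# run with the input fasta, the tab-delimited segment table and the output
# fasta as positional arguments; start/end are 0-based slice positions.
#   python seg_trim.py contigs.fasta segments.txt trimmed.fasta
# segments.txt (query_id <tab> start <tab> end):
#   contig_001    120    980
#   contig_017    0      455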
|
imbolc/aiohttp-login
|
aiohttp_login/decorators.py
|
Python
|
isc
| 1,959 | 0 |
from functools import wraps
from aiohttp.abc import AbstractView
from aiohttp.web import HTTPForbidden, json_response, StreamResponse
try:
import ujson as json
except ImportError:
import json
from .cfg import cfg
from .utils import url_for, redirect, get_cur_user
def _get_request(args):
# Supports class based views see web.View
if isinstance(args[0], AbstractView):
return args[0].request
return args[-1]
def user_to_request(handler):
'''Add user to request if user logged in'''
@wraps(handler)
async def decorator(*args):
request = _get_request(args)
request[cfg.REQUEST_USER_KEY] = await get_cur_user(request)
return await handler(*args)
return decorator
def login_required(handler):
@user_to_request
@wraps(handler)
async def decorator(*args):
request = _get_request(args)
if not request[cfg.REQUEST_USER_KEY]:
return redirect(get_login_url(request))
return await handler(*args)
return decorator
def restricted_api(handler):
@user_to_request
    @wraps(handler)
async def decorator(*args):
request = _get_request(args)
        if not request[cfg.REQUEST_USER_KEY]:
return json_response({'error': 'Access denied'}, status=403)
response = await handler(*args)
if not isinstance(response, StreamResponse):
response = json_response(response, dumps=json.dumps)
return response
return decorator
def admin_required(handler):
@wraps(handler)
async def decorator(*args):
request = _get_request(args)
response = await login_required(handler)(request)
if request['user']['email'] not in cfg.ADMIN_EMAILS:
raise HTTPForbidden(reason='You are not admin')
return response
return decorator
def get_login_url(request):
return url_for('auth_login').with_query({
cfg.BACK_URL_QS_KEY: request.path_qs})
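# Usage sketch (illustrative; the handler names are made up): the decorators
# wrap plain aiohttp handlers, and restricted_api JSON-encodes a returned dict
# via json_response as defined above.
#   @login_required
#   async def profile(request):
#       user = request[cfg.REQUEST_USER_KEY]  # populated by user_to_request
#       ...
#   @restricted_api
#   async def api_me(request):
#       return {'email': request[cfg.REQUEST_USER_KEY]['email']}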
|
GALabs/StaticAid
|
static_aid/DataExtractor_Adlib.py
|
Python
|
mit
| 27,394 | 0.00522 |
from datetime import datetime
from json import load, dump
import logging
from logging import INFO
from os import listdir, makedirs, remove
from os.path import join, exists
import requests
import shelve
from static_aid import config
from static_aid.DataExtractor import DataExtractor, bytesLabel
def makeDir(dirPath):
try:
makedirs(dirPath)
except OSError:
# exists
pass
def adlibKeyFromUnicode(u):
return u.encode('ascii', errors='backslashreplace').lower()
def prirefString(u):
if type(u) == int:
return str(u)
return u.encode('ascii', errors='backslashreplace').lower()
def uriRef(category, priref):
linkDestination = config.destinations[category].strip('/ ')
return '/%s/%s' % (linkDestination, priref)
class DataExtractor_Adlib(DataExtractor):
def __init__(self, *args, **kwargs):
super(DataExtractor_Adlib, self).__init__(*args, **kwargs)
self.objectCaches = {} # contains 'shelve' instances keyed by collection name
self.objectCacheInsertionCount = 0
# set to True to cache the raw JSON result from Adlib (before it is converted to StaticAid-friendly JSON)
DUMP_RAW_DATA = True
# set to True to read raw JSON results from the cache instead of from Adlib endpoints (offline/debug mode)
READ_FROM_RAW_DUMP = False
# set to False for testing purposes (quicker processing when using RAW_DUMP mechanism)
READ_FROM_ADLIB_API = True
# number of records to save to JSON cache before syncing to disk
CACHE_SYNC_INTERVAL = 100
### Top-level stuff ###
def _run(self):
# create a collection > key > object cache so that we can generate links between them
self.cacheAllCollections()
# link each cached object by priref wherever there is a reference to agent name, part_of, parts, etc.
self.linkRecordsById()
# analyze the extent records by item > ... > collection
self.propagateDefaultExtentsToChildren()
# save the results to build/data/**.json
self.saveAllRecords()
def cacheAllCollections(self):
logging.debug('Extracting data from Adlib into object cache...')
self.clearCache()
self.extractPeople()
self.extractOrganizations()
self.extractCollections()
self.extractSubCollections()
self.extractSeries()
self.extractSubSeries()
self.extractFileLevelObjects()
self.extractItemLevelObjects()
def linkRecordsById(self):
        tree = shelve.open(self.cacheFilename('trees'))
for category in self.objectCaches:
cache = self.objectCaches[category]
for adlibKey in cache:
data = cache[adlibKey]
                # link records together by type
if category == 'objects':
self.addRefToLinkedAgents(data, category)
self.createTreeNode(tree, data, 'archival_object', category)
elif category == 'collections':
# NOTE: in ArchivesSpace, collection.tree.ref is something like "/repositories/2/resources/91/tree"
# but in collections.html, it's only used as an indicator of whether a tree node exists.
data['tree'] = {'ref': True}
self.addRefToLinkedAgents(data, category)
self.createTreeNode(tree, data, 'resource', category)
# this is necessary because the 'shelve' objects don't behave *exactly* like a dict
self.objectCaches[category][adlibKey] = data
# sync after each category so the in-memory map doesn't get too heavy
cache.sync()
tree.sync()
# combine the tree with the other data so that it gets saved to *.json
self.objectCaches['trees'] = tree
# now we have all records joined by ID, and we have un-linked tree nodes.
# Go through each tree node and recursively link them using the format:
# node.children = [node, node, ...]
self.createParentChildStructure()
def addRefToLinkedAgents(self, data, category):
# linked_agents[]: (objects OR collections) => (people OR organizations)
for linkedAgent in data.get('linked_agents', []):
if 'ref' not in linkedAgent:
linkKey = adlibKeyFromUnicode(linkedAgent['title'])
if linkKey in self.objectCaches.get('people', []):
linkCategory = 'people'
elif linkKey in self.objectCaches.get('organizations', []):
linkCategory = 'organizations'
else:
msg = '''
While processing '%s/%s', linked_agent '%s' could not be found in 'people' or 'organizations' caches.
'''.strip() % (category, data['adlib_key'], linkKey)
logging.error(msg)
continue
priref = self.objectCaches[linkCategory][linkKey]['id']
linkedAgent['ref'] = uriRef(linkCategory, priref)
def createTreeNode(self, tree, data, nodeType, category):
node = {
'id': data['id'],
'title': data['title'],
'level': data['level'], # item/file/collection/etc
'adlib_key': data['adlib_key'], # for traversing node > data
'category': category, # for traversing node > data
'node_type': nodeType,
'jsonmodel_type': 'resource_tree',
'publish': True,
'children': [],
}
tree[str(data['id'])] = node
def createParentChildStructure(self):
'''start at the top-level collections and recurse downward by 'parts_reference' links'''
collections = self.objectCaches['collections']
trees = self.objectCaches['trees']
for adlibKey in collections:
data = collections[adlibKey]
node = trees[data['id']]
self.createNodeChildren(node, data, 'collections')
# this is necessary for updates because the 'shelve' objects don't behave *exactly* like a dict
trees[data['id']] = node
trees.sync()
# TODO necessary?
self.objectCaches['trees'] = trees
def createNodeChildren(self, node, data, category):
selfRef = {'ref': uriRef(category, data['id'])}
for childKey in data['parts_reference']:
# connect the objects by 'parent.ref' field
if category == 'collections' and childKey in self.objectCaches['collections']:
# parts_reference links which point TO collections are only valid FROM collections.
# if this is wrong, it will mess up link creation.
childCategory = 'collections'
elif childKey in self.objectCaches['objects']:
childCategory = 'objects'
else:
msg = '''
While processing '%s/%s', parts_reference '%s' could not be found in 'objects' or 'collections' caches.
'''.strip() % (category, data['adlib_key'], childKey)
logging.error(msg)
continue
child = self.objectCaches[childCategory][childKey]
child['parent'] = selfRef
child['parent_node_object'] = node
child['resource'] = {'ref': selfRef}
# connect the tree-node objects by children[] list
childNode = self.objectCaches['trees'][child['id']]
node['children'].append(childNode)
node['has_children'] = True
node['tree'] = {'ref': True}
self.createNodeChildren(childNode, child, childCategory)
def propagateDefaultExtentsToChildren(self):
'''start at the top-level collections and recurse downward by 'parts_reference' links'''
collections = self.objectCaches['collections']
for adlibKey in collections:
data = collections[adlibKey]
if data['level'] == 'collection':
# start the recursion process at the toplevel ('collecti
|
jerry57/Robotframework-iRODS-Library
|
src/iRODSLibrary/__init__.py
|
Python
|
bsd-3-clause
| 422 | 0.004739 |
from iRODSLibrary import iRODSLibrary
__version__ = "0.0.4"
class iRODSLibrary(iRODSLibrary):
""" iRODSLibrary is a client keyword library that uses
the python-irodsclient module from iRODS
    https://github.com/irods/python-irodsclient
Examples:
| Connect To Grid | iPlant | data.iplantcollaborative.org | ${1247} | jdoe | jdoePassword | tempZone
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
|
uber/ludwig
|
ludwig/collect.py
|
Python
|
apache-2.0
| 15,469 | 0.000388 |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import os
import sys
from typing import List, Union
import numpy as np
from ludwig.api import LudwigModel
from ludwig.backend import ALL_BACKENDS, LOCAL, Backend
from ludwig.constants import FULL, TEST, TRAINING, VALIDATION
from ludwig.contrib import contrib_command
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.print_utils import (logging_level_registry, print_boxed,
print_ludwig)
from ludwig.utils.strings_utils import make_safe_filename
logger = logging.getLogger(__name__)
def collect_activations(
model_path: str,
layers: List[str],
dataset: str,
data_format: str = None,
split: str = FULL,
batch_size: int = 128,
output_directory: str = 'results',
gpus: List[str] = None,
gpu_memory_limit: int =None,
allow_parallel_threads: bool = True,
backend: Union[Backend, str] = None,
debug: bool = False,
**kwargs
) -> List[str]:
"""
Uses the pretrained model to collect the tensors corresponding to a
datapoint in the dataset. Saves the tensors to the experiment directory
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param layers: (List[str]) list of strings for layer names in the model
to collect activations.
:param dataset: (str) source
containing the data to make predictions.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param split: (str, default: `full`) split on which
to perform predictions. Valid values are `'training'`, `'validation'`,
`'test'` and `'full'`.
:param batch_size: (int, default `128`) size of batches for processing.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param debug: (bool, default: `False) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the activations.
"""
logger.info('Dataset path: {}'.format(dataset)
)
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(
model_path,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
backend=backend
)
# collect activations
print_boxed('COLLECT ACTIVATIONS')
collected_tensors = model.collect_activations(
layers,
dataset,
data_format=data_format,
split=split,
batch_size=batch_size,
debug=debug
)
# saving
os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
def collect_weights(
model_path: str,
tensors: List[str],
output_directory: str = 'results',
debug: bool = False,
**kwargs
) -> List[str]:
"""
Loads a pretrained model and collects weights.
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param tensors: (list, default: `None`) List of tensor names to collect
weights
:param output_directory: (str, default: `'results'`) the directory where
collected weights will be stored.
:param debug: (bool, default: `False) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the weights.
"""
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(model_path)
# collect weights
print_boxed('COLLECT WEIGHTS')
collected_tensors = model.collect_weights(tensors)
# saving
os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
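# Usage sketch for collect_weights above (illustrative; the model path and
# tensor name are placeholders):
#   collect_weights('results/experiment_run/model',
#                   tensors=['fc_0/weights'],
#                   output_directory='collected_weights')
# It returns the list of .npy files written, one per collected tensor.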
def save_tensors(collected_tensors, output_directory):
filenames = []
for tensor_name, tensor_value in collected_tensors:
np_filename = os.path.join(
output_directory,
make_safe_filename(tensor_name) + '.npy'
)
np.save(np_filename, tensor_value.numpy())
filenames.append(np_filename)
return filenames
def print_model_summary(
model_path: str,
**kwargs
) -> None:
"""
Loads a pretrained model and prints names of weights and layers activations.
# Inputs
:param model_path: (str) filepath to pre-trained model.
# Return
:return: (`None`)
"""
model = LudwigModel.load(model_path)
collected_tensors = model.collect_weights()
names = [name for name, w in collected_tensors]
keras_model = model.model.get_connected_model(training=False)
keras_model.summary()
print('\nLayers:\n')
for layer in keras_model.layers:
print(layer.name)
print('\nWeights:\n')
for name in names:
print(name)
def cli_collect_activations(sys_argv):
"""Command Line Interface to
|
communicate with the collection of tensors and
there are several options that can specified when calling this function:
--data_csv: Filepath for the input csv
--data_hdf5: Filepath for the input hdf5 file, if there is a csv file, this
is not read
--d: Refers to the dataset type of the file being read, by default is
*generic*
--s: Refers to the split of the data, can be one of: train, test,
validation, full
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--t: Tensors to collect
--od: Output directory of the model, defaults to results
--bs: Batch size
--g: Number of gpus that are to be used
--gf: Fraction of each GPUs memory to use.
--dbg: Debug if the model is to be started with python debugger
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model and uses it collect '
'tensors for each datapoint in the dataset.',
prog
|
onoga/wm
|
src/gnue/common/external/plex/Errors.py
|
Python
|
gpl-2.0
| 1,109 | 0.027953 |
#=======================================================================
#
# Python Lexical Analyser
#
# Exception classes
#
#=======================================================================
import exceptions
class PlexError(exceptions.Exception):
message = ""
class PlexTypeError(PlexError, TypeError):
pass
class PlexValueError(PlexError, ValueError):
pass
class InvalidRegex(PlexError):
pass
class InvalidToken(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
pass
class AmbiguousAction(PlexError):
message = "Two tokens with different actions can match the same string"
def __init__(self):
pass
class UnrecognizedInput(PlexError):
scanner = None
position = None
state_name = None
def __init__(self, scanner, state_name):
self.scanner = scanner
self.position = scanner.position()
self.state_name = state_name
def __str__(self):
return ("'%s', line %d, char %d: Token not recognised in state %s"
% (self.position + (repr(self.state_name),)))
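# Illustrative example (not part of the original module): InvalidToken prefixes
# the message with the token number, so
#
#   raise InvalidToken(3, "pattern is not a regular expression")
#
# produces an error whose text reads
# "Token number 3: pattern is not a regular expression".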
|
h-hirokawa/swampdragon
|
swampdragon/tests/test_base_model_router_subscribe.py
|
Python
|
bsd-3-clause
| 911 | 0.002195 |
from ..route_handler import BaseModelRouter, SUCCESS
from ..serializers.model_serializer import ModelSerializer
from .dragon_test_case import DragonTestCase
from swampdragon.tests.models import TwoFieldModel
class Serializer(ModelSerializer):
class Meta:
update_fields = ('text', 'number')
model = TwoFieldModel
class Router(BaseModelRouter):
model = TwoFieldModel
serializer_class = Serializer
class TestBaseModelRouter(DragonTestCase):
def setUp(self):
self.router = Router(self.connection)
def test_subscribe(self):
data = {'channel': 'client-channel'}
self.router.subscribe(**data)
self.assertEqual(self.connection.last_message['context']['state'], SUCCESS)
self.assertIn('channel_data', self.connection.last_message)
self.assertEqual(self.connection.last_message['channel_data']['local_channel'], 'client-channel')
|
swegener/sigrok-meter
|
multiplotwidget.py
|
Python
|
gpl-3.0
| 6,148 | 0.00244 |
##
## This file is part of the sigrok-meter project.
##
## Copyright (C) 2015 Jens Steinhauser <jens.steinhauser@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import qtcompat
QtCore = qtcompat.QtCore
QtGui = qtcompat.QtGui
pyqtgraph = qtcompat.pyqtgraph
# Black foreground on white background.
pyqtgraph.setConfigOption('background', 'w')
pyqtgraph.setConfigOption('foreground', 'k')
class Plot(object):
'''Helper class to keep all graphics items of a plot together.'''
def __init__(self, view, xaxis, yaxis):
self.view = view
self.xaxis = xaxis
self.yaxis = yaxis
self.visible = False
class MultiPlotItem(pyqtgraph.GraphicsWidget):
# Emitted when a plot is shown.
plotShown = QtCore.Signal()
# Emitted when a plot is hidden by the user via the context menu.
plotHidden = QtCore.Signal(Plot)
def __init__(self, parent=None):
pyqtgraph.GraphicsWidget.__init__(self, parent)
self.setLayout(QtGui.QGraphicsGridLayout())
self.layout().setContentsMargins(10, 10, 10, 1)
self.layout().setHorizontalSpacing(0)
self.layout().setVerticalSpacing(0)
for i in range(2):
self.layout().setColumnPreferredWidth(i, 0)
self.layout().setColumnMinimumWidth(i, 0)
self.layout().setColumnSpacing(i, 0)
self.layout().setColumnStretchFactor(0, 0)
self.layout().setColumnStretchFactor(1, 100)
# List of 'Plot' objects that are shown.
self._plots = []
self._hideActions = {}
def addPlot(self):
'''Adds and returns a new plot.'''
row = self.layout().rowCount()
view = pyqtgraph.ViewBox(parent=self)
# If this is not the first plot, link to the axis of the previous one.
if self._plots:
view.setXLink(self._plots[-1].view)
yaxis = pyqtgraph.AxisItem(parent=self, orientation='left')
yaxis.linkToView(view)
yaxis.setGrid(255)
xaxis = pyqtgraph.AxisItem(parent=self, orientation='bottom')
xaxis.linkToView(view)
xaxis.setGrid(255)
plot = Plot(view, xaxis, yaxis)
self._plots.append(plot)
self.showPlot(plot)
# Create a separate action object for each plots context menu, so that
# we can later find out which plot should be hidden by looking at
# 'self._hideActions'.
hideAction = QtGui.QAction('Hide', self)
hideAction.triggered.connect(self._onHideActionTriggered)
self._hideActions[id(hideAction)] = plot
view.menu.insertAction(view.menu.actions()[0], hideAction)
return plot
def _rowNumber(self, plot):
'''Returns the number of the first row a plot occupies.'''
# Every plot takes up two rows.
return 2 * self._plots.index(plot)
@QtCore.Slot()
def _onHideActionTriggered(self, checked=False):
# The plot that we want to hide.
plot = self._hideActions[id(self.sender())]
self.hidePlot(plot)
def hidePlot(self, plot):
'''Hides 'plot'.'''
# Only hiding wouldn't give up the space occupied by the items,
# we have to remove them from the layout.
self.layout().removeItem(plot.view)
self.layout().removeItem(plot.xaxis)
self.layout().removeItem(plot.yaxis)
plot.view.hide()
plot.xaxis.hide()
plot.yaxis.hide()
row = self._rowNumber(plot)
self.layout().setRowStretchFactor(row, 0)
self.layout().setRowStretchFactor(row + 1, 0)
plot.visible = False
self.plotHidden.emit(plot)
def showPlot(self, plot):
'''Adds the items of the plot to the scene's layout and makes
them visible.'''
if plot.visible:
return
row = self._rowNumber(plot)
self.layout().addItem(plot.yaxis, row, 0, QtCore.Qt.AlignRight)
self.layout().addItem(plot.view, row, 1)
self.layout().addItem(plot.xaxis, row + 1, 1)
plot.view.show()
plot.xaxis.show()
plot.yaxis.show()
for i in range(row, row + 2):
self.layout().setRowPreferredHeight(i, 0)
self.layout().setRowMinimumHeight(i, 0)
self.layout().setRowSpacing(i, 0)
self.layout().setRowStretchFactor(row, 100)
self.layout().setRowStretchFactor(row + 1, 0)
plot.visible = True
self.plotShown.emit()
class MultiPlotWidget(pyqtgraph.GraphicsView):
'''Widget that aligns multiple plots on top of each other.
(The built-in classes fail at doing this correctly when the axes grow,
just try zooming in the "GraphicsLayout" or the "Linked View" examples.)'''
def __init__(self, parent=None):
pyqtgraph.GraphicsView.__init__(self, parent)
self.multiPlotItem = MultiPlotItem()
self.setCentralItem(self.multiPlotItem)
for m in [
'addPlot',
'hidePlot',
'showPlot'
]:
setattr(self, m, getattr(self.multiPlotItem, m))
self.multiPlotItem.plotShown.connect(self._on_plotShown)
# Expose the signal of the plot item.
self.plotHidden = self.multiPlotItem.plotHidden
def _on_plotShown(self):
# This call is needed if only one plot exists and it was hidden,
# without it the layout would start acting weird and not make the
# MultiPlotItem fill the view widget after showing the plot again.
self.resizeEvent(None)
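# Usage sketch (illustrative, assumes a running Qt application; names below are
# examples only). Each Plot returned by addPlot() exposes its ViewBox as .view,
# so items can be added to it with the normal pyqtgraph API:
#
#   widget = MultiPlotWidget()
#   plot = widget.addPlot()
#   plot.view.addItem(pyqtgraph.PlotDataItem([0, 1, 2], [3.3, 3.1, 3.2]))
#   widget.plotHidden.connect(handle_hidden_plot)   # handler name is an assumption
#   widget.show()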
|
lnxpgn/scrapy_multiple_spiders
|
commands/crawl.py
|
Python
|
mit
| 809 | 0.002472 |
from scrapy.commands.crawl import Command
from scrapy.exceptions import UsageError
class CustomCrawlCommand(Command):
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy crawl' with more
|
than one spider is no longer supported")
spname =
|
args[0]
# added new code
spider_settings_path = self.settings.getdict('SPIDER_SETTINGS', {}).get(spname, None)
if spider_settings_path is not None:
self.settings.setmodule(spider_settings_path, priority='cmdline')
# end
crawler = self.crawler_process.create_crawler()
spider = crawler.spiders.create(spname, **opts.spargs)
crawler.crawl(spider)
self.crawler_process.start()
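# Illustrative settings.py fragment (assumed, not from this repository): the
# command above reads SPIDER_SETTINGS and, if the spider being crawled has an
# entry, layers that module's settings on top at 'cmdline' priority.
#
#   SPIDER_SETTINGS = {
#       'example_spider': 'myproject.spider_settings.example_spider',
#   }
#
# With this in place, `scrapy crawl example_spider` also loads the referenced module.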
|
wwgc/trafficstats
|
trafficstats.py
|
Python
|
gpl-2.0
| 912 | 0.032151 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import paramiko
def traffic(ip,username,passwd):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip,22,username,passwd,timeout=5)
stdin, stdout, stderr = ssh.exec_command('cat /proc/net/dev')
out = stdout.readlines()
conn = out[2].split()
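# Field layout of /proc/net/dev (noted here for clarity, not in the original file):
# each interface line looks roughly like
#   eth0: 123456789 1000 0 0 0 0 0 0 987654321 900 0 0 0 0 0 0
# so after split(), index 1 is received bytes and index 9 is transmitted bytes;
# out[2] is the first interface line after the two header lines.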
ReceiveBytes = round(float(conn[1])/(10**9),1)
TransmitBytes = round(float(conn[9])/(10**9),1)
print '%s\t%s\t%s'%(ip,ReceiveBytes,TransmitBytes)
ssh.close()
except :
print '%s\tError'%(ip)
if __name__=='__main__':
ip_list=['192.168.1.55','192.168.1.100','192.168.1.101','192.168.1.200','192.168.1.201']
username = "root" #用户名
passwd = "" #密码
print '\nIP\t\tRX(G)\tTX(G)'
for ip in ip_list:
traffic(ip,username,passwd)
print '\n'
|
yasutaka/nlp_100
|
johnny/第1章/08.py
|
Python
|
mit
| 425 | 0.009412 |
# -*- coding:utf-8 -*-
def cipher(string):
encoding = ""
for character in string:
# range 97..122 = a-z
if 97 <= ord(character) <= 122:
# encode as 219 - character number
encoding += chr(219 - ord(character))
else:
encoding += character
return encoding
input_value = "Hello World"
print cipher(input_value)
print cipher(cipher(input_value))
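# For reference (worked out by hand, worth re-checking): only a-z is mapped to
# chr(219 - ord(c)), so the two prints above should produce
#   Hvool Wliow
#   Hello World   (applying the cipher twice restores the input)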
|
tommorris/mf2py
|
mf2py/__init__.py
|
Python
|
mit
| 369 | 0 |
""
|
"
Microformats2 is a general way to mark up any HTML document with
classes and propeties. This library parses structured data from
a microformatted HTML document and returns a well-formed JSON
dictionary.
"""
from .version import __version__
from .parser import Parser, parse
from .mf_helpers import get_url
__all__ = ['Parser', 'parse', 'get_url', '__version__']
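# Usage sketch (illustrative HTML; a `url=` keyword can be used instead of `doc`):
#
#   import mf2py
#   result = mf2py.parse(doc='<article class="h-entry"><h1 class="p-name">Hi</h1></article>')
#   # result is a dict with 'items', 'rels' and 'rel-urls' keys.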
|
meitham/python-client
|
neovim/msgpack_rpc/event_loop/asyncio.py
|
Python
|
apache-2.0
| 4,579 | 0 |
"""Event loop implementation that uses the `asyncio` standard module.
The `asyncio` module was added to python standard library on 3.4, and it
provides a pure python implementation of an event loop library. It is used
as a fallback in case pyuv is not available (on python implementations other
than CPython).
Earlier python versions are supported through the `trollius` package, which
is a backport of `asyncio` that works on Python 2.6+.
"""
from __future__ import absolute_import
import os
import sys
from collections import deque
try:
# For python 3.4+, use the standard library module
import asyncio
except (ImportError, SyntaxError):
# Fallback to trollius
import trollius as asyncio
from .base import BaseEventLoop
loop_cls = asyncio.SelectorEventLoop
if os.name == 'nt':
# On Windows use ProactorEventLoop, which supports pipes and is backed by the
# more powerful IOCP facility.
loop_cls = asyncio.ProactorEventLoop
class AsyncioEventLoop(BaseEventLoop, asyncio.Protocol,
asyncio.SubprocessProtocol):
"""`BaseEventLoop` subclass that uses `asyncio` as a backend."""
def connection_made(self, transport):
"""Used to signal `asyncio.Protocol` of a successful connection."""
self._transport = transport
self._raw_transport = transport
if isinstance(transport, asyncio.SubprocessTransport):
self._transport = transport.get_pipe_transport(0)
def connection_lost(self, exc):
"""Used to signal `asyncio.Protocol` of a lost connection."""
self._on_error(exc.args[0] if exc else 'EOF')
def data_received(self, data):
"""Used to signal `asyncio.Protocol` of incoming data."""
if self._on_data:
self._on_data(data)
return
self._queued_data.append(data)
def pipe_connection_lost(self, fd, exc):
"""Used to signal `asyncio.SubprocessProtocol` of a lost connection."""
self._on_error(exc.args[0] if exc else 'EOF')
def pipe_data_received(self, fd, data):
"""Used to signal `asyncio.SubprocessProtocol` of incoming data."""
if fd == 2: # stderr fd number
self._on_stderr(data)
elif self._on_data:
self._on_data(data)
else:
self._queued_data.append(data)
def process_exited(self):
"""Used to signal `asyncio.SubprocessProtocol` when the child exits."""
self._on_error('EOF')
def _init(self):
self._loop = loop_cls()
self._queued_data = deque()
self._fact = lambda: self
self._raw_transport = None
def _connect_tcp(self, address, port):
coroutine = self._loop.create_connection(self._fact, address, port)
self._loop.run_until_complete(coroutine)
def _connect_socket(self, path):
if os.name == 'nt':
coroutine = self._loop.create_pipe_connection(self._fact, path)
else:
coroutine = self._loop.create_unix_connection(self._fact, path)
self._loop.run_until_complete(coroutine)
def _connect_stdio(self):
coroutine = self._loop.connect_read_pipe(self._fact, sys.stdin)
self._loop.run_until_complete(coroutine)
coroutine = self._loop.connect_write_pipe(self._fact, sys.stdout)
self._loop.run_until_complete(coroutine)
def _connect_child(self, argv):
self._child_watcher = asyncio.get_child_watcher()
self._child_watcher.attach_loop(self._loop)
coroutine = self._loop.subprocess_exec(self._fact, *argv)
self._loop.run_until_complete(coroutine)
def _start_reading(self):
pass
def _send(self, data):
self._transport.write(data)
def _run(self):
while self._queued_data:
self._on_data(self._queued_data.popleft())
self._loop.run_forever()
def _stop(self):
self._loop.stop()
def _close(self):
if self._raw_transport is not None:
self._raw_transport.close()
self._loop.close()
def _threadsafe_call(self, fn):
self._loop.call_soon_threadsafe(fn)
def _setup_signals(self, signals):
if os.name == 'nt':
# add_signal_handler is not supported in win32
self._signals = []
return
self._signals = list(signals)
for signum in self._signals:
self._loop.add_signal_handler(signum, self._on_signal, signum)
def _teardown_signals(self):
for signum in self._signals:
self._loop.remove_signal_handler(signum)
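# Usage sketch (assumed from the _connect_* methods above; the constructor
# dispatch lives in BaseEventLoop, which is not part of this excerpt): the
# transport type passed to the constructor selects the matching _connect_*
# method, e.g.
#
#   loop = AsyncioEventLoop('socket', '/tmp/nvim')      # -> _connect_socket('/tmp/nvim')
#   loop = AsyncioEventLoop('tcp', '127.0.0.1', 7450)   # -> _connect_tcp('127.0.0.1', 7450)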
|