| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
import os
from nose.tools import eq_
import mapnik
from .utilities import execution_path, run_all
def setup():
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
if 'shape' in mapnik.DatasourceCache.plugin_names():
    def test_query_tolerance():
        srs = '+init=epsg:4326'
        lyr = mapnik.Layer('test')
        ds = mapnik.Shapefile(file='../data/shp/arrows.shp')
        lyr.datasource = ds
        lyr.srs = srs
        _width = 256
        _map = mapnik.Map(_width, _width, srs)
        _map.layers.append(lyr)
        # zoom determines tolerance
        _map.zoom_all()
        _map_env = _map.envelope()
        tol = (_map_env.maxx - _map_env.minx) / _width * 3
        # 0.046875 for arrows.shp and zoom_all
        eq_(tol, 0.046875)
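        # equivalently: zoom_all() leaves an x extent of 4.0 map units here,
        # and 4.0 / 256 * 3 = 0.046875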
        # check point really exists
        x, y = 2.0, 4.0
        features = _map.query_point(0, x, y)
        eq_(len(list(features)), 1)
        # check inside tolerance limit
        x = 2.0 + tol * 0.9
        features = _map.query_point(0, x, y)
        eq_(len(list(features)), 1)
        # check outside tolerance limit
        x = 2.0 + tol * 1.1
        features = _map.query_point(0, x, y)
        eq_(len(list(features)), 0)
if __name__ == "__main__":
    setup()
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| mapycz/python-mapnik | test/python_tests/query_tolerance_test.py | Python | lgpl-2.1 | 1,397 | 0.000716 |
"""SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixc++.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
import os.path
import SCons.Platform.aix
cplusplus = __import__('c++', globals(), locals(), [])
packages = ['vacpp.cmp.core', 'vacpp.cmp.batch', 'vacpp.cmp.C', 'ibmcxx.cmp']
def get_xlc(env):
    xlc = env.get('CXX', 'xlC')
    return SCons.Platform.aix.get_xlc(env, xlc, packages)
def generate(env):
    """Add Builders and construction variables for xlC / Visual Age
    suite to an Environment."""
path, _cxx, version = get_xlc(env)
if path and _cxx:
_cxx = os.path.join(path, _cxx)
if 'CXX' not in env:
env['CXX'] = _cxx
cplusplus.generate(env)
if version:
env['CXXVERSION'] = version
def exists(env):
path, _cxx, version = get_xlc(env)
if path and _cxx:
xlc = os.path.join(path, _cxx)
if os.path.exists(xlc):
return xlc
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| IljaGrebel/OpenWrt-SDK-imx6_HummingBoard | staging_dir/host/lib/scons-2.3.5/SCons/Tool/aixc++.py | Python | gpl-2.0 | 2,413 | 0.002072 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('slug', models.SlugField(unique=True, blank=True)),
('site', models.URLField(null=True, blank=True)),
('rate', models.IntegerField(default=50)),
('bank', models.CharField(default=b'anz', max_length=100)),
('bank_account_name', models.CharField(max_length=100)),
('bank_account_no', models.CharField(max_length=30)),
],
options={
},
bases=(models.Model,),
),
]
| guoqiao/django-nzpower | nzpower/migrations/0001_initial.py | Python | mit | 981 | 0.001019 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from cassandra.datastax.cloud import parse_metadata_info
from cassandra.query import SimpleStatement
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table, create_keyspace_simple
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
import unittest
import six
from ssl import SSLContext, PROTOCOL_TLS
from cassandra import DriverException, ConsistencyLevel, InvalidRequest
from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster, _execution_profile_to_string
from cassandra.connection import SniEndPoint
from cassandra.auth import PlainTextAuthProvider
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy
from mock import patch
from tests.integration import requirescloudproxy
from tests.util import wait_until_not_raised
from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER
DISALLOWED_CONSISTENCIES = [
ConsistencyLevel.ANY,
ConsistencyLevel.ONE,
ConsistencyLevel.LOCAL_ONE
]
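# Write consistency levels the cloud guardrails are expected to reject;
# test_consistency_guardrails below asserts an InvalidRequest for each of them.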
@requirescloudproxy
class CloudTests(CloudProxyCluster):
def hosts_up(self):
return [h for h in self.cluster.metadata.all_hosts() if h.is_up]
def test_resolve_and_connect(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.is_up)
self.assertIsInstance(host.endpoint, SniEndPoint)
self.assertEqual(str(host.endpoint), "{}:{}:{}".format(
host.endpoint.address, host.endpoint.port, host.host_id))
self.assertIn(host.endpoint._resolved_address, ("127.0.0.1", '::1'))
def test_match_system_local(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
row = self.session.execute('SELECT * FROM system.local', host=host).one()
self.assertEqual(row.host_id, host.host_id)
self.assertEqual(row.rpc_address, host.broadcast_rpc_address)
def test_set_auth_provider(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.auth_provider.username, 'user1')
self.assertEqual(self.cluster.auth_provider.password, 'user1')
def test_support_leaving_the_auth_unset(self):
with self.assertRaises(NoHostAvailable):
self.connect(self.creds_no_auth)
self.assertIsNone(self.cluster.auth_provider)
def test_support_overriding_auth_provider(self):
try:
self.connect(self.creds, auth_provider=PlainTextAuthProvider('invalid', 'invalid'))
except:
pass # this will fail soon when sni_single_endpoint is updated
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.auth_provider.username, 'invalid')
self.assertEqual(self.cluster.auth_provider.password, 'invalid')
def test_error_overriding_ssl_context(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLS))
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def test_error_overriding_ssl_options(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_options={'check_hostname': True})
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def _bad_hostname_metadata(self, config, http_data):
config = parse_metadata_info(config, http_data)
config.sni_host = "127.0.0.1"
return config
def test_verify_hostname(self):
with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata):
with self.assertRaises(NoHostAvailable) as e:
self.connect(self.creds)
self.assertIn("hostname", str(e.exception).lower())
def test_error_when_bundle_doesnt_exist(self):
try:
self.connect('/invalid/path/file.zip')
except Exception as e:
if six.PY2:
self.assertIsInstance(e, IOError)
else:
self.assertIsInstance(e, FileNotFoundError)
def test_load_balancing_policy_is_dcawaretokenlbp(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy,
TokenAwarePolicy)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy._child_policy,
DCAwareRoundRobinPolicy)
def test_resolve_and_reconnect_on_node_down(self):
self.connect(self.creds,
idle_heartbeat_interval=1, idle_heartbeat_timeout=1,
reconnection_policy=ConstantReconnectionPolicy(120))
self.assertEqual(len(self.hosts_up()), 3)
CLOUD_PROXY_SERVER.stop_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 2),
0.02, 250)
host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0]
with patch.object(SniEndPoint, "resolve", wraps=host.endpoint.resolve) as mocked_resolve:
CLOUD_PROXY_SERVER.start_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 3),
0.02, 250)
mocked_resolve.assert_called()
def test_metadata_unreachable(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_unreachable, connect_timeout=1)
self.assertIn('Unable to connect to the metadata service', str(cm.exception))
def test_metadata_ssl_error(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_invalid_ca)
self.assertIn('Unable to connect to the metadata', str(cm.exception))
def test_default_consistency(self):
self.connect(self.creds)
self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM)
# Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT,
# EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
for ep_key in six.iterkeys(self.cluster.profile_manager.profiles):
ep = self.cluster.profile_manager.profiles[ep_key]
self.assertEqual(
ep.consistency_level,
ConsistencyLevel.LOCAL_QUORUM,
"Expecting LOCAL QUORUM for profile {}, but got {} instead".format(
_execution_profile_to_string(ep_key), ConsistencyLevel.value_to_name[ep.consistency_level]
))
def test_default_consistency_of_execution_profiles(self):
cloud_config = {'secure_connect_bundle': self.creds}
self.cluster = Cluster(cloud=cloud_config, protocol_version=4, execution_profiles={
'pre_create_default_ep': ExecutionProfile(),
'pre_create_changed_ep': ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
),
})
self.cluster.add_execution_profile('pre_connect_default_ep', ExecutionProfile())
self.cluster.add_execution_profile(
'pre_connect_changed_ep',
ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
)
)
session = self.cluster.connect(wait_for_all_pools=True)
self.cluster.add_execution_profile('post_connect_default_ep', ExecutionProfile())
self.cluster.add_execution_profile(
'post_connect_changed_ep',
ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
)
)
for default in ['pre_create_default_ep', 'pre_connect_default_ep', 'post_connect_default_ep']:
cl = self.cluster.profile_manager.profiles[default].consistency_level
self.assertEqual(
cl, ConsistencyLevel.LOCAL_QUORUM,
"Expecting LOCAL QUORUM for profile {}, but got {} instead".format(default, cl)
)
for changed in ['pre_create_changed_ep', 'pre_connect_changed_ep', 'post_connect_changed_ep']:
cl = self.cluster.profile_manager.profiles[changed].consistency_level
self.assertEqual(
cl, ConsistencyLevel.LOCAL_ONE,
"Expecting LOCAL ONE for profile {}, but got {} instead".format(default, cl)
)
def test_consistency_guardrails(self):
self.connect(self.creds)
self.session.execute(
"CREATE KEYSPACE IF NOT EXISTS test_consistency_guardrails "
"with replication={'class': 'SimpleStrategy', 'replication_factor': 1}"
)
self.session.execute("CREATE TABLE IF NOT EXISTS test_consistency_guardrails.guardrails (id int primary key)")
for consistency in DISALLOWED_CONSISTENCIES:
statement = SimpleStatement(
"INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
consistency_level=consistency
)
with self.assertRaises(InvalidRequest) as e:
self.session.execute(statement)
self.assertIn('not allowed for Write Consistency Level', str(e.exception))
# Sanity check to make sure we can do a normal insert
statement = SimpleStatement(
"INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
consistency_level=ConsistencyLevel.LOCAL_QUORUM
)
try:
self.session.execute(statement)
except InvalidRequest:
self.fail("InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!")
def test_cqlengine_can_connect(self):
class TestModel(Model):
id = columns.Integer(primary_key=True)
val = columns.Text()
connection.setup(None, "test", cloud={'secure_connect_bundle': self.creds})
create_keyspace_simple('test', 1)
sync_table(TestModel)
        TestModel.objects.create(id=42, val='test')
self.assertEqual(len(TestModel.objects.all()), 1)
| datastax/python-driver | tests/integration/cloud/test_cloud.py | Python | apache-2.0 | 10,950 | 0.003288 |
from django.core.urlresolvers import reverse
from django.db import models
from gamesoup.games.models import *
class Match(models.Model):
game = models.ForeignKey(Game)
state = models.TextField(blank=True)
def __unicode__(self):
return self.game.name
class Meta:
verbose_name_plural = 'Matches'
def play_link(self):
return '<a href="%s">play</a>' % reverse('matches:play_match', args=[self.id])
play_link.short_description = 'Play'
play_link.allow_tags = True
| ktonon/GameSoup | gamesoup/matches/models.py | Python | mit | 520 | 0.003846 |
from mock import patch
from pip.vcs.git import Git
from tests.test_pip import (reset_env, run_pip,
_create_test_package,)
from tests.git_submodule_helpers import (
_change_test_package_submodule,
_pull_in_submodule_changes_to_module,
_create_test_package_with_submodule,
)
def test_get_tag_revs_should_return_tag_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
env.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_tag_revs(version_pkg_path)
assert result == {'0.1': commit, '0.2': commit}, result
def test_get_branch_revs_should_return_branch_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
def test_get_branch_revs_should_ignore_no_branch():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
# current branch here is "* (nobranch)"
env.run('git', 'checkout', commit,
cwd=version_pkg_path, expect_stderr=True)
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_branch_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_tag_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_ambiguous_commit(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
def test_check_submodule_addition():
"""
Submodules are pulled in on install and updated on upgrade.
"""
env = reset_env()
module_path, submodule_path = _create_test_package_with_submodule(env)
install_result = run_pip('install', '-e', 'git+'+module_path+'#egg=version_pkg')
assert '.virtualenv/src/version-pkg/testpkg/static/testfile' in install_result.files_created
_change_test_package_submodule(env, submodule_path)
_pull_in_submodule_changes_to_module(env, module_path)
# expect error because git may write to stderr
update_result = run_pip('install', '-e', 'git+'+module_path+'#egg=version_pkg', '--upgrade', expect_error=True)
assert env.venv/'src/version-pkg/testpkg/static/testfile2' in update_result.files_created
| domenkozar/pip | tests/test_vcs_git.py | Python | mit | 3,904 | 0.001281 |
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
Example input for simulation of ion dynamics
No intensity effects
:Authors: **Alexandre Lasheen**
'''
from __future__ import division, print_function
from builtins import range
from scipy.constants import physical_constants
# Atomic Mass Unit [eV]
u = physical_constants['atomic mass unit-electron volt relationship'][0]
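# (about 931.5 MeV per atomic mass unit; used below to express the U-238 ion rest mass in eV)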
import numpy as np
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.trackers.tracker import RingAndRFTracker
from blond.beam.distributions import bigaussian
from blond.monitors.monitors import BunchMonitor
from blond.beam.profile import Profile, CutOptions
from blond.beam.beam import Beam, Particle
from blond.plots.plot import Plot
import os
this_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
try:
os.mkdir(this_directory + '../output_files')
except:
pass
try:
os.mkdir(this_directory + '../output_files/EX_07_fig')
except:
pass
# Simulation parameters --------------------------------------------------------
# Bunch parameters
N_b = 5.0e11 # Design Intensity in SIS100
N_p = 50000 # Macro-particles
tau_0 = 100.0e-9 # Initial bunch length, 4 sigma [s]
Z = 28. # Charge state of Uranium
m_p = 238.05078826*u # Isotope mass of U-238
# Machine and RF parameters
C = 1083.6 # Machine circumference [m]
p_i = 153.37e9 # Synchronous momentum [eV/c]
p_f = 535.62e9 # Synchronous momentum, final 535.62e9
h = 10 # Harmonic number
V = 280.e3 # RF voltage [V]
dphi = np.pi # Phase modulation/offset
gamma_t = 15.59 # Transition gamma
alpha = 1./gamma_t/gamma_t # First order mom. comp. factor
# Tracking details
N_t = 45500 # Number of turns to track
dt_plt = 5000 # Time steps between plots
# Simulation setup -------------------------------------------------------------
print("Setting up the simulation...")
print("")
# Define general parameters
general_params = Ring(C, alpha, np.linspace(p_i, p_f, N_t+1),
Particle(m_p, Z), n_turns=N_t)
# Define beam and distribution
beam = Beam(general_params, N_p, N_b)
print("Particle mass is %.3e eV" %general_params.Particle.mass)
print("Particle charge is %d e" %general_params.Particle.charge)
linspace_test = np.linspace(p_i, p_f, N_t+1)
momentum_test = general_params.momentum
beta_test = general_params.beta
gamma_test = general_params.gamma
energy_test = general_params.energy
mass_test = general_params.Particle.mass # [eV]
charge_test = general_params.Particle.charge # e*Z
# Define RF station parameters and corresponding tracker
rf_params = RFStation(general_params, [h], [V], [dphi])
print("Initial bucket length is %.3e s" %(2.*np.pi/rf_params.omega_rf[0,0]))
print("Final bucket length is %.3e s" %(2.*np.pi/rf_params.omega_rf[0,N_t]))
phi_s_test = rf_params.phi_s #: *Synchronous phase
omega_RF_d_test = rf_params.omega_rf_d #: *Design RF frequency of the RF systems in the station [GHz]*
omega_RF_test = rf_params.omega_rf #: *Initial, actual RF frequency of the RF systems in the station [GHz]*
phi_RF_test = rf_params.phi_rf #: *Initial, actual RF phase of each harmonic system*
E_increment_test = rf_params.delta_E #Energy increment (acceleration/deceleration) between two turns,
long_tracker = RingAndRFTracker(rf_params, beam)
eta_0_test = rf_params.eta_0 #: *Slippage factor (0th order) for the given RF section*
eta_1_test = rf_params.eta_1 #: *Slippage factor (1st order) for the given RF section*
eta_2_test = rf_params.eta_2 #: *Slippage factor (2nd order) for the given RF section*
alpha_order_test = rf_params.alpha_order
bigaussian(general_params, rf_params, beam, tau_0/4,
reinsertion = 'on', seed=1)
# Need slices for the Gaussian fit
slice_beam = Profile(beam, CutOptions(n_slices=100))
# Define what to save in file
bunchmonitor = BunchMonitor(general_params, rf_params, beam,
this_directory + '../output_files/EX_07_output_data',
Profile=slice_beam)
format_options = {'dirname': this_directory + '../output_files/EX_07_fig'}
plots = Plot(general_params, rf_params, beam, dt_plt, N_t, 0, 8.e-7,
-400e6, 400e6, separatrix_plot=True, Profile=slice_beam,
h5file=this_directory + '../output_files/EX_07_output_data',
format_options=format_options)
# For testing purposes
test_string = ''
test_string += '{:<17}\t{:<17}\t{:<17}\t{:<17}\n'.format(
'mean_dE', 'std_dE', 'mean_dt', 'std_dt')
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
# Accelerator map
map_ = [long_tracker] + [slice_beam] + [bunchmonitor] + [plots]
print("Map set")
print("")
# Tracking ---------------------------------------------------------------------
for i in range(1, N_t+1):
# Plot has to be done before tracking (at least for cases with separatrix)
if (i % dt_plt) == 0:
print("Outputting at time step %d..." %i)
print(" Beam momentum %.6e eV" %beam.momentum)
print(" Beam gamma %3.3f" %beam.gamma)
print(" Beam beta %3.3f" %beam.beta)
print(" Beam energy %.6e eV" %beam.energy)
print(" Four-times r.m.s. bunch length %.4e s" %(4.*beam.sigma_dt))
print("")
# Track
for m in map_:
m.track()
# Define losses according to separatrix
beam.losses_separatrix(general_params, rf_params)
# For testing purposes
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
with open(this_directory + '../output_files/EX_07_test_data.txt', 'w') as f:
f.write(test_string)
print("Done!")
| blond-admin/BLonD | __EXAMPLES/main_files/EX_07_Ions.py | Python | gpl-3.0 | 6,437 | 0.010253 |
#SPDX-License-Identifier: MIT
import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
    filename = os.path.join(os.path.dirname(__file__), filename)
    text_type = type(u"")
    with io.open(filename, mode="r", encoding='utf-8') as fd:
        return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="insight_worker",
version="1.0.0",
url="https://github.com/chaoss/augur",
license='MIT',
author="Augurlabs",
author_email="s@goggins.com",
description="Augur Worker that discovers and stores data anomalies",
packages=find_packages(exclude=('tests',)),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'click==7.1.2',
'scipy==1.4.1',
'sklearn==0.0',
'numpy==1.19.5',
],
entry_points={
'console_scripts': [
'insight_worker_start=workers.insight_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
| OSSHealth/ghdata | workers/insight_worker/setup.py | Python | mit | 1,388 | 0.003602 |
# -*- coding: utf-8 -*-
import json
from . import check_input_attribute, standard_error_message
from pyipasnhistory import IPASNHistory
from pymisp import MISPAttribute, MISPEvent, MISPObject
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst'], 'format': 'misp_standard'}
moduleinfo = {'version': '0.2', 'author': 'Raphaël Vinot',
'description': 'Query an IP ASN history service (https://github.com/CIRCL/IP-ASN-history.git)',
'module-type': ['expansion', 'hover']}
def parse_result(attribute, values):
    event = MISPEvent()
    initial_attribute = MISPAttribute()
    initial_attribute.from_dict(**attribute)
    event.add_attribute(**initial_attribute)
    mapping = {'asn': ('AS', 'asn'), 'prefix': ('ip-src', 'subnet-announced')}
    print(values)
    for last_seen, response in values['response'].items():
        asn = MISPObject('asn')
        asn.add_attribute('last-seen', **{'type': 'datetime', 'value': last_seen})
        for feature, attribute_fields in mapping.items():
            attribute_type, object_relation = attribute_fields
            asn.add_attribute(object_relation, **{'type': attribute_type, 'value': response[feature]})
        asn.add_reference(initial_attribute.uuid, 'related-to')
        event.add_object(**asn)
    event = json.loads(event.to_json())
    return {key: event[key] for key in ('Attribute', 'Object')}
def handler(q=False):
    if q is False:
        return False
    request = json.loads(q)
    if not request.get('attribute') or not check_input_attribute(request['attribute']):
        return {'error': f'{standard_error_message}, which should contain at least a type, a value and an uuid.'}
    if request['attribute']['type'] not in mispattributes['input']:
        return {'error': 'Unsupported attribute type.'}
    toquery = request['attribute']['value']
    ipasn = IPASNHistory()
    values = ipasn.query(toquery)
    if not values:
        misperrors['error'] = 'Unable to find the history of this IP'
        return misperrors
    return {'results': parse_result(request['attribute'], values)}
def introspection():
    return mispattributes
def version():
    return moduleinfo
| VirusTotal/misp-modules | misp_modules/modules/expansion/ipasn.py | Python | agpl-3.0 | 2,208 | 0.002266 |
"""
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
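# T_obs holds the F-statistic per time point; `clusters` is a list of index
# slices into the time axis and `cluster_p_values` gives one permutation
# p-value per cluster; both are used below to shade the significant spans.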
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/stats/plot_cluster_stats_evoked.py | Python | bsd-3-clause | 2,991 | 0 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    return dict(initial_formation="template5", checking_point=8, path_list=[
        [TestAction.create_vm, 'vm1', ],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.use_volume_snapshot, 'volume3-snapshot1'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup5'],
        [TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
        [TestAction.migrate_vm, 'vm1'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
    ])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1', 'vm1-backup5', 'volume1-backup5', 'volume2-backup5', 'volume3-backup5', 'vm1-image1']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot5', 'vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_backup2:['vm1-backup5', 'volume1-backup5', 'volume2-backup5', 'volume3-backup5']---vm1_volume1_volume2_volume3
vm_snap3:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9']---vm1volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']---vm1_volume1_volume2_volume3
'''
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/xsky_path9.py | Python | apache-2.0 | 2,040 | 0.015196 |
import scipy
import numpy
import matplotlib.pyplot as pyplot
import pyfits
import VLTTools
ciao = VLTTools.VLTConnection(simulate=False)
ciao.get_InteractionMatrices()
ciao.dumpCommandMatrix(nFiltModes=10)
print "This is where we will Compute the Modal Basis from the IMs"
| soylentdeen/CIAO-commissioning-tools | sandbox/dumpModalBasis.py | Python | mit | 275 | 0 |
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.test import TestCase
from videos.models import Video
from utils.multi_query_set import MultiQuerySet
class MultiQuerySetTest(TestCase):
    fixtures = ['test.json']
    def test_full(self):
        self.assertEqual(list(Video.objects.all()),
                         list(MultiQuerySet(Video.objects.all())),
                         "Full, single MQS didn't match full QS.")
        self.assertEqual(list(Video.objects.all()),
                         list(MultiQuerySet(Video.objects.none(),
                                            Video.objects.all(),
                                            Video.objects.none())),
                         "Full MQS with blanks didn't match full QS.")
        self.assertEqual(list(Video.objects.all()) + list(Video.objects.all()),
                         list(MultiQuerySet(Video.objects.none(),
                                            Video.objects.all(),
                                            Video.objects.none(),
                                            Video.objects.all())),
                         "Double MQS with blanks didn't match double full QS.")
    def test_slice(self):
        qs = Video.objects.all()
        mqs = MultiQuerySet(Video.objects.all())
        self.assertEqual(list(qs[0:1]),
                         list(mqs[0:1]),
                         "MQS[:1] failed.")
        self.assertEqual(list(qs[0:2]),
                         list(mqs[0:2]),
                         "MQS[:2] failed.")
        self.assertEqual(list(qs[0:3]),
                         list(mqs[0:3]),
                         "MQS[:3] (out-of-bounds endpoint) failed.")
        self.assertEqual(list(qs[1:3]),
                         list(mqs[1:3]),
                         "MQS[1:3] failed.")
        self.assertEqual(list(qs[2:3]),
                         list(mqs[2:3]),
                         "MQS[2:3] failed.")
        self.assertEqual(list(qs[1:1]),
                         list(mqs[1:1]),
                         "MQS[1:1] (empty slice) failed.")
    def test_slice_multiple(self):
        qs = list(Video.objects.all())
        qs = qs + qs + qs
        mqs = MultiQuerySet(Video.objects.all(),
                            Video.objects.all(),
                            Video.objects.all())
        self.assertEqual(qs[0:3],
                         list(mqs[0:3]),
                         "MQS[:3] failed.")
        self.assertEqual(qs[0:6],
                         list(mqs[0:6]),
                         "MQS[:6] (entire range) failed.")
        self.assertEqual(qs[0:7],
                         list(mqs[0:7]),
                         "MQS[:7] (out-of-bounds endpoint) failed.")
        self.assertEqual(qs[1:3],
                         list(mqs[1:3]),
                         "MQS[1:3] failed.")
        self.assertEqual(qs[1:6],
                         list(mqs[1:6]),
                         "MQS[1:6] (entire range) failed.")
        self.assertEqual(qs[1:7],
                         list(mqs[1:7]),
                         "MQS[1:7] (out-of-bounds endpoint) failed.")
        self.assertEqual(qs[3:3],
                         list(mqs[3:3]),
                         "MQS[3:3] failed.")
        self.assertEqual(qs[3:6],
                         list(mqs[3:6]),
                         "MQS[3:6] (entire range) failed.")
        self.assertEqual(qs[3:7],
                         list(mqs[3:7]),
                         "MQS[3:7] (out-of-bounds endpoint) failed.")
| ujdhesa/unisubs | utils/tests/multiqueryset.py | Python | agpl-3.0 | 4,283 | 0.000467 |
# Copyright 2022 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python 3
r"""Example of basic DEFINE_config_dataclass usage.
To run this example:
python define_config_dataclass_basic.py -- --my_config.field1=8 \
--my_config.nested.field=2.1 --my_config.tuple='(1, 2, (1, 2))'
"""
import dataclasses
from typing import Any, Mapping, Sequence
from absl import app
from ml_collections import config_flags
@dataclasses.dataclass
class MyConfig:
field1: int
field2: str
nested: Mapping[str, Any]
tuple: Sequence[int]
config = MyConfig(
field1=1,
field2='tom',
nested={'field': 2.23},
tuple=(1, 2, 3),
)
_CONFIG = config_flags.DEFINE_config_dataclass('my_config', config)
def main(_):
print(_CONFIG.value)
if __name__ == '__main__':
app.run(main)
| google/ml_collections | ml_collections/config_flags/examples/define_config_dataclass_basic.py | Python | apache-2.0 | 1,322 | 0.004539 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'MX DDT',
'version': '0.1',
'category': 'Accounting',
'description': '''
DDT module with mx stock move
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'account',
'stock',
'sale_stock',
'stock_account',
],
'init_xml': [],
'demo': [],
'data': [
'view/ddt_view.xml',
'wizard/ddt_create_direct_invoice_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| Micronaet/micronaet-mx8 | mx_pick_in/__openerp__.py | Python | agpl-3.0 | 1,557 | 0.001285 |
import re
import quantities as pq
from numbers import NumberService
class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, input):
def handleExponents(input):
m = re.search(r'\bsquare (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquare (\w+)', r'\g<1>^2', input)
m = re.search(r'\bsquared (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquared (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) squared', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) squared', r'\g<1>^2', input)
m = re.search(r'\bsq (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsq (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) cubed', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) cubed', r'\g<1>^3', input)
m = re.search(r'\bcubic (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bcubic (\w+)', r'\g<1>^3', input)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', input)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
input = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), input)
return input
input = re.sub(r'\sper\s', r' / ', input)
input = handleExponents(input)
return input
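    # e.g. _preprocess('ten square meters per second') yields
    # 'ten meters^2 / second', assuming 'meters' and 'second' are units
    # recognised by the quantities registry.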
def parseUnits(self, input):
"""Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
input (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(input)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def extractUnits(self, input):
"""Collects all the valid units from an input string. Works by
appending consecutive words from the string and cross-referncing
them with a set of valid units.
Args:
input (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
input = self._preprocess(input)
units = []
description = ""
for w in input.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, input):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
input (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
input = self._preprocess(input)
n = NumberService().longestNumber(input)
units = self.extractUnits(input)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
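# Illustrative usage, assuming the sibling NumberService module and the
# `quantities` package are importable:
#
#     service = ConversionService()
#     service.convert("two hundred grams to pounds")     # -> quantity in pounds
#     service.parseUnits("two hundred grams to pounds")  # -> human-readable string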
| jobdash/semantic | semantic/units.py | Python | mit | 4,744 | 0.000211 |
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (c) 2012 Omoto Kenji
# Copyright (c) 2011 Sam Stephenson
# Copyright (c) 2011 Josh Peek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import json
import re
import os
from subprocess import Popen, PIPE, STDOUT
import tempfile
from .exceptions import RuntimeError, ProgramError, RuntimeUnavailable
from .utils import json2_source, which
def encode_unicode_codepoints(str):
r"""
>>> encode_unicode_codepoints("a") == 'a'
True
>>> ascii = ''.join(chr(i) for i in range(0x80))
>>> encode_unicode_codepoints(ascii) == ascii
True
>>> encode_unicode_codepoints('\u4e16\u754c') == '\\u4e16\\u754c'
True
"""
codepoint_format = '\\u{0:04x}'.format
def codepoint(m):
return codepoint_format(ord(m.group(0)))
return re.sub('[^\x00-\x7f]', codepoint, str)
class Runtime(object):
def __init__(self, name, command, runner_source, encoding='utf8'):
self._name = name
if isinstance(command, str):
command = [command]
self._command = command
self._runner_source = runner_source
self._encoding = encoding
def __str__(self):
return "{class_name}({runtime_name})".format(
class_name=type(self).__name__,
runtime_name=self._name,
)
@property
def name(self):
return self._name
def exec_(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).exec_(source)
def eval(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).eval(source)
def compile(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self, source)
def is_available(self):
return self._binary() is not None
def runner_source(self):
return self._runner_source
def _binary(self):
"""protected"""
if not hasattr(self, "_binary_cache"):
self._binary_cache = which(self._command)
return self._binary_cache
def _execfile(self, filename):
"""protected"""
cmd = self._binary() + [filename]
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
stdoutdata, stderrdata = p.communicate()
ret = p.wait()
finally:
del p
if ret == 0:
return stdoutdata
else:
raise RuntimeError(stdoutdata)
class Context(object):
def __init__(self, runtime, source=''):
self._runtime = runtime
self._source = source
def eval(self, source):
if not source.strip():
data = "''"
else:
data = "'('+" + json.dumps(source, ensure_ascii=True) + "+')'"
code = 'return eval({data})'.format(data=data)
return self.exec_(code)
def exec_(self, source):
if self._source:
source = self._source + '\n' + source
(fd, filename) = tempfile.mkstemp(prefix='babeljs', suffix='.js')
os.close(fd)
try:
with io.open(filename, "w+", encoding=self._runtime._encoding) as fp:
fp.write(self._compile(source))
output = self._runtime._execfile(filename)
finally:
os.remove(filename)
output = output.decode(self._runtime._encoding)
output = output.replace("\r\n", "\n").replace("\r", "\n")
output = self._extract_result(output.split("\n")[-2])
return output
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
def _compile(self, source):
"""protected"""
runner_source = self._runtime.runner_source()
replacements = {
'#{source}': lambda: source,
'#{encoded_source}': lambda: json.dumps(
"(function(){ " +
encode_unicode_codepoints(source) +
" })()"
),
'#{json2_source}': json2_source,
}
pattern = "|".join(re.escape(k) for k in replacements)
runner_source = re.sub(pattern, lambda m: replacements[m.group(0)](), runner_source)
return runner_source
def _extract_result(self, output_last_line):
"""protected"""
if not output_last_line:
status = value = None
else:
ret = json.loads(output_last_line)
if len(ret) == 1:
ret = [ret[0], None]
status, value = ret
if status == "ok":
return value
elif value and value.startswith('SyntaxError:'):
raise RuntimeError(value)
else:
raise ProgramError(value)
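# A concrete runtime would be defined elsewhere in the package along these
# lines (NODE_RUNNER_SOURCE is a hypothetical runner template containing the
# '#{encoded_source}' placeholder that Context._compile substitutes):
#
#     node = Runtime("Node.js", ["nodejs", "node"], NODE_RUNNER_SOURCE)
#     if node.is_available():
#         ctx = node.compile("function add(a, b) { return a + b; }")
#         result = ctx.call("add", 1, 2)   # -> 3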
class PyV8Runtime(object):
def __init__(self):
try:
import PyV8
except ImportError:
self._is_available = False
else:
self._is_available = True
@property
def name(self):
return "PyV8"
def exec_(self, source):
return self.Context().exec_(source)
def eval(self, source):
return self.Context().eval(source)
def compile(self, source):
return self.Context(source)
def is_available(self):
return self._is_available
class Context:
def __init__(self, source=""):
self._source = source
def exec_(self, source):
source = '''\
(function() {{
{0};
{1};
}})()'''.format(
encode_unicode_codepoints(self._source),
encode_unicode_codepoints(source)
)
source = str(source)
import PyV8
import contextlib
#backward compatibility
with contextlib.nested(PyV8.JSContext(), PyV8.JSEngine()) as (ctxt, engine):
js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
try:
script = engine.compile(source)
except js_errors as e:
raise RuntimeError(e)
try:
value = script.run()
except js_errors as e:
raise ProgramError(e)
return self.convert(value)
def eval(self, source):
return self.exec_('return ' + encode_unicode_codepoints(source))
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
@classmethod
def convert(cls, obj):
from PyV8 import _PyV8
if isinstance(obj, bytes):
return obj.decode('utf8')
if isinstance(obj, _PyV8.JSArray):
return [cls.convert(v) for v in obj]
elif isinstance(obj, _PyV8.JSFunction):
return None
elif isinstance(obj, _PyV8.JSObject):
ret = {}
for k in obj.keys():
v = cls.convert(obj[k])
if v is not None:
ret[cls.convert(k)] = v
return ret
else:
return obj
| rsalmaso/django-babeljs | babeljs/execjs/runtime.py | Python | mit | 8,806 | 0.000908 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from datetime import datetime
from unittest import skip
import pytest
from bok_choy.page_object import XSS_INJECTION
from pytz import timezone, utc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage, FULL_NAME
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, AcceptanceTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
def visit_account_settings_page(self, gdpr=False):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
# TODO: LEARNER-4422 - delete when we clean up flags
if gdpr:
self.account_settings_page.browser.get(self.browser.current_url + "?course_experience.gdpr=1")
self.account_settings_page.wait_for_page()
def log_in_as_unique_user(self, email=None, full_name=None, password=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(
self.browser,
username=username,
email=email,
full_name=full_name,
password=password
).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def settings_changed_event_filter(self, event):
"""Filter out any events that are not "settings changed" events."""
return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME
def expected_settings_changed_event(self, setting, old, new, table=None):
"""A dictionary representing the expected fields in a "settings changed" event."""
return {
'username': self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': self.user_id,
'setting': setting,
'old': old,
'new': new,
'truncated': [],
'table': table or 'auth_userprofile'
}
}
def settings_change_initiated_event_filter(self, event):
"""Filter out any events that are not "settings change initiated" events."""
return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME
def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None):
"""A dictionary representing the expected fields in a "settings change initiated" event."""
return {
'username': username or self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': user_id or self.user_id,
'setting': setting,
'old': old,
'new': new,
}
}
def get_settings_page_url(self):
"""The absolute URL of the account settings page given the test context."""
return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER)
def assert_no_setting_changed_event(self):
"""Assert no setting changed event has been emitted thus far."""
self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME})
class DashboardMenuTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that the dashboard menu works correctly with the account settings page.
"""
shard = 8
def test_link_on_dashboard_works(self):
"""
Scenario: Verify that the "Account" link works from the dashboard.
Given that I am a registered user
And I visit my dashboard
And I click on "Account" in the top drop down
Then I should see my account settings page
"""
self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Account', dashboard_page.username_dropdown_link_text)
dashboard_page.click_account_settings_link()
class AccountSettingsPageTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that verify behaviour of the Account Settings page.
"""
SUCCESS_MESSAGE = 'Your changes have been saved.'
shard = 8
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsPageTest, self).setUp()
self.full_name = FULL_NAME
self.social_link = ''
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name)
self.visit_account_settings_page()
def test_page_view_event(self):
"""
Scenario: An event should be recorded when the "Account Settings"
page is viewed.
Given that I am a registered user
And I visit my account settings page
Then a page view analytics event should be recorded
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'event': {
'user_id': self.user_id,
'page': 'account',
'visibility': None
}
}
],
actual_events
)
def test_all_sections_and_fields_are_present(self):
"""
Scenario: Verify that all sections and fields are present on the page.
"""
expected_sections_structure = [
{
'title': 'Basic Account Information',
'fields': [
'Username',
'Full Name',
'Email Address (Sign In)',
'Password',
'Language',
'Country or Region of Residence',
'Time Zone',
]
},
{
'title': 'Additional Information',
'fields': [
'Education Completed',
'Gender',
'Year of Birth',
'Preferred Language',
]
},
{
'title': 'Social Media Links',
'fields': [
'Twitter Link',
'Facebook Link',
'LinkedIn Link',
]
},
{
'title': 'Delete My Account',
'fields': []
},
]
self.assertEqual(self.account_settings_page.sections_structure(), expected_sections_structure)
def _test_readonly_field(self, field_id, title, value):
"""
Test behavior of a readonly field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value)
def _test_text_field(
self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE,
assert_after_reload=True
):
"""
Test behaviour of a text field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value)
self.assertEqual(
self.account_settings_page.value_for_text_field(field_id, new_invalid_value), new_invalid_value
)
self.account_settings_page.wait_for_indicator(field_id, 'validation-error')
self.browser.refresh()
self.assertNotEqual(self.account_settings_page.value_for_text_field(field_id), new_invalid_value)
for new_value in new_valid_values:
self.assertEqual(self.account_settings_page.value_for_text_field(field_id, new_value), new_value)
self.account_settings_page.wait_for_message(field_id, success_message)
if assert_after_reload:
self.browser.refresh()
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), new_value)
def _test_dropdown_field(
self,
field_id,
title,
initial_value,
new_values,
success_message=SUCCESS_MESSAGE, # pylint: disable=unused-argument
reloads_on_save=False
):
"""
Test behaviour of a dropdown field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, focus_out=True), initial_value)
for new_value in new_values:
self.assertEqual(
self.account_settings_page.value_for_dropdown_field(field_id, new_value, focus_out=True),
new_value
)
# An XHR request is made when changing the field
self.account_settings_page.wait_for_ajax()
if reloads_on_save:
self.account_settings_page.wait_for_loading_indicator()
else:
self.browser.refresh()
self.account_settings_page.wait_for_page()
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, focus_out=True), new_value)
def _test_link_field(self, field_id, title, link_title, field_type, success_message):
"""
Test behaviour a link field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
self.account_settings_page.click_on_link_in_link_field(field_id, field_type=field_type)
self.account_settings_page.wait_for_message(field_id, success_message)
def test_username_field(self):
"""
Test behaviour of "Username" field.
"""
self._test_readonly_field('username', 'Username', self.username)
def test_full_name_field(self):
"""
Test behaviour of "Full Name" field.
"""
self._test_text_field(
u'name',
u'Full Name',
self.full_name,
u'@',
[u'<h1>another name<h1>', u'<script>'],
'Full Name cannot contain the following characters: < >',
False
)
def test_email_field(self):
"""
Test behaviour of "Email" field.
"""
email = u"test@example.com"
username, user_id = self.log_in_as_unique_user(email=email)
self.visit_account_settings_page()
self._test_text_field(
u'email',
u'Email Address (Sign In)',
email,
u'test@example.com' + XSS_INJECTION,
[u'me@here.com', u'you@there.com'],
success_message='Click the link in the message to update your email address.',
assert_after_reload=False
)
actual_events = self.wait_for_events(
event_filter=self.settings_change_initiated_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_change_initiated_event(
'email', email, 'me@here.com', username=username, user_id=user_id),
# NOTE the first email change was never confirmed, so old has not changed.
self.expected_settings_change_initiated_event(
'email', email, 'you@there.com', username=username, user_id=user_id),
],
actual_events
)
# Email is not saved until user confirms, so no events should have been
# emitted.
self.assert_no_setting_changed_event()
def test_password_field(self):
"""
Test behaviour of "Password" field.
"""
self._test_link_field(
u'password',
u'Password',
u'Reset Your Password',
u'button',
success_message='Click the link in the message to reset your password.',
)
event_filter = self.expected_settings_change_initiated_event('password', None, None)
self.wait_for_events(event_filter=event_filter, number_of_matches=1)
# Like email, since the user has not confirmed their password change,
# the field has not yet changed, so no events will have been emitted.
self.assert_no_setting_changed_event()
@skip(
        'On bokchoy test servers, language changes take a few reloads to fully take effect, '
        'which means we can no longer reliably match the strings in the HTML in other tests.'
)
def test_language_field(self):
"""
Test behaviour of "Language" field.
"""
self._test_dropdown_field(
u'pref-lang',
u'Language',
u'English',
[u'Dummy Language (Esperanto)', u'English'],
reloads_on_save=True,
)
def test_education_completed_field(self):
"""
Test behaviour of "Education Completed" field.
"""
self._test_dropdown_field(
u'level_of_education',
u'Education Completed',
u'',
[u'Bachelor\'s degree', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('level_of_education', None, 'b'),
self.expected_settings_changed_event('level_of_education', 'b', None),
],
actual_events
)
def test_gender_field(self):
"""
Test behaviour of "Gender" field.
"""
self._test_dropdown_field(
u'gender',
u'Gender',
u'',
[u'Female', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('gender', None, 'f'),
self.expected_settings_changed_event('gender', 'f', None),
],
actual_events
)
def test_year_of_birth_field(self):
"""
Test behaviour of "Year of Birth" field.
"""
# Note that when we clear the year_of_birth here we're firing an event.
self.assertEqual(self.account_settings_page.value_for_dropdown_field('year_of_birth', '', focus_out=True), '')
expected_events = [
self.expected_settings_changed_event('year_of_birth', None, 1980),
self.expected_settings_changed_event('year_of_birth', 1980, None),
]
with self.assert_events_match_during(self.settings_changed_event_filter, expected_events):
self._test_dropdown_field(
u'year_of_birth',
u'Year of Birth',
u'',
[u'1980', u''],
)
def test_country_field(self):
"""
Test behaviour of "Country or Region" field.
"""
self._test_dropdown_field(
u'country',
u'Country or Region of Residence',
u'',
[u'Pakistan', u'Palau'],
)
def test_time_zone_field(self):
"""
Test behaviour of "Time Zone" field
"""
kiev_abbr, kiev_offset = self._get_time_zone_info('Europe/Kiev')
pacific_abbr, pacific_offset = self._get_time_zone_info('US/Pacific')
self._test_dropdown_field(
u'time_zone',
u'Time Zone',
u'Default (Local Time Zone)',
[
u'Europe/Kiev ({abbr}, UTC{offset})'.format(abbr=kiev_abbr, offset=kiev_offset),
u'US/Pacific ({abbr}, UTC{offset})'.format(abbr=pacific_abbr, offset=pacific_offset),
],
)
def _get_time_zone_info(self, time_zone_str):
"""
Helper that returns current time zone abbreviation and UTC offset
and accounts for daylight savings time
"""
time_zone = datetime.now(utc).astimezone(timezone(time_zone_str))
abbr = time_zone.strftime('%Z')
offset = time_zone.strftime('%z')
return abbr, offset
def test_social_links_field(self):
"""
Test behaviour of one of the social media links field.
"""
self._test_text_field(
u'social_links',
u'Twitter Link',
self.social_link,
u'www.google.com/invalidlink',
[u'https://www.twitter.com/edX', self.social_link],
)
def test_linked_accounts(self):
"""
Test that fields for third party auth providers exist.
Currently there is no way to test the whole authentication process
because that would require accounts with the providers.
"""
providers = (
['auth-oa2-facebook', 'Facebook', 'Link Your Account'],
['auth-oa2-google-oauth2', 'Google', 'Link Your Account'],
)
# switch to "Linked Accounts" tab
self.account_settings_page.switch_account_settings_tabs('accounts-tab')
for field_id, title, link_title in providers:
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
def test_order_history(self):
"""
Test that we can see orders on Order History tab.
"""
# switch to "Order History" tab
self.account_settings_page.switch_account_settings_tabs('orders-tab')
# verify that we are on correct tab
self.assertTrue(self.account_settings_page.is_order_history_tab_visible)
expected_order_data_first_row = {
'number': 'Order Number:\nEdx-123',
'date': 'Date Placed:\nApr 21, 2016',
'price': 'Cost:\n$100.00',
}
expected_order_data_second_row = {
'number': 'Product Name:\nTest Course',
'date': 'Date Placed:\nApr 21, 2016',
'price': 'Cost:\n$100.00',
}
for field_name, value in expected_order_data_first_row.iteritems():
self.assertEqual(
self.account_settings_page.get_value_of_order_history_row_item('order-Edx-123', field_name)[0], value
)
for field_name, value in expected_order_data_second_row.iteritems():
self.assertEqual(
self.account_settings_page.get_value_of_order_history_row_item('order-Edx-123', field_name)[1], value
)
self.assertTrue(self.account_settings_page.order_button_is_visible('order-Edx-123'))
class AccountSettingsDeleteAccountTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests for the account deletion workflow.
"""
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsDeleteAccountTest, self).setUp()
self.full_name = FULL_NAME
self.social_link = ''
self.password = 'password'
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name, password=self.password)
self.visit_account_settings_page(gdpr=True)
def test_button_visible(self):
self.assertTrue(
self.account_settings_page.is_delete_button_visible
)
def test_delete_modal(self):
self.account_settings_page.click_delete_button()
self.assertTrue(
self.account_settings_page.is_delete_modal_visible
)
self.assertFalse(
self.account_settings_page.delete_confirm_button_enabled()
)
self.account_settings_page.fill_in_password_field(self.password)
self.assertTrue(
self.account_settings_page.delete_confirm_button_enabled()
)
@pytest.mark.a11y
class AccountSettingsA11yTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Class to test account settings accessibility.
"""
def test_account_settings_a11y(self):
"""
Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
]
})
self.account_settings_page.a11y_audit.check_for_accessibility_errors()
| philanthropy-u/edx-platform | common/test/acceptance/tests/lms/test_account_settings.py | Python | agpl-3.0 | 21,442 | 0.002798 |
#
# SNMPv1 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1157.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import univ, namedtype, namedval, tag
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString): pass
class RequestID(univ.Integer): pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer): pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
namedtype.NamedType('generic-trap', univ.Integer().clone(
namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
('authenticationFailure', 4), ('egpNeighborLoss', 5),
('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
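# Illustrative usage sketch (not part of the original module): decoding a
# BER-encoded SNMPv1 packet into the Message structure defined above with
# pyasn1's BER decoder. `ber_bytes` is a placeholder for raw datagram bytes
# read from the network; it is not defined here.
#
# from pyasn1.codec.ber import decoder
#
# received_msg, remainder = decoder.decode(ber_bytes, asn1Spec=Message())
# print(received_msg.getComponentByName('community'))
# pdu = received_msg.getComponentByName('data').getComponent()
# for var_bind in pdu.getComponentByName('variable-bindings'):
#     print(var_bind.getComponentByName('name'))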
| itielshwartz/BackendApi | lib/pyasn1_modules/rfc1157.py | Python | apache-2.0 | 3,309 | 0.001511 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from contextlib import closing
from pants.base.project_tree import Dir, File
from pants.engine.nodes import Noop, Return, Runnable, Throw, Waiting
from pants.engine.storage import Cache, InvalidKeyError, Lmdb, Storage
def _runnable(an_arg):
return an_arg
class PickleableException(Exception):
def __eq__(self, other):
return type(self) == type(other)
class StorageTest(unittest.TestCase):
TEST_KEY = b'hello'
TEST_VALUE = b'world'
TEST_PATH = File('/foo')
TEST_PATH2 = Dir('/bar')
class SomeException(Exception): pass
def setUp(self):
self.storage = Storage.create()
self.result = 'something'
self.request = Runnable(func=_runnable, args=('this is an arg',))
def test_lmdb_key_value_store(self):
lmdb = Lmdb.create()[0]
with closing(lmdb) as kvs:
# Initially key does not exist.
self.assertFalse(kvs.get(self.TEST_KEY))
# Now write a key value pair and read back.
written = kvs.put(self.TEST_KEY, self.TEST_VALUE)
self.assertTrue(written)
self.assertEquals(self.TEST_VALUE, kvs.get(self.TEST_KEY).getvalue())
# Write the same key again will not overwrite.
self.assertFalse(kvs.put(self.TEST_KEY, self.TEST_VALUE))
def test_storage(self):
with closing(self.storage) as storage:
key = storage.put(self.TEST_PATH)
self.assertEquals(self.TEST_PATH, storage.get(key))
with self.assertRaises(InvalidKeyError):
self.assertFalse(storage.get(self.TEST_KEY))
def test_storage_key_mappings(self):
with closing(self.storage) as storage:
key1 = storage.put(self.TEST_PATH)
key2 = storage.put(self.TEST_PATH2)
storage.add_mapping(key1, key2)
self.assertEquals(key2, storage.get_mapping(key1))
# key2 isn't mapped to any other key.
self.assertIsNone(storage.get_mapping(key2))
def test_state_roundtrips(self):
states = [
Return('a'),
Throw(PickleableException()),
Waiting(['a']),
Runnable(_runnable, ('an arg',)),
Noop('nada {}', ('op',))
]
with closing(self.storage) as storage:
for state in states:
key = storage.put_state(state)
actual = storage.get_state(key)
self.assertEquals(state, actual)
self.assertEquals(key, storage.put_state(actual))
class CacheTest(unittest.TestCase):
def setUp(self):
"""Setup cache as well as request and result."""
self.storage = Storage.create()
self.cache = Cache.create(storage=self.storage)
self.request = Runnable(func=_runnable, args=('this is an arg',))
self.result = 'something'
def test_cache(self):
"""Verify get and put."""
with closing(self.cache):
self.assertIsNone(self.cache.get(self.request)[1])
self._assert_hits_misses(hits=0, misses=1)
request_key = self.storage.put_state(self.request)
self.cache.put(request_key, self.result)
self.assertEquals(self.result, self.cache.get(self.request)[1])
self._assert_hits_misses(hits=1, misses=1)
def test_failure_to_update_mapping(self):
"""Verify we can access cached result only if we save both result and the key mapping."""
with closing(self.cache):
# This places result to the main storage without saving to key mapping. This
# simulates error might happen for saving key mapping after successfully saving the result.
self.cache._storage.put(self.result)
self.assertIsNone(self.cache.get(self.request)[1])
self._assert_hits_misses(hits=0, misses=1)
def _assert_hits_misses(self, hits, misses):
self.assertEquals(hits, self.cache.get_stats().hits)
self.assertEquals(misses, self.cache.get_stats().misses)
self.assertEquals(hits+misses, self.cache.get_stats().total)
| kwlzn/pants | tests/python/pants_test/engine/test_storage.py | Python | apache-2.0 | 4,071 | 0.012282 |
import pymongo
class BaseMigration(object):
def __init__(self,
host='127.0.0.1',
port='27017',
database=None,
user=None,
password=None,
url=None):
if url and database and user is not None: #provide auth_database in url (mongodb://mongohostname:27017/auth_database)
client = pymongo.MongoClient(url, username=user, password=password)
self.db = client.get_database(database)
elif url:
client = pymongo.MongoClient(url)
self.db = client.get_default_database()
elif database:
client = pymongo.MongoClient(host=host, port=port)
self.db = client[database]
else:
raise Exception('no database, url or auth_database in url provided')
def upgrade(self):
raise NotImplementedError
def downgrade(self):
raise NotImplementedError
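# Illustrative sketch (not part of the original module): a hypothetical
# migration built on BaseMigration. The collection and field names are
# invented for this example; real migrations subclass BaseMigration and
# override upgrade()/downgrade() with their own operations.
class ExampleAddStatusFieldMigration(BaseMigration):
    def upgrade(self):
        # update_many applies the change to every document in the collection
        self.db.example_users.update_many({}, {'$set': {'status': 'active'}})
    def downgrade(self):
        self.db.example_users.update_many({}, {'$unset': {'status': ''}})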
| DoubleCiti/mongodb-migrations | mongodb_migrations/base.py | Python | gpl-3.0 | 967 | 0.004137 |
import unittest
from graph_search import Graph
from graph_topological_ordering import find_topological_order
class TestGraphTopologicalOrdering(unittest.TestCase):
def check_labels(self, graph, smaller, larger):
self.assertTrue(graph.get_node(smaller).label < graph.get_node(larger).label)
def test_1(self):
graph = Graph([[0,1],[0,2],[1,3],[2,3]], True)
find_topological_order(graph)
self.check_labels(graph, 0, 1)
self.check_labels(graph, 0, 2)
self.check_labels(graph, 1, 3)
self.check_labels(graph, 2, 3)
def test_2(self):
graph = Graph([[0,1],[1,2],[1,3],[2,4],[3,5]], True)
find_topological_order(graph)
self.check_labels(graph, 0, 1)
self.check_labels(graph, 1, 2)
self.check_labels(graph, 1, 3)
self.check_labels(graph, 2, 4)
self.check_labels(graph, 3, 5)
if __name__ == '__main__':
unittest.main()
| codebox/algorithms | test_graph_topological_ordering.py | Python | mit | 943 | 0.020148 |
from django.conf import settings
from django.test import TestCase, override_settings
from freezegun import freeze_time
from adyen.gateway import MissingFieldException
from adyen.scaffold import Scaffold
TEST_RETURN_URL = 'https://www.example.com/checkout/return/adyen/'
EXPECTED_FIELDS_LIST = [
{'type': 'hidden', 'name': 'currencyCode', 'value': 'EUR'},
{'type': 'hidden', 'name': 'merchantAccount', 'value': settings.ADYEN_IDENTIFIER},
{'type': 'hidden', 'name': 'merchantReference', 'value': '00000000123'},
{'type': 'hidden', 'name': 'merchantReturnData', 'value': '123'},
{'type': 'hidden', 'name': 'merchantSig', 'value': 'kKvzRvx7wiPLrl8t8+owcmMuJZM='},
{'type': 'hidden', 'name': 'paymentAmount', 'value': '123'},
{'type': 'hidden', 'name': 'resURL', 'value': TEST_RETURN_URL},
{'type': 'hidden', 'name': 'sessionValidity', 'value': '2014-07-31T17:20:00Z'},
{'type': 'hidden', 'name': 'shipBeforeDate', 'value': '2014-08-30'},
{'type': 'hidden', 'name': 'shopperEmail', 'value': 'test@example.com'},
{'type': 'hidden', 'name': 'shopperLocale', 'value': 'fr'},
{'type': 'hidden', 'name': 'shopperReference', 'value': '789'},
{'type': 'hidden', 'name': 'skinCode', 'value': 'cqQJKZpg'},
{'type': 'hidden', 'name': 'countryCode', 'value': 'fr'},
{'type': 'hidden', 'name': 'brandCode', 'value': 'ideal'},
{'type': 'hidden', 'name': 'issuerId', 'value': '1211'},
]
ORDER_DATA = {
'amount': 123,
'basket_id': 456,
'client_email': 'test@example.com',
'client_id': 789,
'currency_code': 'EUR',
'country_code': 'fr',
'description': 'Order #123',
'order_id': 'ORD-123',
'order_number': '00000000123',
'return_url': TEST_RETURN_URL,
'shopper_locale': 'fr',
'brand_code': 'ideal',
'issuer_id': '1211',
}
class TestAdyenPaymentRequest(TestCase):
@override_settings(ADYEN_ACTION_URL='foo')
def test_form_action(self):
"""
Test that the form action is properly fetched from the settings.
"""
assert 'foo' == Scaffold().get_form_action(request=None)
def test_form_fields_ok(self):
"""
Test that the payment form fields list is properly built.
"""
with freeze_time('2014-07-31 17:00:00'): # Any datetime will do.
fields_list = Scaffold().get_form_fields(request=None, order_data=ORDER_DATA)
# Order doesn't matter, so normally we'd use a set. But Python doesn't do
# sets of dictionaries, so we compare individually.
assert len(fields_list) == len(EXPECTED_FIELDS_LIST)
for field in fields_list:
assert field in EXPECTED_FIELDS_LIST
def test_form_fields_with_missing_mandatory_field(self):
"""
Test that the proper exception is raised when trying
to build a fields list with a missing mandatory field.
"""
new_order_data = ORDER_DATA.copy()
del new_order_data['amount']
with self.assertRaises(MissingFieldException):
Scaffold().get_form_fields(request=None, order_data=new_order_data)
| oscaro/django-oscar-adyen | tests/test_requests.py | Python | bsd-3-clause | 3,128 | 0.001598 |
# -*- coding: utf-8 -*-
# django-po2xls
# tests/management/commands/test_po-to-xls.py
import os
import pathlib
from typing import List
from importlib import import_module
from django.test import TestCase
# po-to-xls management command imported on the fly
# because we can't import something from the module that contains "-"
Command = import_module("po2xls.management.commands.po-to-xls").Command # type: ignore
__all__: List[str] = ["CommandTest"]
class CommandTest(TestCase):
"""po-to-xls management command tests."""
@classmethod
def tearDownClass(cls) -> None:
"""Tear down."""
os.remove("po2xls/locale/uk/LC_MESSAGES/django.xls")
os.remove("po2xls/locale/en/LC_MESSAGES/django.xls")
super().tearDownClass()
def test_convert(self) -> None:
"""convert method must write converted data to .xls files for chosen locale.""" # noqa: D403,E501
Command().convert(locale="uk")
self.assertTrue(
expr=pathlib.Path("po2xls/locale/uk/LC_MESSAGES/django.xls").exists()
)
def test_convert__all(self) -> None:
"""convert method must write converted data to .xls files for all locales.""" # noqa: D403,E501
Command().handle()
self.assertTrue(
expr=pathlib.Path("po2xls/locale/en/LC_MESSAGES/django.xls").exists()
)
self.assertTrue(
expr=pathlib.Path("po2xls/locale/uk/LC_MESSAGES/django.xls").exists()
)
| vint21h/django-po2xls | tests/management/commands/test_po-to-xls.py | Python | gpl-3.0 | 1,479 | 0.002705 |
# EasyShells Module - API for easier Shell Model Construction in Salome
# MidSurface.py: Mid surface extraction for EasyShells module
#
# Copyright (C) 2013 Stefan Reiterer - maldun.finsterschreck@gmail.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import salome
import geompy
from numpy import array, arange
from MyGeom.Types import *
def create_parallel_midpoints(points_lower, points_upper):
"""
    Helper function to create the midpoints of two given parallel surfaces.
"""
length_u = len(points_lower[0])
length_v = len(points_lower)
return [[(points_lower[i][j] + points_upper[i][j])*0.5 \
for j in range(length_u)] \
for i in range(length_v)]
def parallel_midsurface(lower_face, upper_face, lower_deg = 2, upper_deg = 5):
"""
    Determines the midsurface of two parallel
    surfaces. Here parallel means that they
share the same normal direction. It is assumed
that both normals point outwards.
"""
points_u = arange(0,1+1./upper_deg,1./upper_deg)
points_v = points_u
    lower_points = create_local_coordinates(lower_face,points_u,points_v)
    upper_points = create_local_coordinates(upper_face,points_u,points_v)
    midpoints = create_parallel_midpoints(lower_points,upper_points)
    # Construction of the surface from these midpoints is not implemented yet,
    # so return the grid of midpoints for now.
    return midpoints
def face_normal_translation(face,distance,change_orientation = False):
"""
    Helper function to translate a face along its normal by the given distance.
"""
if change_orientation:
face.changeOrientation()
normal = face.getNormal()
result = geompy.MakeTranslationVectorDistance(face.getGeomObject(),normal.getGeomObject(),
distance)
return MyFace(result)
| maldun/EasyShells | MidSurface.py | Python | lgpl-2.1 | 2,382 | 0.011335 |
__kupfer_name__ = _("Vim")
__kupfer_sources__ = ("RecentsSource", "ActiveVim", )
__kupfer_actions__ = ("InsertInVim", )
__description__ = _("Recently used documents in Vim")
__version__ = "2011-04"
__author__ = "Plugin: Ulrik Sverdrup, VimCom: Ali Afshar"
def initialize_plugin(name):
global RecentsSource
global ActiveVim
global InsertInVim
from kupfer.plugin.vim.plugin import RecentsSource, ActiveVim, InsertInVim
| Theragon/kupfer | kupfer/plugin/vim/__init__.py | Python | gpl-3.0 | 423 | 0.009456 |
#!/usr/bin/env python
telescope = "ATCA"
latitude_deg = -30.312906
diameter_m = 22.0
import os
import sys
from util_misc import ascii_dat_read
#-----------------------------------------------------------------------------#
def main():
# Read the station lookup table
col, dummy = ascii_dat_read("ATCA_stations.txt", delim=" ",
doFloatCols=[2, 3])
statDict = {}
for station, N, W in zip(col[1], col[2], col[3]):
statDict[station] = (-W+1622.449, N)
# Read the array configuration file
col, dummy = ascii_dat_read("ATCA_configs.txt", delim=" ",
doFloatCols=[2, 3, 4, 5, 6, 7])
for confName, A1, A2, A3, A4, A5, A6 in zip(col[1], col[2], col[3], col[4],
col[5], col[6], col[7]):
if A1=='':
continue
outFileName = "ATCA_%s.config" % confName
FH = open(outFileName, "w")
FH.write("#" + "-"*78 + "#\n")
FH.write("#\n")
FH.write("# Array definition file for the %s %s configuration.\n"
% (telescope, confName))
FH.write("#\n")
FH.write("#" + "-"*78 + "#\n")
FH.write("\n")
FH.write("# Name of the telescope\n")
FH.write("telescope = %s\n" % telescope)
FH.write("\n")
FH.write("# Name of the configuration\n")
FH.write("config = %s\n" % confName)
FH.write("\n")
FH.write("# Latitude of the array centre\n")
FH.write("latitude_deg = %f\n" % latitude_deg)
FH.write("\n")
FH.write("# Antenna diameter\n")
FH.write("diameter_m = %f\n" % diameter_m)
FH.write("\n")
FH.write("# Antenna coordinates (offset E, offset N)\n")
FH.write("%f, %f\n" % (statDict[A1][0], statDict[A1][1]))
FH.write("%f, %f\n" % (statDict[A2][0], statDict[A2][1]))
FH.write("%f, %f\n" % (statDict[A3][0], statDict[A3][1]))
FH.write("%f, %f\n" % (statDict[A4][0], statDict[A4][1]))
FH.write("%f, %f\n" % (statDict[A5][0], statDict[A5][1]))
FH.write("%f, %f\n" % (statDict[A6][0], statDict[A6][1]))
FH.close()
for confName, A1, A2, A3, A4, A5 in zip(col[1], col[2], col[3], col[4],
col[5], col[6]):
if A1=='':
continue
confName += "_No_6"
outFileName = "ATCA_%s.config" % confName
FH = open(outFileName, "w")
FH.write("#" + "-"*78 + "#\n")
FH.write("#\n")
FH.write("# Array definition file for the %s %s configuration.\n"
% (telescope, confName))
FH.write("#\n")
FH.write("#" + "-"*78 + "#\n")
FH.write("\n")
FH.write("# Name of the telescope\n")
FH.write("telescope = %s\n" % telescope)
FH.write("\n")
FH.write("# Name of the configuration\n")
FH.write("config = %s\n" % confName)
FH.write("\n")
FH.write("# Latitude of the array centre\n")
FH.write("latitude_deg = %f\n" % latitude_deg)
FH.write("\n")
FH.write("# Antenna diameter\n")
FH.write("diameter_m = %f\n" % diameter_m)
FH.write("\n")
FH.write("# Antenna coordinates (offset E, offset N)\n")
FH.write("%f, %f\n" % (statDict[A1][0], statDict[A1][1]))
FH.write("%f, %f\n" % (statDict[A2][0], statDict[A2][1]))
FH.write("%f, %f\n" % (statDict[A3][0], statDict[A3][1]))
FH.write("%f, %f\n" % (statDict[A4][0], statDict[A4][1]))
FH.write("%f, %f\n" % (statDict[A5][0], statDict[A5][1]))
FH.close()
#-----------------------------------------------------------------------------#
main()
| crpurcell/friendlyVRI | arrays/array_data/ATCA/mk_ATCA_array_configs.py | Python | mit | 3,761 | 0.003191 |
from datetime import timedelta
from django.contrib import admin
from django.db.models import Case, Value, When
from django.utils import timezone
from .models import Channel, Post, RssFeed
@admin.register(Channel)
class ChannelAdmin(admin.ModelAdmin):
list_display = ('__str__', 'title', 'username', 'publish_picture', 'linked_title', 'short_link')
change_list_template = "rss/actions.html"
@admin.register(RssFeed)
class RssFeedAdmin(admin.ModelAdmin):
list_display = ('__str__', 'channel', 'link', 'active')
actions = ('activate', 'deactivate', 'toggle_active')
def activate(self, request, queryset):
queryset.update(active=True)
def deactivate(self, request, queryset):
queryset.update(active=False)
def toggle_active(self, request, queryset):
queryset.update(active=Case(When(active=True, then=Value(False)), default=Value(True)))
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'feed', 'link', 'created', 'older_then_five_days')
ordering = ('-created',)
def older_then_five_days(self, post: Post):
five_days_before = timezone.now() - timedelta(days=5)
return post.created < five_days_before
older_then_five_days.boolean = True
| vaniakosmos/memes-reposter | apps/rss/admin.py | Python | mit | 1,262 | 0.001585 |
from tfidf import *
import psycopg2
import psycopg2.extensions
import math
def cos_sim(A,B):
def dot_product(a,b):
sum = 0.0
for key in a.keys():
if key in b:
sum += a[key]*b[key]
return sum
return dot_product(A,B)/(math.sqrt(dot_product(A,A)) * math.sqrt(dot_product(B,B)))
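# Quick sanity check of cos_sim above (not part of the original script): with
# A = {'a': 1.0, 'b': 2.0} and B = {'a': 3.0}, the dot products are 3, 5 and 9,
# so the similarity should be 3 / (sqrt(5) * 3) ~ 0.447.
assert abs(cos_sim({'a': 1.0, 'b': 2.0}, {'a': 3.0}) - 3.0 / (math.sqrt(5.0) * 3.0)) < 1e-9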
conn = psycopg2.connect("host=localhost dbname=SOFTFile user=AUREA password=AUREA")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
c = conn.cursor()
qry = "SELECT dataset_id, dataset_title, dataset_description \
FROM dataset"
#WHERE dataset_id < 20"
c.execute(qry)
documentList = []
documentNumber = 0
docMap = []
for id,title, description in c.fetchall():
documentList.append(title + description)
docMap.append(id)
c.close()
vectors = []
print "gotDocs"
for x in range(len(documentList)):
words = {}
for word in documentList[documentNumber].split(None):
words[word] = tfidf(word,documentList[documentNumber],documentList)
#for item in sorted(words.items(), key=itemgetter(1), reverse=True):
# print "%f <= %s" % (item[1], item[0])
vectors.append(words)
documentNumber = x+1
print "got vectors"
sim = []
for i in range(len(vectors[:-1])):
for j in range(i+1, len(vectors)):
sim = cos_sim(vectors[i], vectors[j])
db_id1 = docMap[i]
db_id2 = docMap[j]
qry = "INSERT into cosine_similarity(id1, id2, score) VALUES (%s, %s, %s)"
c = conn.cursor()
c.execute(qry, (db_id1, db_id2, sim))
c.close()
| JohnCEarls/AUREA | scripts/testScripts/testTFIDF.py | Python | agpl-3.0 | 1,566 | 0.009579 |
from django.shortcuts import render
from SharedFunctions.models import *
from Lights.models import *
def HandlePhoneRequest(request):
area = request.GET.get('area', 'None')
if area == 'None':
return PhoneHomePage(request)
elif area == 'lights':
if request.GET.get('room', 'None') != 'None':
if request.GET.get('light', 'None') != 'None':
if request.GET.get('command', 'None') != 'None':
return PhoneLightSetRGBPage(request)
else:
return PhoneLightPage(request)
else:
return PhoneLightsPage(request)
else:
return PhoneLightsRoomPage(request)
else:
return PhoneHomePage(request)
def PhoneHomePage(request):
items = [{'title':'Lights', 'address':'?page=ciscophone&area=lights'},
{'title':'Alarm', 'address':'?page=ciscophone&area=alarm'},
{'title':'Temperature', 'address':'?page=ciscophone&area=temp'}]
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightsRoomPage(request):
items = [{'title':'All Rooms', 'address':'?page=ciscophone&area=lights&room=allrooms'}]
for room in Rooms.objects.all():
items.append({'title':room.Name.replace("_", " "), 'address':'?page=ciscophone&area=lights&room=' + str(room.id)})
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightsPage(request):
items = []
room = request.GET.get('room', 'None')
if room == 'allrooms':
lights = Lights.objects.all()
else:
theRoom = Rooms.objects.get(id=int(room))
lights = Lights.objects.filter(Room=theRoom)
for light in lights:
items.append({'title':light.LightName.replace("_", " "), 'address':'?page=ciscophone&area=lights&room=' + str(room) + '&light=' + str(light.id)})
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Light', 'softkey1':'test'}, content_type="text/xml")
def PhoneLightPage(request):
light = request.GET.get('light', 'None')
items = [{'title':'Toggle Light', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=toggle'},
{'title':'Set RGB Values', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=setrgb'},
{'title':'Select Scene', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=selectscene'}]
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightSetRGBPage(request):
items = [{'DisplayName':'Set Red Value', 'QueryStringParam':'r', 'DefaultValue':'255', 'InputFlag':'N'},
{'DisplayName':'Set Green Value', 'QueryStringParam':'g', 'DefaultValue':'255', 'InputFlag':'N'},
{'DisplayName':'Set Blue Value', 'QueryStringParam':'b', 'DefaultValue':'255', 'InputFlag':'N'}]
return render(request, 'OctaHomeApi/PhoneValueSet.html', {'Items':items, 'Prompt':'Please Select A Service', 'Url':'setrgb.xml'}, content_type="text/xml") | Tomcuzz/OctaHomeAutomation | Api/ciscophone.py | Python | mit | 3,020 | 0.036755 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user models.
"""
from django.contrib.auth.models import User, Group
from django.test import TestCase
from weblate.accounts.models import AutoGroup
class AutoGroupTest(TestCase):
@staticmethod
def create_user():
return User.objects.create_user('test1', 'noreply@weblate.org', 'pass')
def test_default(self):
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
def test_none(self):
AutoGroup.objects.all().delete()
user = self.create_user()
self.assertEqual(user.groups.count(), 0)
def test_matching(self):
AutoGroup.objects.create(
match='^.*@weblate.org',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
def test_nonmatching(self):
AutoGroup.objects.create(
match='^.*@example.net',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
| dtschan/weblate | weblate/accounts/tests/test_models.py | Python | gpl-3.0 | 1,853 | 0 |
import Queue
import handlers
import inspect
import threading
import pkgutil
import os
import sys
import imp
from gatherer import Gatherer
import signal
import platform
import ConfigParser
class GatherAgent(object):
"""
    A simple layer between inputs (gatherers) and output (handler), built on a
    basic implementation of the reactor pattern.
"""
KEY_SEPARATOR = '.'
def start(self, config_file='gather_agent.ini'):
"""
        Initialization method of the GatherAgent. Sets up required queues, parses
the configuration, loads gatherers and handler and starts the dispatcher.
"""
self.q = Queue.Queue()
self.gatherers = []
# Load configuration properties
config = ConfigParser.ConfigParser()
config.read(config_file)
config.set('Gatherers', 'prefix', platform.node())
self.config = config
# Start gatherers and handlers..
self.handler = self.start_handler(config.get('General', 'handler'))
self.start_gatherers(self.load_gatherers(), self.handler)
signal.signal(signal.SIGINT, self._stop)
self.active = True
self.loop()
def start_handler(self, handler_cls):
handler_generic_config = self.load_partial_config('Handlers')
handler_specific_config = self.load_partial_config('Handlers', handler_cls)
handler_specific_config.update(handler_generic_config)
for o, _ in self.load_classes_list('handlers'):
if o.__name__ == handler_cls:
obj = o(handler_specific_config)
return obj
def start_gatherers(self, instances, handler):
"""
        Creates a new thread for each gatherer and starts it running the gatherer's run() method.
"""
for instance in instances:
t = threading.Thread(target=instance.run)
t.daemon = True
t.start()
self.gatherers.append(instance)
def loop(self):
"""
        Main dispatcher loop, which waits for objects to become available in the
        queue. Once an event is received, it calls the event's handler and waits
        for the result before processing the next event.
"""
while self.active:
event = self.q.get()
event.handle()
def load_partial_config(self, section, keyprefix=None):
"""
        Parses a partial configuration from the ini file, keeping only the keys
        that belong to the given keyprefix namespace. If no keyprefix is given,
        only the properties that are not namespaced with a dot (.) are kept.
"""
section_config = self.config.items(section)
partial_config = {}
for k, v in section_config:
d = None
if keyprefix is not None:
keyprefix = keyprefix.lower()
i = k.rfind(keyprefix + self.KEY_SEPARATOR)
if i > -1:
d = { k: v }
else:
i = k.rfind(self.KEY_SEPARATOR)
if i < 0:
d = { k: v }
if d is not None:
partial_config.update(d)
return partial_config
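    # Illustrative example of the filtering above (the section contents are
    # hypothetical, not taken from a real gather_agent.ini):
    #
    #   [Gatherers]
    #   interval = 10
    #   cpu.interval = 2
    #   disk.path = /var
    #
    # load_partial_config('Gatherers') returns {'interval': '10'} (namespaced
    # keys are skipped), while load_partial_config('Gatherers', 'cpu') returns
    # {'cpu.interval': '2'}.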
def load_handlers_config(self, class_name):
handlers_config = self.load_partial_config('Handlers', class_name)
return handlers_config
def load_gatherers_config(self, class_name):
generic_gatherer_config = self.load_partial_config('Gatherers')
specific_gatherer_config = self.load_partial_config('Gatherers', class_name)
generic_gatherer_config.update(specific_gatherer_config)
return generic_gatherer_config
def load_classes_list(self, package):
"""
        Loads all classes from the given package. Returns a generator yielding
        two values per class: the class object and its name.
"""
path = os.path.join(os.path.dirname(__file__), package)
modules = pkgutil.iter_modules(path=[path])
for _, module_name, _ in modules:
fp, pathname, description = imp.find_module(module_name, [path])
module = imp.load_module(module_name, fp, pathname, description)
for name in dir(module):
o = getattr(module, name)
if inspect.isclass(o):
yield o, name
def load_gatherers(self):
"""
        Creates and returns a generator yielding one instance of each Gatherer
        subclass found in the gatherers package.
"""
for o, name in self.load_classes_list('gatherers'):
if issubclass(o, Gatherer) and o is not Gatherer:
partial_config = self.load_gatherers_config(name)
obj = o(self.handler, partial_config, self.q)
yield obj
def _stop(self, signum, frame):
"""
If a signal is received from the OS, this method is used to clean up and
stop all the gatherers and handlers.
"""
print 'Received signal ' + str(signum) + ', closing gatherers and handlers'
self.active = False
for i in self.gatherers:
i.close()
self.handler.close()
if __name__ == "__main__":
g = GatherAgent()
if len(sys.argv) > 1:
g.start(sys.argv[1])
else:
g.start()
| burmanm/gather_agent | gather_agent/gather_agent.py | Python | apache-2.0 | 5,283 | 0.005111 |
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Timesheet on Issues',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds the Timesheet support for the Issues/Bugs Management in Project.
=================================================================================
Worklogs can be maintained to record the number of hours spent by users handling an issue.
""",
'website': 'https://www.odoo.com/page/project-management',
'depends': [
'project_issue',
'hr_timesheet_sheet',
],
'data': [
'project_issue_sheet_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
| akhmadMizkat/odoo | addons/project_issue_sheet/__openerp__.py | Python | gpl-3.0 | 850 | 0.002353 |
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
"""Takes a unicode string representing a munged header value
and decodes it as a (possibly non-ASCII) readable value."""
parts = []
for v, enc in _email_decode_header(header_str):
if isinstance(v, bytes):
parts.append(v.decode(enc or 'ascii'))
else:
parts.append(v)
return ''.join(parts)
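# Illustration of decode_header() above (hand-checked against the
# email.header machinery it delegates to):
#
#     decode_header('=?utf-8?q?caf=C3=A9?=') -> 'café'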
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
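# Example of the parsing above, for a typical LIST OVERVIEW.FMT response body
# (the exact lines vary between servers):
#
#     _parse_overview_fmt(['Subject:', 'From:', 'Date:', 'Message-ID:',
#                          'References:', ':bytes', ':lines', 'Xref:full'])
#     -> ['subject', 'from', 'date', 'message-id', 'references',
#         ':bytes', ':lines', 'xref']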
def _parse_overview(lines, fmt, data_process_func=None):
"""Parse the response to an OVER or XOVER command according to the
overview format `fmt`."""
n_defaults = len(_DEFAULT_OVERVIEW_FMT)
overview = []
for line in lines:
fields = {}
article_number, *tokens = line.split('\t')
article_number = int(article_number)
for i, token in enumerate(tokens):
if i >= len(fmt):
# XXX should we raise an error? Some servers might not
# support LIST OVERVIEW.FMT and still return additional
# headers.
continue
field_name = fmt[i]
is_metadata = field_name.startswith(':')
if i >= n_defaults and not is_metadata:
# Non-default header names are included in full in the response
# (unless the field is totally empty)
h = field_name + ": "
if token and token[:len(h)].lower() != h:
raise NNTPDataError("OVER/XOVER response doesn't include "
"names of additional headers")
token = token[len(h):] if token else None
fields[fmt[i]] = token
overview.append((article_number, fields))
return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
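# Worked examples of the parsing above (values hand-checked):
#
#     _parse_datetime('20140731', '170000') -> datetime(2014, 7, 31, 17, 0, 0)
#     _parse_datetime('140731170000')       -> datetime(2014, 7, 31, 17, 0, 0)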
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
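# Worked examples of the formatting above (values hand-checked):
#
#     _unparse_datetime(datetime.datetime(2014, 7, 31, 17, 0, 0))
#     -> ('20140731', '170000')
#     _unparse_datetime(datetime.datetime(2014, 7, 31, 17, 0, 0), legacy=True)
#     -> ('140731', '170000')
#     _unparse_datetime(datetime.date(2014, 7, 31))
#     -> ('20140731', '000000')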
if _have_ssl:
def _encrypt_on(sock, context, hostname):
"""Wrap a socket in SSL/TLS. Arguments:
- sock: Socket to wrap
- context: SSL context to use for the encrypted connection
Returns:
- sock: New, encrypted socket.
"""
# Generate a default SSL context if none was passed.
if context is None:
context = ssl._create_stdlib_context()
return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
# Enable only if we're not already in READER mode anyway.
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
def __enter__(self):
return self
def __exit__(self, *args):
is_connected = lambda: hasattr(self, "file")
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline(_MAXLINE +1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns a unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is a unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
        if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
        if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
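    # Example of a response line parsed above: "223 3000234 <45223423@example.com>"
    # yields art_num=3000234 and message_id='<45223423@example.com>'.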
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (e.g. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp) from None
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
# Raises a specific exception if posting is not allowed
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
# We don't use _putline() because:
# - we don't want additional CRLF if the file or iterable is already
# in the right format
# - we don't want a spurious flush() after each line is written
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b"\r\n") + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b".\r\n")
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError("Already logged in.")
if not user and not usenetrc:
raise ValueError(
"At least one of `user` and `usenetrc` must be specified")
# If no login/password was specified but netrc was requested,
# try to get them from ~/.netrc
# Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except OSError:
pass
# Perform NNTP authentication if needed.
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
# Capabilities might have changed after login
self._caps = None
self.getcapabilities()
# Attempt to send mode reader if it was requested after login.
# Only do so if we're not in reader mode already.
if self.readermode_afterauth and 'READER' not in self._caps:
self._setreadermode()
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context, self.host)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
- usenetrc: allow loading username and password from ~/.netrc file
if not specified explicitly
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), timeout)
file = None
try:
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode, timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
except:
if file:
file.close()
self.sock.close()
raise
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
if _have_ssl:
class NNTP_SSL(_NNTPBase):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
self.sock = socket.create_connection((host, port), timeout)
file = None
try:
self.sock = _encrypt_on(self.sock, ssl_context, host)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode=readermode, timeout=timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
except:
if file:
file.close()
self.sock.close()
raise
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
__all__.append("NNTP_SSL")
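# Minimal usage sketch (defined for illustration only, never run on import).
# The hostname and group name below are placeholders; a reachable NNTP server
# that allows reader commands is assumed.
def _example_fetch_last_subjects(hostname='news.example.com',
                                 group_name='example.group', count=10):
    conn = NNTP(hostname)
    try:
        resp, article_count, first, last, name = conn.group(group_name)
        # over() accepts a (start, end) tuple of article numbers
        resp, overviews = conn.over((max(first, last - count + 1), last))
        return [decode_header(over['subject']) for artnum, over in overviews]
    finally:
        conn.quit()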
# Test retrieval when run as a script.
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="""\
nntplib built-in demo - display the latest articles in a newsgroup""")
parser.add_argument('-g', '--group', default='gmane.comp.python.general',
help='group to fetch messages from (default: %(default)s)')
parser.add_argument('-s', '--server', default='news.gmane.io',
help='NNTP server hostname (default: %(default)s)')
parser.add_argument('-p', '--port', default=-1, type=int,
help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
parser.add_argument('-n', '--nb-articles', default=10, type=int,
help='number of articles to fetch (default: %(default)s)')
parser.add_argument('-S', '--ssl', action='store_true', default=False,
help='use NNTP over SSL')
args = parser.parse_args()
port = args.port
if not args.ssl:
if port == -1:
port = NNTP_PORT
s = NNTP(host=args.server, port=port)
else:
if port == -1:
port = NNTP_SSL_PORT
s = NNTP_SSL(host=args.server, port=port)
caps = s.getcapabilities()
if 'STARTTLS' in caps:
s.starttls()
resp, count, first, last, name = s.group(args.group)
print('Group', name, 'has', count, 'articles, range', first, 'to', last)
def cut(s, lim):
if len(s) > lim:
s = s[:lim - 4] + "..."
return s
first = str(int(last) - args.nb_articles + 1)
resp, overviews = s.xover(first, last)
for artnum, over in overviews:
author = decode_header(over['from']).split('<', 1)[0]
subject = decode_header(over['subject'])
lines = int(over[':lines'])
print("{:7} {:20} {:42} ({})".format(
artnum, cut(author, 20), cut(subject, 42), lines)
)
s.quit()
| prefetchnta/questlab | bin/x64bin/python/37/Lib/nntplib.py | Python | lgpl-2.1 | 44,234 | 0.000701 |
# Author: matigonkas
# URL: https://github.com/SiCKRAGETV/sickrage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
class STRIKEProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Strike")
self.supportsBacklog = True
self.public = True
self.url = 'https://getstrike.net/'
self.ratio = 0
self.cache = StrikeCache(self)
self.minseed, self.minleech = 2 * [None]
def isEnabled(self):
return self.enabled
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys(): #Mode = RSS, Season, Episode
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)
searchURL = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
jdata = self.getURL(searchURL, json=True)
if not jdata:
logger.log("No data returned from provider", logger.DEBUG)
return []
for item in jdata['torrents']:
seeders = ('seeds' in item and item['seeds']) or 0
leechers = ('leeches' in item and item['leeches']) or 0
title = ('torrent_title' in item and item['torrent_title']) or ''
size = ('size' in item and item['size']) or 0
download_url = ('magnet_uri' in item and item['magnet_uri']) or ''
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
item = title, download_url, size, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class StrikeCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# set this 0 to suppress log line, since we aren't updating it anyways
self.minTime = 0
def _getRSSData(self):
# no rss for getstrike.net afaik, also can't search with empty string
return {'entries': {}}
provider = STRIKEProvider()
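# Usage sketch (values are placeholders; SickRage normally builds the search
# dict itself): _doSearch() takes a mapping of mode name to a list of phrases,
# e.g.
#     search_strings = {'Episode': ['Show Name S01E02'], 'RSS': ['']}
#     results = provider._doSearch(search_strings)
# Each result is a (title, magnet_uri, size, seeders, leechers) tuple, sorted
# by seeders within each mode.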
| hale36/SRTV | sickbeard/providers/strike.py | Python | gpl-3.0 | 3,919 | 0.003572 |
import os
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from subscription_manager import lock
class TestLock(unittest.TestCase):
lf_name = "lock.file"
def setUp(self):
self.tmp_dir = self._tmp_dir()
self.other_process = None
def _tmp_dir(self):
tmp_dir = tempfile.mkdtemp(suffix="lock", prefix="subman-unit-tests-")
return tmp_dir
def _lock_path(self):
tmp_dir = self._tmp_dir()
return os.path.join(tmp_dir, self.lf_name)
# For thread.Timer()
def _kill_other_process(self, other_process):
self.fail("nothing happened before we timed out.")
# die die die
other_process.terminate()
other_process.kill()
self.timer.cancel()
def _grab_lock_from_other_pid(self, lockfile_path,
other_process_timeout=None,
acquire_timeout=None):
# klugey
other_process_timeout = other_process_timeout or 3.0
acquire_timeout = acquire_timeout or 5.0
sys_path = os.path.join(os.path.dirname(__file__), "../src")
self.other_process = subprocess.Popen(["/usr/bin/python", __file__, lockfile_path],
close_fds=True,
stdin=subprocess.PIPE,
env={'PYTHONPATH': sys_path})
#lock_path = os.path.join(self.tmp_dir, 'lock.file')
# make sure other process has had time to create the lock file
while True:
lock_exists = os.path.exists(lockfile_path)
if lock_exists:
break
time.sleep(0.05)
        # in another thread, wait `other_process_timeout` seconds, then send
        # 'whatever' to stdin of the other process so it closes. A timeout...
def wait_for_pid(timer):
time.sleep(other_process_timeout)
self.close_lock_holder()
timer.cancel()
timer = threading.Timer(acquire_timeout, self.timeout_fail)
op_thread = threading.Thread(target=wait_for_pid, args=[timer])
op_thread.start()
return op_thread
def close_lock_holder(self):
try:
self.other_process.communicate("whatever")
except Exception, e:
print e
# whatever, we closed it in the other thread
def timeout_fail(self):
self.close_lock_holder()
self.fail("timeoutsdfsdf")
def test_two_pids_blocking_none_blocks(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 1.0, 0.2)
b = lock.Lock(lock_path)
res = b.acquire()
self.assertTrue(res is None)
def test_two_pids_blocking_none(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire()
self.assertTrue(b.acquired())
self.assertTrue(res is None)
def test_two_pids_blocking_true(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire(blocking=True)
self.assertTrue(b.acquired())
self.assertTrue(res)
def test_two_pids_blocking_false(self):
lock_path = self._lock_path()
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire(blocking=False)
self.assertFalse(b.acquired())
self.other_process.communicate("whatever")
self.assertFalse(res)
def test_lock(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
self.assertEquals(lf.path, lock_path)
self.assertEquals(lf.depth, 0)
def test_lock_acquire(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire()
# given no args, acquire() blocks or returns None
self.assertEquals(res, None)
def test_lock_acquire_blocking_true(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=True)
# acquire(blocking=True) will block or return True
self.assertTrue(res)
def test_lock_acquire_blocking_false(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=False)
# res of False indicates lock could not be acquired without blocking
# True indicates lock was acquired
self.assertTrue(res)
def test_lock_release(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
lf.acquire()
lf.release()
def _stale_lock(self):
lock_path = self._lock_path()
fakepid = 123456789
f = open(lock_path, 'w')
f.write('%s\n' % fakepid)
f.close()
return lock_path
def test_lock_acquire_stale_pid(self):
lock_path = self._stale_lock()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=True)
self.assertTrue(res)
def test_lock_acquire_stale_pid_nonblocking(self):
lock_path = self._stale_lock()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=False)
self.assertTrue(res)
# always blocks, needs eventloop/threads
# def test_lock_drive_full_blocking(self):
# lock_path = "/dev/full"
# lf = lock.Lock(lock_path)
# res = lf.acquire(blocking=True)
# log.debug(res)
# FIXME: the lockfile creation fails on /dev/full
# def test_lock_drive_full_nonblocking(self):
# lock_path = "/dev/full"
# lf = lock.Lock(lock_path)
# res = lf.acquire(blocking=False)
# self.assertFalse(res)
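# Sketch of the Lock API exercised by the tests above (the path is a placeholder):
#     lf = lock.Lock("/tmp/example.lock")
#     if lf.acquire(blocking=False):   # True -> acquired, False -> held elsewhere
#         try:
#             pass                     # critical section
#         finally:
#             lf.release()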
# run this module's main in a subprocess to grab a lock from a different
# pid.
def main(args):
lock_file_path = args[1]
test_lock = lock.Lock(lock_file_path)
# could return a useful value, so the thread communicating with
# it could notice it couldn't get the lock
res = test_lock.acquire(blocking=False)
if res is False:
return 128
# exit on any stdin input
for line in sys.stdin.readlines():
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[:]))
| vritant/subscription-manager | test/test_lock.py | Python | gpl-2.0 | 6,575 | 0.000608 |
from __future__ import absolute_import
from builtins import filter
import os
import sys
import zipfile
from gi.repository import Gtk, Gio, GdkPixbuf, GLib
from sunflower.common import UserDirectory, get_user_directory, get_static_assets_directory
class IconManager:
"""Icon manager class provides easy and abstract way of dealing with icons"""
def __init__(self, parent):
self._parent = parent
self._icon_theme = Gtk.IconTheme.get_default()
self._user_directories = None
self._default_file = None
self._default_directory = None
# preload information
self._prepare_icons()
def _prepare_icons(self):
"""Load special user directories"""
# set default icons for file and directory
self._default_file = 'text-x-generic'
self._default_directory = 'folder'
# special user directories
directories = []
icon_names = {
UserDirectory.DESKTOP: 'user-desktop',
UserDirectory.DOWNLOADS: 'folder-download',
UserDirectory.TEMPLATES: 'folder-templates',
UserDirectory.PUBLIC: 'folder-publicshare',
UserDirectory.DOCUMENTS: 'folder-documents',
UserDirectory.MUSIC: 'folder-music',
UserDirectory.PICTURES: 'folder-pictures',
UserDirectory.VIDEOS: 'folder-videos'
}
# add all directories
for directory in icon_names:
full_path = get_user_directory(directory)
icon_name = icon_names[directory]
# make sure icon exists
if not self.has_icon(icon_name):
icon_name = self._default_directory
directories.append((full_path, icon_name))
# add user home directory
if self.has_icon('user-home'):
directories.append((os.path.expanduser('~'), 'user-home'))
# create a dictionary
self._user_directories = dict(directories)
def has_icon(self, icon_name):
"""Check if icon with specified name exists in theme"""
return self._icon_theme.has_icon(icon_name)
def get_icon_sizes(self, icon_name):
"""Get icon sizes for specified name"""
return self._icon_theme.get_icon_sizes(icon_name)
def get_icon_for_file(self, filename):
"""Load icon for specified file"""
result = self._default_file
mime_type = self._parent.associations_manager.get_mime_type(filename)
themed_icon = None
# get icon names
if mime_type is not None:
themed_icon = Gio.content_type_get_icon(mime_type)
# get only valid icon names
if themed_icon is not None:
icon_list = themed_icon.get_names()
icon_list = list(filter(self.has_icon, icon_list))
if len(icon_list) > 0:
result = icon_list[0]
return result
def get_icon_for_directory(self, path):
"""Get icon for specified directory"""
result = self._default_directory
if path in self._user_directories:
result = self._user_directories[path]
return result
def get_mount_icon_name(self, icons):
"""Return existing icon name from the specified list"""
result = 'drive-harddisk'
# create a list of icons and filter non-existing
icon_list = icons.split(' ')
icon_list = list(filter(self.has_icon, icon_list))
# if list has items, grab first
if len(icon_list) > 0:
result = icon_list[0]
return result
def set_window_icon(self, window):
"""Set window icon"""
# check system for icon
if self.has_icon('sunflower'):
window.set_icon(self._icon_theme.load_icon('sunflower', 256, 0))
# try loading from zip file
elif os.path.isfile(sys.path[0]) and sys.path[0] != '':
archive = zipfile.ZipFile(sys.path[0])
with archive.open('images/sunflower.svg') as raw_file:
buff = Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new(raw_file.read()))
icon = GdkPixbuf.Pixbuf.new_from_stream(buff, None)
window.set_icon(icon)
archive.close()
# load from local path
else:
base_path = get_static_assets_directory()
window.set_icon_from_file(os.path.join(base_path, 'images', 'sunflower.svg'))
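# Usage sketch (assumes a running GTK environment): the mime-to-icon lookup in
# get_icon_for_file() reduces to Gio, roughly
#     names = Gio.content_type_get_icon('text/x-python').get_names()
#     icon = next((n for n in names if theme.has_icon(n)), 'text-x-generic')
# where `theme` is Gtk.IconTheme.get_default().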
| MeanEYE/Sunflower | sunflower/icons.py | Python | gpl-3.0 | 3,797 | 0.025283 |
"""
the handlers that respond to requests from browsers or clients.
Each view function is mapped to one or more request URLs.
"""
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm
from .models import User, ROLE_USER, ROLE_ADMIN
#the two decorators create mappings from URLs / and /index to this function
@app.route('/')
@app.route('/index')
@login_required
def index():
user = {'nickname': 'Okkar'}
posts = [
{
'author': {'nickname': 'Max'},
'body': 'Golden Gate Bridge!'
},
{
'author': {'nickname': 'Pan'},
'body': 'I want bacon!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.before_request
def before_request():
g.user = current_user
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index')) | r2k0/flask-apps | mega-tut/app/views.py | Python | mit | 2,555 | 0.005479 |
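# The LoginForm imported above is assumed (it lives in forms.py, not shown) to
# expose the fields used by login(), roughly:
#     class LoginForm(Form):
#         openid = StringField('openid', validators=[DataRequired()])
#         remember_me = BooleanField('remember_me', default=False)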
from django.conf.urls import url, include
from snippets.views import SnippetViewSet, UserViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'snippets', SnippetViewSet)
router.register(r'users', UserViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
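# The DefaultRouter above generates, among others, these URL patterns
# (plus format-suffix variants such as ".json"):
#     /                   api-root
#     /snippets/          snippet-list
#     /snippets/{pk}/     snippet-detail
#     /users/             user-list
#     /users/{pk}/        user-detail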
| datyayu/cemetery | [Python][Django] Rest framework tutorial/snippets/urls.py | Python | mit | 314 | 0 |
import time
import pyupm_ttp223 as ttp223
import requests
import json
url = "http://requestb.in/1mj62581?inspect"
headers = {'content-type': 'application/json'}
touch1 = ttp223.TTP223(4)
touch1Pressed = False
touch2 = ttp223.TTP223(8)
touch2Pressed = False
def sendInfo(touch, tId, Pressed):
if touch.isPressed():
if not Pressed:
print "Send Info"
Pressed = True
data = {"Id": "AI", "Espacio": tId, "Disponible": False}
data = json.dumps(data)
            # send the JSON payload as the request body (content-type above is application/json)
            requests.post(url, data=data, headers=headers)
else:
if Pressed:
print "Send Info"
Pressed = False
data = {"Id": "AI", "Espacio": tId, "Disponible": True}
data = json.dumps(data)
            requests.post(url, data=data, headers=headers)
return Pressed
while True:
touch1Pressed = sendInfo(touch1, 1, touch1Pressed)
touch2Pressed = sendInfo(touch2, 2, touch2Pressed)
time.sleep(1)
del touch1
del touch2
| OpenParking/Open-Parkinig---Edison | source/touh.py | Python | gpl-2.0 | 1,004 | 0.000996 |
""""""
from __future__ import annotations
from flask import Flask
from .criterion import TagCriterion
from .extension import TagsExtension
__all__ = ["TagsExtension", "TagCriterion"]
def register_plugin(app: Flask):
TagsExtension(app)
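# Usage sketch, assuming a standard Flask application factory:
#     app = Flask(__name__)
#     register_plugin(app)  # installs the TagsExtension on the app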
| abilian/abilian-core | src/abilian/web/tags/__init__.py | Python | lgpl-2.1 | 244 | 0 |
import datetime
import time
from django.db.utils import DatabaseError
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from collections import namedtuple
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.signals import connection_created
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils import six
from django.utils import timezone
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Savepoint management related attributes
self.savepoint_state = 0
# Transaction management related attributes
self.autocommit = False
self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not.
self._dirty = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# List of savepoints created by 'atomic'
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError
##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
if self.settings_dict['AUTOCOMMIT']:
self.set_autocommit(True)
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
self.set_clean()
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
self.set_clean()
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
self.set_clean()
##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
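    # Example of a generated identifier: the first savepoint created by the
    # thread whose ident is 140735287440640 is named "s140735287440640_x1".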
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError
##### Generic transaction management methods #####
def enter_transaction_management(self, managed=True, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
If you switch off transaction management and there is a pending
        commit/rollback, the data will be committed, unless "forced" is True.
"""
self.validate_no_atomic_block()
self.transaction_state.append(managed)
if not managed and self.is_dirty() and not forced:
self.commit()
self.set_clean()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self.validate_no_atomic_block()
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
if self.transaction_state:
managed = self.transaction_state[-1]
else:
managed = not self.settings_dict['AUTOCOMMIT']
if self._dirty:
self.rollback()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
if managed == self.get_autocommit():
self.set_autocommit(not managed)
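    # Typical pairing of the two methods above (legacy transaction API):
    #     connection.enter_transaction_management()
    #     try:
    #         ...  # run queries, calling commit()/rollback() as needed
    #     finally:
    #         connection.leave_transaction_management()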
def get_autocommit(self):
"""
Check the autocommit state.
"""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self.rollback()
while self.transaction_state:
self.leave_transaction_management()
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if not self.get_autocommit():
self._dirty = True
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether a commit or rollback
        should happen.
"""
self._dirty = False
self.clean_savepoints()
##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
##### Connection termination handling #####
def is_usable(self):
"""
Tests if the database connection is usable.
This function may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if not (self.allow_thread_sharing
or self._thread_ident == thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
##### Miscellaneous #####
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries.
"""
return util.CursorDebugWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
try:
yield cursor
finally:
cursor.close()
if must_close:
self.close()
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError
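# Illustrative sketch only (not one of Django's real backends): a concrete
# backend overrides at least the connection plumbing shown below; real backends
# also attach features/ops/client/creation/introspection helpers in __init__.
# sqlite3 stands in for the DB-API driver purely as an example.
class _SketchDatabaseWrapper(BaseDatabaseWrapper):
    vendor = 'sketch'
    def get_connection_params(self):
        # Normally derived from self.settings_dict (NAME, USER, HOST, ...).
        return {'database': self.settings_dict['NAME']}
    def get_new_connection(self, conn_params):
        import sqlite3
        return sqlite3.connect(**conn_params)
    def init_connection_state(self):
        # Per-connection session setup (encoding, time zone, ...); nothing to do here.
        pass
    def create_cursor(self):
        return self.connection.cursor()
    def _set_autocommit(self, autocommit):
        # sqlite3 autocommits when isolation_level is None.
        self.connection.isolation_level = None if autocommit else ''
    def is_usable(self):
        try:
            self.connection.execute('SELECT 1')
        except Exception:
            return False
        return True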
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
            # otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection.commit()
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError()
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError()
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import force_text
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = dict((to_unicode(k), to_unicode(v)) for k, v in params.items())
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
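    # Illustrative sketch (not part of Django): the default implementation above
    # does not interpolate placeholders; it reports the raw SQL and the params
    # side by side. Assuming `ops` is a connection's operations object:
    #
    #   ops.last_executed_query(cursor, "SELECT * FROM t WHERE id = %s", [42])
    #   # -> u"QUERY = 'SELECT * FROM t WHERE id = %s' - PARAMS = (42,)"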
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError()
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import force_text
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
return [first, second]
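    # Illustrative sketch (not part of Django): with USE_TZ = False, a year
    # lookup for 2020 produces naive bounds suitable for a BETWEEN clause:
    #
    #   ops.year_lookup_bounds_for_datetime_field(2020)
    #   # -> [datetime.datetime(2020, 1, 1, 0, 0),
    #   #     datetime.datetime(2020, 12, 31, 23, 59, 59, 999999)]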
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent type
that is compatible with the field type.
"""
if value is None or field is None:
return value
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return float(value)
elif (internal_type and (internal_type.endswith('IntegerField')
or internal_type == 'AutoField')):
return int(value)
return value
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo',
'name type_code display_size internal_size precision scale null_ok'
)
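# Illustrative sketch (not part of Django): introspection code builds these
# tuples from cursor.description rows; the values below are made up.
#
#   FieldInfo(name='id', type_code=23, display_size=None, internal_size=4,
#             precision=None, scale=None, null_ok=False)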
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for column in six.iteritems(self.get_indexes(cursor, table_name)):
if column[1]['primary_key']:
return column[0]
return None
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| mariosky/evo-drawings | venv/lib/python2.7/site-packages/django/db/backends/__init__.py | Python | agpl-3.0 | 47,905 | 0.001232 |
import easygui
from os import listdir
from os.path import isfile, join
# Import Custom Libraries
from libs.requestStripper import *
file_path = "/home/tyler/Documents/Thesis Testing"
print(file_path)
files = [f for f in listdir(file_path) if isfile(join(file_path, f))]
for i, value in enumerate(files):
files[i] = file_path + "/" + files[i]
print(files)
lines = []
for f in files:
gatherStrings(lines, f)
newFile = open(file_path+"/"+"compiledRequests", "w")
exportFile(lines, newFile)
newFile.close()
| xTVaser/Schoolwork-Fall-2016 | Thesis/Parser/main.py | Python | gpl-3.0 | 526 | 0.001901 |
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError, why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error, errors
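# Illustrative sketch (not part of the stdlib): copying a tree while skipping
# byte-compiled files and temporary directories with ignore_patterns() above.
# The paths are made up.
#
#   copytree('project', '/tmp/project_copy',
#            ignore=ignore_patterns('*.pyc', 'tmp*'))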
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error, err:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error, "Destination path '%s' already exists" % real_dst
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error, "Cannot move a directory '%s' into itself '%s'." % (src, dst)
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', None: ''}
compress_ext = {'gzip': '.gz', 'bzip2': '.bz2'}
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext.keys():
raise ValueError, \
("bad value for 'compress': must be None, 'gzip' or 'bzip2'")
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
logger.info("creating %s" % archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
import tarfile # late import so Python build itself doesn't break
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError, \
("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [],"ZIP file")
}
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
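# Illustrative sketch (not part of the stdlib): registering a hypothetical
# archiver. The callable is invoked as func(base_name, base_dir, **kwargs)
# by make_archive() below and must return the archive's filename.
#
#   def _make_xz_tarball(base_name, base_dir, **kwargs):
#       ...  # create base_name + '.tar.xz' from base_dir, return its name
#
#   register_archive_format('xztar', _make_xz_tarball,
#                           description="xz'ed tar-file")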
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError, "unknown archive format '%s'" % format
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
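# Illustrative sketch (not part of the stdlib): archiving the "data" directory
# relative to the current working directory; the paths are made up.
#
#   make_archive('/tmp/backup', 'gztar', root_dir='.', base_dir='data')
#   # -> '/tmp/backup.tar.gz'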
| Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86 | usr/pkg/lib/python2.7/shutil.py | Python | mit | 18,302 | 0.002186 |
#!/usr/bin/python2
import os, glob
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs' )
base_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
output_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/downscaled'
cru_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts20/akcan'
for root, dirs, files in os.walk( base_dir ):
if files:
path, variable = os.path.split( root )
path, model = os.path.split( path )
# this gets rid of any .xml or .txt files that may be living amongst the NetCDF's
files = [ fn for fn in files if fn.endswith( '.nc' ) ]
for fn in files:
print 'running %s' % fn
# split out the sub_dirs to have both model_name and variable folder hierarchy
# from the prepped folder directory
output_dir = os.path.join( output_base_dir, model, variable )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# anomalies calculation type and cru input path condition
if 'tas_' in os.path.basename( fn ):
anomalies_calc_type = 'absolute'
downscale_operation = 'add'
cru_path = os.path.join( cru_base_dir, 'tas' )
elif 'hur_' in os.path.basename( fn ):
anomalies_calc_type = 'proportional'
downscale_operation = 'mult'
cru_path = os.path.join( cru_base_dir, 'hur' )
plev = 1000
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
# condition to determine if we need to read in the historical dataset with the modeled for
# anomalies calculation
if 'historical' in fn:
# run with only the historical file
dates = os.path.basename( fn ).strip( '.nc' ).split( '_' )
dates = dates[ len( dates )-2 ], dates[ len( dates )-1 ]
begin_time, end_time = [ '-'.join([ i[:4], i[4:] ]) for i in dates ]
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
# run with only the historical file
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -plev ' + str(plev) + ' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
else:
# grab the historical file from that particular folder
historical_fn = glob.glob( os.path.join( root, '*historical*.nc' ) )[0]
# run with both historical and modeled files for anomalies calc.
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + ' -plev ' + str(plev) + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
| ua-snap/downscale | old/old_bin/downscaling_launcher.py | Python | mit | 3,849 | 0.043128 |
from mock import *
from .gp_unittest import *
from gpconfig_modules.compare_segment_guc import MultiValueGuc
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
class CompareSegmentGucTest(GpTestCase):
def setUp(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
self.file_seg_guc = FileSegmentGuc(row)
row = ['contentid', 'guc_name', 'sql_value']
self.db_seg_guc = DatabaseSegmentGuc(row)
self.subject = MultiValueGuc(self.file_seg_guc, self.db_seg_guc)
def test_init_when_comparison_guc_supplied(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
old = self.subject
self.subject = MultiValueGuc(self.subject, file_seg_guc)
self.assertEqual(self.subject.db_seg_guc, old.db_seg_guc)
self.assertEqual(self.subject.primary_file_seg_guc, old.primary_file_seg_guc)
self.assertEqual(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_init_with_wrong_content_id_raises(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
file_seg_guc = FileSegmentGuc(row)
row = ['different', 'guc_name', 'sql_value']
db_seg_guc = DatabaseSegmentGuc(row)
with self.assertRaisesRegex(Exception, "Not the same context"):
MultiValueGuc(file_seg_guc, db_seg_guc)
def test_init_handles_both_orders(self):
self.assertEqual(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEqual(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
self.subject = MultiValueGuc(self.db_seg_guc, self.file_seg_guc)
self.assertEqual(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEqual(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
def test_init_when_none_raises(self):
with self.assertRaisesRegex(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(self.db_seg_guc, None)
with self.assertRaisesRegex(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(None, self.db_seg_guc)
def test_report_fail_format_for_database_and_file_gucs(self):
self.assertEqual(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid] [name: guc_name] [value: sql_value | file: file_value]"])
def test_report_fail_format_file_segment_guc_only(self):
self.subject.db_seg_guc = None
row = ['contentid', 'guc_name', 'primary_value', "dbid1"]
self.subject.set_primary_file_segment(FileSegmentGuc(row))
row = ['contentid', 'guc_name', 'mirror_value', "dbid2"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEqual(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid1] [name: guc_name] [value: primary_value]",
"[context: contentid] [dbid: dbid2] [name: guc_name] [value: mirror_value]"])
def test_when_segment_report_success_format(self):
self.assertEqual(self.subject.report_success_format(),
"Segment value: sql_value | file: file_value")
def test_when_values_match_report_success_format_file_compare(self):
self.subject.db_seg_guc.value = 'value'
self.subject.primary_file_seg_guc.value = 'value'
self.assertEqual(self.subject.report_success_format(), "Segment value: value | file: value")
def test_is_internally_consistent_fails(self):
self.assertEqual(self.subject.is_internally_consistent(), False)
def test_is_internally_consistent_when_file_value_is_none_succeeds(self):
self.file_seg_guc.value = None
self.assertEqual(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_primary_is_same_succeeds(self):
self.subject.primary_file_seg_guc.value = "sql_value"
self.assertEqual(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_mirror_is_different_fails(self):
self.subject.primary_file_seg_guc.value = "sql_value"
row = ['contentid', 'guc_name', 'diffvalue', "dbid1"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEqual(self.subject.is_internally_consistent(), False)
def test_is_internally_consistent_with_quotes_and_escaping(self):
cases = [
{'file_value': "'value'", 'db_value': 'value'},
{'file_value': "''", 'db_value': ''},
{'file_value': "'\\n\\r\\b\\f\\t'", 'db_value': '\n\r\b\f\t'},
{'file_value': "'\\0\\1\\2\\3\\4\\5\\6\\7'", 'db_value': '\0\1\2\3\4\5\6\7'},
{'file_value': "'\\8'", 'db_value': '8'},
{'file_value': "'\\01\\001\\377\\777\\7777'", 'db_value': '\x01\x01\xFF\xFF\xFF7'},
]
for case in cases:
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', case['file_value'], "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', case['db_value']])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
error_message = "expected file value: %r to be equal to db value: %r" % (case['file_value'], case['db_value'])
self.assertEqual(subject.is_internally_consistent(), True, error_message)
def test_is_internally_consistent_when_there_is_no_quoting(self):
cases = [
{'file_value': "value123", 'db_value': 'value123'},
{'file_value': "value-._:/", 'db_value': 'value-._:/'},
]
for case in cases:
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', case['file_value'], "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', case['db_value']])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
error_message = "expected file value: %r to be equal to db value: %r" % (case['file_value'], case['db_value'])
self.assertEqual(subject.is_internally_consistent(), True, error_message)
def test_is_internally_consistent_when_gucs_are_different_returns_false(self):
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', "'hello", "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', "hello"])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
self.assertFalse(subject.is_internally_consistent())
def test__unquote(self):
cases = [
('hello', 'hello'),
("''", ''),
("'hello'", 'hello'),
("'a\\b\\f\\n\\r\\tb'", 'a\b\f\n\r\tb'),
("'\\0\\1\\2\\3\\4\\5\\6\\7\\8\\9'", '\0\1\2\3\4\5\6\789'),
("'\\1\\01\\001\\0001'", '\x01\x01\x01\x001'),
("'\\1a1'", '\x01a1'),
("'\\377\\400\\776\\7777'", '\xFF\x00\xFE\xFF7'),
("''''", "'"),
]
for quoted, unquoted in cases:
self.assertEqual(MultiValueGuc._unquote(quoted), unquoted)
def test__unquote_failure_cases(self):
cases = [
"'hello",
"",
"'",
"'hello\\'",
"'hel'lo'",
"'''",
]
for quoted in cases:
with self.assertRaises(MultiValueGuc.ParseError):
MultiValueGuc._unquote(quoted)
def test_set_file_segment_succeeds(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
self.subject.set_mirror_file_segment(file_seg_guc)
self.assertEqual(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_get_value_returns_unique(self):
self.assertEqual(self.subject.get_value(), "sql_value||file_value")
| 50wu/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_compare_segment_guc.py | Python | apache-2.0 | 8,167 | 0.003061 |
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 Håvard Gulldahl
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# appengine stuff
from google.appengine.ext import ndb
class Color(ndb.Model):
foreground = ndb.StringProperty()
background = ndb.StringProperty()
colorId = ndb.StringProperty()
category = ndb.StringProperty() # 'calendar' or 'event'
title = ndb.StringProperty()
class CalendarPrettyTitle(ndb.Model):
cal_id = ndb.StringProperty()
pretty_title = ndb.StringProperty()
class UserSetup(ndb.Model):
user = ndb.UserProperty()
google_token = ndb.JsonProperty()
trello_token = ndb.JsonProperty() # oauth1 access token dict, where .keys() == ('oauth_token', 'oauth_token_secret')
timestamp = ndb.DateTimeProperty(auto_now=True)
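# Illustrative sketch (not part of this module): storing and fetching a Color
# entity with the standard ndb API; the values are made up.
#
#   Color(colorId='5', category='event', title='Banana',
#         foreground='#000000', background='#fbd75b').put()
#   banana = Color.query(Color.colorId == '5', Color.category == 'event').get()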
| havardgulldahl/perpetual-yearcal | models.py | Python | mit | 1,783 | 0.011223 |
import re, sys
import functools
import graphviz as gv
from graphviz import Source
bad_words = [ 'jns', 'js', 'jnz', 'jz', 'jno', 'jo', 'jbe', 'jb', 'jle', 'jl', 'jae', 'ja', 'jne loc', 'je', 'jmp', 'jge', 'jg', 'SLICE_EXTRA', 'SLICE_ADDRESSING', '[BUG]', 'SLICE_VERIFICATION', 'syscall', '#PARAMS_LOG']
instrEdges = []
instrNodes = []
with open('smallCleanedSlice.txt') as oldfile:
for line in oldfile:
tempLine = line.split()
instrNodes.append(tempLine[1] + '-' + tempLine[2])
i=0
for x in instrNodes:
instrNodes[i] = x.replace("#", "")
i += 1
instrNodesString = ''.join(instrNodes)
print('Done! Instruction Nodes List Size is : ') #+ instrNodesString
#print(instrNodes)
print(len(instrNodes))
#print(instrNodes[len(instrNodes)-1])
pattern = '\s+(\S+)\s'
with open('smallCleanedSlice.txt') as oldfile:
for line in oldfile:
prepline = line.replace("#\S*", " r1 ")
prepline = prepline.replace("[SLICE_INFO]", " r2 ")
prepline = prepline.replace("[SLICE_INFO]", " r2 ")
prepline = prepline.replace("[SLICE]", " r3 ")
prepline = prepline.replace("\t", " \t ")
prepline = prepline.rstrip("\t")
prepline = re.sub(r'(\s)#\w+', r'\1', prepline)
prepline = re.sub(r'.*SLICE', '', prepline)
prepline = re.sub(r'(\s)SLICE\s+', r'\1', prepline)
splitList = re.split("r1 | r2 | \t | r3 ", prepline)
if (len(splitList) >=2):
tempEdge = splitList[1]
tempEdge = tempEdge.lstrip()
#print tempEdges
#print len(splitList)
else :
tempEdge = splitList[0]
#print ('hello: '+tempEdge)
instrEdges.append(tempEdge)
#str1 = ''.join(tempLine)
#for line in str1:
dict1 ={}
j = 0
#give unique id number for each instruction based on its line number (starting at 0)
'''for x in instrNodes:
instrNodes[j] = str(j)+ '-' +instrNodes[j]
j+=1
'''
instrNodesString = ''.join(instrEdges)
print('Done! Instruction Edges List size is : ') #+ instrNodesString
#print(instrEdges)
#print(instrNodes)
print(len(instrEdges))
new_dict = {k: v for k, v in zip(instrNodes, instrEdges)}
#print(dict1)
#example dictionary entry is dict1['0-cmp': 'eax, 0xfffff001']
print('Done! Dict (LineNumber-Instruction: Edges) is : ')
#print((new_dict).keys())
#print((new_dict))
print("first node(instr): and its edges(operands): " + 'b7ff5c05-cmp: '+str(new_dict['b7ff5c05-cmp']))
#PRINT OUT THE TWO LISTS INTO TWO SEPERATE FILES
#y = ",".join(map(str, instrNodes))
#z = ",,".join(map(str, instrEdges))
#outputFile= open('nodesOut.txt', 'w')
#outputFile.write(y)
#outputFile2 = open('edgesOut.txt', 'w')
#outputFile2.write(z)
flagEnterKeys = 1
while (flagEnterKeys == 1):
input_var = raw_input('Enter a key (b7ff5c05-cmp for the 1st instruction cmp in the slice): TYPE EXIT TO End.\n')
if (input_var in new_dict):
print("Operands for " + input_var + " are: " + str(new_dict[input_var]) + ".\n")
break
if ((input_var == "exit") or (input_var == ",exit,")):
flagEnterKeys = 0;
break
else :
print("ERROR! Please enter in a valid key for the instrNodes, instrEdges dictionary.")
##New Graphviz-dot code here
graph = functools.partial(gv.Graph, format='svg')
digraph = functools.partial(gv.Digraph, format='svg')
datG = digraph()
nodes = instrNodes
edges = instrEdges
#nodes = testNodes
#edges = testEdges
print(nodes)
print(edges)
def add_nodes(graph):
for n in nodes:
graph.node(n, label = str(n) + '(' + str(new_dict[n]) + ')')
return graph
def add_edges(graph):
for e in edges:
graph.edge(*e)
return graph
cmpFlags = []
newestOF = ''
newestSF = ''
newestZF = ''
newestAF = ''
newestCF = ''
newestPF = ''
# default values 'R' means edge from root node in the 32-bit 4word registers
#Accumulator Counter Data Base Stack Pointer Stack Base Pointer Source Destination
EAX = ['R','R','R','R']
ECX = ['R','R','R','R']
EDI = ['R','R','R','R']
EDX = ['R','R','R','R']
EBX = ['R','R','R','R']
ESP = ['R','R','R','R']
EBP = ['R','R','R','R']
ESI = ['R','R','R','R']
EDI = ['R','R','R','R']
#modify Eax register and its 16 and 8 bit versions
def modifyEAX(firstWord, secondWord, thirdWord, fourthWord):
EAX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyAX(thirdWord, fourthWord):
EAX[2:4] = [thirdWord, fourthWord]
def modifyAH(thirdWord):
EAX[2:3] = [thirdWord]
def modifyAL(fourthWord):
EAX[3:4] = [fourthWord]
#modify ecx register and its 16 and 8 bit versions
def modifyECX(firstWord, secondWord, thirdWord, fourthWord):
ECX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyCX(thirdWord, fourthWord):
ECX[2:4] = [thirdWord, fourthWord]
def modifyCH(thirdWord):
ECX[2:3] = [thirdWord]
def modifyCL(fourthWord):
ECX[3:4] = [fourthWord]
#modify edx register and its 16 and 8 bit versions
def modifyEDX(firstWord, secondWord, thirdWord, fourthWord):
EDX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyDX(thirdWord, fourthWord):
EDX[2:4] = [thirdWord, fourthWord]
def modifyDH(thirdWord):
EDX[2:3] = [thirdWord]
def modifyDL(fourthWord):
EDX[3:4] = [fourthWord]
#modify ebx register and its 16 and 8 bit versions
def modifyEBX(firstWord, secondWord, thirdWord, fourthWord):
EBX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyBX(thirdWord, fourthWord):
EBX[2:4] = [thirdWord, fourthWord]
def modifyBH(thirdWord):
EBX[2:3] = [thirdWord]
def modifyBL(fourthWord):
EBX[3:4] = [fourthWord]
#modify esp register and its 16bit versions
def modifyESP(firstWord, secondWord, thirdWord, fourthWord):
ESP[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifySP(thirdWord, fourthWord):
ESP[2:4] = [thirdWord, fourthWord]
#modify ebp register and its 16bit versions
def modifyEBP(firstWord, secondWord, thirdWord, fourthWord):
EBP[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyBP(thirdWord, fourthWord):
EBP[2:4] = [thirdWord, fourthWord]
#modify esi register and its 16bit versions
def modifyESI(firstWord, secondWord, thirdWord, fourthWord):
ESI[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifySI(thirdWord, fourthWord):
ESI[2:4] = [thirdWord, fourthWord]
#modify edi register and its 16bit versions
def modifyEDI(firstWord, secondWord, thirdWord, fourthWord):
EDI[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyDI(thirdWord, fourthWord):
EDI[2:4] = [thirdWord, fourthWord]
ax = EAX[2:4]
print(EAX)
print(ax)
ax = ['changedax1', 'changedax2']
print(EAX)
print(ax)
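# Note: EAX[2:4] above returns a new list, so rebinding ax afterwards leaves
# EAX unchanged; the two pairs of prints demonstrate that slices are copies.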
datG.node('R', 'Root')
#datG.edge('R', '0-cmp', label='eax')
#datG.edge('R', '0-cmp', label='0xfffff001' )
datG.node('Out', 'Output')
pattern = re.compile("^\s+|\s*,\s*|\s+$")
for idx, c in enumerate(instrEdges):
splitStr = [a for a in pattern.split(c) if a]
for idz, b in enumerate(splitStr):
tempNodeStr = instrNodes[(idx)]
if (idz == 0 and 'mov' not in tempNodeStr):
# if dest reg is eax
if b == "eax":
modifyEAX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "ax":
modifyAX(nodes[idx],nodes[idx])
if b == "ah":
modifyAH(nodes[idx])
if b == "al":
modifyAL(nodes[idx])
#
# if dest reg is ecx
if b == "ecx":
modifyECX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "cx":
modifyCX(nodes[idx],nodes[idx])
if b == "ch":
modifyCH(nodes[idx])
if b == "cl":
modifyCL(nodes[idx])
#
# if dest reg is edx
if b == "edx":
modifyEDX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "dx":
modifyDX(nodes[idx],nodes[idx])
if b == "dh":
modifyDH(nodes[idx])
if b == "dl":
modifyDL(nodes[idx])
#
# if dest reg is ebx
if b == "ebx":
modifyEBX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "bx":
modifyBX(nodes[idx],nodes[idx])
if b == "bh":
modifyBH(nodes[idx])
if b == "bl":
modifyBL(nodes[idx])
#
# if dest reg is esp
if b == "esp":
modifyESP(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "sp":
modifySP(nodes[idx],nodes[idx])
# if dest reg is ebp
if b == "ebp":
modifyEBP(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "ebp":
modifyBP(nodes[idx],nodes[idx])
# if dest reg is esi
if b == "esi":
modifyESI(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "esi":
modifySI(nodes[idx],nodes[idx])
# if dest reg is edi
if b == "edi":
modifyEDI(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "di":
modifyDI(nodes[idx],nodes[idx])
if "cmp" in tempNodeStr and idz == 0:
#Eax edges
if splitStr[idz] == "eax":
for ido, k in enumerate(EAX):
datG.edge(k, tempNodeStr, label=str(k)+'(eax)'+str(ido))
if splitStr[idz] == "ax":
for ido, k in enumerate(EAX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(ax)'+str(ido))
if splitStr[idz] == "ah":
for ido, k in enumerate(EAX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(ah)'+str(ido))
if splitStr[idz] == "al":
for ido, k in enumerate(EAX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(al)'+str(ido))
#Ecx edges
if splitStr[idz] == "ecx":
for ido, k in enumerate(ECX):
datG.edge(k, tempNodeStr, label=str(k)+'(ecx)'+str(ido))
if splitStr[idz] == "cx":
for ido, k in enumerate(ECX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(cx)'+str(ido))
if splitStr[idz] == "ch":
for ido, k in enumerate(ECX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(ch)'+str(ido))
if splitStr[idz] == "cl":
for ido, k in enumerate(ECX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(cl)'+str(ido))
#
#Edx edges
if splitStr[idz] == "edx":
for ido, k in enumerate(EDX):
datG.edge(k, tempNodeStr, label=str(k)+'(edx)'+str(ido))
if splitStr[idz] == "dx":
for ido, k in enumerate(EDX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(dx)'+str(ido))
if splitStr[idz] == "dh":
for ido, k in enumerate(EDX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(dh)'+str(ido))
if splitStr[idz] == "dl":
for ido, k in enumerate(EDX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(dl)'+str(ido))
#
#Ebx edges
if splitStr[idz] == "ebx":
for ido, k in enumerate(EBX):
datG.edge(k, tempNodeStr, label=str(k)+'(ebx)'+str(ido))
if splitStr[idz] == "bx":
for ido, k in enumerate(EBX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bx)'+str(ido))
if splitStr[idz] == "bh":
for ido, k in enumerate(EBX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(bh)'+str(ido))
if splitStr[idz] == "bl":
for ido, k in enumerate(EBX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bl)'+str(ido))
#esp edges
if splitStr[idz] == "esp":
for ido, k in enumerate(ESP):
datG.edge(k, tempNodeStr, label=str(k)+'(esp)'+str(ido))
if splitStr[idz] == "sp":
for ido, k in enumerate(ESP[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(sp)'+str(ido))
#
#ebp edges
if splitStr[idz] == "ebp":
for ido, k in enumerate(EBP):
datG.edge(k, tempNodeStr, label=str(k)+'(ebp)'+str(ido))
if splitStr[idz] == "bp":
for ido, k in enumerate(EBP[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bp)'+str(ido))
#
#esi edges
if splitStr[idz] == "esi":
for ido, k in enumerate(ESI):
datG.edge(k, tempNodeStr, label=str(k)+'(esi)'+str(ido))
if splitStr[idz] == "si":
for ido, k in enumerate(ESI[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(si)'+str(ido))
#
#
if splitStr[idz] == "edi":
for ido, k in enumerate(EDI):
datG.edge(k, tempNodeStr, label=str(k)+'(edi)'+str(ido))
if splitStr[idz] == "di":
for ido, k in enumerate(EDI[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(di)'+str(ido))
#
else:
                datG.edge('R', tempNodeStr, label=str('unhandledParam')+'(misc cmp)'+str(-1))
if "cmp" in tempNodeStr and idz == 0:
statusFlags = ['OF', 'SF', 'ZF', 'AF', 'CF', 'PF']
#if b == "edi":
# if src reg is eax
if "mov" in tempNodeStr and idz == 1:
#Eax edges
if splitStr[idz] == "eax":
for ido, k in enumerate(EAX):
datG.edge(k, tempNodeStr, label=str(k)+'(eax)'+str(ido))
elif splitStr[idz] == "ax":
for ido, k in enumerate(EAX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(ax)'+str(ido))
elif splitStr[idz] == "ah":
for ido, k in enumerate(EAX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(ah)'+str(ido))
elif splitStr[idz] == "al":
for ido, k in enumerate(EAX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(al)'+str(ido))
#Ecx edges
elif splitStr[idz] == "ecx":
for ido, k in enumerate(ECX):
datG.edge(k, tempNodeStr, label=str(k)+'(ecx)'+str(ido))
elif splitStr[idz] == "cx":
for ido, k in enumerate(ECX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(cx)'+str(ido))
elif splitStr[idz] == "ch":
for ido, k in enumerate(ECX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(ch)'+str(ido))
elif splitStr[idz] == "cl":
for ido, k in enumerate(ECX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(cl)'+str(ido))
#
#Edx edges
elif splitStr[idz] == "edx":
for ido, k in enumerate(EDX):
datG.edge(k, tempNodeStr, label=str(k)+'(edx)'+str(ido))
elif splitStr[idz] == "dx":
for ido, k in enumerate(EDX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(dx)'+str(ido))
elif splitStr[idz] == "dh":
for ido, k in enumerate(EDX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(dh)'+str(ido))
elif splitStr[idz] == "dl":
for ido, k in enumerate(EDX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(dl)'+str(ido))
#
#Ebx edges
elif splitStr[idz] == "ebx":
for ido, k in enumerate(EBX):
datG.edge(k, tempNodeStr, label=str(k)+'(ebx)'+str(ido))
elif splitStr[idz] == "bx":
for ido, k in enumerate(EBX[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bx)'+str(ido))
elif splitStr[idz] == "bh":
for ido, k in enumerate(EBX[2:3]):
datG.edge(k, tempNodeStr, label=str(k)+'(bh)'+str(ido))
elif splitStr[idz] == "bl":
for ido, k in enumerate(EBX[3:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bl)'+str(ido))
#esp edges
elif splitStr[idz] == "esp":
for ido, k in enumerate(ESP):
datG.edge(k, tempNodeStr, label=str(k)+'(esp)'+str(ido))
elif splitStr[idz] == "sp":
for ido, k in enumerate(ESP[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(sp)'+str(ido))
#
#ebp edges
elif splitStr[idz] == "ebp":
for ido, k in enumerate(EBP):
datG.edge(k, tempNodeStr, label=str(k)+'(ebp)'+str(ido))
elif splitStr[idz] == "bp":
for ido, k in enumerate(EBP[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(bp)'+str(ido))
#
#esi edges
elif splitStr[idz] == "esi":
for ido, k in enumerate(ESI):
datG.edge(k, tempNodeStr, label=str(k)+'(esi)'+str(ido))
elif splitStr[idz] == "si":
for ido, k in enumerate(ESI[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(si)'+str(ido))
#
#
elif splitStr[idz] == "edi":
for ido, k in enumerate(EDI):
datG.edge(k, tempNodeStr, label=str(k)+'(edi)'+str(ido))
elif splitStr[idz] == "di":
for ido, k in enumerate(EDI[2:4]):
datG.edge(k, tempNodeStr, label=str(k)+'(di)'+str(ido))
#
else:
datG.edge('R', tempNodeStr, label=str('unhandledParam')+'(misc mov)'+str(-1))
#iterate through the flags outputted (affected) by the instruction and do both:
#add an edge from the instruction to generic 'OutputNode'
#update the flags with newest most recent values
for idy, c in enumerate(statusFlags):
datG.edge(tempNodeStr, 'Out', label=tempNodeStr + ',' + str(c))
if c == "OF":
newestOF = tempNodeStr + '-' + str(c)
if c == "SF":
newestSF = tempNodeStr + '-' + str(c)
if c == "ZF":
newestZF = tempNodeStr + '-' + str(c)
if c == "AF":
newestAF = tempNodeStr + '-' + str(c)
if c == "CF":
newestCF = tempNodeStr + '-' + str(c)
if c == "PF":
newestPF = tempNodeStr + '-' + str(c)
statusFlags = []
newFlagRegList = [newestOF, newestSF, newestZF, newestAF, newestCF, newestPF]
'''
for idx, c in enumerate(statusFlags):
tempNodeStr = instrNodes[(idx)]
datG.edge('b7ff5c05-cmp', 'Out', label=tempNodeStr + '-' + c)
'''
add_nodes(datG)
#add_edges(datG)
print(datG.source)
src = Source(datG)
src.render('test-output/dataFlowSliceWes1.gv', view=True)
#some example graph code
'''
class Graph(object):
def __init__(self, graph_dict=None):
""" initializes a graph object
If no dictionary or None is given,
an empty dictionary will be used
"""
if graph_dict == None:
graph_dict = {}
self.__graph_dict = graph_dict
def vertices(self):
""" returns the vertices of a graph """
return list(self.__graph_dict.keys())
def edges(self):
""" returns the edges of a graph """
return self.__generate_edges()
def add_vertex(self, vertex):
""" If the vertex "vertex" is not in
self.__graph_dict, a key "vertex" with an empty
list as a value is added to the dictionary.
Otherwise nothing has to be done.
"""
if vertex not in self.__graph_dict:
self.__graph_dict[vertex] = []
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict:
self.__graph_dict[vertex1].append(vertex2)
else:
self.__graph_dict[vertex1] = [vertex2]
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbour in self.__graph_dict[vertex]:
if {neighbour, vertex} not in edges:
edges.append({vertex, neighbour})
return edges
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
if __name__ == "__main__":
f = { "a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
}
print(new_dict)
print(new_dict['0-cmp'])
graph = Graph(new_dict)
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print("Add vertex:")
graph.add_vertex("z")
print("Vertices of graph:")
print(graph.vertices())
print("Add an edge:")
graph.add_edge({"a","z"})
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print('Adding an edge {"x","y"} with new vertices:')
graph.add_edge({"x","y"})
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
'''
| WesCoomber/dataFlowGraphProjecto | presentgetBothNodesEdges.py | Python | mit | 22,404 | 0.010266 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.events),
]
| fanout/django-eventstream | django_eventstream/urls.py | Python | mit | 96 | 0 |
"""
*2014.09.10 16:10:05
DEPRECATED!!!!
please use building.models.search_building and building.models.make_building
instead of the make_unit and make_building functions found here...
out of date.
"""
import sys, os, json, codecs, re
sys.path.append(os.path.dirname(os.getcwd()))
from geopy import geocoders, distance
# MapQuest no longer available in present api. Work around
# detailed here: http://stackoverflow.com/questions/30132636/geocoding-error-with-geopandas-and-geopy
geocoders.MapQuest = geocoders.OpenMapQuest
#http://stackoverflow.com/questions/8047204/django-script-to-access-model-objects-without-using-manage-py-shell
#from rentrocket import settings
#from django.core.management import setup_environ
#setup_environ(settings)
#pre django 1.4 approach:
#from rentrocket import settings as rrsettings
#from django.core.management import setup_environ
#setup_environ(settings)
#from django.conf import settings
#settings.configure(rrsettings)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rentrocket.settings")
from building.models import Building, Parcel, BuildingPerson, Unit
from person.models import Person
def parse_person(text):
"""
take a string representing all details of a person
and try to parse out the different details for that person...
usually it's a comma separated string,
but sometimes names have commas in them
instead, look for the start of the address,
either a number or a PO variation
"""
name = ''
address = ''
phone = ''
remainder = ''
print "Parsing: %s" % text
    phone_re = re.compile("(\d{3})\W*(\d{3})\W*(\d{4})\W*(\w*)")
    m = phone_re.search(text)
    if m:
        #print dir(m)
        #print len(m.groups())
        phone1 = m.group(1)
        parts = text.split(phone1)
        #update text so it only contains part without phone number:
        text = parts[0]
        full_phone = phone1+parts[1]
        #keep the matched number so it is returned instead of the compiled regex
        phone = full_phone
        print "Phone found: %s" % full_phone
filler='.*?' # Non-greedy match on filler
po_box='( P\\.O\\. | P O | PO )'
rg = re.compile(po_box,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
csv1=m.group(1)
print "PO BOX MATCH: ("+csv1+")"+"\n"
print text
parts = text.split(csv1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
else:
re2='(\\d+)' # Integer Number 1
rg = re.compile(re2,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
int1 = m.group(1)
print "NUMBER MATCH: (" + int1 + ")"
parts = text.split(int1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
address = address.strip()
name = name.strip()
print "name: %s" % name
print "address: %s" % address
print ""
    if name and name[-1] == ',':
        name = name[:-1]
    if address and address[-1] == ',':
        address = address[:-1]
return (name, address, phone, remainder)
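# A quick illustration of the intended split (hypothetical input; the name,
# address and phone below are made up):
#   parse_person("Smith Properties LLC, 123 Main St Bloomington IN, 812 555 1234")
#   -> name="Smith Properties LLC", address="123 Main St Bloomington IN",
#      phone="812 555 1234"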
def make_building(location, bldg_id, city, feed_source, parcel_id=None, bldg_type=None, no_units=None, sqft=None):
"""
add the building to the database
#*2015.03.07 14:04:37
#see search_building(bldgform.cleaned_data.get("address"), unit=unit, make=True)
"""
full_city = '%s, IN, USA' % city.name
match = False
#find an address to use
for geo_source in location.sources:
if not match:
source_list = location.get_source(geo_source)
if len(source_list) and source_list[0]['place'] and source_list[0]['place'] != full_city:
print "using: %s to check: %s" % (geo_source, source_list[0]['place'])
match = True
#TODO: process this a bit more...
#probably don't want city and zip here:
#keeping city and zip minimizes chance for overlap
#especially since this is used as a key
#can always take it out on display, if necessary
#*2014.09.10 14:51:28
#this has changed... should only use street now...
#see building/models.py -> make_building
                #cur_address = source_list[0]['place']
                cur_address = source_list[0]['street']
if parcel_id == None:
cid = "%s-%s" % (city.tag, bldg_id)
else:
cid = parcel_id
print "Checking parcel id: %s" % (cid)
parcels = Parcel.objects.filter(custom_id=cid)
if parcels.exists():
parcel = parcels[0]
print "Already had parcel: %s" % parcel.custom_id
else:
parcel = Parcel()
parcel.custom_id = cid
parcel.save()
print "Created new parcel: %s" % parcel.custom_id
buildings = Building.objects.filter(city=city).filter(address=cur_address)
bldg = None
#check if a previous building object in the db exists
if buildings.exists():
bldg = buildings[0]
print "Already had: %s" % bldg.address
else:
#if not,
#CREATE A NEW BUILDING OBJECT HERE
#cur_building = Building()
bldg = Building()
#bldg.address = source_list[0]['place']
bldg.address = source_list[0]['street']
bldg.latitude = float(source_list[0]['lat'])
bldg.longitude = float(source_list[0]['lng'])
bldg.parcel = parcel
bldg.geocoder = geo_source
bldg.source = feed_source
bldg.city = city
bldg.state = city.state
if bldg_type:
bldg.type = bldg_type
if no_units:
bldg.number_of_units = no_units
if sqft:
bldg.sqft = sqft
bldg.save()
print "Created new building: %s" % bldg.address
return bldg
else:
print "Skipping: %s with value: %s" % (geo_source, source_list[0]['place'])
def make_unit(apt_num, building):
#check for existing:
units = Unit.objects.filter(building=building).filter(number=apt_num)
unit = None
#check if a previous building object in the db exists
if units.exists():
unit = units[0]
print "Already had Unit: %s" % unit.address
else:
#if not,
#CREATE A NEW UNIT OBJECT HERE
unit = Unit()
unit.building = building
unit.number = apt_num
# don't want to set this unless it's different:
#unit.address = building.address + ", " + apt_num
## bedrooms
## bathrooms
## sqft
## max_occupants
unit.save()
print "Created new unit: %s" % unit.number
return unit
def make_person(name, building, relation, address=None, city=None, website=None, phone=None):
#now associate applicant with building:
#first find/make person
people = Person.objects.filter(city=city).filter(name=name)
person = None
#check if a previous building object in the db exists
if people.exists():
person = people[0]
print "Already had Person: %s" % person.name
else:
#if not,
#CREATE A NEW PERSON OBJECT HERE
person = Person()
person.name = name
if city:
person.city = city
if address:
person.address = address
if website:
person.website = website
if phone:
person.phone = phone
person.save()
#then find/make association:
bpeople = BuildingPerson.objects.filter(building=building).filter(person=person)
bperson = None
#check if a previous building_person object in the db exists
if bpeople.exists():
bperson = bpeople[0]
print "Already had BuildingPerson: %s with: %s" % (bperson.person.name, bperson.building.address)
else:
#if not,
#CREATE A NEW BUILDING PERSON OBJECT HERE
bperson = BuildingPerson()
bperson.person = person
bperson.building = building
bperson.relation = relation
bperson.save()
return (person, bperson)
def save_results(locations, destination="test.tsv"):
#destination = "test.tsv"
match_tallies = {}
closest_tallies = {}
furthest_tallies = {}
print "Saving: %s results to %s" % (len(locations), destination)
with codecs.open(destination, 'w', encoding='utf-8') as out:
#print locations.values()[0].make_header()
out.write(locations.values()[0].make_header())
for key, location in locations.items():
for source in location.sources:
#if hasattr(location, source) and getattr(location, source)[0]['place']:
source_list = location.get_source(source)
if len(source_list) and source_list[0]['place']:
if match_tallies.has_key(source):
match_tallies[source] += 1
else:
match_tallies[source] = 1
location.compare_points()
#print location.make_row()
# this was used to filter units with 1, 1 out separately
#if location.bldg_units == '1, 1':
# out.write(location.make_row())
print match_tallies
exit()
class Location(object):
"""
hold geolocation data associated with a specific address
making an object to help with processing results consistently
"""
def __init__(self, dictionary={}, sources=None):
"""
http://stackoverflow.com/questions/1305532/convert-python-dict-to-object
"""
self.__dict__.update(dictionary)
if sources:
self.sources = sources
else:
self.sources = ["google", "bing", "usgeo", "geonames", "openmq", "mq"]
#*2014.01.08 09:01:16
#this was only needed for csv exporting
#but these valued should be passed in to make_building
#this is not provided by any geolocation service,
#so it doesn't make sense to track here:
#self.units_bdrms = ''
#self.bldg_units = ''
def get_source(self, source):
"""
wrap hasattr/getattr combination
if we have something, return it,
otherwise return empty list
"""
if hasattr(self, source):
return getattr(self, source)
else:
return []
def to_dict(self):
"""
http://stackoverflow.com/questions/61517/python-dictionary-from-an-objects-fields
"""
result = self.__dict__.copy()
#can't remove('sources') on a dict
result.pop('sources', None)
return result
def compare_points(self):
#find only points with something in them
options = {}
for source in self.sources:
#this does the same thing as the next 2 lines,
#but is not as easy to read
#if hasattr(self, source) and getattr(self, source)[0]['place']:
source_list = self.get_source(source)
if len(source_list) and source_list[0]['place']:
#options[source] = getattr(self, source)[0]
options[source] = source_list[0]
d = distance.distance
available = options.keys()
self.distances = {}
self.totals = {}
index = 1
for item in available:
total = 0
others = available[:]
if item in others:
others.remove(item)
for other in others:
pt1 = ( options[item]['lat'], options[item]['lng'] )
pt2 = ( options[other]['lat'], options[other]['lng'] )
key = "%s-%s" % (item, other)
#https://github.com/geopy/geopy/blob/master/geopy/distance.py
#miles are also an option
#cur_d = d(pt1, pt2).miles
cur_d = d(pt1, pt2).feet
if not self.distances.has_key(key):
self.distances[key] = cur_d
total += cur_d
#this will be the same for all items if adding everything
self.totals[item] = total
def min_max_distances(self):
if not self.distances:
self.compare_points()
sortable = []
for key, value in self.distances.items():
sortable.append( (value, key) )
sortable.sort()
if len(sortable) >= 2:
return ( sortable[0], sortable[-1] )
else:
return ( ('', ''), ('', '') )
def min_max_totals(self):
if not self.distances:
self.compare_points()
sortable = []
for key, value in self.totals.items():
sortable.append( (value, key) )
sortable.sort()
if len(sortable) >= 2:
return ( sortable[0], sortable[-1] )
else:
return ( ('', ''), ('', '') )
def make_header(self):
"""
return a row representation of the header (for CSV output)
"""
#header = [ 'search', 'address', 'bldg_units', 'units_bdrms', '' ]
header = [ 'search', 'address', '' ]
header.extend( self.sources )
header.extend( [ '', 'closest', 'closest_amt', 'furthest', 'furthest_amt', '' ] )
header.extend( [ '', 'tclosest', 'tclosest_amt', 'tfurthest', 'tfurthest_amt', '' ] )
index = 1
for item1 in self.sources:
for item2 in self.sources[index:]:
title = "%s-%s" % (item1, item2)
header.append(title)
return "\t".join(header) + '\n'
def make_row(self):
"""
return a row representation of our data (for CSV output)
"""
## for source in self.sources:
## if self.get
## if source == 'google':
## #set this as the default
## if location.google['place']:
## location.address = location.google['place']
## else:
## #TODO
## #maybe check other places?
## location.address = ''
#row = [ self.address ]
row = []
found_address = False
for source in self.sources:
source_list = self.get_source(source)
if len(source_list) and source_list[0]['place']:
#if hasattr(self, source) and getattr(self, source)[0]['place']:
# cur = getattr(self, source)[0]
cur = source_list[0]
ll = "%s, %s" % (cur['lat'], cur['lng'])
#pick out the first address that has a value
if not found_address:
#insert these in reverse order:
self.address = cur['place']
row.insert(0, '')
#row.insert(0, self.units_bdrms)
#row.insert(0, self.bldg_units)
row.insert(0, self.address)
#this should always be set... if not, investigate why:
if not hasattr(self, 'address_alt'):
print self.to_dict()
exit()
row.insert(0, self.address_alt)
found_address = True
else:
ll = ''
row.append( ll )
#couldn't find an address anywhere:
if not found_address:
row.insert(0, '')
#row.insert(0, self.units_bdrms)
#row.insert(0, self.bldg_units)
row.insert(0, '')
row.insert(0, self.address_alt)
print "ERROR LOCATING: %s" % self.address_alt
(mi, ma) = self.min_max_distances()
# 'closest', 'closest_amt', 'furthest', 'furthest_amt',
row.extend( [ '', mi[1], str(mi[0]), ma[1], str(ma[0]), '' ] )
(mi, ma) = self.min_max_totals()
# 'closest', 'closest_amt', 'furthest', 'furthest_amt',
row.extend( [ '', mi[1], str(mi[0]), ma[1], str(ma[0]), '' ] )
index = 1
for item1 in self.sources:
for item2 in self.sources[index:]:
title = "%s-%s" % (item1, item2)
if self.distances.has_key(title):
row.append(str(self.distances[title]))
else:
row.append('')
return "\t".join(row) + '\n'
class Geo(object):
"""
object to assist with geocoding tasks...
wraps geopy
and initializes coders in one spot
"""
def __init__(self):
#initialize geocoders once:
self.google = geocoders.GoogleV3()
#doesn't look like yahoo supports free api any longer:
#http://developer.yahoo.com/forum/General-Discussion-at-YDN/Yahoo-GeoCode-404-Not-Found/1362061375511-7faa66ba-191d-4593-ba63-0bb8f5d43c06
#yahoo = geocoders.Yahoo('PCqXY9bV34G8P7jzm_9JeuOfIviv37mvfyTvA62Ro_pBrwDtoIaiNLT_bqRVtETpb79.avb0LFV4U1fvgyz0bQlX_GoBA0s-')
self.usgeo = geocoders.GeocoderDotUS()
#self.geonames = geocoders.GeoNames()
self.bing = geocoders.Bing('AnFGlcOgRppf5ZSLF8wxXXN2_E29P-W9CMssWafE1RC9K9eXhcAL7nqzTmjwzMQD')
self.openmq = geocoders.OpenMapQuest()
#self.mq = geocoders.MapQuest('Fmjtd%7Cluub2hu7nl%2C20%3Do5-9uzg14')
#skipping mediawiki, seems less complete?
#mediawiki = geocoders.MediaWiki("http://wiki.case.edu/%s")
def lookup(self, address, source="google", location=None, force=False):
"""
look up the specified address using the designated source
if location dictionary is specified (for local caching)
store results there
return results either way
"""
updated = False
if not location is None:
self.location = location
else:
self.location = Location()
#if we already have any value for source (even None)
#won't look again unless force is set True
if (not hasattr(location, source)) or force:
do_query = False
if hasattr(location, source):
previous_result = getattr(location, source)
if previous_result[0]['place'] is None:
do_query = True
else:
do_query = True
if do_query:
print "Looking for: %s in %s" % (address, source)
coder = getattr(self, source)
if hasattr(location, source):
result = getattr(location, source)
else:
result = []
#Be very careful when enabling try/except here:
#can hide limit errors with a geocoder.
#good to do at the last phase
#try:
options = coder.geocode(address, exactly_one=False)
if options:
if isinstance(options[0], unicode):
(place, (lat, lng)) = options
result.append({'place': place, 'lat': lat, 'lng': lng})
setattr(location, source, result)
print location.to_dict()
updated = True
else:
print options
for place, (lat, lng) in options:
#clear out any old "None" entries:
for item in result[:]:
if item['place'] is None:
result.remove(item)
result.append({'place': place, 'lat': lat, 'lng': lng})
setattr(location, source, result)
print location.to_dict()
updated = True
#print "Result was: %s" % place
#print "lat: %s, long: %s" % (lat, lng)
#setattr(location, source, {'place': place, 'lat': lat, 'lng': lng})
## except:
## print "Error with lookup!"
## result.append({'place': None, 'lat': None, 'lng': None})
## setattr(location, source, result)
else:
print "Already have %s results for: %s" % (source, address)
return updated
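# Example (a minimal sketch): geocode one address with the Google coder and
# cache the result on a Location object (the address here is only illustrative):
#   geo = Geo()
#   loc = Location({'address_alt': '401 N Morton St, Bloomington, IN'})
#   geo.lookup('401 N Morton St, Bloomington, IN', source='google', location=loc)
#   loc.google  # -> [{'place': ..., 'lat': ..., 'lng': ...}] on success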
def save_json(destination, json_objects):
json_file = codecs.open(destination, 'w', encoding='utf-8', errors='ignore')
json_file.write(json.dumps(json_objects))
json_file.close()
def load_json(source_file, create=False):
if not os.path.exists(source_file):
json_objects = {}
if create:
print "CREATING NEW JSON FILE: %s" % source_file
json_file = codecs.open(source_file, 'w', encoding='utf-8', errors='ignore')
#make sure there is something there for subsequent loads
json_file.write(json.dumps(json_objects))
json_file.close()
else:
raise ValueError, "JSON file does not exist: %s" % source_file
else:
json_file = codecs.open(source_file, 'r', encoding='utf-8', errors='ignore')
try:
json_objects = json.loads(json_file.read())
except:
raise ValueError, "No JSON object could be decoded from: %s" % source_file
json_file.close()
return json_objects
| City-of-Bloomington/green-rental | scripts/helpers.py | Python | agpl-3.0 | 22,169 | 0.011728 |
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.ticker import ScalarFormatter
from subprocess import call
fs = 8
order = np.array([])
nsteps = np.array([])
error = np.array([])
# load SDC data
file = open('conv-data.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order = np.append(order, int(items[0]))
nsteps = np.append(nsteps, int(float(items[1])))
error = np.append(error, float(items[2]))
file.close()
assert np.size(order)==np.size(nsteps), 'Found different number of entries in order and nsteps'
assert np.size(nsteps)==np.size(error), 'Found different number of entries in nsteps and error'
N = np.size(nsteps)//3
assert np.size(nsteps) % 3 == 0, 'Number of entries not a multiple of three'
# load Runge-Kutta data
order_rk = np.array([])
nsteps_rk = np.array([])
error_rk = np.array([])
file = open('conv-data-rk.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order_rk = np.append(order_rk, int(items[0]))
nsteps_rk = np.append(nsteps_rk, int(float(items[1])))
error_rk = np.append(error_rk, float(items[2]))
file.close()
assert np.size(order_rk)==np.size(nsteps_rk), 'Found different number of entries in order and nsteps'
assert np.size(nsteps_rk)==np.size(error_rk), 'Found different number of entries in nsteps and error'
N = np.size(nsteps_rk)//3
assert np.size(nsteps_rk) % 3 == 0, 'Number of entries not a multiple of three'
### Compute and plot error constant ###
errconst_sdc = np.zeros((3,N))
errconst_rk = np.zeros((3,N))
nsteps_plot_sdc = np.zeros((3,N))
nsteps_plot_rk = np.zeros((3,N))
order_plot = np.zeros(3)
for ii in range(0,3):
order_plot[ii] = order[N*ii]
for jj in range(0,N):
p_sdc = order[N*ii+jj]
err_sdc = error[N*ii+jj]
nsteps_plot_sdc[ii,jj] = nsteps[N*ii+jj]
dt_sdc = 1.0/float(nsteps_plot_sdc[ii,jj])
errconst_sdc[ii,jj] = err_sdc/dt_sdc**float(p_sdc)
p_rk = order_rk[N*ii+jj]
err_rk = error_rk[N*ii+jj]
nsteps_plot_rk[ii,jj] = nsteps_rk[N*ii+jj]
dt_rk = 1.0/float(nsteps_plot_rk[ii,jj])
errconst_rk[ii,jj] = err_rk/dt_rk**float(p_rk)
color = [ 'r', 'b', 'g' ]
shape_sdc = ['<', '^', '>']
shape_rk = ['o', 'd', 's']
rcParams['figure.figsize'] = 2.5, 2.5
fig = plt.figure()
for ii in range(0,3):
plt.semilogy(nsteps_plot_sdc[ii,:], errconst_sdc[ii,:], shape_sdc[ii], markersize=fs, color=color[ii], label='SDC('+str(int(order_plot[ii]))+')')
plt.semilogy(nsteps_plot_rk[ii,:], errconst_rk[ii,:], shape_rk[ii], markersize=fs-2, color=color[ii], label='IMEX('+str(int(order_plot[ii]))+')')
plt.legend(loc='lower left', fontsize=fs, prop={'size':fs-1}, ncol=2)
plt.xlabel('Number of time steps', fontsize=fs)
plt.ylabel('Estimated error constant', fontsize=fs, labelpad=2)
plt.xlim([0.9*np.min(nsteps_plot_sdc), 1.1*np.max(nsteps_plot_sdc)])
plt.ylim([1e1, 1e6])
plt.yticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6],fontsize=fs)
plt.xticks([20, 30, 40, 60, 80, 100], fontsize=fs)
plt.gca().get_xaxis().get_major_formatter().labelOnlyBase = False
plt.gca().get_xaxis().set_major_formatter(ScalarFormatter())
#plt.show()
filename = 'error_constants.pdf'
fig.savefig(filename,bbox_inches='tight')
call(["pdfcrop", filename, filename])
| danielru/pySDC | playgrounds/deprecated/acoustic_1d_imex/ploterrorconstants.py | Python | bsd-2-clause | 3,326 | 0.02285 |
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Sum
from django.contrib.contenttypes.models import ContentType
class TrendingManager(models.Manager):
def trending(self, model, days=30, kind=""):
views = self.filter(
viewed_content_type=ContentType.objects.get_for_model(model),
views_on__gte=datetime.date.today() - datetime.timedelta(days=days),
kind=kind
).values(
"viewed_content_type",
"viewed_object_id",
"kind"
).annotate(
num_views=Sum("count")
).order_by("-num_views")
for d in views:
try:
d["object"] = ContentType.objects.get_for_id(
d["viewed_content_type"]
).get_object_for_this_type(
pk=d["viewed_object_id"]
)
except ObjectDoesNotExist:
d["object"] = None
return views
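# Example (sketch): assuming this manager is attached as `objects` to a per-day
# view-count model and `BlogPost` is one of the viewed models (both names are
# hypothetical):
#   DailyViewCount.objects.trending(BlogPost, days=7, kind="detail")
# returns dicts ordered by num_views, each with the resolved `object` attached.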
| eldarion/django-trending | trending/managers.py | Python | bsd-3-clause | 1,033 | 0.000968 |
"""
thetvdb.com Python API
(c) 2009 James Smith (http://loopj.com)
(c) 2014 Wayne Davison <wayne@opencoder.net>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import datetime
import random
import re
import copy
import xml.parsers.expat as expat
from cStringIO import StringIO
from zipfile import ZipFile
class TheTVDB(object):
def __init__(self, api_key='2B8557E0CBF7D720', language = 'en', want_raw = False):
#http://thetvdb.com/api/<apikey>/<request>
self.api_key = api_key
self.mirror_url = "http://thetvdb.com"
self.base_url = self.mirror_url + "/api"
self.base_key_url = "%s/%s" % (self.base_url, self.api_key)
self.language = language
self.want_raw = want_raw
# Mirror selection got deprecated a while back, so tell it to skip the actual fetch.
self.select_mirrors(False)
def select_mirrors(self, do_the_fetch = True):
#http://thetvdb.com/api/<apikey>/mirrors.xml
url = "%s/mirrors.xml" % self.base_key_url
self.xml_mirrors = []
self.zip_mirrors = []
try:
filt_func = lambda name, attrs: attrs if name == 'Mirror' else None
xml = self._get_xml_data(url, filt_func) if do_the_fetch else {}
for mirror in xml.get("Mirror", []):
mirrorpath = mirror.get("mirrorpath", None)
typemask = mirror.get("typemask", None)
if not mirrorpath or not typemask:
continue
typemask = int(typemask)
if typemask & 1:
self.xml_mirrors.append(mirrorpath)
if typemask & 4:
self.zip_mirrors.append(mirrorpath)
except:
pass
if not self.xml_mirrors:
self.xml_mirrors = [ self.mirror_url ]
if not self.zip_mirrors:
self.zip_mirrors = [ self.mirror_url ]
self.xml_mirror_url = random.choice(self.xml_mirrors)
self.zip_mirror_url = random.choice(self.zip_mirrors)
self.base_xml_url = "%s/api/%s" % (self.xml_mirror_url, self.api_key)
self.base_zip_url = "%s/api/%s" % (self.zip_mirror_url, self.api_key)
def _2show(self, attrs):
return attrs
def _2episode(self, attrs):
return attrs
@staticmethod
def convert_time(time_string):
"""Convert a thetvdb time string into a datetime.time object."""
time_res = [re.compile(r"\D*(?P<hour>\d{1,2})(?::(?P<minute>\d{2}))?.*(?P<ampm>a|p)m.*", re.IGNORECASE), # 12 hour
re.compile(r"\D*(?P<hour>\d{1,2}):?(?P<minute>\d{2}).*")] # 24 hour
for r in time_res:
m = r.match(time_string)
if m:
gd = m.groupdict()
if "hour" in gd and "minute" in gd and gd["minute"] and "ampm" in gd:
hour = int(gd["hour"])
if hour == 12:
hour = 0
if gd["ampm"].lower() == "p":
hour += 12
return datetime.time(hour, int(gd["minute"]))
elif "hour" in gd and "ampm" in gd:
hour = int(gd["hour"])
if hour == 12:
hour = 0
if gd["ampm"].lower() == "p":
hour += 12
return datetime.time(hour, 0)
elif "hour" in gd and "minute" in gd:
return datetime.time(int(gd["hour"]), int(gd["minute"]))
return None
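    # Example (sketch): convert_time("8:30 PM") -> datetime.time(20, 30),
    # convert_time("20:30") -> datetime.time(20, 30).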
@staticmethod
def convert_date(date_string):
"""Convert a thetvdb date string into a datetime.date object."""
first_aired = None
try:
first_aired = datetime.date(*map(int, date_string.split("-")))
except ValueError:
pass
return first_aired
# language can be "all", "en", "fr", etc.
def get_matching_shows(self, show_name, language=None, want_raw=False):
"""Get a list of shows matching show_name."""
if type(show_name) == type(u''):
show_name = show_name.encode('utf-8')
get_args = {"seriesname": show_name}
if language is not None:
get_args['language'] = language
get_args = urllib.urlencode(get_args, doseq=True)
url = "%s/GetSeries.php?%s" % (self.base_url, get_args)
if want_raw:
filt_func = lambda name, attrs: attrs if name == "Series" else None
else:
filt_func = lambda name, attrs: (attrs.get("seriesid", ""), attrs.get("SeriesName", ""), attrs.get("IMDB_ID", "")) if name == "Series" else None
xml = self._get_xml_data(url, filt_func)
return xml.get('Series', [])
def get_show(self, show_id):
"""Get the show object matching this show_id."""
url = "%s/series/%s/%s.xml" % (self.base_xml_url, show_id, self.language)
return self._get_show_by_url(url)
def _get_show_by_url(self, url):
filt_func = lambda name, attrs: self._2show(attrs) if name == "Series" else None
xml = self._get_xml_data(url, filt_func)
return xml['Series'][0] if 'Series' in xml else None
def get_episode(self, episode_id):
"""Get the episode object matching this episode_id."""
url = "%s/episodes/%s" % (self.base_xml_url, episode_id)
return self._get_episode_by_url(url)
def _get_episode_by_url(self, url):
filt_func = lambda name, attrs: self._2episode(attrs) if name == "Episode" else None
xml = self._get_xml_data(url, filt_func)
return xml['Episode'][0] if 'Episode' in xml else None
def get_show_and_episodes(self, show_id):
"""Get the show object and all matching episode objects for this show_id."""
url = "%s/series/%s/all/%s.zip" % (self.base_zip_url, show_id, self.language)
zip_name = '%s.xml' % self.language
filt_func = lambda name, attrs: self._2episode(attrs) if name == "Episode" else self._2show(attrs) if name == "Series" else None
xml = self._get_xml_data(url, filt_func, zip_name=zip_name)
if 'Series' not in xml:
return None
return (xml['Series'][0], xml.get('Episode', []))
def get_updates(self, callback, period = "day"):
"""Return all series, episode, and banner updates w/o having to have it
all in memory at once. Also returns the Data timestamp. The callback
routine should be defined as: my_callback(name, attrs) where name will
be "Data", "Series", "Episode", or "Banner", and attrs will be a dict
of the values (e.g. id, time, etc)."""
self._get_update_info(period, callback=callback)
def _get_update_info(self, period, filter_func = None, callback = None):
url = "%s/updates/updates_%s.zip" % (self.base_zip_url, period)
zip_name = 'updates_%s.xml' % period
return self._get_xml_data(url, filter_func, zip_name, callback)
def _get_xml_data(self, url, filter_func = None, zip_name = None, callback = None):
data = urllib.urlopen(url)
if zip_name:
zipfile = ZipFile(StringIO(data.read()))
data = zipfile.open(zip_name)
if not data:
raise Exception("Failed to get any data")
e = ExpatParseXml(callback, filter_func)
e.parse(data)
return e.xml
class ExpatParseXml(object):
def __init__(self, callback, filter_func):
self.el_container = None
self.el_name = None
self.el_attr_name = None
self.el_attrs = None
self.el_callback = callback if callback else self.stash_xml
self.el_filter_func = filter_func # only used by stash_xml()
self.xml = {}
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self.start_element
self.parser.EndElementHandler = self.end_element
self.parser.CharacterDataHandler = self.char_data
def parse(self, fh):
# Sadly ParseFile(fh) actually mangles the data, so we parse the file line by line:
for line in fh:
self.parser.Parse(line)
def start_element(self, name, attrs):
if not self.el_name:
if not self.el_container:
self.el_container = name
self.el_callback(name, attrs)
else:
self.el_name = name
self.el_attrs = {}
elif not self.el_attr_name:
self.el_attr_name = name
def end_element(self, name):
if self.el_attr_name and name == self.el_attr_name:
self.el_attr_name = None
elif self.el_name and name == self.el_name:
self.el_callback(self.el_name, self.el_attrs)
self.el_name = None
self.el_attr_name = None
def char_data(self, data):
if self.el_attr_name:
if self.el_attr_name in self.el_attrs:
self.el_attrs[self.el_attr_name] += data
else:
self.el_attrs[self.el_attr_name] = data
def stash_xml(self, name, attrs):
if self.el_filter_func:
attrs = self.el_filter_func(name, attrs)
if attrs is None:
return
if name in self.xml:
self.xml[name].append(attrs)
else:
self.xml[name] = [ attrs ]
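# Example (sketch, assuming network access to thetvdb.com and a valid API key):
#   api = TheTVDB()
#   for series_id, name, imdb_id in api.get_matching_shows('Fringe'):
#       show_and_eps = api.get_show_and_episodes(series_id)  # (show, episodes) or None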
# vim: sw=4 ts=8 et
| KODeKarnage/script.sub.missing | resources/lib/thetvdbapi.py | Python | gpl-3.0 | 10,006 | 0.006696 |
#!/usr/bin/python
"""Thermistor Value Lookup Table Generator
Generates lookup to temperature values for use in a microcontroller in C format based on:
http://en.wikipedia.org/wiki/Steinhart-Hart_equation
The main use is for Arduino programs that read data from the circuit board described here:
http://reprap.org/wiki/Temperature_Sensor_v2.0
Usage: python createTemperatureLookupMarlin.py [options]
Options:
-h, --help show this help
--rp=... pull-up resistor
--t1=ttt:rrr low temperature temperature:resistance point (around 25 degC)
--t2=ttt:rrr middle temperature temperature:resistance point (around 150 degC)
--t3=ttt:rrr high temperature temperature:resistance point (around 250 degC)
--num-temps=... the number of temperature points to calculate (default: 36)
"""
from math import *
import sys
import getopt
"Constants"
ZERO = 273.15 # zero point of Kelvin scale
VADC = 5 # ADC voltage
VCC = 5 # supply voltage
ARES = pow(2,10) # 10 Bit ADC resolution
VSTEP = VADC / ARES # ADC voltage resolution
TMIN = 0 # lowest temperature in table
TMAX = 350 # highest temperature in table
class Thermistor:
"Class to do the thermistor maths"
def __init__(self, rp, t1, r1, t2, r2, t3, r3):
l1 = log(r1)
l2 = log(r2)
l3 = log(r3)
y1 = 1.0 / (t1 + ZERO) # adjust scale
y2 = 1.0 / (t2 + ZERO)
y3 = 1.0 / (t3 + ZERO)
x = (y2 - y1) / (l2 - l1)
y = (y3 - y1) / (l3 - l1)
c = (y - x) / ((l3 - l2) * (l1 + l2 + l3))
b = x - c * (l1**2 + l2**2 + l1*l2)
a = y1 - (b + l1**2 *c)*l1
if c < 0:
print "//////////////////////////////////////////////////////////////////////////////////////"
print "// WARNING: negative coefficient 'c'! Something may be wrong with the measurements! //"
print "//////////////////////////////////////////////////////////////////////////////////////"
c = -c
self.c1 = a # Steinhart-Hart coefficients
self.c2 = b
self.c3 = c
self.rp = rp # pull-up resistance
def resol(self, adc):
"Convert ADC reading into a resolution"
res = self.temp(adc)-self.temp(adc+1)
return res
def voltage(self, adc):
"Convert ADC reading into a Voltage"
return adc * VSTEP # convert the 10 bit ADC value to a voltage
def resist(self, adc):
"Convert ADC reading into a resistance in Ohms"
r = self.rp * self.voltage(adc) / (VCC - self.voltage(adc)) # resistance of thermistor
return r
def temp(self, adc):
"Convert ADC reading into a temperature in Celcius"
l = log(self.resist(adc))
Tinv = self.c1 + self.c2*l + self.c3* l**3 # inverse temperature
return (1/Tinv) - ZERO # temperature
def adc(self, temp):
"Convert temperature into a ADC reading"
x = (self.c1 - (1.0 / (temp+ZERO))) / (2*self.c3)
y = sqrt((self.c2 / (3*self.c3))**3 + x**2)
r = exp((y-x)**(1.0/3) - (y+x)**(1.0/3))
return (r / (self.rp + r)) * ARES
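# Example (sketch, using the script's default calibration points):
#   t = Thermistor(4700, 25, 100000, 150, 1641.9, 250, 226.15)
#   t.temp(512)   # temperature in degC for a mid-scale ADC reading
#   t.adc(200)    # ADC count corresponding to 200 degC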
def main(argv):
"Default values"
t1 = 25 # low temperature in Kelvin (25 degC)
r1 = 100000 # resistance at low temperature (10 kOhm)
t2 = 150 # middle temperature in Kelvin (150 degC)
r2 = 1641.9 # resistance at middle temperature (1.6 KOhm)
t3 = 250 # high temperature in Kelvin (250 degC)
r3 = 226.15 # resistance at high temperature (226.15 Ohm)
rp = 4700; # pull-up resistor (4.7 kOhm)
num_temps = 36; # number of entries for look-up table
try:
opts, args = getopt.getopt(argv, "h", ["help", "rp=", "t1=", "t2=", "t3=", "num-temps="])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt == "--rp":
rp = int(arg)
elif opt == "--t1":
arg = arg.split(':')
t1 = float(arg[0])
r1 = float(arg[1])
elif opt == "--t2":
arg = arg.split(':')
t2 = float(arg[0])
r2 = float(arg[1])
elif opt == "--t3":
arg = arg.split(':')
t3 = float(arg[0])
r3 = float(arg[1])
elif opt == "--num-temps":
num_temps = int(arg)
t = Thermistor(rp, t1, r1, t2, r2, t3, r3)
increment = int((ARES-1)/(num_temps-1));
step = (TMIN-TMAX) / (num_temps-1)
low_bound = t.temp(ARES-1);
up_bound = t.temp(1);
min_temp = int(TMIN if TMIN > low_bound else low_bound)
max_temp = int(TMAX if TMAX < up_bound else up_bound)
temps = range(max_temp, TMIN+step, step);
print "// Thermistor lookup table for Marlin"
print "// ./createTemperatureLookupMarlin.py --rp=%s --t1=%s:%s --t2=%s:%s --t3=%s:%s --num-temps=%s" % (rp, t1, r1, t2, r2, t3, r3, num_temps)
print "// Steinhart-Hart Coefficients: a=%.15g, b=%.15g, c=%.15g " % (t.c1, t.c2, t.c3)
print "// Theoretical limits of thermistor: %.2f to %.2f degC" % (low_bound, up_bound)
print
print "const short temptable[][2] PROGMEM = {"
for temp in temps:
adc = t.adc(temp)
print " { (short) (%7.2f * OVERSAMPLENR ), %4s }%s // v=%.3f\tr=%.3f\tres=%.3f degC/count" % (adc , temp, \
',' if temp != temps[-1] else ' ', \
t.voltage(adc), \
t.resist( adc), \
t.resol( adc) \
)
print "};"
def usage():
print __doc__
if __name__ == "__main__":
main(sys.argv[1:])
| pandel/Marlin | buildroot/share/scripts/createTemperatureLookupMarlin.py | Python | gpl-3.0 | 6,204 | 0.009349 |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, hierarchical distributed counter."""
import threading
import time
from typing import Dict, Mapping, Optional, Union
from acme import core
Number = Union[int, float]
class Counter(core.Saveable):
"""A simple counter object that can periodically sync with a parent."""
def __init__(self,
parent: Optional['Counter'] = None,
prefix: str = '',
time_delta: float = 1.0,
return_only_prefixed: bool = False):
"""Initialize the counter.
Args:
parent: a Counter object to cache locally (or None for no caching).
prefix: string prefix to use for all local counts.
time_delta: time difference in seconds between syncing with the parent
counter.
return_only_prefixed: if True, and if `prefix` isn't empty, return counts
restricted to the given `prefix` on each call to `increment` and
`get_counts`. The `prefix` is stripped from returned count names.
"""
self._parent = parent
self._prefix = prefix
self._time_delta = time_delta
# Hold local counts and we'll lock around that.
# These are counts to be synced to the parent and the cache.
self._counts = {}
self._lock = threading.Lock()
# We'll sync periodically (when the last sync was more than self._time_delta
# seconds ago.)
self._cache = {}
self._last_sync_time = 0.0
self._return_only_prefixed = return_only_prefixed
def increment(self, **counts: Number) -> Dict[str, Number]:
"""Increment a set of counters.
Args:
**counts: keyword arguments specifying count increments.
Returns:
The [name, value] mapping of all counters stored, i.e. this will also
include counts that were not updated by this call to increment.
"""
with self._lock:
for key, value in counts.items():
self._counts.setdefault(key, 0)
self._counts[key] += value
return self.get_counts()
def get_counts(self) -> Dict[str, Number]:
"""Return all counts tracked by this counter."""
now = time.time()
# TODO(b/144421838): use futures instead of blocking.
if self._parent and (now - self._last_sync_time) > self._time_delta:
with self._lock:
counts = _prefix_keys(self._counts, self._prefix)
# Reset the local counts, as they will be merged into the parent and the
# cache.
self._counts = {}
self._cache = self._parent.increment(**counts)
self._last_sync_time = now
# Potentially prefix the keys in the counts dictionary.
counts = _prefix_keys(self._counts, self._prefix)
# If there's no prefix make a copy of the dictionary so we don't modify the
# internal self._counts.
if not self._prefix:
counts = dict(counts)
# Combine local counts with any parent counts.
for key, value in self._cache.items():
counts[key] = counts.get(key, 0) + value
if self._prefix and self._return_only_prefixed:
counts = dict([(key[len(self._prefix) + 1:], value)
for key, value in counts.items()
if key.startswith(f'{self._prefix}_')])
return counts
def save(self) -> Mapping[str, Mapping[str, Number]]:
return {'counts': self._counts, 'cache': self._cache}
def restore(self, state: Mapping[str, Mapping[str, Number]]):
# Force a sync, if necessary, on the next get_counts call.
self._last_sync_time = 0.
self._counts = state['counts']
self._cache = state['cache']
def _prefix_keys(dictionary: Dict[str, Number], prefix: str):
"""Return a dictionary with prefixed keys.
Args:
dictionary: dictionary to return a copy of.
prefix: string to use as the prefix.
Returns:
Return a copy of the given dictionary whose keys are replaced by
"{prefix}_{key}". If the prefix is the empty string it returns the given
dictionary unchanged.
"""
if prefix:
dictionary = {f'{prefix}_{k}': v for k, v in dictionary.items()}
return dictionary
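# Example (a minimal sketch): a prefixed child counter that syncs into a shared
# parent at most once every `time_delta` seconds:
#   parent = Counter()
#   actor_counter = Counter(parent=parent, prefix='actor', time_delta=10.)
#   actor_counter.increment(steps=1)  # reported as 'actor_steps' after syncing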
| deepmind/acme | acme/utils/counting.py | Python | apache-2.0 | 4,636 | 0.003883 |
"""
Django settings for movie_organizer project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ak7d+2obkx$-@!3jd@l!e*#95*4vfwfb2p01_nsek^#2ke)y3@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'movies'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'movie_organizer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'movie_organizer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| jlaurelli/movie_organizer | movie_organizer/settings.py | Python | mit | 2,681 | 0 |
__author__ = 'lucifurtun'
| sensidev/serpisori | app/__init__.py | Python | mit | 26 | 0 |
'''
Extract Ways from OSM PBF files
'''
import osmium as o
import json, os, requests, sys, time
import shapely.wkb as wkblib
# http://docs.osmcode.org/pyosmium/latest/intro.html
# A global factory that creates WKB from a osmium geometry
wkbfab = o.geom.WKBFactory()
# set in Dockerfile as env variable
GEO_DATA_DIR = os.environ.get("GEO_DATA_DIR")
class WayMap():
def __init__(self, extract_type='highway'):
self.extracter = WayExtracter(extract_type)
def extract_files(self, file_list):
for path in file_list:
self.run_extraction(path)
def run_extraction(self, file_path):
t0 = time.time()
self.extracter.apply_file(file_path, locations=True)
t1 = time.time()
elapsed = "{0:.1f}".format(t1-t0)
print "EXTRACTED WAYS with locations from pbf file {}, took {}s".format(file_path, elapsed)
class WayExtracter(o.SimpleHandler):
def __init__(self, extract_type='highway'):
'''
extract_type can so far be in: highway, tennis
'''
o.SimpleHandler.__init__(self)
self.ways = []
self.way_dict = {}
self.types = []
self.extract_type = extract_type
def way(self, w):
if self.extract_type == 'tennis':
self.extract_if_tennis_court(w)
elif self.extract_type == 'highway':
self.extract_if_highway(w)
else:
print "ERROR unknown type to extract from PBF file"
def extract_if_tennis_court(self, w):
name = ''
is_tennis = False
for tag in w.tags:
if tag.k == 'sport' and 'tennis' == tag.v:
is_tennis = True
if tag.k == 'name':
name = tag.v
if not is_tennis:
return
way_dict = {
'uid': w.uid,
'ends_have_same_id': w.ends_have_same_id(),
'id': w.id,
'tags':[]}
for tag in w.tags:
way_dict['tags'].append((tag.k, tag.v))
self.add_linestring(w, way_dict)
def extract_if_highway(self, w):
is_highway = False
is_big = False
name = ''
highway_type = None
for tag in w.tags:
if tag.k == 'name':
name = tag.v
# and tag.v in ['primary', 'secondary', 'tertiary', 'trunk']
if tag.k == 'highway':
highway_type = tag.v
is_highway = True
#try:
# if tag.k == 'lanes' and int(tag.v[len(tag.v)-1]) >= 2:
# is_big = True
# # #for t in w.tags:
# # # print "tag {} {}".format(t.k, t.v)
#except:
# print("exception, weird lanes designation {}".format(tag.v))
# or not is_big
if not is_highway:
return
if not highway_type in self.types:
self.types.append(highway_type)
way_dict = {'visible': w.visible,
'deleted': w.deleted,
'uid': w.uid,
'highway_type': highway_type,
'ends_have_same_id': w.ends_have_same_id(),
'id': w.id,
'tags':[]}
for tag in w.tags:
way_dict['tags'].append((tag.k, tag.v))
self.add_linestring(w, way_dict)
def add_linestring(self, w, way_dict):
try:
wkb = wkbfab.create_linestring(w)
except:
# throws on single point ways
return
line = wkblib.loads(wkb, hex=True)
reverse_points = []
for point in list(line.coords):
reverse_points.append([point[1],point[0]])
way_dict['linestring'] = reverse_points
self.ways.append(way_dict)
def download_and_extract(file_urls_to_download, extract_type='highway'):
file_urls = file_urls_to_download
file_paths = download_files(file_urls)
w = WayMap(extract_type=extract_type)
w.extract_files(file_paths)
return w
def download_file(url):
local_filename = url.split('/')[-1]
full_local_filename = os.path.join(GEO_DATA_DIR, local_filename)
r = requests.get(url, stream=True)
with open(full_local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return full_local_filename
def download_files(url_list):
paths = []
print("DOWNLOADING {} PBFs...".format(len(url_list)))
t0 = time.time()
for url in url_list:
local_filename = url.split('/')[-1]
full_local_filename = os.path.join(GEO_DATA_DIR, local_filename)
if not os.path.exists(full_local_filename):
paths.append(download_file(url))
else:
paths.append(full_local_filename)
print("PBF {} already downloaded".format(full_local_filename))
if time.time()-t0 > 0.01:
print("downloads took {0:.1f}s".format(time.time()-t0))
return paths
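# Example (sketch; the extract URL below is only illustrative):
#   way_map = download_and_extract(
#       ['http://download.geofabrik.de/north-america/us/delaware-latest.osm.pbf'],
#       extract_type='highway')
#   ways = way_map.extracter.ways  # list of dicts with 'linestring', 'tags', ...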
| silberman/Deep-OSM | src/download_labels.py | Python | mit | 4,752 | 0.016204 |
import tempfile
import synapse.common as s_common
import synapse.lib.msgpack as s_msgpack
_readsz = 10000000
def splice(act, **info):
'''
Form a splice event from a given act name and info.
Args:
act (str): The name of the action.
**info: Additional information about the event.
Example:
splice = splice('add:node', form='inet:ipv4', valu=0)
self.fire(splice)
Notes:
Splice events were reworked in v0.0.45 and now contain a sub-event of
the (act, info) under the 'mesg' key.
Returns:
(str, dict): The splice event.
'''
return (act, info)
def convertOldSplice(mesg):
'''
Converts an "old" splice event to the "new" format.
Args:
mesg ((str,dict)): An event tuple.
Examples:
Convert a splice to the new format:
newsplice = convertOldSplice(oldsplice)
Raises:
(BadSpliceMesg): The splice was unable to be converted.
Returns:
(str, dict): The splice event.
'''
    if not (isinstance(mesg, tuple) and len(mesg) == 2):
raise s_common.BadSpliceMesg('invalid event mesg')
evtname = mesg[0]
if evtname != 'splice':
raise s_common.BadSpliceMesg('event mesg is not a splice')
data = mesg[1]
if data.get('mesg'):
raise s_common.BadSpliceMesg('splice has already been converted')
act = mesg[1].pop('act', None)
if not act:
raise s_common.BadSpliceMesg('splice is missing act')
return splice(act, **data)
def convertSpliceFd(fpath):
'''
Converts an "old" splice log to the new format.
Args:
fpath (str): The path to the "old" splice log file.
Example:
convertSpliceFd('/stuff/oldsplicelog.mpk')
Notes:
This function reads the an "old" splice log file, writes to a temporary
file, and then overwrites the old file with the new data. This function
only converts old splices to new splices. If any messages are invalid,
an exception will be raised and the conversion will exit early and not
overwrite any data.
Returns:
None
'''
with tempfile.SpooledTemporaryFile() as tmp:
with open(fpath, 'r+b') as fd:
for chnk in s_common.chunks(s_msgpack.iterfd(fd), 1000):
for mesg in chnk:
mesg = convertOldSplice(mesg)
tmp.write(s_msgpack.en(mesg))
tmp.seek(0)
fd.seek(0)
data = tmp.read(_readsz)
while data:
fd.write(data)
data = tmp.read(_readsz)
fd.truncate()
| vivisect/synapse | synapse/lib/splice.py | Python | apache-2.0 | 2,652 | 0.000754 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Course.start_time'
db.alter_column(u'courses_course', 'start_time', self.gf('django.db.models.fields.DateField')())
# Changing field 'Course.end_time'
db.alter_column(u'courses_course', 'end_time', self.gf('django.db.models.fields.DateField')())
def backwards(self, orm):
# Changing field 'Course.start_time'
db.alter_column(u'courses_course', 'start_time', self.gf('django.db.models.fields.TimeField')())
# Changing field 'Course.end_time'
db.alter_column(u'courses_course', 'end_time', self.gf('django.db.models.fields.TimeField')())
models = {
u'courses.course': {
'Meta': {'object_name': 'Course'},
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'start_time': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['courses'] | HackBulgaria/Odin | courses/south_migrations/0003_auto__chg_field_course_start_time__chg_field_course_end_time.py | Python | agpl-3.0 | 1,387 | 0.005047 |
"""Tests related to retraction of public registrations"""
import datetime
from rest_framework import status as http_status
import mock
import pytest
from django.utils import timezone
from django.db import DataError
from nose.tools import * # noqa
from framework.auth import Auth
from framework.exceptions import PermissionsError
from tests.base import fake, OsfTestCase
from osf_tests.factories import (
AuthUserFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory,
UnregUserFactory, OSFGroupFactory
)
from osf.utils import tokens
from osf.exceptions import (
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
NodeStateError,
)
from osf.models import Contributor, Retraction
from osf.utils import permissions
@pytest.mark.enable_bookmark_creation
class RegistrationRetractionModelsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionModelsTestCase, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user, is_public=True)
self.valid_justification = fake.sentence()
self.invalid_justification = fake.text(max_nb_chars=3000)
def test_set_public_registration_to_private_raises_NodeStateException(self):
self.registration.save()
with assert_raises(NodeStateError):
self.registration.set_privacy('private')
self.registration.reload()
assert_true(self.registration.is_public)
def test_initiate_retraction_saves_retraction(self):
initial_count = Retraction.objects.all().count()
self.registration._initiate_retraction(self.user)
assert_equal(Retraction.objects.all().count(), initial_count + 1)
def test__initiate_retraction_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
Contributor.objects.create(node=self.registration, user=unconfirmed_user)
self.registration.add_permission(unconfirmed_user, permissions.ADMIN, save=True)
assert_equal(Contributor.objects.get(node=self.registration, user=unconfirmed_user).permission, permissions.ADMIN)
retraction = self.registration._initiate_retraction(self.user)
assert_true(self.user._id in retraction.approval_state)
assert_false(unconfirmed_user._id in retraction.approval_state)
def test__initiate_retraction_adds_admins_on_child_nodes(self):
project_admin = UserFactory()
project_non_admin = UserFactory()
child_admin = UserFactory()
child_non_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin)
project.add_contributor(project_non_admin, auth=Auth(project.creator), save=True)
child = NodeFactory(creator=child_admin, parent=project)
child.add_contributor(child_non_admin, auth=Auth(child.creator), save=True)
grandchild = NodeFactory(creator=grandchild_admin, parent=child) # noqa
registration = RegistrationFactory(project=project)
retraction = registration._initiate_retraction(registration.creator)
assert_in(project_admin._id, retraction.approval_state)
assert_in(child_admin._id, retraction.approval_state)
assert_in(grandchild_admin._id, retraction.approval_state)
assert_not_in(project_non_admin._id, retraction.approval_state)
assert_not_in(child_non_admin._id, retraction.approval_state)
# Backref tests
def test_retraction_initiator_has_backref(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_equal(Retraction.objects.filter(initiated_by=self.user).count(), 1)
# Node#retract_registration tests
def test_pending_retract(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_false(self.registration.is_retracted)
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
assert_equal(self.registration.retraction.justification, self.valid_justification)
assert_equal(self.registration.retraction.initiated_by, self.user)
assert_equal(
self.registration.retraction.initiation_date.date(),
timezone.now().date()
)
def test_retract_component_raises_NodeStateError(self):
project = ProjectFactory(is_public=True, creator=self.user)
NodeFactory(is_public=True, creator=self.user, parent=project)
registration = RegistrationFactory(is_public=True, project=project)
with assert_raises(NodeStateError):
registration._nodes.first().retract_registration(self.user, self.valid_justification)
def test_long_justification_raises_ValidationValueError(self):
with assert_raises(DataError):
self.registration.retract_registration(self.user, self.invalid_justification)
self.registration.save()
assert_is_none(self.registration.retraction)
def test_retract_private_registration_raises_NodeStateError(self):
self.registration.is_public = False
with assert_raises(NodeStateError):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_is_none(self.registration.retraction)
def test_retraction_of_registration_pending_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_retraction_of_registration_in_active_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
retraction_approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, retraction_approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
# Retraction#approve_retraction_tests
def test_invalid_approval_token_raises_InvalidSanctionApprovalToken(self):
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
with assert_raises(InvalidSanctionApprovalToken):
self.registration.retraction.approve_retraction(self.user, fake.sentence())
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_non_admin_approval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
with assert_raises(PermissionsError):
self.registration.retraction.approve_retraction(non_admin, approval_token)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
        # group admin on node cannot approve the retraction
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
self.registration.registered_from.add_osf_group(group, permissions.ADMIN)
with assert_raises(PermissionsError):
self.registration.retraction.approve_retraction(group_mem, approval_token)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_one_approval_with_one_admin_retracts(self):
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
def test_approval_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
# Logs: Created, registered, retraction initiated, retraction approved
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 2)
def test_retraction_of_registration_pending_embargo_cancels_embargo_public(self):
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_approval_of_registration_with_embargo_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
# Logs: Created, registered, embargo initiated, retraction initiated, retraction approved, embargo cancelled
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 4)
def test_retraction_of_public_registration_in_active_embargo_cancels_embargo(self):
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
retraction_approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, retraction_approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_two_approvals_with_two_admins_retracts(self):
self.admin2 = UserFactory()
Contributor.objects.create(node=self.registration, user=self.admin2)
self.registration.add_permission(self.admin2, permissions.ADMIN, save=True)
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
# First admin approves
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_pending_retraction)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Second admin approves
approval_token = self.registration.retraction.approval_state[self.admin2._id]['approval_token']
self.registration.retraction.approve_retraction(self.admin2, approval_token)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 2)
assert_true(self.registration.is_retracted)
def test_one_approval_with_two_admins_stays_pending(self):
self.admin2 = UserFactory()
Contributor.objects.create(node=self.registration, user=self.admin2)
self.registration.add_permission(self.admin2, permissions.ADMIN, save=True)
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_pending_retraction)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Retraction#disapprove_retraction tests
def test_invalid_rejection_token_raises_InvalidSanctionRejectionToken(self):
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
with assert_raises(InvalidSanctionRejectionToken):
self.registration.retraction.disapprove_retraction(self.user, fake.sentence())
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_non_admin_rejection_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
with assert_raises(PermissionsError):
self.registration.retraction.disapprove_retraction(non_admin, rejection_token)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_one_disapproval_cancels_retraction(self):
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_true(self.registration.retraction.is_rejected)
def test_disapproval_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
# Logs: Created, registered, retraction initiated, retraction cancelled
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 2)
def test__on_complete_makes_project_and_components_public(self):
project_admin = UserFactory()
child_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin, is_public=False)
child = NodeFactory(creator=child_admin, parent=project, is_public=False)
grandchild = NodeFactory(creator=grandchild_admin, parent=child, is_public=False) # noqa
registration = RegistrationFactory(project=project)
registration._initiate_retraction(self.user)
registration.retraction._on_complete(self.user)
for each in registration.node_and_primary_descendants():
each.reload()
assert_true(each.is_public)
# Retraction property tests
def test_new_retraction_is_pending_retraction(self):
self.registration.retract_registration(self.user)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
@pytest.mark.enable_bookmark_creation
class RegistrationWithChildNodesRetractionModelTestCase(OsfTestCase):
def setUp(self):
super(RegistrationWithChildNodesRetractionModelTestCase, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
self.subproject = ProjectFactory(
creator=self.user,
parent=self.project,
title='Subproject'
)
self.subproject_component = NodeFactory(
creator=self.user,
parent=self.subproject,
title='Subcomponent'
)
self.registration = RegistrationFactory(project=self.project, is_public=True)
        # Reload the registration; else tests won't catch failures to save
self.registration.reload()
@mock.patch('api.share.utils.settings.SHARE_ENABLED', True)
@mock.patch('api.share.utils.send_share_json')
def test_approval_retracts_descendant_nodes(self, mock_update_share):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction
descendants = self.registration.get_descendants_recursive()
for node in descendants:
node.save()
assert_true(node.is_pending_retraction)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
# Ensure descendant nodes are retracted
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert mock_update_share.called
def test_disapproval_cancels_retraction_on_descendant_nodes(self):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction
descendants = self.registration.get_descendants_recursive()
for node in descendants:
node.save()
assert_true(node.is_pending_retraction)
# Disapprove parent registration's retraction
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_false(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
assert_true(self.registration.retraction.is_rejected)
# Ensure descendant nodes' retractions are cancelled
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_false(node.is_pending_retraction)
assert_false(node.is_retracted)
@mock.patch('api.share.utils.settings.SHARE_ENABLED', True)
@mock.patch('api.share.utils.send_share_json')
def test_approval_cancels_pending_embargoes_on_descendant_nodes(self, mock_update_share):
# Initiate embargo for registration
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction and pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_pending_retraction)
assert_true(node.is_pending_embargo)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
        # Ensure descendant nodes are retracted and no longer pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert_false(node.is_pending_embargo)
assert mock_update_share.called
@mock.patch('api.share.utils.settings.SHARE_ENABLED', True)
@mock.patch('api.share.utils.send_share_json')
def test_approval_cancels_active_embargoes_on_descendant_nodes(self, mock_update_share):
# Initiate embargo for registration
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
# Approve embargo for registration
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction and still under active embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_pending_retraction)
assert_true(node.embargo_end_date)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
        # Ensure descendant nodes are retracted
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert mock_update_share.called
@pytest.mark.enable_bookmark_creation
class RegistrationRetractionShareHook(OsfTestCase):
def setUp(self):
super(RegistrationRetractionShareHook, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, is_public=True)
        # Reload the registration; else tests won't catch failures to save
self.registration.reload()
@mock.patch('api.share.utils.settings.SHARE_ENABLED', True)
@mock.patch('api.share.utils.send_share_json')
def test_approval_calls_share_hook(self, mock_update_share):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
assert mock_update_share.called
@mock.patch('api.share.utils.settings.SHARE_ENABLED', True)
@mock.patch('api.share.utils.send_share_json')
def test_disapproval_does_not_call_share_hook(self, mock_update_share):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_false(self.registration.is_retracted)
assert not mock_update_share.called
@pytest.mark.enable_bookmark_creation
class RegistrationRetractionApprovalDisapprovalViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionApprovalDisapprovalViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.registered_from = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(is_public=True, project=self.registered_from)
self.registration.retract_registration(self.user)
self.registration.save()
self.approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.corrupt_token = fake.sentence()
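        # A well-formed token whose sanction_id does not match any existing sanction.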
self.token_without_sanction = tokens.encode({
'action': 'approve_retraction',
'user_id': self.user._id,
'sanction_id': 'invalid id'
})
# node_registration_retraction_approve_tests
def test_GET_approve_from_unauthorized_user_returns_HTTPError_UNAUTHORIZED(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('token_action', token=self.approval_token),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
def test_GET_approve_registration_without_retraction_returns_HTTPError_BAD_REQUEST(self):
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.reject(user=self.user, token=self.rejection_token)
assert_false(self.registration.is_pending_retraction)
self.registration.retraction.save()
res = self.app.get(
self.registration.web_url_for('token_action', token=self.approval_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_GET_approve_with_invalid_token_returns_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('token_action', token=self.corrupt_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_GET_approve_with_non_existant_sanction_returns_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('token_action', token=self.token_without_sanction),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_GET_approve_with_valid_token_returns_302(self):
res = self.app.get(
self.registration.web_url_for('token_action', token=self.approval_token),
auth=self.user.auth
)
self.registration.retraction.reload()
assert_true(self.registration.is_retracted)
assert_false(self.registration.is_pending_retraction)
assert_equal(res.status_code, http_status.HTTP_302_FOUND)
# node_registration_retraction_disapprove_tests
def test_GET_disapprove_from_unauthorized_user_returns_HTTPError_UNAUTHORIZED(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('token_action', token=self.rejection_token),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
def test_GET_disapprove_registration_without_retraction_returns_HTTPError_BAD_REQUEST(self):
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.reject(user=self.user, token=self.rejection_token)
assert_false(self.registration.is_pending_retraction)
self.registration.retraction.save()
res = self.app.get(
self.registration.web_url_for('token_action', token=self.rejection_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_GET_disapprove_with_invalid_token_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('token_action', token=self.corrupt_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_GET_disapprove_with_valid_token_returns_redirect(self):
res = self.app.get(
self.registration.web_url_for('token_action', token=self.rejection_token),
auth=self.user.auth,
)
self.registration.retraction.reload()
assert_false(self.registration.is_retracted)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.retraction.is_rejected)
assert_equal(res.status_code, http_status.HTTP_302_FOUND)
@pytest.mark.enable_bookmark_creation
class ComponentRegistrationRetractionViewsTestCase(OsfTestCase):
def setUp(self):
super(ComponentRegistrationRetractionViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.component = NodeFactory(
is_public=True,
creator=self.user,
parent=self.project,
title='Component'
)
self.subproject = ProjectFactory(
is_public=True,
creator=self.user,
parent=self.project,
title='Subproject'
)
self.subproject_component = NodeFactory(
is_public=True,
creator=self.user,
parent=self.subproject,
title='Subcomponent'
)
self.registration = RegistrationFactory(is_public=True, project=self.project)
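        # Registering the project also registers its components and subprojects;
        # fetch those child registrations in creation order.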
self.component_registration = self.registration._nodes.order_by('created').first()
self.subproject_registration = list(self.registration._nodes.order_by('created'))[1]
self.subproject_component_registration = self.subproject_registration._nodes.order_by('created').first()
def test_POST_retraction_to_component_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.component_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_POST_retraction_to_subproject_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.subproject_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_POST_retraction_to_subproject_component_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.subproject_component_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
@pytest.mark.enable_bookmark_creation
class RegistrationRetractionViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.registered_from = ProjectFactory(creator=self.user, is_public=True)
self.registration = RegistrationFactory(project=self.registered_from, is_public=True)
self.retraction_post_url = self.registration.api_url_for('node_registration_retraction_post')
self.retraction_get_url = self.registration.web_url_for('node_registration_retraction_get')
self.justification = fake.sentence()
self.group_mem = AuthUserFactory()
self.group = OSFGroupFactory(creator=self.group_mem)
self.registration.registered_from.add_osf_group(self.group, permissions.ADMIN)
def test_GET_retraction_page_when_pending_retraction_returns_HTTPError_BAD_REQUEST(self):
self.registration.retract_registration(self.user)
self.registration.save()
res = self.app.get(
self.retraction_get_url,
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_POST_retraction_to_private_registration_returns_HTTPError_FORBIDDEN(self):
self.registration.is_public = False
self.registration.save()
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
self.registration.reload()
assert_is_none(self.registration.retraction)
@mock.patch('website.mails.send_mail')
def test_POST_retraction_does_not_send_email_to_unregistered_admins(self, mock_send_mail):
unreg = UnregUserFactory()
self.registration.add_unregistered_contributor(
unreg.fullname,
unreg.email,
auth=Auth(self.user),
permissions=permissions.ADMIN,
existing_user=unreg
)
self.registration.save()
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
# Only the creator gets an email; the unreg user does not get emailed
assert_equal(mock_send_mail.call_count, 1)
def test_POST_pending_embargo_returns_HTTPError_HTTPOK(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.registration.reload()
assert_true(self.registration.is_pending_retraction)
def test_POST_active_embargo_returns_HTTPOK(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve(user=self.user, token=approval_token)
assert_true(self.registration.embargo_end_date)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.registration.reload()
assert_true(self.registration.is_pending_retraction)
def test_POST_retraction_by_non_admin_retract_HTTPError_UNAUTHORIZED(self):
res = self.app.post_json(self.retraction_post_url, expect_errors=True)
assert_equals(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
self.registration.reload()
assert_is_none(self.registration.retraction)
# group admin POST fails
res = self.app.post_json(self.retraction_post_url, auth=self.group_mem.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
@mock.patch('website.mails.send_mail')
def test_POST_retraction_without_justification_returns_HTTPOK(self, mock_send):
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.registration.reload()
assert_false(self.registration.is_retracted)
assert_true(self.registration.is_pending_retraction)
assert_is_none(self.registration.retraction.justification)
@mock.patch('website.mails.send_mail')
def test_valid_POST_retraction_adds_to_parent_projects_log(self, mock_send):
initial_project_logs = self.registration.registered_from.logs.count()
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
self.registration.registered_from.reload()
# Logs: Created, registered, retraction initiated
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 1)
@mock.patch('website.mails.send_mail')
def test_valid_POST_retraction_when_pending_retraction_raises_400(self, mock_send):
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
@mock.patch('website.mails.send_mail')
def test_valid_POST_calls_send_mail_with_username(self, mock_send):
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
assert_true(mock_send.called)
args, kwargs = mock_send.call_args
assert_true(self.user.username in args)
def test_non_contributor_GET_approval_returns_HTTPError_UNAUTHORIZED(self):
non_contributor = AuthUserFactory()
self.registration.retract_registration(self.user)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
approval_url = self.registration.web_url_for('token_action', token=approval_token)
res = self.app.get(approval_url, auth=non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
        # group admin on node fails approval GET
res = self.app.get(approval_url, auth=self.group_mem.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
def test_non_contributor_GET_disapproval_returns_HTTPError_UNAUTHORIZED(self):
non_contributor = AuthUserFactory()
self.registration.retract_registration(self.user)
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
disapproval_url = self.registration.web_url_for('token_action', token=rejection_token)
res = self.app.get(disapproval_url, auth=non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
# group admin on node fails disapproval GET
res = self.app.get(disapproval_url, auth=self.group_mem.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
| mfraezz/osf.io | tests/test_registrations/test_retractions.py | Python | apache-2.0 | 43,805 | 0.003196 |
#!/usr/bin/env python
# ./postit.py http://localhost:5000/db/loadpost users fixtures/users.txt
# alternately, if you are running locally, visit
# http://localhost:5000/db/loadfixture/users/users.txt
# to drop the db go to
# http://localhost:5000/db/drop
# to show the db go to
# http://localhost:5000/db/show
import urllib, urllib2, httplib
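# POST a single url-encoded key/value pair to <url>/<schema> and print the HTTP response status.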
def post(url, schema, key, value):
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
req = urllib2.Request(url+"/"+schema)
connection = httplib.HTTPConnection(req.get_host())
params = urllib.urlencode({key: value})
print params
connection.request('POST', req.get_selector(),
params, headers)
response = connection.getresponse()
print response.status, response.reason
data = response.read()
connection.close()
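# Split a comma-separated line into stripped, non-empty fields.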
def splitline(line):
return [x for x in (y.strip() for y in line.split(',')) if len(x)]
if __name__ == "__main__":
import sys
url = sys.argv[1]
schema = sys.argv[2]
filename = sys.argv[3]
lines = None
with open(filename, 'r') as f:
lines = f.read()
post(url, schema, 'payload', lines)
| ejconlon/iwantaride | postit.py | Python | mit | 1,204 | 0.004153 |
# Generated by Django 2.2.25 on 2022-01-03 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0022_auto_20191015_0510"),
]
operations = [
migrations.AlterField(
model_name="user",
name="notify_unassigned_letter",
field=models.BooleanField(
default=False,
help_text="Whether or not to notify user about any letter in case without anybody who can reply to client",
verbose_name="Defaults to reply in cases",
),
),
]
| watchdogpolska/poradnia | poradnia/users/migrations/0023_auto_20220103_1354.py | Python | mit | 618 | 0.001618 |
# -*- coding: utf-8 -*-
# flake8: noqa
import unittest
import datetime
import uuid
from urllib.error import HTTPError
from mock import Mock
from mailman.email.message import Message
from mailman.interfaces.archiver import ArchivePolicy
#import kittystore.utils
#from kittystore import get_store
#from kittystore.caching import mailman_user
#from kittystore.test import FakeList, SettingsModule
class ListCacheTestCase(unittest.TestCase):
def setUp(self):
self.store = get_store(SettingsModule(), auto_create=True)
kittystore.utils.MM_CLIENT = Mock()
def tearDown(self):
self.store.close()
kittystore.utils.MM_CLIENT = None
def test_properties_on_new_message(self):
ml = FakeList("example-list")
ml.display_name = "name 1"
ml.subject_prefix = "[prefix 1]"
ml.description = "desc 1"
kittystore.utils.MM_CLIENT.get_list.side_effect = lambda n: ml
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
self.assertEqual(ml_db.display_name, "name 1")
self.assertEqual(ml_db.subject_prefix, "[prefix 1]")
ml.display_name = "name 2"
ml.subject_prefix = "[prefix 2]"
ml.description = "desc 2"
ml.archive_policy = "private"
msg.replace_header("Message-ID", "<dummy2>")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
#ml_db = self.store.db.find(List).one()
self.assertEqual(ml_db.display_name, "name 2")
self.assertEqual(ml_db.subject_prefix, "[prefix 2]")
self.assertEqual(ml_db.description, "desc 2")
self.assertEqual(ml_db.archive_policy, ArchivePolicy.private)
def test_on_old_message(self):
kittystore.utils.MM_CLIENT = None
olddate = datetime.datetime.utcnow() - datetime.timedelta(days=40)
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg["Date"] = olddate.isoformat()
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
self.assertEqual(ml_db.recent_participants_count, 0)
self.assertEqual(ml_db.recent_threads_count, 0)
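# Minimal stand-in for a mailmanclient user object; the tests only need its user_id attribute.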
class FakeMMUser(object):
user_id = None
class UserIdCacheTestCase(unittest.TestCase):
def setUp(self):
self.store = get_store(SettingsModule(), auto_create=True)#, debug=True)
self.mm_client = Mock()
mailman_user._MAILMAN_CLIENT = self.mm_client
self.mm_client.get_user.side_effect = HTTPError(
None, 404, "dummy", {}, None)
def tearDown(self):
self.store.close()
mailman_user._MAILMAN_CLIENT = None
def test_on_new_message_userid(self):
# Check that the user_id is set on a new message
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
# setup Mailman's reply
new_user_id = FakeMMUser()
uid = uuid.uuid1()
new_user_id.user_id = uid.int
self.mm_client.get_user.side_effect = lambda addr: new_user_id
# check the User does not exist yet
self.assertEqual(0,
self.store.get_message_count_by_user_id(uid))
# do the test and check
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, uid)
self.assertTrue(dbmsg.sender.user is not None,
"A 'User' instance was not created")
self.assertEqual(dbmsg.sender.user.id, uid)
self.assertEqual(1,
self.store.get_message_count_by_user_id(uid))
self.assertEqual(self.store.get_users_count(), 1)
def test_on_new_message_no_reply_from_mailman(self):
        # Check that the user_id stays unset when Mailman cannot resolve the sender
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
def test_sync_mailman_user(self):
# Check that the user_id is set when sync_mailman_user is run
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
# setup Mailman's reply
uid = uuid.uuid1()
new_user_id = FakeMMUser()
new_user_id.user_id = uid.int
self.mm_client.get_user.side_effect = lambda addr: new_user_id
# do the test and check
mailman_user.sync_mailman_user(self.store)
#dbmsg = self.store.get_message_by_id_from_list(
# "example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, uid)
self.assertTrue(dbmsg.sender.user is not None,
"A 'User' instance was not created")
self.assertEqual(dbmsg.sender.user.id, uid)
self.assertEqual(1,
self.store.get_message_count_by_user_id(uid))
def test_on_new_message_bad_reply_from_mailman(self):
# Check that errors from mailmanclient are handled gracefully
self.mm_client.get_user.side_effect = ValueError
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
try:
self.store.add_to_list("example-list", msg)
except ValueError as e:
self.fail("Errors from mailmanclient should be handled gracefully")
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
class TestNotifyStore(unittest.TestCase):
def setUp(self):
self.store = get_sa_store(SettingsModule(), auto_create=True)
self.store.db.cache.get_or_create = Mock()
self.store.db.cache.get_or_create.side_effect = lambda *a, **kw: a[1]()
self.store.db.cache.set = Mock()
# cache.delete() will be called if the cache is invalidated
self.store.db.cache.delete = Mock()
def tearDown(self):
self.store.close()
def test_on_new_message_invalidate(self):
# Check that the cache is invalidated on new message
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
today = datetime.datetime.utcnow().date() # don't use datetime.date.today(), we need UTC
self.store.add_to_list("example-list", msg)
# calls to cache.delete() -- invalidation
delete_args = [ call[0][0] for call in
self.store.db.cache.delete.call_args_list ]
#from pprint import pprint; pprint(delete_args)
self.assertEqual(set(delete_args), set([
'list:example-list:recent_participants_count',
'list:example-list:recent_threads_count',
'list:example-list:participants_count:%d:%d' % (today.year, today.month),
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:emails_count',
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:participants_count'
]))
# calls to cache.get_or_create() -- repopulation
goc_args = [ call[0][0] for call in
self.store.db.cache.get_or_create.call_args_list ]
#from pprint import pprint; pprint(goc_args)
self.assertEqual(set(goc_args), set([
'list:example-list:recent_participants_count',
'list:example-list:recent_threads_count',
'list:example-list:participants_count:%d:%d' % (today.year, today.month),
'list:example-list:threads_count:%d:%d' % (today.year, today.month),
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:emails_count',
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:participants_count',
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:starting_email_id',
]))
#self.assertEqual(l.recent_participants_count, 1)
#self.assertEqual(l.recent_threads_count, 1)
#msg.replace_header("Message-ID", "<dummy2>")
#self.store.add_to_list("example-list", msg)
#self.assertEqual(l.recent_participants_count, 1)
#self.assertEqual(l.recent_threads_count, 2)
def test_on_new_thread_invalidate(self):
        # Check that the thread cache is refreshed when a reply is added to an existing thread
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
msg.replace_header("Message-ID", "<dummy2>")
msg["In-Reply-To"] = "<dummy>"
self.store.add_to_list("example-list", msg)
call_args = [ call[0][0] for call in self.store.db.cache.set.call_args_list ]
# we have duplicates because both the Storm and the SQLAlchemy model
# subscribe to the event, so we must deduplicate
call_args = set(call_args)
#from pprint import pprint; pprint(call_args)
#print(repr(call_args))
self.assertEqual(call_args, set([
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:subject'
]))
| systers/hyperkitty | hyperkitty/tests/_test_caching.py | Python | gpl-3.0 | 9,926 | 0.004332 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.config import configdata
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(webenginesettings.inject_userscripts)
webenginesettings.inject_userscripts()
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._tab.search.search_displayed:
# We are currently in search mode.
# convert the search to a blue selection so we can operate on it
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
self._tab.run_js_async(
javascript.assemble('caret', 'setPlatform', sys.platform))
self._js_call('setInitialCursor')
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
if mode != usertypes.KeyMode.caret:
return
self.drop_selection()
self._js_call('disableCaret')
def move_to_next_line(self, count=1):
for _ in range(count):
self._js_call('moveDown')
def move_to_prev_line(self, count=1):
for _ in range(count):
self._js_call('moveUp')
def move_to_next_char(self, count=1):
for _ in range(count):
self._js_call('moveRight')
def move_to_prev_char(self, count=1):
for _ in range(count):
self._js_call('moveLeft')
def move_to_end_of_word(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfWord')
def move_to_next_word(self, count=1):
for _ in range(count):
self._js_call('moveToNextWord')
def move_to_prev_word(self, count=1):
for _ in range(count):
self._js_call('moveToPreviousWord')
def move_to_start_of_line(self):
self._js_call('moveToStartOfLine')
def move_to_end_of_line(self):
self._js_call('moveToEndOfLine')
def move_to_start_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfNextBlock')
def move_to_start_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfPrevBlock')
def move_to_end_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfNextBlock')
def move_to_end_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfPrevBlock')
def move_to_start_of_document(self):
self._js_call('moveToStartOfDocument')
def move_to_end_of_document(self):
self._js_call('moveToEndOfDocument')
def toggle_selection(self):
self._js_call('toggleSelection')
def drop_selection(self):
self._js_call('dropSelection')
def selection(self, callback):
# Not using selectedText() as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-53134
# Even on Qt 5.10 selectedText() seems to work poorly, see
# https://github.com/qutebrowser/qutebrowser/issues/3523
self._tab.run_js_async(javascript.assemble('caret', 'getSelection'),
callback)
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
# let's click the link via a fake-click
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
def _js_call(self, command):
self._tab.run_js_async(
javascript.assemble('caret', command))
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 5000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.predicted_navigation.emit(item.url())
self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
@pyqtSlot()
def _on_load_finished():
self._tab.scroller.to_point(cur_data['scroll-pos'])
self._tab.load_finished.disconnect(_on_load_finished)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
self._tab.load_finished.connect(_on_load_finished)
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
_load_finished_fake:
Used in place of unreliable loadFinished
"""
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
_load_finished_fake = pyqtSignal(bool)
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._init_js()
self._child_event_filter = None
self._saved_zoom = None
self._reload_url = None
def _init_js(self):
js_code = '\n'.join([
'"use strict";',
'window._qutebrowser = window._qutebrowser || {};',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
utils.read_file('javascript/caret.js'),
])
script = QWebEngineScript()
# We can't use DocumentCreation here as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-66011
script.setInjectionPoint(QWebEngineScript.DocumentReady)
script.setSourceCode(js_code)
page = self._widget.page()
script.setWorldId(QWebEngineScript.ApplicationWorld)
# FIXME:qtwebengine what about runsOnSubFrames?
page.scripts().insert(script)
def _install_event_filter(self):
self._widget.focusProxy().installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url):
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
self._widget.shutdown()
def reload(self, *, force=False):
self.predicted_navigation.emit(self.url())
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started], url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
self._show_error_page(url, "Proxy authentication required")
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
abort_on = [self.shutting_down, self.load_started]
answer = shared.authentication_required(url, authenticator,
abort_on)
if not netrc_success and answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
self._show_error_page(url, "Authentication required")
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
@pyqtSlot(int)
def _on_load_progress_workaround(self, perc):
"""Use loadProgress(100) to emit loadFinished(True).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if perc == 100 and self.load_status() != usertypes.LoadStatus.error:
self._load_finished_fake.emit(True)
@pyqtSlot(bool)
def _on_load_finished_workaround(self, ok):
"""Use only loadFinished(False).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if not ok:
self._load_finished_fake.emit(False)
def _error_page_workaround(self, html):
"""Check if we're displaying a Chromium error page.
        This only gets called if we got loadFinished(False) without JavaScript,
        so we can display at least some error page.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
Needs to check the page content as a WORKAROUND for
https://bugreports.qt.io/browse/QTBUG-66661
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
self._show_error_page(self.url(), error=match.group(1))
@pyqtSlot(bool)
def _on_load_finished(self, ok):
"""Display a static error page if JavaScript is disabled."""
super()._on_load_finished(ok)
js_enabled = self.settings.test_attribute('content.javascript.enabled')
if not ok and not js_enabled:
self.dump_async(self._error_page_workaround)
if ok and self._reload_url is not None:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
log.config.debug(
"Reloading {} because of config change".format(
self._reload_url.toDisplayString()))
QTimer.singleShot(100, lambda url=self._reload_url:
self.openurl(url))
self._reload_url = None
if not qtutils.version_check('5.10', compiled=False):
# We can't do this when we have the loadFinished workaround as that
# sometimes clears icons without loading a new page.
# In general, this is handled by Qt, but when loading takes long,
# the old icon is still displayed.
self.icon_changed.emit(QIcon())
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""If we know we're going to visit an URL soon, change the settings."""
self.settings.update_for_url(url)
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if not navigation.accepted or not navigation.is_main_frame:
return
        needs_reload = {
            'content.plugins',
            'content.javascript.enabled',
            'content.javascript.can_access_clipboard',
            'content.print_element_backgrounds',
            'input.spatial_navigation',
        }
assert needs_reload.issubset(configdata.DATA)
changed = self.settings.update_for_url(navigation.url)
if (changed & needs_reload and navigation.navigation_type !=
navigation.Type.link_clicked):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
self._reload_url = navigation.url
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
page.navigation_request.connect(self._on_navigation_request)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
page.loadProgress.connect(self._on_load_progress_workaround)
self._load_finished_fake.connect(self._on_history_trigger)
self._load_finished_fake.connect(self._restore_zoom)
self._load_finished_fake.connect(self._on_load_finished)
page.loadFinished.connect(self._on_load_finished_workaround)
else:
# for older Qt versions which break with the above
page.loadProgress.connect(self._on_load_progress)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
self.predicted_navigation.connect(self._on_predicted_navigation)
def event_target(self):
return self._widget.focusProxy()
| kmarius/qutebrowser | qutebrowser/browser/webengine/webenginetab.py | Python | gpl-3.0 | 37,530 | 0.00008 |
#!/usr/bin/env python
from random import Random
colors_support = True
try:
from colorama import init, Fore
init()
except:
colors_support = False
print "For colors install colorama"
hint_table = \
{('5',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('6',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('7',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('8',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('9',): {'A': 'h', '10': 'h', '3': 'd', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('10',): {'A': 'h', '10': 'h', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('11',): {'A': 'h', '10': 'd', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('12',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('13',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('14',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('15',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('16',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('17',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('18',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('19',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('20',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('2', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'h', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('3', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'h', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('4', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('5', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('6', 'A'): {'A': 'h', '10': 'h', '3': 'd', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('7', 'A'): {'A': 'h', '10': 'h', '3': 'd', '2': 's', '5': 'd', '4': 'd', '7': 's', '6': 'd', '9': 'h', '8': 's'},
('8', 'A'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('9', 'A'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('A', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('2', '2'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('3', '3'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('4', '4'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('5', '5'): {'A': 'h', '10': 'h', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('6', '6'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('7', '7'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('8', '8'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('9', '9'): {'A': 's', '10': 's', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 's', '6': 'h', '9': 'h', '8': 'h'},
('10', '10'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'}}
def color(color):
if colors_support:
if color is "green":
return Fore.GREEN # @UndefinedVariable
elif color is "red":
return Fore.RED # @UndefinedVariable
elif color is "white":
return Fore.WHITE # @UndefinedVariable
elif color is "yellow":
return Fore.YELLOW # @UndefinedVariable
elif color is "blue":
return Fore.BLUE # @UndefinedVariable
else:
return Fore.WHITE # @UndefinedVariable
else:
return ''
class Bookie(object):
def __init__(self, credit=1000):
self.credit = credit
self.bet = None
self.previous_bet = None
def place_bet(self, bet=None, ratio=2):
if bet is None and self.previous_bet is None:
raise Exception("No bet was specified")
if bet is None and self.previous_bet is not None:
# Using the last bet
bet = self.previous_bet
if bet > self.credit:
raise Exception("There is only {0} in credit\
, can't place bet of {1}".format(self.credit, bet))
self.ratio = ratio
self.previous_bet = bet
self.bet = bet
def report_win(self):
if self.bet is None:
raise Exception("No bet was placed")
self.credit += self.bet * self.ratio - self.bet
def report_lose(self):
if self.bet is None:
raise Exception("No bet was placed")
self.credit -= self.bet
def double_bet(self):
if self.bet is None:
raise Exception("No bet was placed")
self.bet *= 2
def half_bet(self):
if self.bet is None:
raise Exception("No bet was placed")
self.bet /= 2
def abort_bet(self):
self.bet = 0
class Deck(object):
def __init__(self, num_of_decks):
self.cards = []
self.rand = Random()
for deck_num in range(num_of_decks * 4):
self.cards.extend(range(2, 11))
self.cards.extend(['J'] * 4 * num_of_decks)
self.cards.extend(['Q'] * 4 * num_of_decks)
self.cards.extend(['K'] * 4 * num_of_decks)
self.cards.extend(['A'] * 4 * num_of_decks)
def get_card(self):
card_num = self.rand.randint(0, len(self.cards) - 1)
card = self.cards[card_num]
del self.cards[card_num]
return card
class Player(object):
def __init__(self, deck):
self.cards = []
self.deck = deck
def draw_card_from_deck(self):
self.cards.append(self.deck.get_card())
def get_sum_of_cards(self):
sum_of_cards = 0
aces = 0
for card in self.cards:
            # Each face card counts as 10
            if card == 'J' or card == 'Q' or card == 'K':
                sum_of_cards += 10
            elif card == 'A':
                aces += 1
            elif card == 'X':
# Hidden card
continue
else:
sum_of_cards += card
# We need to see how to handle aces
if aces > 0:
temp_sum = 11 + (aces - 1) + sum_of_cards
if temp_sum <= 21:
sum_of_cards = temp_sum
else:
sum_of_cards += aces
return sum_of_cards
def get_cards(self):
return self.cards
class MachinePlayer(Player):
def __init__(self, deck):
super(MachinePlayer, self).__init__(deck)
self.hidden_card = None
def should_take_another_card(self, player):
if self.get_sum_of_cards() < 17 or\
                (self.get_sum_of_cards() == 17 and
                 self.cards.count('A') == 1):
return True
return False
def draw_card_from_deck(self):
        if len(self.cards) == 1 and self.hidden_card is None:
# The second card should be hidden
self.hidden_card = self.deck.get_card()
self.cards.append('X')
elif self.hidden_card is not None:
# At the third time, the hidden card is shown
self.cards.remove('X')
self.cards.append(self.hidden_card)
self.hidden_card = None
else:
self.cards.append(self.deck.get_card())
class Result(object):
def __init__(self, machine, player):
self.machine = machine
self.player = player
self._player_surrended = False
def calculate(self, no_more_moves=False):
if self._player_surrended:
self.winner = "dealer"
self.result_type = "surrended"
self.is_ended = True
self._player_surrended = False
return self
player_score = self.player.get_sum_of_cards()
dealer_score = self.machine.get_sum_of_cards()
self.is_ended = False
self.winner = None
        if player_score == 21 and dealer_score != 21:
            self.winner = "player"
            self.result_type = "21"
        elif dealer_score == 21 and player_score != 21:
            self.winner = "dealer"
            self.result_type = "21"
elif dealer_score > 21 and player_score <= 21:
self.winner = "player"
self.result_type = "busting"
elif player_score > 21 and dealer_score <= 21:
self.winner = "dealer"
self.result_type = "busting"
elif no_more_moves:
if player_score > dealer_score:
self.winner = "player"
self.result_type = "score"
elif dealer_score > player_score:
self.winner = "dealer"
self.result_type = "score"
        elif dealer_score == player_score:
self.winner = "tie"
self.result_type = "push"
if self.winner is not None:
self.is_ended = True
return self
def player_surrended(self):
self._player_surrended = True
class Game(object):
def __init__(self, bookie, num_of_decks=4):
self.deck = Deck(num_of_decks)
self.round = 0
self.bookie = bookie
def print_status(self):
self.round += 1
print color("white") + "Round {0}".format(self.round)
print "Dealer got {0} ({1})".format(self.machine.get_cards(),
self.machine.get_sum_of_cards())
print "You got {0} ({1})".format(self.player.get_cards(),
self.player.get_sum_of_cards())
def start_game(self):
self.machine = MachinePlayer(self.deck)
self.player = Player(self.deck)
self.result = Result(self.machine, self.player)
self.player.draw_card_from_deck()
self.machine.draw_card_from_deck()
self.player.draw_card_from_deck()
self.machine.draw_card_from_deck()
self.print_status()
def is_game_ended(self, no_more_moves=False):
if self.result.calculate(no_more_moves).is_ended:
if self.result.winner is "player":
print color("green")
self.bookie.report_win()
elif self.result.winner is "dealer":
print color("red")
self.bookie.report_lose()
elif self.result.winner is "tie":
print color("yellow")
self.bookie.abort_bet()
if self.result.winner is not "tie":
print "{0} won due to {1}".\
format(self.result.winner, self.result.result_type)
else:
print "Push"
return self.result.is_ended
def dealer_turn(self):
self.machine.draw_card_from_deck()
self.print_status()
while self.machine.should_take_another_card(self.player):
self.machine.draw_card_from_deck()
self.print_status()
def give_hint(self):
def normalize(card):
if card == "J" or card == "Q" or card == "K":
return str(10)
return str(card)
tuple_sum = str(self.player.get_sum_of_cards()),
card1 = normalize(self.player.cards[0])
card2 = normalize(self.player.cards[1])
tuple_cards = tuple(sorted([card1, card2]))
if tuple_cards in hint_table:
hint_raw = hint_table[tuple_cards]
elif tuple_sum in hint_table:
hint_raw = hint_table[tuple_sum]
else:
return "No hint found"
return hint_raw.get(normalize(self.machine.cards[0]))
def player_turn(self):
first = True
ans = None
while self.player.get_sum_of_cards() < 20 and ans != "s":
if first:
ans = raw_input(color("yellow") + "[H]it, [s]tand, su[r]render,\
[d]ouble or h[e]lp?: ")
else:
ans = raw_input(color("yellow") + "[H]it or [s]tand?: ")
if ans == "h":
self.player.draw_card_from_deck()
self.print_status()
elif ans == "e" and first:
print "The hint is: {0}".format(self.give_hint())
# He can still use the first round options
continue
elif ans == "d" and first:
self.bookie.double_bet()
self.player.draw_card_from_deck()
print "Betting on ${0}".format(self.bookie.bet)
self.print_status()
break
elif ans == "r" and first:
self.result.player_surrended()
self.bookie.half_bet()
break
else:
# In case no valid answer we want
# The first status to be kept
continue
first = False
def place_bet(self):
bet = None
while True:
ans = raw_input(color("white") + "How much would you like to bet? \
(1, 5, 10, 50, 100, [s]ame): ")
            if ans == 's' or ans == '':
if self.bookie.previous_bet is None:
print "No previous bet was made"
continue
if self.bookie.credit < self.bookie.previous_bet:
print "You don't have enough credit for this bet"
continue
bet = self.bookie.previous_bet
break
try:
bet = int(ans)
except:
print "{0} is not a valid bet".format(ans)
continue
            if [1, 5, 10, 50, 100].count(bet) != 1:
print "{0} is not a valid bet".format(ans)
continue
elif bet > self.bookie.credit:
print "You only have {0} in credit".format(self.bookie.credit)
continue
break
print "Betting on: ${0}".format(bet)
self.bookie.place_bet(bet)
def play(self):
self.place_bet()
self.start_game()
if self.is_game_ended(no_more_moves=False):
return
self.player_turn()
if self.is_game_ended(no_more_moves=False):
return
self.dealer_turn()
if self.is_game_ended(no_more_moves=True):
return
class GameSimulator(Game):
bet = 100
def player_turn(self):
first = True
ans = None
while self.player.get_sum_of_cards() < 20 and ans != "s":
if first:
ans = self.give_hint()
first = False
else:
if self.player.get_sum_of_cards() <= 12:
ans = "h"
else:
ans = "s"
if ans == "h":
self.player.draw_card_from_deck()
self.print_status()
elif ans == "d":
self.bookie.double_bet()
self.player.draw_card_from_deck()
print "Betting on ${0}".format(self.bookie.bet * 2)
self.print_status()
elif ans == "r":
self.result.player_surrended()
self.bookie.half_bet()
else:
ans = "s"
def place_bet(self):
print "Betting on: ${0}".format(self.bet)
self.bookie.place_bet(self.bet)
def main():
bookie = Bookie(credit=500)
top_credit = bookie.credit
games_played = 0
print color("green") + "Your initial credit: {0}".format(bookie.credit)
if "s" == raw_input("[S]imulator or [r]eal? "):
while True:
games_played += 1
game = GameSimulator(bookie)
game.play()
if bookie.credit > top_credit:
top_credit = bookie.credit
print color("white") + "Your current credit: {0}".format(bookie.credit)
if bookie.credit <= 0:
print color("red") + "Man, you just lost everything..."
break
else:
while True:
games_played += 1
game = Game(bookie)
game.play()
if bookie.credit > top_credit:
top_credit = bookie.credit
print color("white") + "Your current credit: {0}".format(bookie.credit)
if bookie.credit > 0:
if "n" == raw_input("Should we play another? (y/n): "):
break
else:
print color("red") + "Man, you just lost everything..."
break
print color("white") + "{0} games were played".format(games_played)
print color("white") + "Your top credit was {0}".format(top_credit)
print "End of game"
if __name__ == "__main__":
main()
| itaiag/blackjack | blackjack.py | Python | apache-2.0 | 17,936 | 0.004014 |
from __future__ import absolute_import
import community
import networkx as nx
from networkx.algorithms.community import asyn_lpa_communities
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities
class NetworkXLabelGraphClusterer(LabelGraphClustererBase):
"""Cluster label space with NetworkX community detection
This clusterer constructs a NetworkX representation of the Label Graph generated by graph builder and detects
communities in it using methods from the NetworkX library. Detected communities are converted to
a label space clustering.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
method: string
the community detection method to use, this clusterer supports the following community detection methods:
+----------------------+--------------------------------------------------------------------------------+
| Method name string | Description |
+----------------------+--------------------------------------------------------------------------------+
| louvain_ | Detecting communities with largest modularity using incremental greedy search |
+----------------------+--------------------------------------------------------------------------------+
| label_propagation_ | Detecting communities from multiple async label propagation on the graph |
+----------------------+--------------------------------------------------------------------------------+
.. _louvain: https://python-louvain.readthedocs.io/en/latest/
.. _label_propagation: https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.community.label_propagation.asyn_lpa_communities.html
Attributes
----------
graph_ : networkx.Graph
the networkx Graph object containing the graph representation of graph builder's adjacency matrix and weights
weights_ : { 'weight' : list of values in edge order of graph edges }
edge weights stored in a format recognizable by the networkx module
References
----------
    If you use this clusterer please cite the NetworkX paper and the community detection paper:
.. code :: latex
@unknown{networkx,
author = {Hagberg, Aric and Swart, Pieter and S Chult, Daniel},
year = {2008},
month = {01},
title = {Exploring Network Structure, Dynamics, and Function Using NetworkX},
booktitle = {Proceedings of the 7th Python in Science Conference}
}
@article{blondel2008fast,
title={Fast unfolding of communities in large networks},
author={Blondel, Vincent D and Guillaume, Jean-Loup and Lambiotte, Renaud and Lefebvre, Etienne},
journal={Journal of statistical mechanics: theory and experiment},
volume={2008},
number={10},
pages={P10008},
year={2008},
publisher={IOP Publishing}
}
Examples
--------
An example code for using this clusterer with a classifier looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from skmultilearn.problem_transform import LabelPowerset
from skmultilearn.cluster import NetworkXLabelGraphClusterer, LabelCooccurrenceGraphBuilder
from skmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1000)
# construct a graph builder that will include
# label relations weighted by how many times they
# co-occurred in the data, without self-edges
graph_builder = LabelCooccurrenceGraphBuilder(
weighted = True,
include_self_edges = False
)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
# setup the clusterer to use, we selected the modularity-based approach
clusterer = NetworkXLabelGraphClusterer(graph_builder=graph_builder, method='louvain')
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, graph_builder, method):
"""Initializes the clusterer
Attributes
----------
graph_builder: a GraphBuilderBase inherited transformer
Class used to provide an underlying graph for NetworkX
"""
super(NetworkXLabelGraphClusterer, self).__init__(graph_builder)
self.method = method
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
        array of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
edge_map = self.graph_builder.transform(y)
if self.graph_builder.is_weighted:
self.weights_ = dict(weight=list(edge_map.values()))
else:
self.weights_ = dict(weight=None)
self.graph_ = nx.Graph()
for n in range(y.shape[1]):
self.graph_.add_node(n)
for e, w in edge_map.items():
self.graph_.add_edge(e[0], e[1], weight=w)
if self.method == 'louvain':
partition_dict = community.best_partition(self.graph_)
memberships = [partition_dict[i] for i in range(y.shape[1])]
return np.array(
_membership_to_list_of_communities(
memberships,
1 + max(memberships)
)
)
else:
return np.array([list(i) for i in asyn_lpa_communities(self.graph_, 'weight')])
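# Direct use of the clusterer (illustrative sketch; values are hypothetical):
# with `y` as a sparse label matrix and `graph_builder` as in the class docstring,
#   clusterer = NetworkXLabelGraphClusterer(graph_builder, method='louvain')
#   partition = clusterer.fit_predict(None, y)
# `partition` is then an array of arrays of label indexes, e.g. [[0, 3], [1, 2, 4]].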
| scikit-multilearn/scikit-multilearn | skmultilearn/cluster/networkx.py | Python | bsd-2-clause | 6,829 | 0.003075 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-12-27 14:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0026_auto_20171227_1429'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='location',
),
migrations.RemoveField(
model_name='position',
name='location',
),
]
| macwis/simplehr | candidates/migrations/0027_auto_20171227_1432.py | Python | gpl-3.0 | 507 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""General utility functions module."""
from functools import partial
import six
from flask import abort, current_app, jsonify, make_response, request, url_for
from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError, \
PIDMissingObjectError, PIDRedirectedError, PIDUnregistered
from invenio_pidstore.resolver import Resolver
from invenio_records.api import Record
from werkzeug.routing import BaseConverter, BuildError, PathConverter
from werkzeug.utils import cached_property, import_string
from .errors import PIDDeletedRESTError, PIDDoesNotExistRESTError, \
PIDMissingObjectRESTError, PIDRedirectedRESTError, \
PIDUnregisteredRESTError
from .proxies import current_records_rest
def build_default_endpoint_prefixes(records_rest_endpoints):
"""Build the default_endpoint_prefixes map."""
pid_types = set()
guessed = set()
endpoint_prefixes = {}
for key, endpoint in records_rest_endpoints.items():
pid_type = endpoint['pid_type']
pid_types.add(pid_type)
is_guessed = key == pid_type
is_default = endpoint.get('default_endpoint_prefix', False)
if is_default:
if pid_type in endpoint_prefixes and pid_type not in guessed:
raise ValueError('More than one "{0}" defined.'.format(
pid_type
))
endpoint_prefixes[pid_type] = key
guessed -= {pid_type}
elif is_guessed and pid_type not in endpoint_prefixes:
endpoint_prefixes[pid_type] = key
guessed |= {pid_type}
not_found = pid_types - set(endpoint_prefixes.keys())
if not_found:
raise ValueError('No endpoint-prefix for {0}.'.format(
', '.join(not_found)
))
return endpoint_prefixes
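# Illustrative example (hypothetical endpoint configuration): given
#   {'recid': {'pid_type': 'recid'},
#    'recid_alt': {'pid_type': 'recid', 'default_endpoint_prefix': True}}
# the function returns {'recid': 'recid_alt'}: an explicit default wins over an
# endpoint whose key merely matches its pid_type.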
def obj_or_import_string(value, default=None):
"""Import string or return object.
    :param value: Import path or class object to instantiate.
    :param default: Default object to return if the import fails.
:returns: The imported object.
"""
if isinstance(value, six.string_types):
return import_string(value)
elif value:
return value
return default
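# Illustrative behaviour:
#   obj_or_import_string('invenio_records.api.Record')  -> the Record class
#   obj_or_import_string(Record)                        -> Record (returned unchanged)
#   obj_or_import_string(None, default=Record)          -> Record (the fallback)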
def load_or_import_from_config(key, app=None, default=None):
"""Load or import value from config.
:returns: The loaded value.
"""
app = app or current_app
imp = app.config.get(key)
return obj_or_import_string(imp, default=default)
def allow_all(*args, **kwargs):
"""Return permission that always allow an access.
:returns: A object instance with a ``can()`` method.
"""
return type('Allow', (), {'can': lambda self: True})()
def deny_all(*args, **kwargs):
"""Return permission that always deny an access.
:returns: A object instance with a ``can()`` method.
"""
return type('Deny', (), {'can': lambda self: False})()
def check_elasticsearch(record, *args, **kwargs):
"""Return permission that check if the record exists in ES index.
:params record: A record object.
:returns: A object instance with a ``can()`` method.
"""
def can(self):
"""Try to search for given record."""
search = request._methodview.search_class()
search = search.get_record(str(record.id))
return search.count() == 1
return type('CheckES', (), {'can': can})()
class LazyPIDValue(object):
"""Lazy PID resolver.
The PID will not be resolved until the `data` property is accessed.
"""
def __init__(self, resolver, value):
"""Initialize with resolver object and the PID value.
        :param resolver: Resolver for PIDs,
            see :class:`invenio_pidstore.resolver.Resolver`.
        :param value: PID value.
:type value: str
"""
self.resolver = resolver
self.value = value
@cached_property
def data(self):
"""Resolve PID from a value and return a tuple with PID and the record.
:returns: A tuple with the PID and the record resolved.
"""
try:
return self.resolver.resolve(self.value)
except PIDDoesNotExistError as pid_error:
raise PIDDoesNotExistRESTError(pid_error=pid_error)
except PIDUnregistered as pid_error:
raise PIDUnregisteredRESTError(pid_error=pid_error)
except PIDDeletedError as pid_error:
raise PIDDeletedRESTError(pid_error=pid_error)
except PIDMissingObjectError as pid_error:
current_app.logger.exception(
'No object assigned to {0}.'.format(pid_error.pid),
extra={'pid': pid_error.pid})
raise PIDMissingObjectRESTError(pid_error.pid, pid_error=pid_error)
except PIDRedirectedError as pid_error:
try:
location = url_for(
'.{0}_item'.format(
current_records_rest.default_endpoint_prefixes[
pid_error.destination_pid.pid_type]),
pid_value=pid_error.destination_pid.pid_value)
data = dict(
status=301,
message='Moved Permanently',
location=location,
)
response = make_response(jsonify(data), data['status'])
response.headers['Location'] = location
abort(response)
except (BuildError, KeyError):
current_app.logger.exception(
'Invalid redirect - pid_type "{0}" '
'endpoint missing.'.format(
pid_error.destination_pid.pid_type),
extra={
'pid': pid_error.pid,
'destination_pid': pid_error.destination_pid,
})
raise PIDRedirectedRESTError(
pid_error.destination_pid.pid_type, pid_error=pid_error)
class PIDConverter(BaseConverter):
"""Converter for PID values in the route mapping.
This class is a custom routing converter defining the 'PID' type.
See http://werkzeug.pocoo.org/docs/0.12/routing/#custom-converters.
Use ``pid`` as a type in the route pattern, e.g.: the use of
route decorator: ``@blueprint.route('/record/<pid(recid):pid_value>')``,
will match and resolve a path: ``/record/123456``.
"""
def __init__(self, url_map, pid_type, getter=None, record_class=None):
"""Initialize the converter."""
super(PIDConverter, self).__init__(url_map)
getter = obj_or_import_string(getter, default=partial(
obj_or_import_string(record_class, default=Record).get_record,
with_deleted=True
))
self.resolver = Resolver(pid_type=pid_type, object_type='rec',
getter=getter)
def to_python(self, value):
"""Resolve PID value."""
return LazyPIDValue(self.resolver, value)
class PIDPathConverter(PIDConverter, PathConverter):
"""PIDConverter with support for path-like (with slashes) PID values.
This class is a custom routing converter defining the 'PID' type.
See http://werkzeug.pocoo.org/docs/0.12/routing/#custom-converters.
    Use ``pidpath`` as a type in the route pattern, e.g.: the use of a route
decorator: ``@blueprint.route('/record/<pidpath(recid):pid_value>')``,
will match and resolve a path containing a DOI: ``/record/10.1010/12345``.
"""
| tiborsimko/invenio-records-restapi | invenio_records_rest/utils.py | Python | gpl-2.0 | 7,700 | 0 |
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import generic
from polls.models import Choice,Poll
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_poll_list'
def get_queryset(self):
"""
Return the last five published polls (not including those set to be
published in the future).
"""
return Poll.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Poll
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Poll
template_name = 'polls/results.html'
def vote(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# display voting form
return render(request, 'polls/detail.html', {
'poll':p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',args=(p.id,)))
| Drhealsgood/learning_django | polls/views.py | Python | mit | 1,526 | 0.006553 |
# -*- coding: utf-8 -*-
# File: webframe/models.py
# Author: Kenson Man <kenson@kensonidv.hk>
# Date: 2020-10-17 12:29
# Desc: Provide the basic model for webframe
from datetime import datetime
from deprecation import deprecated
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.shortcuts import get_object_or_404 as getObj
from django.utils import timezone as tz
from django.utils.translation import ngettext, get_language, ugettext_lazy as _
from json import JSONEncoder
from pathlib import Path
from shutil import copyfile
from .CurrentUserMiddleware import get_current_user, get_current_request
from .functions import getBool, getClass, getTime, FMT_DATE, FMT_TIME, FMT_DATETIME, isUUID, TRUE_VALUES, getSecretKey, encrypt, decrypt, ENCRYPTED_PREFIX, LogMessage as lm, cache
import math, uuid, logging, json, pytz, re, sys, os
logger=logging.getLogger('webframe.models')
DATEFMT='%Y-%m-%d %H:%M:%S.%fT%z'
fmt=lambda d: 'null' if d is None else d.strftime(DATEFMT)
rfmt=lambda d: None if d=='null' else datetime.strptime(d, DATEFMT)
nullValue=_('null') #Make sure the null value can be translated
#Make sure the following language names are marked for translation
_('Traditional Chinese')
_('English')
def valueOf(val):
'''
Parse the value into string format
'''
if isinstance(val, datetime):
rst=fmt(val)
elif isinstance(val, get_user_model()):
rst=val.username
elif isinstance(val, uuid.UUID):
rst=val.hex
elif isinstance(val, ValueObject):
rst=val.id.hex
else:
rst=val
return rst
def parseVal(field, val):
'''
Parse the value from dumpable format
'''
typ=field.get_internal_type()
if val is None:
return None
elif typ in ['AutoField', 'IntegerField', 'SmallIntegerField']:
return int(val)
elif typ in ['BigAutoField', 'BigIntegerField']:
return long(val)
elif typ in ['FloatField', 'DecimalField']:
return float(val)
elif typ == 'BooleanField':
return getBool(val)
elif typ in ['UUIDField']:
return uuid.UUID(val)
elif typ in ['CharField', 'TextField', 'EmailField', 'URLField']:
return str(val)
elif typ == 'DateTimeField':
return datetime.strptime(val, DATEFMT)
elif typ == 'ForeignKey':
if field.related_model is get_user_model():
try:
return get_user_model().objects.get(username=val)
except get_user_model().DoesNotExist:
rst=get_current_user()
logger.warning('Specify user<%s> not found, use current user<%s> instead.'%(val, rst))
return rst
return getObj(field.related_model, id=val)
return str(val)
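# Illustrative behaviour (the first argument is a Django model field instance;
# the exact values are hypothetical):
#   parseVal(a BooleanField, 'true')     -> True (via getBool)
#   parseVal(a UUIDField, '...hex...')   -> uuid.UUID instance
#   parseVal(a ForeignKey to User, 'bob') -> the User named 'bob', or the
#                                            current user if no such user exists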
class ValueEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, uuid.UUID):
return o.hex
return super(ValueEncoder, self).default(o)
class Dictable(object):
'''
The class to provide the import/export json method.
'''
META_TYPE='_type_'
META_VERS='_vers_'
def expDict(self, **kwargs):
'''
      The method to export a dictionary. It will ignore the properties that are:
      - prefixed with "_"
      - suffixed with "_id"
'''
src=self.__dict__
rst=dict()
for k in src:
if k.startswith('_'): continue
if k.endswith('_id'): continue
rst[k]=src[k]
rst[Dictable.META_TYPE]="%s.%s"%(self.__class__.__module__, self.__class__.__name__)
rst[Dictable.META_VERS]=self._getDictVers()
for f in self.__class__._meta.get_fields():
if isinstance(f, models.Field):
n=f.name
v=getattr(self, n)
rst[n]=valueOf(v)
return rst
def impDict(self, data, **kwargs):
'''
The method to import from dictionary.
'''
if not Dictable.META_TYPE in data: raise TypeError('This is not the dictionary created by expDict. No type information found')
if not isinstance(self, getClass(data[Dictable.META_TYPE])): raise TypeError('Cannot import %s as %s'%(data[Dictable.META_TYPE], self.__class__))
if hasattr(self, Dictable.META_VERS):
if self._getDictVers() != data[Dictable.META_VERS]: raise IOError('Version mismatched. Requesting %s but %s'%(getattr(self, Dictable.META_VERS), data[Dictable.META_VERS]))
for f in self.__class__._meta.get_fields():
if isinstance(f, models.Field):
n=f.name
v=parseVal(f, data.get(n, None))
setattr(self, n, v)
if getBool(kwargs.get('createNew', 'false')): self.id=None
if getBool(kwargs.get('autoSave', 'false')): self.save()
return self
def _getDictVers(self):
'''
Getter of the dictionary version. It is used to limit the version of dict.
'''
return getattr(self, Dictable.META_VERS, '1')
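   # Illustrative round-trip (MyModel is a hypothetical Dictable subclass):
   #   data = obj.expDict()   # plain dict including _type_ and _vers_ metadata
   #   copy = MyModel().impDict(data, createNew=True, autoSave=True)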
@staticmethod
def getType( instance ):
mod=instance.__class__.__module__
if mod is None or mod==str.__class__.__module__:
return instance.__class__.__name__
else:
return '{0}.{1}'.format(mod, instance.__class__.__name__)
class ValueObject(models.Model, Dictable):
CACHED='__CACHED__'
class Meta(object):
abstract = True
verbose_name = _('ValueObject')
verbose_name_plural = _('ValueObjects')
# view_* permission becomes the default permissions Django 3.0
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
verbose_name=_('ValueObject.id'),
help_text=_('ValueObject.id.helptext'),
)
lmd = models.DateTimeField(
auto_now=True,
verbose_name=_('ValueObject.lmd'),
help_text=_('ValueObject.lmd.helptext'),
)
lmb = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=get_current_user,
null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
related_name='%(class)s_lmb',
verbose_name=_('ValueObject.lmb'),
help_text=_('ValueObject.lmb.helptext'),
)
cd = models.DateTimeField(
auto_now_add=True,
verbose_name=_('ValueObject.cd'),
help_text=_('ValueObject.cd.helptext'),
)
cb = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=get_current_user,
null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
related_name='%(class)s_cb',
verbose_name=_('ValueObject.cb'),
help_text=_('ValueObject.cb.helptext'),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'id' in kwargs: self.id=kwargs['id']
if 'cb' in kwargs: self.cb=kwargs['cb']
if 'cd' in kwargs: self.cd=kwargs['cd']
if 'lmb' in kwargs: self.lmb=kwargs['lmb']
if 'lmd' in kwargs: self.lmd=kwargs['lmd']
@property
def isNew(self):
return self.lmd is None
@property
def isNotNew(self):
return self.lmd is not None
def id_or_new(self):
if self.isNew():
return 'new'
return self.id.hex
def save(self, *args, **kwargs):
'''
      Save the value-object. The method will set up the default value of lmb
'''
user=get_current_user()
if user:
if not user.is_authenticated: user=None
if kwargs.get('update_lmb', 'true') in TRUE_VALUES:
self.lmb=user
if kwargs.get('update_cb', 'true') in TRUE_VALUES:
try:
if not self.cb: self.cb=user
except TypeError:
self.cb=user
super(ValueObject, self).save()
def expDict(self):
return {
Dictable.META_VERS: 1
, Dictable.META_TYPE: Dictable.getType(self)
, 'id': self.id
, 'cb': {Dictable.META_TYPE: Dictable.getType(self.cb), 'id': self.cb.id, 'username': self.cb.username, 'email': self.cb.email} if self.cb else None
, 'cd': self.cd
, 'lmb': {Dictable.META_TYPE: Dictable.getType(self.lmb), 'id': self.lmb.id, 'username': self.lmb.username, 'email': self.lmb.email} if self.lmb else None
, 'lmd': self.lmd
}
class AliveObjectManager(models.Manager):
def living(self, timestamp=None):
'''
The alias of alive for backward compatibility.
'''
return self.alive(timestamp)
def alive(self, timestamp=None):
'''
Return the alive objects according to the specified timestamp.
'''
now=tz.now() if timestamp is None else timestamp
return self.filter(enabled=True,effDate__lte=now).filter(models.Q(expDate__isnull=True)|models.Q(expDate__gt=now)).order_by('-effDate')
def dead(self, timestamp=None):
'''
Return the dead objects according to the specified timestamp.
'''
now=tz.now() if timestamp is None else timestamp
return self.filter(
models.Q(enabled=False)|
models.Q(effDate__gt=now)|
(
models.Q(expDate__isnull=False)&
models.Q(expDate__lt=now)
)
).order_by('-effDate')
@deprecated(deprecated_in="v2.2", removed_in="v3.0", current_version="v2.2", details="Use AliveObject.isOverlapped(start, end) instead")
def isOverlaped(self, start, end):
'''
      Determine whether the specified period overlaps with the object's effective period.
'''
return self.isOverlapped(start, end)
def isOverlapped(self, start, end):
'''
      Determine whether the specified period overlaps with the object's effective period.
      Given:
      - source begin date == self.effDate
      - source end date == self.expDate
      - target begin date == start
      - target end date == end
      Refer to https://stackoverflow.com/questions/14002907/query-to-get-date-overlaping, the definition of overlapping is
      ***** NOT (target_end_date < source_begin_date or target_begin_date > source_end_date) *****
      That is equivalent to:
      not (target_end_date < source_begin_date) and not (target_begin_date > source_end_date)
Therefore:
(effDate__lte=end, expDate__gte=start)
'''
return self.filter(effDate__lte=end, expDate__gte=start)
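   # Worked example (dates are purely illustrative): an object effective from
   # 2024-01-01 to 2024-03-31 overlaps a query window 2024-03-01..2024-06-30,
   # because effDate <= end and expDate >= start; a window starting 2024-04-01
   # would not match.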
class AliveObject(models.Model, Dictable):
class Meta(object):
abstract = True
verbose_name = _('AliveObject')
verbose_name_plural = _('AliveObjects')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'effDate' in kwargs: self.effDate=kwargs['effDate']
if 'expDate' in kwargs: self.expDate=kwargs['expDate']
if 'enabled' in kwargs: self.enabled=kwargs['enabled']
effDate = models.DateTimeField(
default=tz.now,
verbose_name=_('AliveObject.effDate'),
help_text=_('AliveObject.effDate.helptext'),
)
expDate = models.DateTimeField(
null=True,
blank=True,
verbose_name=_('AliveObject.expDate'),
help_text=_('AliveObject.expDate.helptext'),
)
enabled = models.BooleanField(
default=True,
verbose_name=_('AliveObject.enabled'),
help_text=_('AliveObject.enabled.helptext'),
)
objects = AliveObjectManager()
def isNew(self):
'''
      Determine whether the object has just been created (not yet saved).
'''
return self.lmd is None
def isNotNew(self):
'''
      Determine whether the object has already been saved (is not new).
'''
      return self.lmd is not None
def isEffective(self, now=None):
'''
Deprecated. Use isAlive(now=None) instead.
'''
return self.isAlive(now)
def isAlive(self, now=None):
'''
      Determine whether the object is alive (is-effective).
'''
if not now: now=tz.now()
return self.enabled and self.effDate<=now and (self.expDate is None or self.expDate>now)
def isDead(self, now=None):
'''
      Determine whether the object is dead (is-not-effective).
'''
return not self.isAlive(now)
def asDict(self):
return {'effDate':fmt(self.effDate), 'expDate':fmt(self.expDate), 'enabled':self.enabled}
def fromDict(self, data):
self.effDate=rfmt(data['effDate'])
self.expDate=rfmt(data['expDate'])
self.enabled=getBool(data['enabled'])
@property
def alive(self):
return self.isAlive()
# The abstract value=object that provide the sequence field and related ordering features
class OrderableValueObject(ValueObject):
DISABLED_REORDER = 'DISABLED_REORDER'
sequence = models.FloatField(default=sys.maxsize,verbose_name=_('OrderableValueObject.sequence'))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'sequence' in kwargs: self.sequence=float(kwargs['sequence'])
class Meta:
abstract = True
ordering = ['sequence', 'id',]
# Get the ordering features
def __get_ordered_list__(self):
'''
Get the ordered list. Returns None to disable the re-ordering feature when saving
'''
return self.__class__.objects.all().order_by('sequence')
# Saving and reorder the models
def save(self, *args, **kwargs):
      if getBool(kwargs.get(OrderableValueObject.DISABLED_REORDER, 'False')):
super().save(*args, **kwargs)
else:
self.sequence-=0.5
super().save(*args, **kwargs)
reordered=self.__get_ordered_list__() #Retrieve the list again due to the sequence changed
cnt=1
if reordered:
for i in reordered:
self.__class__.objects.filter(id=i.id).update(sequence=cnt)
cnt+=1
def expDict(self):
rst=super().expDict()
rst['sequence']=self.sequence
return rst
class PrefManager(models.Manager):
def pref(self, name=None, **kwargs):
'''
Get the preference from database.
@name The filter of preference name or UUID
@kwargs['defval'] The filter of default value
@kwargs['owner'] The filter of preference owner
@kwargs['user'] The alias of "owner"
      @kwargs['config']       Default False; A boolean indicating whether the method should allow a None owner as the result (returns the first occurrence);
      @kwargs['returnValue']  Default True; A boolean indicating whether the method returns the preference's value instead of the preference instance.
      @kwargs['returnQuery']  Default False; A boolean indicating whether the method returns the query instead; [for debug];
@kwargs['parent'] The filter of parent preference of result instance
@kwargs['value'] The filter of preference value; the below operators can be used:
== The equals operator. e.g.: "==Abc" will find all preference's value equals to "Abc"
!= The not equals operator. e.g.: "!=Abc" will find all preference's value not equals to "Abc"
^= The starts-with operator. e.g.: "^=Abc" will find all preference's value that starts with "Abc"
$= The ends-with operator. e.g.: "$=Abc" will find all preference's value that ends with "Abc"
            *=     The contains operator. e.g.: "*=Abc" will find all preference's value that contains "Abc" (case-insensitive)
~= The case insensitive operator. e.g.: "~=ABC" will find all preference's value that equals to "ABC" (case-insensitive)
      @kwargs['lang']        The filter of the language. According to the [MDN Web Doc](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language),
                  it should be the list of accepted languages, e.g.: zh_HK, zh_TW, zh, en, *
'''
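      # A minimal usage sketch (the preference names and the "request" object below are illustrative only):
      #    Preference.objects.pref('theme', user=request.user, defval='light')
      #    Preference.objects.pref('banner.title', lang='zh-HK, zh;q=0.8, en;q=0.5', returnValue=False)
      #    Preference.objects.pref('smtp.host', config=True, defval='localhost')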
defval=kwargs.get('defval', None)
user=kwargs.get('owner', kwargs.get('user', None))
parent=kwargs.get('parent', None)
value=kwargs.get('value', None)
langs=kwargs.get('lang', None)
if langs:
#Preparing the langs list according to "lang" property: parsing "en;q=0.1, zh;q=0.2, zh_HK;q=0.5, zh_TW;q=0.4, zh_CN;q=0.2" into
# [
            #      {"code":"zh_hk","weight":0.5},
            #      {"code":"zh_tw","weight":0.4},
# {"code":"zh","weight":0.2},
# {"code":"zh_cn","weight":0.2},
# {"code":"en","weight":0.1},
# ]
def parse(s): #Parsing "zh_HK;q=0.1" into {code, weight}
if ';' in s:
w=s[s.index(';')+1:].strip()
s=s[:s.index(';')]
w=float(re.findall(r'\d*\.\d+|\d+', w)[0])
return {'code': s.lower(), 'weight': w}
return {'code': s.lower(), 'weight': 0.1}
langs=[parse(s.strip()) for s in langs.split(',')]
langs=sorted(langs, key=lambda ele: ele['weight'], reverse=True)
if not name: name=kwargs.get('name', None)
if isinstance(user, str): user=get_user_model().objects.get(username=user)
if isUUID(name):
rst=self.filter(id=name)
else:
rst=self.filter(name=name)
try:
if user and user.is_authenticated:
if len(rst.filter(owner=user))>0:
rst=rst.filter(owner=user)
else:
rst=rst.filter(owner__isnull=True)
else:
if not getBool(kwargs.get('config', False)):
rst=rst.filter(owner__isnull=True)
if parent: rst=rst.filter(parent=parent)
if value:
value=str(value)
if value.startswith('=='):
rst=rst.filter(value=value[2:].strip())
elif value.startswith('!='):
rst=rst.exclude(value=value[2:].strip())
elif value.startswith('^='):
rst=rst.filter(value__startswith=value[2:].strip())
elif value.startswith('$='):
rst=rst.filter(value__endswith=value[2:].strip())
elif value.startswith('*='):
rst=rst.filter(value__icontains=value[2:].strip())
elif value.startswith('~='):
rst=rst.filter(value__iexact=value[2:].strip())
else:
rst=rst.filter(value=value)
if langs:
rst=rst.order_by('owner')
if not '*' in [l['code'] for l in langs]:
rst=rst.filter(models.Q(lang__in=[l['code'] for l in langs])|models.Q(lang__isnull=True))
else:
rst=rst.order_by('lang', '-owner')
         # parsing the result
if getBool(kwargs.get('returnQuery', 'False')):
return rst.query
if len(rst)>0:
found=False
if langs:
for l in langs:
for p in rst:
if p.lang==l['code']:
rst=p
found=True
break
if found: break
else:
               #If no langs specified, default to the fallback (lang is None). Otherwise, select the first
for p in rst:
if p.lang is None:
rst=p
found=True
break
if not found: rst=rst[0]
else:
rst=Preference(name=name, _value=defval)
if getBool(kwargs.get('returnValue', 'True')):
return rst.value
return rst
except:
logger.exception('Cannot get preferences<%s>'%name)
return defval
def update_or_create(self, *args, **kwargs):
name=kwargs['name']
owner=kwargs.get('owner', None)
if not owner: owner=kwargs.get('user', None)
try:
p=Preference.objects.get(name=name, owner=owner)
except Preference.DoesNotExist:
p=Preference(name=name, owner=owner)
p.parent=kwargs.get('parent', None)
p.sequence=kwargs.get('sequence', 1000)
p.value=kwargs.get('value', None)
p.tipe=kwargs.get('tipe', Preference.TYPE_TEXT)
p.save()
return p
class AbstractPreference(OrderableValueObject):
class Meta(object):
permissions = (
('add_config', 'Can add configuration'),
('change_config', 'Can change configuration'),
('delete_config', 'Can delete configuration'),
('browse_config', 'Can browse system configuration'),
('browse_preference', 'Can browse other preferences'),
('change_preference_type', 'Can change the preference type'),
)
abstract = True
unique_together = [
['parent', 'name', 'lang',],
]
constraints = [
models.UniqueConstraint(fields=('name', 'owner', 'parent'), name='unique_name_owner_n_parent'),
]
def pref_path(self, filename):
ext=os.path.splitext(os.path.basename(str(filename)))[1]
return 'prefs/{0}{1}'.format(self.id, ext)
TYPE_NONE = 0
TYPE_INT = 1
TYPE_DECIMAL = 2
TYPE_BOOLEAN = 3
TYPE_TEXT = 4
TYPE_RICHTEXT = 5
TYPE_URL = 6
TYPE_EMAIL = 7
TYPE_DATE = 8
TYPE_TIME = 9
TYPE_DATETIME = 10
TYPE_UUIDS = 11
TYPE_LIST = 12
TYPE_JSON = 13
TYPE_FILEPATH = 14
TYPES = (
(TYPE_NONE, _('Preference.TYPE.NONE')),
(TYPE_INT, _('Preference.TYPE.INT')),
(TYPE_DECIMAL, _('Preference.TYPE.DECIMAL')),
(TYPE_BOOLEAN, _('Preference.TYPE.BOOLEAN')),
(TYPE_TEXT, _('Preference.TYPE.TEXT')),
(TYPE_RICHTEXT, _('Preference.TYPE.RICHTEXT')),
(TYPE_URL, _('Preference.TYPE.URL')),
(TYPE_EMAIL, _('Preference.TYPE.EMAIL')),
(TYPE_DATE, _('Preference.TYPE.DATE')),
(TYPE_TIME, _('Preference.TYPE.TIME')),
(TYPE_DATETIME, _('Preference.TYPE.DATETIME')),
(TYPE_UUIDS, _('Preference.TYPE.UUIDS')),
(TYPE_LIST, _('Preference.TYPE.LIST')),
(TYPE_JSON, _('Preference.TYPE.JSON')),
(TYPE_FILEPATH, _('Preference.TYPE.FILEPATH')),
)
def get_filecontent_location(self, filename):
filename=os.path.basename(str(filename))
return 'prefs/{0}/{1}'.format(self.id, filename)
name = models.CharField(max_length=100,verbose_name=_('Preference.name'),help_text=_('Preference.name.helptext'))
_value = models.TextField(max_length=4096,null=True,blank=True,verbose_name=_('Preference.value'),help_text=_('Preference.value.helptext'))
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
related_name='preference_owner',
verbose_name=_('Preference.owner'),
help_text=_('Preference.owner.helptext'),
)
parent = models.ForeignKey(
'self',
null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
verbose_name=_('Preference.parent'),
help_text=_('Preference.parent.helptext'),
)
sequence = models.FloatField(
default=sys.maxsize,
verbose_name=_('Preference.sequence'),
help_text=_('Preference.sequence.helptext'),
)
_tipe = models.IntegerField(choices=TYPES, default=TYPE_TEXT, verbose_name=_('Preference.tipe'), help_text=_('Preference.tipe.helptext'))
encrypted = models.BooleanField(default=False, verbose_name=_('Preference.encrypted'), help_text=_('Preference.encrypted.helptxt'))
helptext = models.TextField(max_length=8192, null=True, blank=True, verbose_name=_('Preference.helptext'), help_text=_('Preference.helptext.helptext'))
regex = models.CharField(max_length=1024, default='^.*$', verbose_name=_('Preference.regex'), help_text=_('Preference.regex.helptext'))
lang = models.CharField(max_length=20, null=True, blank=True, verbose_name=_('Preference.lang'), help_text=_('Preference.lang.helptext'))
filecontent = models.FileField(max_length=1024, null=True, blank=True, upload_to=get_filecontent_location, verbose_name=_('Preference.filecontent'), help_text=_('Preference.filecontent.helptext'))
objects = PrefManager()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'tipe' in kwargs: self._tipe=int(kwargs['tipe'])
if 'name' in kwargs: self.name=kwargs['name']
if 'value' in kwargs: self._value=str(kwargs['value'])
if 'owner' in kwargs: self.owner=kwargs['owner']
if 'parent' in kwargs: self.parent=kwargs['parent']
if 'sequence' in kwargs: self.sequence=kwargs['sequence']
if 'encrypted' in kwargs: self.encrypted=kwargs['encrypted'] in TRUE_VALUES
if 'helptext' in kwargs: self.helptext=kwargs['helptext']
if 'regex' in kwargs: self.regex=kwargs['regex']
if 'lang' in kwargs: self.lang=kwargs['lang']
@staticmethod
def get_identifier(name, owner):
return 'Pref::{0}@{1}'.format(name, owner.id if owner and owner.is_authenticated else 'n/a')
def __str__(self):
return AbstractPreference.get_identifier(self.name, self.owner)
def __unicode__(self):
      return self.__str__()
def __get_ordered_list__(self):
if hasattr(self.__class__, 'DISABLE_REORDER') or hasattr(settings, 'DISABLE_REORDER'): return None
if self.parent:
result=self.__class__.objects.filter(parent=self.parent)
else:
result=self.__class__.objects.filter(parent__isnull=True)
if self.owner: result=result.filter(owner=self.owner)
return result.order_by('sequence')
def expDict(self):
rst=super().expDict()
rst['tipe']=self._tipe
rst['encrypted']=self.encrypted
rst['helptext']=self.helptext
rst['regex']=self.regex
return rst
@property
def realValue(self):
return self._value
@realValue.setter
def realValue(self, val):
self._value=val
@property
def value(self):
val=decrypt(self._value) if self.encrypted else self._value
if self.filecontent:
return self.filecontent.url
elif self.tipe==AbstractPreference.TYPE_NONE:
return None
elif not val:
return None
elif self.tipe==AbstractPreference.TYPE_INT or self.tipe==AbstractPreference.TYPE_DECIMAL:
return int(val)
elif self.tipe==AbstractPreference.TYPE_BOOLEAN:
return val in TRUE_VALUES
elif self.tipe==AbstractPreference.TYPE_TEXT or self.tipe==AbstractPreference.TYPE_RICHTEXT or self.tipe==AbstractPreference.TYPE_EMAIL or self.tipe==AbstractPreference.TYPE_URL:
return val
elif self.tipe==AbstractPreference.TYPE_DATE:
return getTime(val, fmt=FMT_DATE)
elif self.tipe==AbstractPreference.TYPE_TIME:
return getTime(val, fmt=FMT_TIME)
elif self.tipe==AbstractPreference.TYPE_DATETIME:
return getTime(val, fmt=FMT_DATETIME)
elif self.tipe==AbstractPreference.TYPE_UUIDS:
v=re.findall(r'[^,;|]+', val)
return [uuid.UUID(uid) for uid in v]
elif self.tipe==AbstractPreference.TYPE_LIST:
return re.findall(r'[^,;|]+', val)
elif self.tipe==AbstractPreference.TYPE_JSON:
return json.loads(val)
elif self.tipe==AbstractPreference.TYPE_FILEPATH:
return val
else:
         raise TypeError('Unknown DataType: {0}'.format(self.tipe))
@value.setter
def value(self, val):
if self.tipe==AbstractPreference.TYPE_NONE:
val=None
elif self.tipe==AbstractPreference.TYPE_INT or self.tipe==AbstractPreference.TYPE_DECIMAL:
val=str(val)
elif self.tipe==AbstractPreference.TYPE_BOOLEAN:
val=str(val)
elif self.tipe==AbstractPreference.TYPE_TEXT or self.tipe==AbstractPreference.TYPE_RICHTEXT or self.tipe==AbstractPreference.TYPE_EMAIL:
val=str(val)
         if not re.match(self.regex, val): raise ValueError('The value [%s] does not match the regex: %s'%(val,self.regex))
elif self.tipe==AbstractPreference.TYPE_DATE:
if hasattr(val, 'strftime'):
if not val.tzinfo: val=pytz.utc.localize(val)
else:
val=getTime(val, FMT_DATE)
val=val.strftime(FMT_DATE)
elif self.tipe==AbstractPreference.TYPE_TIME:
if hasattr(val, 'strftime'):
if not val.tzinfo: val=pytz.utc.localize(val)
else:
val=getTime(val, FMT_TIME)
val=val.strftime(FMT_TIME)
elif self.tipe==AbstractPreference.TYPE_DATETIME:
if hasattr(val, 'strftime'):
if not val.tzinfo: val=pytz.utc.localize(val)
else:
val=getTime(val, FMT_DATETIME)
val=val.strftime(FMT_DATETIME)
elif self.tipe==AbstractPreference.TYPE_UUIDS:
if hasattr(val, '__iter__'):
val='|'.join([s for s in val if isUUID(s)])
else:
raise ValueError('Expected a list of UUIDs')
elif self.tipe==AbstractPreference.TYPE_LIST:
val='|'.join(val) if hasattr(val, '__iter__') else val
elif self.tipe==AbstractPreference.TYPE_JSON:
val=json.dumps(val)
      elif self.tipe==AbstractPreference.TYPE_FILEPATH:
         path=val #Treat the incoming value as a filesystem path to copy into MEDIA_ROOT
         if path:
if not os.path.isfile(path) and not os.path.isdir(path):
raise FileNotFoundError(path)
src=path
trg=os.path.join(settings.MEDIA_ROOT, self.pref_path(os.path.basename(path)))
if not os.path.isdir(os.path.dirname(trg)):
logger.info('Creating the prefs folder: {0}...'.format(trg))
Path(os.path.dirname(trg)).mkdir(parents=True, exist_ok=True)
logger.warning('{0} the file: {1} => {2} ...'.format('Replace' if os.path.isfile(trg) else 'Clone', src, trg))
copyfile(src, trg)
path=trg
val=path
elif self.tipe==AbstractPreference.TYPE_URL:
from urllib.parse import urlparse
val=urlparse(val).geturl()
if not val:
self._value=None
else:
self._value=encrypt(val) if self.encrypted and val else val
@property
def asDict(self):
rst=dict()
for c in self.childs:
rst[c.name]=c.realValue
return rst
@property
def childs(self):
return Preference.objects.filter(parent=self).order_by('sequence', 'name')
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def intValue(self):
return self.value
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@intValue.setter
def intValue(self, val):
self.value=val
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def boolValue(self):
return getBool(self.value)
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@boolValue.setter
def boolValue(self, val):
self.value=val
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def datetimeValue(self):
return self.value
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@datetimeValue.setter
def datetimeValue(self, val):
self.value=val
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def jsonValue(self):
return self.value
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@jsonValue.setter
def jsonValue(self, val):
self.value=val
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def listValue(self):
return self.value
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@listValue.setter
def listValue(self, val):
self.value=val
@property
def user(self):
return self.owner
@user.setter
def user(self, val):
self.owner=val
@property
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
def pathValue(self):
return self.value
@deprecated(deprecated_in="v2.8", removed_in="v3.0", current_version="v2.8", details="Use value directly")
@pathValue.setter
def pathValue(self, path):
self.value=path
@property
def tipe(self):
return self._tipe
@tipe.setter
def tipe( self, tipe ):
'''
Set the tipe into this preference.
@param tipe can be integer value (refer to AbstractPreference.TYPES) or string value;
'''
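      # e.g. (illustrative): setting tipe to 'json' or 'JSON' resolves to AbstractPreference.TYPE_JSON;
      # unrecognised names and None fall back to TYPE_TEXT.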
if isinstance(tipe, str):
try:
tipe=getattr(AbstractPreference, 'TYPE_{0}'.format(tipe.upper().strip()))
except:
tipe=AbstractPreference.TYPE_TEXT
elif tipe is None:
tipe=AbstractPreference.TYPE_TEXT
if tipe==AbstractPreference.TYPE_EMAIL:
         self.regex=r'^[a-zA-Z0-9\._]+@[a-zA-Z0-9\._]{2,}$'
self._tipe=int(tipe)
@property
def tipeName(self):
return AbstractPreference.TYPES[self.tipe][1]
def save(self, *args, **kwargs):
if self._value:
         # If encryption is turned on but the stored value is not yet encrypted (e.g. the preference was read from the database and the "encrypted" flag was changed afterwards)
if self.encrypted and not self._value.startswith(ENCRYPTED_PREFIX):
            #Just encrypt it
self._value=encrypt(self._value)
         if str(self._value).startswith(ENCRYPTED_PREFIX) and not self.encrypted: #Reversed case: encryption turned off but the value is still encrypted
self._value=decrypt(self._value)
if self.lang: self.lang=self.lang.lower()
if self.filecontent:
self._tipe=AbstractPreference.TYPE_FILEPATH
super().save(*args, **kwargs)
class Preference(AbstractPreference):
@property
def reserved(self):
return self.helptext != None
   @classmethod
   def pref(cls, name, **kwargs):
      return cls.objects.pref(name, **kwargs)
@receiver(post_delete, sender=Preference)
def cleanFilepath(sender, **kwargs):
instance=kwargs['instance']
if instance.tipe==AbstractPreference.TYPE_FILEPATH:
      logger.debug('Caught delete signal on a Preference of type FILEPATH.')
if instance._value and os.path.isfile(instance._value):
logger.debug('Going to delete: {0}'.format(instance._value))
os.unlink(instance._value)
@deprecated(deprecated_in="2020-10-01", details="Use Celery-Result instead")
class AsyncManipulationObject(models.Model):
class Meta(object):
verbose_name = _('AsyncManipulationObject')
verbose_name_plural = _('AsyncManipulationObjects')
abstract = True
task_id = models.CharField(max_length=100, null=True, blank=True, verbose_name=_('AsyncManipulationObject.task_id'))
@property
def is_processing(self):
_('AsyncManipulationObject.is_processing')
return self.task_id is not None
@property
def is_ready(self):
_('AsyncManipulationObject.is_ready')
return self.task_id is None
class Numbering(ValueObject, AliveObject):
class Meta(object):
verbose_name = _('Numbering')
verbose_name_plural = _('Numberings')
permissions = [
('exec_numbering', 'Can execute the number'),
]
name = models.CharField(max_length=100, verbose_name=_('Numbering.name'), help_text=_('Numbering.name.helptxt'), unique=True)
pattern = models.CharField(max_length=100, default='{next}', verbose_name=_('Numbering.pattern'), help_text=_('Numbering.pattern.helptxt'))
next_val = models.IntegerField(default=0, verbose_name=_('Numbering.next_val'), help_text=_('Numbering.next_val.helptxt'))
step_val = models.IntegerField(default=1, verbose_name=_('Numbering.step_val'), help_text=_('Numbering.step_val.helptxt'))
desc = models.CharField(max_length=1024, null=True, blank=True, verbose_name=_('Numbering.desc'), help_text=_('Numbering.desc.helptext'))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'name' in kwargs: self.name=kwargs['name']
if 'pattern' in kwargs: self.pattern=kwargs['pattern']
if 'next_val' in kwargs: self.next_val=kwargs['next_val']
if 'step_val' in kwargs: self.step_val=kwargs['step_val']
def __str__(self):
if self.lmd is None:
return _('Numbering.new')
return _('Numbering[{name}::{next_val}]').format(name=self.name, next_val=self.next_val)
def expDict(self):
rst=super().expDict()
rst['name']=self.name
rst['pattern']=self.pattern
rst['next_val']=self.next_val
rst['step_val']=self.step_val
return rst
@transaction.atomic
def getNextVal(self, **kwargs):
'''
Get the next value and step forward of this numbering.
Usage: numbering.getNextVal(user='username', now=tz.now(), name='object-name')
Result:
if pattern=='INV-{now:%Y%m%d}-{user}-{next:05d}', then return 'INV-20201030-username-00001'
** You HAVE TO check permission by yourself **
'''
params={**{'now': tz.now(), 'name': self.name}, **kwargs}
params['next']=self.next_val #The "next" cannot be defined in kwargs
val=self.pattern.format(**params)
self.next_val+=self.step_val
self.save()
return val
@deprecated(deprecated_in="v2.10", removed_in="v3.0", current_version="v2.10", details="This cannot support the variables; Use Numbering.get_next(name, **kwargs) instead.")
@property
def next(self):
'''
<p><strong>Deprecated</strong> since v2.10. Use Numbering.get_next(name, **kwargs) instead.</p>
The quick way to get the next value of this numbering. It will auto inject the "now" variable.
If you want required more options, use @getNextVal(**kwargs) instead.
'''
return self.getNextVal(user=get_current_user())
@staticmethod
def get_next(name, **kwargs):
num=None
try:
num=Numbering.objects.get(id=name)
except Numbering.DoesNotExist:
         num=Numbering.objects.get(name=name)
if not 'user' in kwargs:
kwargs['user']=get_current_user()
return num.getNextVal(**kwargs)
class Profile(ValueObject, AliveObject):
class Meta(object):
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE, verbose_name=_('Profile'), help_text=_('Profile.helptext'))
@property
def preferences(self):
return Preference.objects.filter(owner=self.user, parent__isnull=True).order_by('sequence', 'name')
@receiver(post_save, sender=get_user_model())
def postsave_user(sender, **kwargs):
if kwargs['created']:
p=Profile(user=kwargs['instance'])
p.effDate=getTime('now')
p.save()
class EnhancedDjangoJSONEncoder(DjangoJSONEncoder):
def default(self, obj):
logger.debug('Encoding object with type: {0}'.format(type(obj)))
if isinstance(obj, uuid.UUID):
return str(obj)
return super().default(obj)
class MenuItem(OrderableValueObject, AliveObject):
class Meta(object):
verbose_name = _('MenuItem')
verbose_name_plural = _('MenuItems')
unique_together = [
['parent', 'user', 'name']
]
def __getImageLocation__(self, filename):
filename=os.path.basename(filename)
return 'menuitems/{0}'.format(filename)
name = models.CharField(max_length=256, default='/', verbose_name=_('MenuItem.name'), help_text=_('MenuItem.name.helptext'))
user = models.ForeignKey(get_user_model(), blank=True, null=True, verbose_name=_('MenuItem.user'), help_text=_('MenuItem.user.helptext'), on_delete=models.CASCADE)
parent = models.ForeignKey('self', blank=True, null=True, verbose_name=_('MenuItem.parent'), help_text=_('MenuItem.parent.helptext'), on_delete=models.CASCADE)
   icon = models.CharField(blank=True, null=True, max_length=128, verbose_name=_('MenuItem.icon'), help_text=_('MenuItem.icon.help')) #The icon, based on FontAwesome
label = models.CharField(blank=True, null=True, max_length=1024, verbose_name=_('MenuItem.label'), help_text=_('MenuItem.label.helptext'))
image = models.ImageField(blank=True, null=True, upload_to=__getImageLocation__,verbose_name=_('MenuItem.image'), help_text=_('MenuItem.image.helptext'))
props = models.JSONField(blank=True, null=True, default=dict({'title':None,'target':None,'class':None,'style':None}), verbose_name=_('MenuItem.props'), help_text=_('MenuItem.props.help')) #HTML properties
onclick = models.TextField(max_length=2048,
default='window.location.href=this.data.props.href?this.data.props.href:"#";',
verbose_name=_('MenuItem.onclick'),
help_text=_('MenuItem.onclick.helptext')
)
mousein = models.TextField(max_length=1024,
blank=True, null=True,
verbose_name=_('MenuItem.mousein'),
help_text=_('MenuItem.mousein.helptext')
)
mouseout = models.TextField(max_length=1024,
blank=True, null=True,
verbose_name=_('MenuItem.mouseout'),
help_text=_('MenuItem.mouseout.helptext')
)
def __str__(self):
return '{0}:{1}@{2}'.format(
_('MenuItem'),
self.name,
self.user if self.user else 'Anonymous',
)
@property
def childs(self):
      if hasattr(self, '_childs'): #Sometimes, when no menu items are stored, the caller sets up an in-memory default menu through this attribute
return getattr(self, '_childs')
return MenuItem.objects.filter(parent=self).order_by('sequence', 'name')
@childs.setter
def childs(self, val):
setattr(self, '_childs', val)
@staticmethod
def filter(qs, user, **kwargs):
rst=list()
for item in qs:
approved=True
#Checking specified permissions
if 'permissions' in item.props and len(item.props['permissions'])>0:
approved=approved and user.has_perm(item.props['permissions'])
#Checking if superuser required
if item.props.get('is_superuser', False):
approved=approved and user.is_superuser
#Checking if staff required
if item.props.get('is_staff', False):
approved=approved and user.is_staff
#Checking if anonymous required
if item.props.get('is_anonymous', False):
approved=approved and not user.is_authenticated
#Checking if authenticated required
if item.props.get('is_authenticated', False):
approved=approved and user.is_authenticated
#Checking for custom authentication script
         if item.props.get('authentication', None):
try:
params={'user':user, 'this':item, 'date':Date()}
exec(item.props['authentication'], params)
approved=approved and params.get('result', False)
except:
approved=False
if approved: rst.append(item)
item.childs=MenuItem.filter(item.childs, user)
return rst
def __get_ordered_list__(self):
'''
Get the ordered list. Returns None to disable the re-ordering feature when saving
'''
return MenuItem.objects.filter(parent=self.parent).order_by('sequence', 'name')
def clone4(self, user, **kwargs):
'''
Clone the current menuitem for specified user.
Usage:
user=User.objects.get(id=1) #The target user
mi=MenuItem.objects.filter(parent__isnull=True).order_by('-user')[0] #The target root menu-item
target=mi.clone4(user) #The target has been saved into database
print('The new id of menuitem is {0}'.format(target.id))
'''
target=MenuItem(name=self.name, user=user, parent=kwargs.get('parent', None), icon=self.icon, label=self.label, image=self.image, props=self.props, onclick=self.onclick, mousein=self.mousein, mouseout=self.mouseout)
target.save()
childs=[]
for c in self.childs:
kw=dict(kwargs)
kw['parent']=target
childs.append(c.clone4(user, **kw))
      target.childs=childs
return target
#2021-09-15 08:30+0100
# Kenson Man <kenson.idv.hk@gmail.com>
@property
def translated_label(self):
req=get_current_request()
try:
trans=Translation.objects.filter(key=self.label).filter(models.Q(locale=None)|models.Q(locale=(get_language()))).order_by('locale')
if len(trans)<1: raise Translation.DoesNotExist
trans=trans[0]
return trans.gettext(1, **self.props)
except Translation.DoesNotExist:
return self.label
class Translation(ValueObject):
class Meta(object):
verbose_name = _('Translation')
verbose_name_plural = _('Translations')
unique_together = [
('key', 'locale')
]
LOCALES = (
('en', _('english'))
,('zh-hant', _('zh-hant'))
,('zh-hans', _('zh-hans'))
)
key = models.CharField(max_length=2048, verbose_name=_('Translation.key'), help_text=_('Translation.key.helptext'))
locale = models.CharField(max_length=100, choices=LOCALES, default='en', verbose_name=_('Translation.locale'), help_text=_('Translation.locale.helptext'))
msg = models.TextField(max_length=4096, blank=True, null=True, verbose_name=_('Translation.msg'), help_text=_('Translation.msg.helptext'))
pmsg = models.TextField(max_length=4096, blank=True, null=True, verbose_name=_('Translation.pmsg'), help_text=_('Translation.pmsg.helptext'))
def gettext(self, cnt=1, **kwargs):
msg=self.msg if self.msg else self.key
pmsg=self.pmsg if self.pmsg else msg
return ngettext(msg, pmsg, cnt).format(**kwargs)
| kensonman/webframe | models.py | Python | apache-2.0 | 46,692 | 0.029256 |
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext_lazy
from django_countries import countries
from django_countries.fields import CountryField
COUNTRIES = [(code, ugettext_lazy(name)) for (code, name) in countries]
class Company(models.Model):
# Company credentials
company_name = models.CharField(_('Company name'), max_length=1024, null=True, blank=True)
company_nip = models.CharField(_('NIP'), max_length=10, null=True, blank=True)
company_regon = models.CharField(_('REGON'), max_length=9, null=True, blank=True)
# Common address credentials
address_city = models.CharField(_('City'), max_length=1024, null=True, blank=True)
address_street = models.CharField(_('Street'), max_length=1024, null=True, blank=True)
address_postcode = models.CharField(_('Postal code'), max_length=10, null=True, blank=True)
address_country = CountryField(max_length=512, null=True, blank=True)
@property
def company(self):
return '{company} (NIP: {nip}, REGON: {regon})'.format(
company=self.company_name,
nip=self.company_nip,
regon=self.company_regon
)
@property
def address_country_verbose(self):
return countries.countries[self.address_country]
def __str__(self):
return self.company
class Meta:
verbose_name = _('Company')
verbose_name_plural = _('Companies')
class CompanyBankAccount(models.Model):
slug = models.CharField(
_('Short name'),
max_length=16, unique=True,
)
company = models.ForeignKey(Company)
bank_account_number = models.CharField(
_('Bank account number'),
max_length=512, null=True, blank=True,
)
iban = models.CharField(
_('IBAN'),
max_length=512, null=True, blank=True,
)
swift = models.CharField(
_('SWIFT Code'),
max_length=512, null=True, blank=True,
)
sorting_number = models.CharField(
_('Sorting number'),
max_length=512, null=True, blank=True,
)
bank_name = models.CharField(
_('Bank name'),
max_length=1024, null=True, blank=True,
)
bank_branch = models.CharField(
_('Bank branch'),
max_length=1024, null=True, blank=True,
)
class Meta:
verbose_name = _('Bank Account')
verbose_name_plural = _('Bank Accounts')
def __str__(self):
return self.slug
class Contact(models.Model):
TYPE_PERSONAL = 1
TYPE_COMPANY = 2
TYPE_GOV = 3
TYPES = {
TYPE_PERSONAL: _('Private person'),
TYPE_COMPANY: _('Company'),
TYPE_GOV: _('Government organization'),
}
contact_type = models.PositiveIntegerField(_('Type'), choices=list(TYPES.items()))
# Private person credentials
person_first_name = models.CharField(_('First name'), max_length=256, null=True, blank=True)
person_last_name = models.CharField(_('Last name'), max_length=256, null=True, blank=True)
# Company credentials
company_name = models.CharField(_('Company name'), max_length=1024, null=True, blank=True)
company_nip = models.CharField(_('NIP'), max_length=13, null=True, blank=True)
company_regon = models.CharField(_('REGON'), max_length=9, null=True, blank=True)
# Common address credentials
address_city = models.CharField(_('City'), max_length=1024, null=True, blank=True)
address_street = models.CharField(_('Street'), max_length=1024, null=True, blank=True)
address_postcode = models.CharField(_('Postal code'), max_length=10, null=True, blank=True)
address_country = models.CharField(_('Country'), max_length=512, null=True, blank=True, choices=COUNTRIES)
# Receiver (gov organization)
receiver_name = models.CharField(_('Receiver name'), max_length=1024, null=True, blank=True)
receiver_city = models.CharField(_('Receiver City'), max_length=1024, null=True, blank=True)
receiver_street = models.CharField(_('Receiver Street'), max_length=1024, null=True, blank=True)
receiver_postcode = models.CharField(_('Receiver Postal code'), max_length=10, null=True, blank=True)
receiver_country = models.CharField(_('Receiver Country'), max_length=512, null=True, blank=True, choices=COUNTRIES)
@property
def address(self):
return '{street}, {postcode} {city}, {country}'.format(
street=self.address_street,
postcode=self.address_postcode,
city=self.address_city,
country=self.address_country)
@property
def name(self):
return '{first_name} {last_name}'.format(
first_name=self.person_first_name,
last_name=self.person_last_name)
@property
def company(self):
return '{company} (NIP: {nip}, REGON: {regon})'.format(
company=self.company_name,
nip=self.company_nip,
regon=self.company_regon
)
@property
def address_country_verbose(self):
return countries.countries.get(self.address_country, '')
@property
def receiver_country_verbose(self):
return countries.countries.get(self.receiver_country, '')
@property
def is_company(self):
return self.contact_type == self.TYPE_COMPANY
@property
def is_gov(self):
return self.contact_type == self.TYPE_GOV
def __str__(self):
if self.contact_type == self.TYPE_COMPANY:
credentials = self.company
elif self.contact_type == self.TYPE_GOV:
credentials = '{company} ({receiver_name})'.format(
company=self.company,
receiver_name=self.receiver_name
)
else:
credentials = self.name
return '{type}: {credentials}'.format(
type=self.TYPES.get(self.contact_type),
credentials=credentials
)
class Meta:
verbose_name = _('Contact')
verbose_name_plural = _('Contacts')
| samupl/simpleERP | apps/contacts/models.py | Python | mit | 6,028 | 0.003484 |
# -*- coding: utf-8 -*-
# Description: NSD `nsd-control stats_noreset` netdata python.d module
# Author: <383c57 at gmail.com>
from base import ExecutableService
import re
# default module values (can be overridden per job in `config`)
priority = 60000
retries = 5
update_every = 30
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
CHARTS = {
'queries': {
'options': [
None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
'lines': [
['num_queries', 'queries', 'incremental'],]},
'zones': {
'options': [
None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
'lines': [
['zone_master', 'master', 'absolute'],
['zone_slave', 'slave', 'absolute'],]},
'protocol': {
'options': [
None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
'lines': [
['num_udp', 'udp', 'incremental'],
['num_udp6', 'udp6', 'incremental'],
['num_tcp', 'tcp', 'incremental'],
['num_tcp6', 'tcp6', 'incremental'],]},
'type': {
'options': [
None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
'lines': [
['num_type_A', 'A', 'incremental'],
['num_type_NS', 'NS', 'incremental'],
['num_type_CNAME', 'CNAME', 'incremental'],
['num_type_SOA', 'SOA', 'incremental'],
['num_type_PTR', 'PTR', 'incremental'],
['num_type_HINFO', 'HINFO', 'incremental'],
['num_type_MX', 'MX', 'incremental'],
['num_type_NAPTR', 'NAPTR', 'incremental'],
['num_type_TXT', 'TXT', 'incremental'],
['num_type_AAAA', 'AAAA', 'incremental'],
['num_type_SRV', 'SRV', 'incremental'],
['num_type_TYPE255', 'ANY', 'incremental'],]},
'transfer': {
'options': [
None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
'lines': [
['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
['num_type_TYPE252', 'AXFR', 'incremental'],]},
'rcode': {
'options': [
None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
'lines': [
['num_rcode_NOERROR', 'NOERROR', 'incremental'],
['num_rcode_FORMERR', 'FORMERR', 'incremental'],
['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
['num_rcode_REFUSED', 'REFUSED', 'incremental'],
['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
self.command = "nsd-control stats_noreset"
self.order = ORDER
self.definitions = CHARTS
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
def _get_data(self):
lines = self._get_raw_data()
if not lines:
return None
r = self.regex
stats = dict((k.replace('.', '_'), int(v))
for k, v in r.findall(''.join(lines)))
stats.setdefault('num_opcode_NOTIFY', 0)
stats.setdefault('num_type_TYPE252', 0)
stats.setdefault('num_type_TYPE255', 0)
return stats
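# A rough sketch of the input this module expects (values are illustrative only):
# `nsd-control stats_noreset` prints lines such as "num.queries=123" and "num.type.A=45";
# the regex above turns them into {'num_queries': 123, 'num_type_A': 45}.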
| seamless-distribution-systems/galilei | galieli-netdata-installer/netdata/python.d/nsd.chart.py | Python | gpl-3.0 | 3,581 | 0.002793 |
"""
This file contains the DynamicsValidator class for validating component
:copyright: Copyright 2010-2017 by the NineML Python team, see AUTHORS.
:license: BSD-3, see LICENSE for details.
"""
from builtins import object
from nineml.visitors.validators import NoDuplicatedObjectsValidator
from .general import (
TimeDerivativesAreDeclaredDynamicsValidator,
StateAssignmentsAreOnStateVariablesDynamicsValidator,
AliasesAreNotRecursiveDynamicsValidator,
NoUnresolvedSymbolsDynamicsValidator,
RegimeGraphDynamicsValidator,
RegimeOnlyHasOneHandlerPerEventDynamicsValidator,
CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator,
DimensionalityDynamicsValidator)
from .names import (
LocalNameConflictsDynamicsValidator,
DimensionNameConflictsDynamicsValidator,
DuplicateRegimeNamesDynamicsValidator,
RegimeAliasMatchesBaseScopeValidator)
from .ports import (
EventPortsDynamicsValidator, OutputAnalogPortsDynamicsValidator)
from .types import (
TypesDynamicsValidator)
class DynamicsValidator(object):
"""Class for grouping all the component-validations tests together"""
@classmethod
def validate_componentclass(cls, component_class,
validate_dimensions=True, **kwargs):
"""
        Tests a component class against a variety of validators to verify its
        internal structure
"""
# Check class structure:
TypesDynamicsValidator(component_class, **kwargs)
NoDuplicatedObjectsValidator(component_class, **kwargs)
DuplicateRegimeNamesDynamicsValidator(component_class, **kwargs)
LocalNameConflictsDynamicsValidator(component_class, **kwargs)
DimensionNameConflictsDynamicsValidator(component_class, **kwargs)
RegimeAliasMatchesBaseScopeValidator(component_class, **kwargs)
EventPortsDynamicsValidator(component_class, **kwargs)
OutputAnalogPortsDynamicsValidator(component_class, **kwargs)
TimeDerivativesAreDeclaredDynamicsValidator(component_class, **kwargs)
StateAssignmentsAreOnStateVariablesDynamicsValidator(component_class,
**kwargs)
AliasesAreNotRecursiveDynamicsValidator(component_class, **kwargs)
NoUnresolvedSymbolsDynamicsValidator(component_class, **kwargs)
RegimeGraphDynamicsValidator(component_class, **kwargs)
RegimeOnlyHasOneHandlerPerEventDynamicsValidator(component_class,
**kwargs)
CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator(component_class,
**kwargs)
if validate_dimensions:
DimensionalityDynamicsValidator(component_class, **kwargs)
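# A minimal usage sketch (assuming "component_class" is a nineml Dynamics instance; illustrative only):
#   DynamicsValidator.validate_componentclass(component_class, validate_dimensions=True)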
| INCF/lib9ML | nineml/abstraction/dynamics/visitors/validators/base.py | Python | bsd-3-clause | 2,819 | 0 |
import socket
# List of the top 25 sites according to Alexa
websites = [ "Google.com",
"Facebook.com",
"Youtube.com",
"Baidu.com",
"Yahoo.com",
"Amazon.com",
"Wikipedia.org",
"Qq.com",
"Twitter.com",
"Google.co.in",
"Taobao.com",
"Live.com",
"Sina.com.cn",
"Linkedin.com",
"Yahoo.co.jp",
"Weibo.com",
"Ebay.com",
"Google.co.jp",
"Yandex.ru",
"Blogspot.com",
"Vk.com",
"Hao123.com",
"T.co",
"Bing.com",
"Google.de"]
ip_addresses = []
# Open a bunch of TCP connections on port 80 and close them. Wait at most 1 sec
# before timing out. Timing out raises a socket.timeout exception. Catch it and
# proceed.
for site in websites:
try:
sock = socket.create_connection((site, 80),1)
sock.close()
except socket.timeout:
pass
| mmirabent/sniffle | generate_connections.py | Python | apache-2.0 | 1,066 | 0.002814 |
from urlparse import urljoin
from django.conf import settings
def trailing_slash_or_none():
"""
Return a slash or empty string based on tastypie setting
"""
if getattr(settings, 'TASTYPIE_ALLOW_MISSING_SLASH', False):
return ''
return '/'
def urljoin_forced(base, path, **kwargs):
"""
    Join base with path, appending a trailing '/' to base if it doesn't already have one
"""
base = base.endswith('/') and base or '%s/' % base
return urljoin(base, path, **kwargs)
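# Illustrative example (host and path are made up):
#   urljoin_forced('http://example.com/api/v1', 'doc/') -> 'http://example.com/api/v1/doc/'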
| benthomasson/django-tastypie-swagger | tastypie_swagger/utils.py | Python | bsd-2-clause | 499 | 0 |
from collections import defaultdict
# Return the similarities from an index object as a dictionary
def index2dict(index, file_list, num_sims=5):
file_list = [i.replace('.txt','') for i in file_list]
sims = {} #defaultdict(dict)
for i, idx in enumerate(index):
s = []
for j in range(len(file_list)):
s.append({
'name':file_list[j],
'similarity':float(idx[j])
			}) # idx[j] is a numpy.float32, which is not JSON-serializable, so convert it to a plain float
s = sorted(s, key = lambda item: item['similarity'], reverse=True)[:num_sims]
sims[file_list[i]] = {
i:s[i]
for i in range(len(s))
}
return sims
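# A minimal usage sketch (assuming "index" is a gensim similarity index whose rows line up
# with the file list; names are illustrative only):
#   sims = index2dict(index, ['doc_a.txt', 'doc_b.txt'], num_sims=1)
#   # -> {'doc_a': {0: {'name': ..., 'similarity': ...}}, 'doc_b': {...}}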
| felipegerard/arte_mexicano_antiguo | felipegerard/entregable/itm/itm/similarity_functions.py | Python | agpl-3.0 | 629 | 0.041335 |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def RunShellCommand(device, cmd):
output = device.RunShellCommand(cmd)
if output:
raise Exception(
'Unexpected output running command: ' + cmd + '\n' +
'\n'.join(output))
def CreateSymlinkScript(options):
libraries = build_utils.ReadJson(options.libraries_json)
link_cmd = (
'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
'$APK_LIBRARIES_DIR/%(lib_basename)s \n'
)
script = '#!/bin/sh \n'
for lib in libraries:
script += link_cmd % { 'lib_basename': lib }
with open(options.script_host_path, 'w') as scriptfile:
scriptfile.write(script)
def TriggerSymlinkScript(options):
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
apk_package = apk_helper.GetPackageName(options.apk)
apk_libraries_dir = '/data/data/%s/lib' % apk_package
device_dir = os.path.dirname(options.script_device_path)
mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
{ 'dir': device_dir })
RunShellCommand(device, mkdir_cmd)
device.PushChangedFiles(options.script_host_path, options.script_device_path)
trigger_cmd = (
'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
'. %(script_device_path)s'
) % {
'apk_libraries_dir': apk_libraries_dir,
'target_dir': options.target_dir,
'script_device_path': options.script_device_path
}
RunShellCommand(device, trigger_cmd)
def main():
parser = optparse.OptionParser()
parser.add_option('--apk', help='Path to the apk.')
parser.add_option('--script-host-path',
help='Path on the host for the symlink script.')
parser.add_option('--script-device-path',
help='Path on the device to push the created symlink script.')
parser.add_option('--libraries-json',
help='Path to the json list of native libraries.')
parser.add_option('--target-dir',
help='Device directory that contains the target libraries for symlinks.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
options, _ = parser.parse_args()
required_options = ['apk', 'libraries_json', 'script_host_path',
'script_device_path', 'target_dir', 'configuration_name']
build_utils.CheckOptions(options, parser, required=required_options)
constants.SetBuildType(options.configuration_name)
CreateSymlinkScript(options)
TriggerSymlinkScript(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| sencha/chromium-spacewalk | build/android/gyp/create_device_library_links.py | Python | bsd-3-clause | 3,509 | 0.014249 |
from django.db import connections
from django.db.models.manager import Manager as DJManager
from django.db.utils import DatabaseError
from bson.objectid import ObjectId
import re
from .utils import dict_keys_to_str
try:
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
except ImportError:
class ObjectDoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
DoesNotExist = ObjectDoesNotExist
__all__ = ['queryset_manager', 'Q', 'InvalidQueryError',
'InvalidCollectionError']
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
class InvalidCollectionError(Exception):
pass
RE_TYPE = type(re.compile(''))
class InternalMetadata:
def __init__(self, meta):
self.object_name = meta["object_name"]
class InternalModel:
"""
An internal queryset model to be embedded in a query set for django compatibility.
"""
def __init__(self, document):
self.document = document
self._meta = InternalMetadata(document._meta)
self.DoesNotExist = ObjectDoesNotExist
class QuerySet(object):
"""
    A set of results returned from a query. Wraps an ES cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._accessed_collection = False
self._query = {}
self._where_clause = None
self._loaded_fields = []
self._ordering = []
self.transform = None
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
# if document._meta.get('allow_inheritance'):
# self._query = {'_types': self._document._class_name}
self._cursor_obj = None
self._limit = None
self._skip = None
# required for compatibility with django
# self.model = InternalModel(document)
def __call__(self, q_obj=None, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used
:param query: Django-style query keyword arguments
"""
if q_obj:
self._where_clause = q_obj.as_js(self._document)
query = QuerySet._transform_query(_doc_cls=self._document, **query)
self._query.update(query)
return self
def filter(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__(*q_objs, **query)
def find(self, query):
self._query.update(self.transform.transform_incoming(query, self._collection))
return self
def exclude(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
query["not"] = True
return self.__call__(*q_objs, **query)
def all(self):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__()
def distinct(self, *args, **kwargs):
"""
Distinct method
"""
return self._cursor.distinct(*args, **kwargs)
@property
def _collection(self):
"""Property that returns the collection object. This allows us to
perform operations only if the collection is accessed.
"""
return self._collection_obj
def values(self, *args):
return (args and [dict(zip(args, [getattr(doc, key) for key in args])) for doc in self]) or [obj for obj in
self._cursor.clone()]
def values_list(self, *args, **kwargs):
flat = kwargs.pop("flat", False)
if flat and len(args) != 1:
raise Exception("args len must be 1 when flat=True")
return (flat and self.distinct(args[0] if not args[0] in ["id", "pk"] else "_id")) or zip(
*[self.distinct(field if field not in ["id", "pk"] else "_id") for field in args])
@property
def _cursor(self):
if self._cursor_obj is None:
cursor_args = {}
if self._loaded_fields:
cursor_args = {'fields': self._loaded_fields}
self._cursor_obj = self._collection.find(self._query,
**cursor_args)
# Apply where clauses to cursor
if self._where_clause:
self._cursor_obj.where(self._where_clause)
# apply default ordering
# if self._document._meta['ordering']:
# self.order_by(*self._document._meta['ordering'])
return self._cursor_obj.clone()
@classmethod
def _lookup_field(cls, document, fields):
"""
Looks for "field" in "document"
"""
if isinstance(fields, (tuple, list)):
return [document._meta.get_field_by_name((field == "pk" and "id") or field)[0] for field in fields]
return document._meta.get_field_by_name((fields == "pk" and "id") or fields)[0]
@classmethod
def _translate_field_name(cls, doc_cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.attname for f in QuerySet._lookup_field(doc_cls, parts)]
return '.'.join(parts)
@classmethod
def _transform_query(self, _doc_cls=None, **parameters):
"""
Converts parameters to elasticsearch queries.
"""
spec = {}
operators = ['ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists']
match_operators = ['contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact',
'iexact']
exclude = parameters.pop("not", False)
for key, value in parameters.items():
parts = key.split("__")
lookup_type = (len(parts) >= 2) and ( parts[-1] in operators + match_operators and parts.pop()) or ""
# Let's get the right field and be sure that it exists
parts[0] = QuerySet._lookup_field(_doc_cls, parts[0]).attname
if not lookup_type and len(parts) == 1:
if exclude:
value = {"$ne": value}
spec.update({parts[0]: value})
continue
if parts[0] == "id":
parts[0] = "_id"
                value = [ObjectId(par) if isinstance(par, basestring) else par for par in value]
if lookup_type in ['contains', 'icontains',
'startswith', 'istartswith',
'endswith', 'iendswith',
'exact', 'iexact']:
flags = 0
if lookup_type.startswith('i'):
flags = re.IGNORECASE
lookup_type = lookup_type.lstrip('i')
regex = r'%s'
if lookup_type == 'startswith':
regex = r'^%s'
elif lookup_type == 'endswith':
regex = r'%s$'
elif lookup_type == 'exact':
regex = r'^%s$'
value = re.compile(regex % value, flags)
elif lookup_type in operators:
value = {"$" + lookup_type: value}
elif lookup_type and len(parts) == 1:
raise DatabaseError("Unsupported lookup type: %r" % lookup_type)
key = '.'.join(parts)
if exclude:
value = {"$ne": value}
spec.update({key: value})
return spec
def get(self, *q_objs, **query):
"""Retrieve the the matching object raising id django is available
:class:`~django.core.exceptions.MultipleObjectsReturned` or
:class:`~django.core.exceptions.ObjectDoesNotExist` exceptions if multiple or
no results are found.
If django is not available:
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` exception if multiple results and
:class:`~mongoengine.queryset.DoesNotExist` or `DocumentName.DoesNotExist`
if no results are found.
.. versionadded:: 0.3
"""
self.__call__(*q_objs, **query)
count = self.count()
if count == 1:
return self[0]
elif count > 1:
message = u'%d items returned, instead of 1' % count
raise self._document.MultipleObjectsReturned(message)
else:
raise self._document.DoesNotExist("%s matching query does not exist."
% self._document._meta.object_name)
def get_or_create(self, *q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist. Returns a tuple of
``(object, created)``, where ``object`` is the retrieved or created object
and ``created`` is a boolean specifying whether a new object was created. Raises
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` if multiple results are found.
A new document will be created if the document doesn't exists; a
dictionary of default values for the new document may be provided as a
keyword argument called :attr:`defaults`.
.. versionadded:: 0.3
"""
defaults = query.get('defaults', {})
if 'defaults' in query:
del query['defaults']
self.__call__(*q_objs, **query)
count = self.count()
if count == 0:
query.update(defaults)
doc = self._document(**query)
doc.save()
return doc, True
elif count == 1:
return self.first(), False
else:
message = u'%d items returned, instead of 1' % count
raise self._document.MultipleObjectsReturned(message)
def first(self):
"""Retrieve the first object matching the query.
"""
try:
result = self[0]
except IndexError:
result = None
return result
def with_id(self, object_id):
"""Retrieve the object matching the id provided.
:param object_id: the value for the id of the document to look up
"""
id_field = self._document._meta['id_field']
object_id = self._document._fields[id_field].to_mongo(object_id)
result = self._collection.find_one(
{'_id': (not isinstance(object_id, ObjectId) and ObjectId(object_id)) or object_id})
if result is not None:
result = self._document(**dict_keys_to_str(result))
return result
def in_bulk(self, object_ids):
"""Retrieve a set of documents by their ids.
:param object_ids: a list or tuple of id's
:rtype: dict of ids as keys and collection-specific
Document subclasses as values.
.. versionadded:: 0.3
"""
doc_map = {}
docs = self._collection.find(
{'_id': {'$in': [(not isinstance(id, ObjectId) and ObjectId(id)) or id for id in object_ids]}})
for doc in docs:
            doc_map[str(doc['_id'])] = self._document(**dict_keys_to_str(doc))
return doc_map
def count(self):
"""Count the selected elements in the query.
"""
if self._limit == 0:
return 0
return self._cursor.count(with_limit_and_skip=False)
def __len__(self):
return self.count()
def limit(self, n):
"""Limit the number of returned documents to `n`. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[:5]``).
:param n: the maximum number of objects to return
"""
if n == 0:
self._cursor.limit(1)
else:
self._cursor.limit(n)
self._limit = n
# Return self to allow chaining
return self
def skip(self, n):
"""Skip `n` documents before returning the results. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[5:]``).
:param n: the number of objects to skip before returning results
"""
self._cursor.skip(n)
self._skip = n
return self
def __getitem__(self, key):
"""Support skip and limit using getitem and slicing syntax.
"""
# Slice provided
if isinstance(key, slice):
try:
self._cursor_obj = self._cursor[key]
self._skip, self._limit = key.start, key.stop
except IndexError, err:
# PyMongo raises an error if key.start == key.stop, catch it,
# bin it, kill it.
start = key.start or 0
if start >= 0 and key.stop >= 0 and key.step is None:
if start == key.stop:
self.limit(0)
self._skip, self._limit = key.start, key.stop - start
return self
raise err
# Allow further QuerySet modifications to be performed
return self
# Integer index provided
elif isinstance(key, int):
return self._document(**dict_keys_to_str(self._cursor[key]))
def only(self, *fields):
"""Load only a subset of this document's fields. ::
post = BlogPost.objects(...).only("title")
:param fields: fields to include
.. versionadded:: 0.3
"""
self._loaded_fields = []
for field in fields:
if '.' in field:
raise InvalidQueryError('Subfields cannot be used as '
'arguments to QuerySet.only')
# Translate field name
field = QuerySet._lookup_field(self._document, field)[-1].db_field
self._loaded_fields.append(field)
# _cls is needed for polymorphism
if self._document._meta.get('allow_inheritance'):
self._loaded_fields += ['_cls']
return self
def order_by(self, *args):
"""
Order the :class:`~mongoengine.queryset.QuerySet` by the keys. The
order may be specified by prepending each of the keys by a + or a -.
Ascending order is assumed.
:param keys: fields to order the query results by; keys may be
prefixed with **+** or **-** to determine the ordering direction
"""
self._ordering = []
for col in args:
self._ordering.append(((col.startswith("-") and col[1:]) or col, (col.startswith("-") and -1) or 1))
self._cursor.sort(self._ordering)
return self
def explain(self, format=False):
"""Return an explain plan record for the
:class:`~mongoengine.queryset.QuerySet`\ 's cursor.
:param format: format the plan before returning it
"""
plan = self._cursor.explain()
if format:
import pprint
plan = pprint.pformat(plan)
return plan
def delete(self, safe=False):
"""Delete the documents matched by the query.
:param safe: check if the operation succeeded before returning
"""
self._collection.remove(self._query, safe=safe)
@classmethod
def _transform_update(cls, _doc_cls=None, **update):
"""Transform an update spec from Django-style format to Mongo format.
"""
operators = ['set', 'unset', 'inc', 'dec', 'push', 'push_all', 'pull',
'pull_all']
mongo_update = {}
for key, value in update.items():
parts = key.split('__')
# Check for an operator and transform to mongo-style if there is
op = None
if parts[0] in operators:
op = parts.pop(0)
# Convert Pythonic names to Mongo equivalents
if op in ('push_all', 'pull_all'):
op = op.replace('_all', 'All')
elif op == 'dec':
# Support decrement by flipping a positive value's sign
# and using 'inc'
op = 'inc'
if value > 0:
value = -value
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = [field.db_field for field in fields]
# Convert value to proper value
field = fields[-1]
if op in (None, 'set', 'unset', 'push', 'pull'):
value = field.prepare_query_value(op, value)
elif op in ('pushAll', 'pullAll'):
value = [field.prepare_query_value(op, v) for v in value]
key = '.'.join(parts)
if op:
value = {key: value}
key = '$' + op
if op is None or key not in mongo_update:
mongo_update[key] = value
elif key in mongo_update and isinstance(mongo_update[key], dict):
mongo_update[key].update(value)
return mongo_update
def update(self, safe_update=True, upsert=False, **update):
pass
def update_one(self, safe_update=True, upsert=False, **update):
pass
def __iter__(self, *args, **kwargs):
for obj in self._cursor:
data = dict_keys_to_str(obj)
if '_id' in data:
data['id'] = data.pop('_id')
yield self._document(**data)
def _sub_js_fields(self, code):
"""When fields are specified with [~fieldname] syntax, where
*fieldname* is the Python name of a field, *fieldname* will be
substituted for the MongoDB name of the field (specified using the
:attr:`name` keyword argument in a field's constructor).
"""
def field_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return u'["%s"]' % fields[-1].db_field
return re.sub(u'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]', field_sub, code)
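    # Illustrative example (hypothetical field): if the Python field `first_name` is
    # stored in MongoDB under the name `fn`, the snippet 'this[~first_name] == "x"'
    # is rewritten to 'this["fn"] == "x"'.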
def __repr__(self):
limit = REPR_OUTPUT_SIZE + 1
if self._limit is not None and self._limit < limit:
limit = self._limit
data = list(self[self._skip:limit])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def _clone(self):
return self
class Manager(DJManager):
def __init__(self, manager_func=None):
super(Manager, self).__init__()
self._manager_func = manager_func
self._collection = None
def contribute_to_class(self, model, name):
# TODO: Use weakref because of possible memory leak / circular reference.
self.model = model
# setattr(model, name, ManagerDescriptor(self))
if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
model._meta.abstract_managers.append((self.creation_counter, name,
self))
else:
model._meta.concrete_managers.append((self.creation_counter, name,
self))
def __get__(self, instance, owner):
"""
Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
self.model = owner # We need to set the model to get the db
if instance is not None:
            # Accessed via a document instance rather than the class; just return the manager
return self
if self._collection is None:
self._collection = connections[self.db].db_connection[owner._meta.db_table]
# owner is the document that contains the QuerySetManager
queryset = QuerySet(owner, self._collection)
if self._manager_func:
if self._manager_func.func_code.co_argcount == 1:
queryset = self._manager_func(queryset)
else:
queryset = self._manager_func(owner, queryset)
return queryset
| theofilis/elasticsearch-engine | elasticsearch_engine/manager.py | Python | gpl-2.0 | 20,982 | 0.001716 |
#!/usr/bin/env python2.7
# Author: echel0n <echel0n@sickrage.ca>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unittest
import sickrage
from sickrage.core.tv.show import TVShow
from tests import SiCKRAGETestDBCase
class XEMBasicTests(SiCKRAGETestDBCase):
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append(curShow)
except Exception:
pass
def loadFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append(curShow)
except Exception as e:
print "There was an error creating the show"
def test_formating(self):
name = "Game.of.Thrones.S03.720p.HDTV.x264-CtrlHD"
release = "Game of Thrones"
# m = re.match('(?P<ep_ab_num>(?>\d{1,3})(?![ip])).+', name)
escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(release))
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
# print(u"Checking if show " + name + " matches " + curRegex)
match = re.search(curRegex, name, re.I)
# if match:
# print(u"Matched " + curRegex + " to " + name)
if __name__ == "__main__":
print "=================="
print "STARTING - XEM SCENE NUMBERING TESTS"
print "=================="
print "######################################################################"
unittest.main()
| gborri/SickRage | tests/test_xem.py | Python | gpl-3.0 | 2,900 | 0.01 |
# -*- coding: utf-8 -*-
"""Communication commands package."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
| whutch/cwmud | cwmud/core/commands/communication/__init__.py | Python | mit | 246 | 0 |
"""
News resource
=============
It is an alias for archive without filtering out published items.
"""
from superdesk.resource import build_custom_hateoas
from apps.archive.archive import ArchiveResource, ArchiveService
from apps.archive.common import CUSTOM_HATEOAS
class NewsResource(ArchiveResource):
datasource = ArchiveResource.datasource.copy()
datasource.update(
{
"source": "archive",
"elastic_filter": {"bool": {"must_not": {"term": {"version": 0}}}},
}
)
resource_methods = ["GET"]
item_methods = []
class NewsService(ArchiveService):
def enhance_items(self, items):
super().enhance_items(items)
for item in items:
build_custom_hateoas(CUSTOM_HATEOAS, item)
| superdesk/superdesk-core | apps/archive/news.py | Python | agpl-3.0 | 767 | 0 |
from django.conf.urls import url
from . import views
app_name = 'manage'
urlpatterns = [
url(r'^index/$', views.index, name='index'),
url(r'^db/(\w+)/$', views.db, name='db'),
url(r'^import/index/$', views.import_index, name='import'),
url(r'^import/dir/$', views.import_dir, name='import-dir'),
url(r'^import/status/', views.import_status, name='import-status')
]
| tangyanhan/homesite | manage_videos/urls.py | Python | mit | 370 | 0.016216 |
# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: passwordstore
version_added: "2.3"
author:
- Patrick Deelman <patrick@patrickdeelman.nl>
short_description: manage passwords with passwordstore.org's pass utility
description:
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
        It also retrieves YAML-style keys stored as multiline entries in the password file.
options:
_terms:
description: query key
required: True
passwordstore:
description: location of the password store
default: '~/.password-store'
directory:
description: The directory of the password store.
env:
- name: PASSWORD_STORE_DIR
create:
description: Create the password if it does not already exist.
type: bool
default: 'no'
overwrite:
description: Overwrite the password if it does already exist.
type: bool
default: 'no'
returnall:
description: Return all the content of the password, not only the first line.
type: bool
default: 'no'
subkey:
description: Return a specific subkey of the password. When set to C(password), always returns the first line.
default: password
userpass:
description: Specify a password to save, instead of a generated one.
length:
description: The length of the generated password
type: integer
default: 16
backup:
description: Used with C(overwrite=yes). Backup the previous password in a subkey.
type: bool
default: 'no'
version_added: 2.7
nosymbols:
        description: Generate the password using only alphanumeric characters (no symbols).
type: bool
default: 'no'
version_added: 2.8
"""
EXAMPLES = """
# Debug is used for examples, BAD IDEA to show passwords on screen
- name: Basic lookup. Fails if example/test doesn't exist
debug:
msg: "{{ lookup('passwordstore', 'example/test')}}"
- name: Create pass with random 16 character password. If password exists just give the password
debug:
var: mypassword
vars:
mypassword: "{{ lookup('passwordstore', 'example/test create=true')}}"
- name: Different size password
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true length=42')}}"
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
- name: Create an alphanumeric password
debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true) }}"
- name: Return the value for user in the KV pair user, username
debug:
msg: "{{ lookup('passwordstore', 'example/test subkey=user')}}"
- name: Return the entire password file content
set_fact:
passfilecontent: "{{ lookup('passwordstore', 'example/test returnall=true')}}"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import subprocess
import time
from distutils import util
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C
# backhacked check_output with input for python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
def check_output2(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
del kwargs['input']
kwargs['stdin'] = subprocess.PIPE
else:
b_inputdata = None
process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
try:
b_out, b_err = process.communicate(b_inputdata)
except Exception:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode != 0 or \
b'encryption failed: Unusable public key' in b_out or \
b'encryption failed: Unusable public key' in b_err:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(
retcode,
cmd,
to_native(b_out + b_err, errors='surrogate_or_strict')
)
return b_out
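# Illustrative usage (mirrors the calls further below): feed data to a command's stdin, e.g.
#   check_output2(['pass', 'insert', '-f', '-m', 'example/test'], input='s3cret\n')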
class LookupModule(LookupBase):
def parse_params(self, term):
# I went with the "traditional" param followed with space separated KV pairs.
# Waiting for final implementation of lookup parameter parsing.
# See: https://github.com/ansible/ansible/issues/12255
params = term.split()
if len(params) > 0:
# the first param is the pass-name
self.passname = params[0]
# next parse the optional parameters in keyvalue pairs
try:
for param in params[1:]:
name, value = param.split('=')
if name not in self.paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
self.paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
# check and convert values
try:
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
if not isinstance(self.paramvals[key], bool):
self.paramvals[key] = util.strtobool(self.paramvals[key])
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
# Set PASSWORD_STORE_DIR if directory is set
if self.paramvals['directory']:
if os.path.isdir(self.paramvals['directory']):
os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory']
else:
raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
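    # Illustrative example: parse_params('example/test create=true length=42') sets
    # self.passname to 'example/test', leaves paramvals['create'] truthy and sets
    # paramvals['length'] to 42.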
def check_pass(self):
try:
self.passoutput = to_text(
check_output2(["pass", self.passname]),
errors='surrogate_or_strict'
).splitlines()
self.password = self.passoutput[0]
self.passdict = {}
for line in self.passoutput[1:]:
if ':' in line:
name, value = line.split(':', 1)
self.passdict[name.strip()] = value.strip()
except (subprocess.CalledProcessError) as e:
if e.returncode == 1 and 'not in the password store' in e.output:
# if pass returns 1 and return string contains 'is not in the password store.'
# We need to determine if this is valid or Error.
if not self.paramvals['create']:
raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
else:
return False
else:
raise AnsibleError(e)
return True
def get_newpass(self):
if self.paramvals['nosymbols']:
chars = C.DEFAULT_PASSWORD_CHARS[:62]
else:
chars = C.DEFAULT_PASSWORD_CHARS
if self.paramvals['userpass']:
newpass = self.paramvals['userpass']
else:
newpass = random_password(length=self.paramvals['length'], chars=chars)
return newpass
def update_password(self):
# generate new password, insert old lines from current result and return new password
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n'
if self.passoutput[1:]:
msg += '\n'.join(self.passoutput[1:]) + '\n'
if self.paramvals['backup']:
msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
try:
check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def generate_password(self):
        # create a new password entry, recording when it was first generated by
        # Ansible, and insert it with 'pass insert -m'
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
try:
check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def get_passresult(self):
if self.paramvals['returnall']:
return os.linesep.join(self.passoutput)
if self.paramvals['subkey'] == 'password':
return self.password
else:
if self.paramvals['subkey'] in self.passdict:
return self.passdict[self.paramvals['subkey']]
else:
return None
def run(self, terms, variables, **kwargs):
result = []
self.paramvals = {
'subkey': 'password',
'directory': variables.get('passwordstore'),
'create': False,
'returnall': False,
'overwrite': False,
'nosymbols': False,
'userpass': '',
'length': 16,
'backup': False,
}
for term in terms:
self.parse_params(term) # parse the input into paramvals
if self.check_pass(): # password exists
if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
result.append(self.update_password())
else:
result.append(self.get_passresult())
else: # password does not exist
if self.paramvals['create']:
result.append(self.generate_password())
return result
| alxgu/ansible | lib/ansible/plugins/lookup/passwordstore.py | Python | gpl-3.0 | 11,134 | 0.003413 |
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import re
from base64 import b64encode
import sickbeard
from sickbeard.clients.generic import GenericClient
class TransmissionAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(TransmissionAPI, self).__init__('Transmission', host, username, password)
self.url = '/'.join((self.host.rstrip('/'), sickbeard.TORRENT_RPCURL.strip('/'), 'rpc'))
def _get_auth(self):
post_data = json.dumps({'method': 'session-get', })
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
verify=sickbeard.TORRENT_VERIFY_CERT)
self.auth = re.search(r'X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
except Exception:
return None
self.session.headers.update({'x-transmission-session-id': self.auth})
# Validating Transmission authorization
post_data = json.dumps({'arguments': {},
'method': 'session-get'})
self._request(method='post', data=post_data)
return self.auth
def _add_torrent_uri(self, result):
arguments = {
'filename': result.url,
'paused': int(sickbeard.TORRENT_PAUSED)
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _add_torrent_file(self, result):
arguments = {
'metainfo': b64encode(result.content),
'paused': 1 if sickbeard.TORRENT_PAUSED else 0
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_ratio(self, result):
ratio = None
if result.ratio:
ratio = result.ratio
mode = 0
if ratio:
if float(ratio) == -1:
ratio = 0
mode = 2
elif float(ratio) >= 0:
ratio = float(ratio)
mode = 1 # Stop seeding at seedRatioLimit
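        # seedRatioMode values, per the Transmission RPC spec: 0 = follow the global
        # limit, 1 = use this torrent's seedRatioLimit, 2 = seed regardless of ratio.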
arguments = {'ids': [result.hash],
'seedRatioLimit': ratio,
'seedRatioMode': mode}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_seed_time(self, result):
if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
arguments = {'ids': [result.hash],
'seedIdleLimit': time,
'seedIdleMode': 1}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
else:
return True
def _set_torrent_priority(self, result):
arguments = {'ids': [result.hash]}
if result.priority == -1:
arguments['priority-low'] = []
elif result.priority == 1:
# set high priority for all files in torrent
arguments['priority-high'] = []
            # move torrent to the top of the queue
arguments['queuePosition'] = 0
if sickbeard.TORRENT_HIGH_BANDWIDTH:
arguments['bandwidthPriority'] = 1
else:
arguments['priority-normal'] = []
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
api = TransmissionAPI()
| cortedeltimo/SickRage | sickbeard/clients/transmission_client.py | Python | gpl-3.0 | 5,187 | 0.00135 |
#! /usr/bin/env python
# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
#
# A script for generating a number of flows.
#
# The output of the script should be saved to a file, and the flows from
# that file should be added by the following command:
#
# web/add_flow.py -f filename
#
# NOTE: Currently, some of the parameters of the flows are hard-coded,
# and all flows are between the same source and destination DPID and ports
# (differentiated by different matchSrcMac and matchDstMac).
#
import copy
import pprint
import os
import sys
import subprocess
import json
import argparse
import io
import time
## Global Var ##
DEBUG=0
pp = pprint.PrettyPrinter(indent=4)
## Worker Functions ##
def log_error(txt):
print '%s' % (txt)
def debug(txt):
if DEBUG:
print '%s' % (txt)
if __name__ == "__main__":
usage_msg = "Generate a number of flows by using a pre-defined template.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + " The output should be saved to a file, and the flows should be installed\n"
usage_msg = usage_msg + " by using the command './add_flow.py -f filename'\n"
# app.debug = False;
# Usage info
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print(usage_msg)
exit(0)
# Check arguments
if len(sys.argv) < 3:
log_error(usage_msg)
exit(1)
# Extract the arguments
begin_flow_id = int(sys.argv[1], 0)
end_flow_id = int(sys.argv[2], 0)
if begin_flow_id > end_flow_id:
log_error(usage_msg)
exit(1)
#
# Do the work
#
# NOTE: Currently, up to 65536 flows are supported.
    # More flows can be supported by, say, iterating over some of the other
    # bytes of the autogenerated source/destination MAC addresses.
#
flow_id = begin_flow_id
idx = 0
while flow_id <= end_flow_id:
mac3 = idx / 255
mac4 = idx % 255
str_mac3 = "%0.2x" % mac3
str_mac4 = "%0.2x" % mac4
src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
flow_id = flow_id + 1
idx = idx + 1
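    # Illustrative output line for flow_id=1 (idx=0):
    #   1 FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac 00:00:00:00:00:00 matchDstMac 00:01:00:00:00:00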
| opennetworkinglab/spring-open | scripts/perf-scripts/generate_flows.py | Python | apache-2.0 | 2,622 | 0.017162 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth', size=30),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle', size=64),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id':fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', size=32, required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', size=64, required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', size=256, required=False, readonly=False),
'visa_no': fields.char('Visa No', size=64, required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/hr_contract/hr_contract.py | Python | agpl-3.0 | 4,997 | 0.005803 |
import asyncio
import enum
import logging
import concurrent.futures
logger = logging.getLogger("cloudbot")
@enum.unique
class EventType(enum.Enum):
message = 0
action = 1
# TODO: Do we actually want to have a 'notice' event type? Should the NOTICE command be a 'message' type?
notice = 2
join = 3
part = 4
kick = 5
other = 6
class Event:
"""
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type type: EventType
:type content: str
:type target: str
:type chan: str
:type nick: str
:type user: str
:type host: str
:type mask: str
:type db: sqlalchemy.orm.Session
:type db_executor: concurrent.futures.ThreadPoolExecutor
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: str
:type irc_ctcp_text: str
"""
def __init__(self, *, bot=None, hook=None, conn=None, base_event=None, event_type=EventType.other, content=None,
target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None, irc_ctcp_text=None):
"""
All of these parameters except for `bot` and `hook` are optional.
The irc_* parameters should only be specified for IRC events.
Note that the `bot` argument may be left out if you specify a `base_event`.
:param bot: The CloudBot instance this event was triggered from
:param conn: The Client instance this event was triggered from
:param hook: The hook this event will be passed to
:param base_event: The base event that this event is based on. If this parameter is not None, then nick, user,
host, mask, and irc_* arguments are ignored
:param event_type: The type of the event
:param content: The content of the message, or the reason for an join or part
:param target: The target of the action, for example the user being kicked, or invited
:param channel: The channel that this action took place in
:param nick: The nickname of the sender that triggered this event
:param user: The user of the sender that triggered this event
:param host: The host of the sender that triggered this event
:param mask: The mask of the sender that triggered this event (nick!user@host)
:param irc_raw: The raw IRC line
:param irc_prefix: The raw IRC prefix
:param irc_command: The IRC command
:param irc_paramlist: The list of params for the IRC command. If the last param is a content param, the ':'
should be removed from the front.
:param irc_ctcp_text: CTCP text if this message is a CTCP command
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type base_event: cloudbot.event.Event
:type content: str
:type target: str
:type event_type: EventType
:type nick: str
:type user: str
:type host: str
:type mask: str
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: list[str]
:type irc_ctcp_text: str
"""
self.db = None
self.db_executor = None
self.bot = bot
self.conn = conn
self.hook = hook
if base_event is not None:
# We're copying an event, so inherit values
if self.bot is None and base_event.bot is not None:
self.bot = base_event.bot
if self.conn is None and base_event.conn is not None:
self.conn = base_event.conn
if self.hook is None and base_event.hook is not None:
self.hook = base_event.hook
# If base_event is provided, don't check these parameters, just inherit
self.type = base_event.type
self.content = base_event.content
self.target = base_event.target
self.chan = base_event.chan
self.nick = base_event.nick
self.user = base_event.user
self.host = base_event.host
self.mask = base_event.mask
# clients-specific parameters
self.irc_raw = base_event.irc_raw
self.irc_prefix = base_event.irc_prefix
self.irc_command = base_event.irc_command
self.irc_paramlist = base_event.irc_paramlist
self.irc_ctcp_text = base_event.irc_ctcp_text
else:
# Since base_event wasn't provided, we can take these parameters
self.type = event_type
self.content = content
self.target = target
self.chan = channel
self.nick = nick
self.user = user
self.host = host
self.mask = mask
# clients-specific parameters
self.irc_raw = irc_raw
self.irc_prefix = irc_prefix
self.irc_command = irc_command
self.irc_paramlist = irc_paramlist
self.irc_ctcp_text = irc_ctcp_text
@asyncio.coroutine
def prepare(self):
"""
        Initializes this event to be run through its hook
Mainly, initializes a database object on this event, if the hook requires it.
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use prepare_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
logger.debug("Opening database session for {}:threaded=False".format(self.hook.description))
# we're running a coroutine hook with a db, so initialise an executor pool
self.db_executor = concurrent.futures.ThreadPoolExecutor(1)
# be sure to initialize the db in the database executor, so it will be accessible in that thread.
self.db = yield from self.async(self.bot.db_session)
def prepare_threaded(self):
"""
        Initializes this event to be run through its hook
Mainly, initializes the database object on this event, if the hook requires it.
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use prepare.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
logger.debug("Opening database session for {}:threaded=True".format(self.hook.description))
self.db = self.bot.db_session()
@asyncio.coroutine
def close(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use close_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
logger.debug("Closing database session for {}:threaded=False".format(self.hook.description))
            # be sure to close the database in the database executor, as it is only accessible in that one thread
yield from self.async(self.db.close)
self.db = None
def close_threaded(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use close.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
logger.debug("Closing database session for {}:threaded=True".format(self.hook.description))
self.db.close()
self.db = None
@property
def event(self):
"""
:rtype: Event
"""
return self
@property
def loop(self):
"""
:rtype: asyncio.events.AbstractEventLoop
"""
return self.bot.loop
@property
def logger(self):
return logger
def message(self, message, target=None):
"""sends a message to a specific or current channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.message(target, message)
def reply(self, *messages, target=None):
"""sends a message to the current channel/user with a prefix
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not messages: # if there are no messages specified, don't do anything
return
if target == self.nick:
self.conn.message(target, *messages)
else:
self.conn.message(target, "({}) {}".format(self.nick, messages[0]), *messages[1:])
def action(self, message, target=None):
"""sends an action to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.action(target, message)
def ctcp(self, message, ctcp_type, target=None):
"""sends an ctcp to the current channel/user or a specific channel/user
:type message: str
:type ctcp_type: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not hasattr(self.conn, "ctcp"):
raise ValueError("CTCP can only be used on IRC connections")
# noinspection PyUnresolvedReferences
self.conn.ctcp(target, ctcp_type, message)
def notice(self, message, target=None):
"""sends a notice to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
avoid_notices = self.conn.config.get("avoid_notices", False)
if target is None:
if self.nick is None:
raise ValueError("Target must be specified when nick is not assigned")
target = self.nick
        # we have a config option to avoid noticing users and PM them instead, so we use it here
if avoid_notices:
self.conn.message(target, message)
else:
self.conn.notice(target, message)
def has_permission(self, permission, notice=True):
""" returns whether or not the current user has a given permission
:type permission: str
:rtype: bool
"""
if not self.mask:
raise ValueError("has_permission requires mask is not assigned")
return self.conn.permissions.has_perm_mask(self.mask, permission, notice=notice)
@asyncio.coroutine
def async(self, function, *args, **kwargs):
if self.db_executor is not None:
executor = self.db_executor
else:
executor = None
        if kwargs:
            # run_in_executor() does not accept keyword arguments, so wrap the call
            result = yield from self.loop.run_in_executor(executor, lambda: function(*args, **kwargs))
        else:
            result = yield from self.loop.run_in_executor(executor, function, *args)
return result
class CommandEvent(Event):
"""
:type hook: cloudbot.plugin.CommandHook
:type text: str
:type triggered_command: str
"""
def __init__(self, *, bot=None, hook, text, triggered_command, conn=None, base_event=None, event_type=None,
content=None, target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None,
irc_prefix=None, irc_command=None, irc_paramlist=None):
"""
:param text: The arguments for the command
:param triggered_command: The command that was triggered
:type text: str
:type triggered_command: str
"""
super().__init__(bot=bot, hook=hook, conn=conn, base_event=base_event, event_type=event_type, content=content,
target=target, channel=channel, nick=nick, user=user, host=host, mask=mask, irc_raw=irc_raw,
irc_prefix=irc_prefix, irc_command=irc_command, irc_paramlist=irc_paramlist)
self.hook = hook
self.text = text
self.triggered_command = triggered_command
def notice_doc(self, target=None):
"""sends a notice containing this command's docstring to the current channel/user or a specific channel/user
:type target: str
"""
if self.triggered_command is None:
raise ValueError("Triggered command not set on this event")
if self.hook.doc is None:
message = "{}{} requires additional arguments.".format(self.conn.config["command_prefix"],
self.triggered_command)
else:
if self.hook.doc.split()[0].isalpha():
# this is using the old format of `name <args> - doc`
message = "{}{}".format(self.conn.config["command_prefix"], self.hook.doc)
else:
# this is using the new format of `<args> - doc`
message = "{}{} {}".format(self.conn.config["command_prefix"], self.triggered_command, self.hook.doc)
self.notice(message, target=target)
class RegexEvent(Event):
"""
:type hook: cloudbot.plugin.RegexHook
:type match: re.__Match
"""
def __init__(self, *, bot=None, hook, match, conn=None, base_event=None, event_type=None, content=None, target=None,
channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None):
"""
        :param match: The match object returned by the regex search method
:type match: re.__Match
"""
super().__init__(bot=bot, conn=conn, hook=hook, base_event=base_event, event_type=event_type, content=content,
target=target, channel=channel, nick=nick, user=user, host=host, mask=mask, irc_raw=irc_raw,
irc_prefix=irc_prefix, irc_command=irc_command, irc_paramlist=irc_paramlist)
self.match = match
| nidhididi/CloudBot | cloudbot/event.py | Python | gpl-3.0 | 15,182 | 0.003557 |
from time import localtime, gmtime, strftime, strptime
from os.path import expanduser, join
from pprint import pprint
from decimal import *
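# Simple personal-finance ledger (user prompts are in Italian): a "movimento" is a
# transaction with a signed amount ("valore"), date ("data"), time ("ora") and
# description ("descrizione"), appended as a semicolon-separated record to
# ~/dati/movimenti.dat.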
def scrivi_movimento(path, m):
with open(path, 'a') as f:
f.write(m['tipo'] + m['valore'])
f.write(';')
f.write(m['data'])
f.write(';')
f.write(m['ora'])
f.write(';')
f.write(m['descrizione'])
f.write('\n')
return
def leggi_tipo():
t = 'n'
while not (t == '' or t == '+' or t == '-'):
t = input('tipo (+/-) [-]: ')
if t == '':
t='-'
elif t == '+':
t=''
return t
def leggi_valore():
v = ''
while v == '':
v = input('valore (#####.##) []: ')
return v
def leggi_data():
d = input('data (DD/MM/YYYY) [oggi]: ')
if d == '':
d = strftime("%d/%m/%Y", localtime())
return d
def leggi_ora():
o = input('ora (HH:MM) [adesso]: ')
if o == '':
o = strftime('%H:%M', localtime())
return o
def leggi_descrizione():
d = input('descrizione () []: ')
return d
def leggi_movimento():
tipo = leggi_tipo()
valore = leggi_valore()
data = leggi_data()
ora = leggi_ora()
descrizione = leggi_descrizione()
m = {
'tipo' : tipo,
'valore' : valore,
'data' : data,
'ora' : ora,
'descrizione': descrizione
}
return m
def get_file_dati():
home = expanduser('~')
nome_file_dati = 'movimenti.dat'
file_dati = join(home, 'dati', nome_file_dati)
print('file dati:', file_dati)
return file_dati
def carica_file(f):
dati = []
with open(f, "r") as df:
for l in df:
spl = l.split(';')
d = {
'valore' : spl[0],
'data' : spl[1],
'ora' : spl[2],
'descrizione' : spl[3]
}
dati.append(d)
return dati
def inserimento(file_dati):
m = leggi_movimento()
scrivi_movimento(file_dati, m)
def inserimento_dati():
file_dati = get_file_dati()
inserimento(file_dati)
def riassunto_dati():
file_dati = get_file_dati()
riassunto(file_dati)
def data_default(data):
try:
return strptime(data, '%d/%m/%Y')
except ValueError:
return gmtime(0)
def ora_default(ora):
try:
return strptime(ora, '%H:%M')
except ValueError:
return gmtime(0)
def ordina(dati):
return sorted(
dati,
key = lambda x: (
data_default(x['data']),
ora_default(x['ora'])
),
reverse = True
)
def riassunto(file_dati):
dati = carica_file(file_dati)
dati_ordinati = ordina(dati)
val_attuale = Decimal('0')
spese_tot = Decimal('0')
guadagni_tot = Decimal('0')
for d in dati:
m = Decimal(d['valore'])
val_attuale = val_attuale + m
if m > Decimal('0'):
guadagni_tot = guadagni_tot + m
else:
spese_tot = spese_tot + m
print('valore attuale:', str(val_attuale))
print('guadagni complessivi:', str(guadagni_tot))
print('spese complessive:', str(spese_tot))
print('ultimi 5 movimenti:')
for i in range(5):
if i < len(dati_ordinati):
print(dati_ordinati[i])
| scompo/money | money/money.py | Python | bsd-3-clause | 3,277 | 0.01007 |
from setuptools import setup, find_packages
setup(name='monsql',
version='0.1.7',
packages = find_packages(),
author='firstprayer',
author_email='zhangty10@gmail.com',
      description='MonSQL - MongoDB-style way of using MySQL.',
url='https://github.com/firstprayer/monsql.git',
install_requires=[
'MySQL-python'
],
)
| firstprayer/monsql | setup.py | Python | mit | 371 | 0.013477 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Script to search vcf files for mutations within specific coordinates
# Input:
# -A vcf file
#
# Output:
# -A Roary-like file with mutations sorted in rows, strains as columns and presence/absence in cells
# -Columns: Chromosome, Position, variant (eg C->T), type (eg missense, synonymous, frameshift etc)
# Reading VCF
# File metainfo starts as ##key=value
# These are always formed and should be caught
# example ##fileformat=VCFv4.3 - give warning if format is off
# Columns 8 MANDATORY
# CHROM POS ID REF ALT QUAL FILTER INFO
# OPTIONAL COLUMNS
# FORMAT SAMPLE1 SAMPLE2 etc
# All data lines are tab-delimited
# CHROM : string, no whitespace
# POS : integer. Can have many lines with same pos. Pos=0 or N+1 for telomere positions
# ID : semicolon-delimited list of strings
# REF : string, ACGTN (can be multiple)
# ALT : comma-separated list, ACGTN* (* = allele is missing due to overlapping deletion)
# (NOTE: Suggest splitting ALT variants into different lines to preserve binarity)
# QUAL : float
# FILTER : PASS or semicolon-delimited list
# INFO : semicolon-delimited list of key=value pairs or flags
# FORMAT (optional) : colon-delimited list.
# Genotype fields - Genotype always first field
# GT encoded as allele values separated by | or /. 0 = reference. 1 = first ALT. 2 = second alt etc
# NOTE: Haploid calls (bacteria) have only 1 value
# NOTE: / means genotype unphased. | means genotype phased
# INFO field SVtypes : DELetion, INSertion, DUPlication, INVersion, CNV
import sys
import argparse
import os
import csv
import re
import traceback
__version__ = '0.1b'
__author__ = 'Ola Brynildsrud'
__credits = ['Ola Brynildsrud']
__email__ = 'olbb@fhi.no'
def main():
"""
Converts VCF files (version 4.x) to Scoary format
"""
##########################################################################
# Parse command line arguments
parser = argparse.ArgumentParser(
description='This script takes in vcf files and creates a '
'presence/absence matrix of mutations in the '
'Roary/Scoary format',
epilog='by Ola Brynildsrud (olbb@fhi.no)')
parser.add_argument(
'--out',
action='store',
default='./mutations_presence_absence.csv',
help='The path to the output file')
parser.add_argument(
'--types',
action='store',
default='ALL',
help='The types of variants to include in the output. NOTE: This '
'works if TYPE=XX can be found in the INFO column of the vcf '
'file. The special keyword ALL includes all types. This is '
'the default setting. Common types are snp, mnp, ins, del '
'and complex. Give as comma-separated list. '
'Example: --types snp,ins,del')
parser.add_argument(
'--version',
action='version',
version=__version__)
parser.add_argument(
'--force',
action='store_true',
default=False,
help='Force overwriting of output file. (If it already '
'exists)')
parser.add_argument(
'vcf',
action='store',
metavar='<VCF_file>',
help='The VCF file to convert to Roary/Scoary format')
args = parser.parse_args()
if args.types is not "ALL":
args.types = args.types.split(",")
if os.path.isfile(args.out) and not args.force:
sys.exit("Outfile already exists. Change name of outfile or "
"run with --force")
if not os.path.isfile(args.vcf):
sys.exit("Unable to locate input file %s" % args.vcf)
with open(args.vcf,'rU') as vcffile, open(args.out,'w') as outfile:
lines = csv.reader(vcffile, delimiter='\t', quotechar='"')
metainfo = {"##INFO" : {},
"##FILTER" : {},
"##FORMAT" : {},
"##ALT" : {},
"##contig" : {},
"##META" : {},
"##SAMPLE" : {},
"##PEDIGREE" : {}
}
#for line in lines:
while True:
try:
line = next(lines)
except StopIteration:
print(traceback.print_exc())
sys.exit("ERROR: There appears to be only metainformation "
"(lines starting with ##) in your VCF file.")
# Get metainfo from file
if line[0][:2] == '##':
infoline = re.split('=',line[0], maxsplit=1)
# Capture list output for complex tags
if infoline[0] in metainfo:
ID=re.search(r'ID=(\w+)',infoline[1]).group(1)
infolist = re.split(',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)',infoline[1].strip("<>"))
metainfo[infoline[0]][ID] = {}
# Enter all elements in infolist into appropriate dic
for e in infolist:
esplit = e.split("=")
metainfo[infoline[0]][ID][esplit[0]] = esplit[1]
else:
metainfo[infoline[0]] = infoline[1]
else:
# Have reached the data section of the file
data = {"header": line}
break
try:
vcfversion = metainfo["##fileformat"].split("v")[1]
if int(vcfversion[0]) != 4:
print("WARNING: A VCF format other than 4.x detected."
" File parsing may proceed with errors.")
else:
print("VCF version %s detected" % vcfversion)
except:
print("WARNING: Could not detect VCF format. Expected "
"v4.x. File parsing may proceed with errors.")
print(traceback.print_exc())
# Check that genotype fields have a single allele
if metainfo["##FORMAT"]["GT"]["Number"] != "1":
sys.exit("ERROR: Expected a single allele per genotype. Scoary "
"only works for haploid organisms.")
# Have now caught all metainformation. Now get column information
#header = next(line)
#print header
data["header"] = data["header"][:9] + ["DUMMY"] + data["header"][9:]
outfile.write(','.join('"' + c + '"' for c in data["header"]) + '\n')
while True:
try:
line = next(lines)
except StopIteration:
print("Reached the end of the file")
sys.exit(0)
# Check if line is allowed:
            if args.types != "ALL":
vartype = re.search(r'TYPE=(\w+)',line[7]).group(1)
if vartype not in args.types:
continue
# Split line if ALT contains more than one variant
if "," in line[4]:
orgline = line[:]
alts = line[4].split(",")
c = 1
for a in alts:
newline = orgline[:]
newline[4] = a
# Only get GT
newline[9:] = \
[cell.split(":")[0] for cell in orgline[9:]]
# Fix dummy comparisons
newline[9:] = fixdummy(newline[9:], c)
newline = newline[:9] + ["True"] + newline[9:]
c += 1
writeLine(newline, outfile)
# Genotype fields need to be 0 or 1
# GT is always first in colon-separated list
else:
newline = line[:9] + ["False"] + line[9:]
writeLine(newline, outfile)
def writeLine(line, outfile):
writeline = line[:9] + [cell.split(":")[0] for cell in line[9:]]
outfile.write(','.join('"' + c + '"' for c in writeline) + '\n')
def fixdummy(line,c):
newline = line[:]
try:
for x in range(len(line)):
if line[x] == ".":
# Missing data get entered as reference / no presence
newline[x] = "0"
elif int(line[x]) == c:
newline[x] = "1"
else:
newline[x] = "0"
except ValueError:
print(newline, c)
sys.exit(-1)
return newline
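# Illustrative example: fixdummy(['.', '0', '1', '2'], 2) returns ['0', '0', '0', '1'] --
# only samples whose genotype index equals the current ALT number are marked present.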
########
# MAIN #
########
if __name__ == '__main__':
main()
| AdmiralenOla/Scoary | scoary/vcf2scoary.py | Python | gpl-3.0 | 8,390 | 0.005364 |
from yaml import load_all
try:
from yaml import CLoader as Loader
except ImportError:
print("Using pure python YAML loader, it may be slow.")
from yaml import Loader
from iengine import IDocumentFormatter
__author__ = 'reyoung'
class YAMLFormatter(IDocumentFormatter):
def __init__(self, fn=None, content=None):
IDocumentFormatter.__init__(self)
if fn is not None:
            with open(fn, "r") as f:
self.__content = load_all(f, Loader=Loader)
else:
self.__content = load_all(content, Loader=Loader)
def get_command_iterator(self, *args, **kwargs):
for item in self.__content:
yield YAMLFormatter.__process_item(item)
@staticmethod
def __process_item(item):
if isinstance(item, dict) and len(item) == 1:
            key = next(iter(item))
value = item[key]
return key, value
| reyoung/SlideGen2 | slidegen2/yaml_formatter.py | Python | mit | 933 | 0.001072 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-02-17 23:04
from __future__ import unicode_literals
from django.db import migrations, models
import theme.utils
class Migration(migrations.Migration):
dependencies = [
('hs_access_control', '0032_auto_20210607_2027'),
]
operations = [
migrations.AlterField(
model_name='community',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_community),
),
migrations.AlterField(
model_name='groupaccess',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_group),
),
]
| hydroshare/hydroshare | hs_access_control/migrations/0033_auto_20220217_2304.py | Python | bsd-3-clause | 756 | 0.002646 |
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.cli.iaas_client.actions.base import BaseAction
class AddAlarmPolicyActionsAction(BaseAction):
action = 'AddAlarmPolicyActions'
command = 'add-alarm-policy-actions'
usage = '%(prog)s [-a <alarm_policy>...] [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-a", "--alarm-policy", dest="alarm_policy",
action="store", type=str, default='',
help="the ID of the alarm policy whose rules you want to add.")
parser.add_argument("-A", "--actions", dest="actions",
action="store", type=str, default='',
help="it's a JSON list of actions you want to add.")
@classmethod
def build_directive(cls, options):
if options.alarm_policy == '':
print('error: alarm_policy should be specified.')
return None
if options.actions == '':
print('error: actions should be specified.')
return None
directive = {
"alarm_policy": options.alarm_policy,
"actions": json.loads(options.actions),
}
return directive
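    # Illustrative example (placeholder values): given --alarm-policy apc-xxxxxxxx and
    # --actions '[{...}]' (action objects as defined by the QingCloud API), the returned
    # directive is {"alarm_policy": "apc-xxxxxxxx", "actions": [{...}]}.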
| yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/alarm_policy/add_alarm_policy_actions.py | Python | apache-2.0 | 2,067 | 0.000968 |
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
"""The lib389 module.
IMPORTANT: Ternary operator syntax is unsupported on RHEL5
x if cond else y #don't!
The lib389 functionalities are split in various classes
defined in brookers.py
TODO: reorganize method parameters according to SimpleLDAPObject
naming: filterstr, attrlist
"""
try:
from subprocess import Popen, PIPE, STDOUT
HASPOPEN = True
except ImportError:
import popen2
HASPOPEN = False
import io
import sys
import os
import stat
import pwd
import grp
import os.path
import base64
import socket
import ldif
import re
import ldap
import ldapurl
import time
import operator
import shutil
import datetime
import logging
import decimal
import glob
import tarfile
import subprocess
import collections
import signal
import errno
from shutil import copy2, copystat, Error
try:
# There are too many issues with this on EL7
# Out of the box, it's just outright broken ...
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import six
except ImportError:
pass
from ldap.ldapobject import SimpleLDAPObject
from ldap.cidict import cidict
from ldap import LDAPError
# file in this package
from lib389._constants import *
from lib389.properties import *
from lib389._entry import Entry
from lib389._replication import CSN, RUV
from lib389._ldifconn import LDIFConn
from lib389.tools import DirSrvTools
from lib389.mit_krb5 import MitKrb5
from lib389.utils import (
isLocalHost,
is_a_dn,
normalizeDN,
suffixfilt,
escapeDNValue,
update_newhost_with_fqdn,
formatInfData,
ensure_bytes,
ensure_str)
from lib389.paths import Paths
# mixin
# from lib389.tools import DirSrvTools
from lib389.exceptions import *
MAJOR, MINOR, _, _, _ = sys.version_info
if MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7):
from ldap.controls.simple import GetEffectiveRightsControl
from lib389._controls import DereferenceControl
RE_DBMONATTR = re.compile(r'^([a-zA-Z]+)-([1-9][0-9]*)$')
RE_DBMONATTRSUN = re.compile(r'^([a-zA-Z]+)-([a-zA-Z]+)$')
# This controls pyldap debug levels
TRACE_LEVEL = 0
# My logger
log = logging.getLogger(__name__)
# Initiate the paths object here. Should this be part of the DirSrv class
# for submodules?
def wrapper(f, name):
"""
Wrapper of all superclass methods using lib389.Entry.
@param f - DirSrv method inherited from SimpleLDAPObject
@param name - method to call
This seems to need to be an unbound method, that's why it's outside of
DirSrv. Perhaps there is some way to do this with the new classmethod
or staticmethod of 2.4.
We replace every call to a method in SimpleLDAPObject (the superclass
of DirSrv) with a call to inner. The f argument to wrapper is the bound
method of DirSrv (which is inherited from the superclass). Bound means
that it will implicitly be called with the self argument, it is not in
the args list. name is the name of the method to call. If name is a
method that returns entry objects (e.g. result), we wrap the data returned
by an Entry class. If name is a method that takes an entry argument, we
extract the raw data from the entry object to pass in.
"""
def inner(*args, **kwargs):
if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element
return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
return f(*args, **kwargs)
return inner
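# Illustrative effect of the wrapping above: a wrapped 'result' call hands back
# lib389 Entry objects instead of raw (dn, attrs) data, and a wrapped 'add*' call
# accepts an Entry directly, expanding it to (entry.dn, entry.toTupleList()).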
def pid_exists(pid):
if pid <= 0:
return False
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
elif err.errno == errno.EPERM:
return True
else:
raise
return True
def pid_from_file(pidfile):
pid = None
try:
with open(pidfile, 'rb') as f:
for line in f.readlines():
try:
pid = int(line.strip())
break
except ValueError:
continue
except IOError:
pass
return pid
def _ds_shutil_copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
This is taken from /usr/lib64/python3.5/shutil.py, but removes the
copystat function at the end. Why? Because in a container without
privileges, we don't have access to set xattr. But copystat attempts to
set the xattr when we are root, which causes the copy to fail. Remove it!
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur. copy2 will raise an error
if os.path.isdir(srcname):
_ds_shutil_copytree(srcname, dstname, symlinks, ignore,
copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
_ds_shutil_copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
return dst
class DirSrv(SimpleLDAPObject, object):
def __initPart2(self):
"""Initialize the DirSrv structure filling various fields, like:
self.errlog -> nsslapd-errorlog
self.accesslog -> nsslapd-accesslog
self.auditlog -> nsslapd-auditlog
            self.confdir -> config directory (ds_paths.config_dir)
self.inst -> equivalent to self.serverid
self.sroot/self.inst -> nsslapd-instancedir
self.dbdir -> dirname(nsslapd-directory)
self.bakdir -> nsslapd-bakdir
self.ldifdir -> nsslapd-ldifdir
@param - self
@return - None
@raise ldap.LDAPError - if failure during initialization
"""
self.errlog = self.ds_paths.error_log
self.accesslog = self.ds_paths.access_log
self.auditlog = self.ds_paths.audit_log
self.confdir = self.ds_paths.config_dir
self.schemadir = self.ds_paths.schema_dir
self.bakdir = self.ds_paths.backup_dir
self.ldifdir = self.ds_paths.ldif_dir
self.instdir = self.ds_paths.inst_dir
self.dbdir = self.ds_paths.db_dir
def rebind(self):
"""Reconnect to the DS
@raise ldap.CONFIDENTIALITY_REQUIRED - missing TLS:
"""
        uri = self.toLDAPURL()
        if hasattr(ldap, 'PYLDAP_VERSION') and MAJOR >= 3:
            super(DirSrv, self).__init__(uri, bytes_mode=False, trace_level=TRACE_LEVEL)
        else:
            super(DirSrv, self).__init__(uri, trace_level=TRACE_LEVEL)
# self.start_tls_s()
self.simple_bind_s(ensure_str(self.binddn), self.bindpw)
def __add_brookers__(self):
from lib389.config import Config
from lib389.aci import Aci
from lib389.nss_ssl import NssSsl
from lib389.config import RSA
from lib389.config import Encryption
from lib389.dirsrv_log import DirsrvAccessLog, DirsrvErrorLog
from lib389.ldclt import Ldclt
from lib389.mappingTree import MappingTrees
from lib389.mappingTree import MappingTreeLegacy as MappingTree
from lib389.backend import Backends
from lib389.backend import BackendLegacy as Backend
from lib389.suffix import Suffix
from lib389.replica import ReplicaLegacy as Replica
from lib389.replica import Replicas
from lib389.changelog import Changelog
from lib389.agreement import Agreement
from lib389.schema import SchemaLegacy as Schema
from lib389.plugins import Plugins
from lib389.tasks import Tasks
from lib389.index import IndexLegacy as Index
from lib389.monitor import Monitor, MonitorLDBM
from lib389.rootdse import RootDSE
# Need updating
self.agreement = Agreement(self)
self.replica = Replica(self)
self.changelog = Changelog(self)
self.backend = Backend(self)
self.config = Config(self)
self.index = Index(self)
self.mappingtree = MappingTree(self)
self.suffix = Suffix(self)
self.schema = Schema(self)
self.plugins = Plugins(self)
self.tasks = Tasks(self)
# Do we have a certdb path?
# if MAJOR < 3:
self.monitor = Monitor(self)
self.monitorldbm = MonitorLDBM(self)
self.rootdse = RootDSE(self)
self.backends = Backends(self)
self.mappingtrees = MappingTrees(self)
self.replicas = Replicas(self)
self.aci = Aci(self)
self.nss_ssl = NssSsl(self)
self.rsa = RSA(self)
self.encryption = Encryption(self)
self.ds_access_log = DirsrvAccessLog(self)
self.ds_error_log = DirsrvErrorLog(self)
self.ldclt = Ldclt(self)
def __init__(self, verbose=False, timeout=10):
"""
This method does various initialization of DirSrv object:
parameters:
- 'state' to DIRSRV_STATE_INIT
- 'verbose' flag for debug purpose
            - 'log' so that it uses the module defined logger
        It also wraps the methods
            - from SimpleLDAPObject
            - from agreement, backends, suffix...
        It just creates a DirSrv object. To use it the user will likely do
the following additional steps:
- allocate
- create
- open
"""
self.state = DIRSRV_STATE_INIT
self.verbose = verbose
if self.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
self.log = log
self.timeout = timeout
self.confdir = None
self.ds_paths = Paths(instance=self)
# Reset the args (py.test reuses the args_instance for each test case)
args_instance[SER_DEPLOYED_DIR] = os.environ.get('PREFIX', self.ds_paths.prefix)
args_instance[SER_BACKUP_INST_DIR] = os.environ.get('BACKUPDIR', DEFAULT_BACKUPDIR)
args_instance[SER_ROOT_DN] = DN_DM
args_instance[SER_ROOT_PW] = PW_DM
args_instance[SER_HOST] = LOCALHOST
args_instance[SER_PORT] = DEFAULT_PORT
args_instance[SER_SECURE_PORT] = None
args_instance[SER_SERVERID_PROP] = None # "template"
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_instance[SER_USER_ID] = None
args_instance[SER_GROUP_ID] = None
args_instance[SER_REALM] = None
args_instance[SER_INST_SCRIPTS_ENABLED] = None
        # We allocate a "default" prefix here which allows an un-allocated or
        # un-instantiated DirSrv
        # instance to be able to do an instance discovery. For example:
# ds = lib389.DirSrv()
# ds.list(all=True)
self.prefix = args_instance[SER_DEPLOYED_DIR]
self.containerised = False
self.__wrapmethods()
self.__add_brookers__()
def __str__(self):
"""XXX and in SSL case?"""
return self.host + ":" + str(self.port)
# Should there be an extra boolean to this function to determine to use
# ldapi or not? Or does the settings presence indicate intent?
def allocate(self, args):
'''
Initialize a DirSrv object according to the provided args.
The final state -> DIRSRV_STATE_ALLOCATED
@param args - dictionary that contains the DirSrv properties
properties are
SER_SERVERID_PROP: used for offline op
(create/delete/backup/start/stop..) -> slapd-<serverid>
SER_HOST: hostname [LOCALHOST]
SER_PORT: normal ldap port [DEFAULT_PORT]
SER_SECURE_PORT: secure ldap port
SER_ROOT_DN: root DN [DN_DM]
SER_ROOT_PW: password of root DN [PW_DM]
            SER_USER_ID: user id of the created instance [DEFAULT_USER]
            SER_GROUP_ID: group id of the created instance [SER_USER_ID]
SER_DEPLOYED_DIR: directory where 389-ds is deployed
SER_BACKUP_INST_DIR: directory where instances will be
backed up
@return None
@raise ValueError - if missing mandatory properties or invalid
state of DirSrv
'''
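        # A minimal, illustrative args dictionary (kept as a comment; the
        # concrete values are assumptions for the example, not defaults
        # enforced by this method):
        #
        #   args = {SER_SERVERID_PROP: 'standalone1',
        #           SER_HOST: LOCALHOST,
        #           SER_PORT: 38901,
        #           SER_ROOT_DN: DN_DM,
        #           SER_ROOT_PW: 'password'}
        #   ds.allocate(args)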
if self.state != DIRSRV_STATE_INIT and \
self.state != DIRSRV_STATE_ALLOCATED:
raise ValueError("invalid state for calling allocate: %s" %
self.state)
if SER_SERVERID_PROP not in args:
self.log.debug('SER_SERVERID_PROP not provided, assuming non-local instance')
# The lack of this value basically rules it out in most cases
self.isLocal = False
self.ds_paths = Paths(instance=self)
else:
self.ds_paths = Paths(args[SER_SERVERID_PROP], instance=self)
# Do we have ldapi settings?
# Do we really need .strip() on this?
self.ldapi_enabled = args.get(SER_LDAPI_ENABLED, 'off')
self.ldapi_socket = args.get(SER_LDAPI_SOCKET, None)
self.host = None
self.ldapuri = None
self.sslport = None
self.port = None
self.inst_scripts = args.get(SER_INST_SCRIPTS_ENABLED, None)
# Or do we have tcp / ip settings?
if self.ldapi_enabled == 'on' and self.ldapi_socket is not None:
self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
self.isLocal = True
if self.verbose:
self.log.info("Allocate %s with %s" % (self.__class__, self.ldapi_socket))
elif args.get(SER_LDAP_URL, None) is not None:
self.ldapuri = args.get(SER_LDAP_URL)
if self.verbose:
self.log.info("Allocate %s with %s" % (self.__class__, self.ldapuri))
else:
# Settings from args of server attributes
self.strict_hostname = args.get(SER_STRICT_HOSTNAME_CHECKING, False)
if self.strict_hostname is True:
self.host = args.get(SER_HOST, LOCALHOST)
if self.host == LOCALHOST:
DirSrvTools.testLocalhost()
else:
# Make sure our name is in hosts
DirSrvTools.searchHostsFile(self.host, None)
else:
self.host = args.get(SER_HOST, LOCALHOST_SHORT)
self.port = args.get(SER_PORT, DEFAULT_PORT)
self.sslport = args.get(SER_SECURE_PORT)
self.isLocal = isLocalHost(self.host)
if self.verbose:
self.log.info("Allocate %s with %s:%s" % (self.__class__, self.host, (self.sslport or self.port)))
self.binddn = args.get(SER_ROOT_DN, DN_DM)
self.bindpw = args.get(SER_ROOT_PW, PW_DM)
self.creation_suffix = args.get(SER_CREATION_SUFFIX, DEFAULT_SUFFIX)
# These settings are only needed on a local connection.
if self.isLocal:
self.userid = args.get(SER_USER_ID)
if not self.userid:
if os.getuid() == 0:
# as root run as default user
self.userid = DEFAULT_USER
else:
self.userid = pwd.getpwuid(os.getuid())[0]
# Settings from args of server attributes
self.serverid = args.get(SER_SERVERID_PROP, None)
self.groupid = args.get(SER_GROUP_ID, self.userid)
self.backupdir = args.get(SER_BACKUP_INST_DIR, DEFAULT_BACKUPDIR)
# Allocate from the args, or use our env, or use /
if args.get(SER_DEPLOYED_DIR, self.prefix) is not None:
self.prefix = args.get(SER_DEPLOYED_DIR, self.prefix)
self.realm = args.get(SER_REALM, None)
if self.realm is not None:
self.krb5_realm = MitKrb5(realm=self.realm, debug=self.verbose)
        # Those variables need to be revisited (sroot for 64 bits)
# self.sroot = os.path.join(self.prefix, "lib/dirsrv")
# self.errlog = os.path.join(self.prefix,
# "var/log/dirsrv/slapd-%s/errors" % self.serverid)
# additional settings
self.suffixes = {}
self.agmt = {}
self.state = DIRSRV_STATE_ALLOCATED
if self.verbose:
self.log.info("Allocate %s with %s:%s" % (self.__class__,
self.host,
(self.sslport or
self.port)))
def openConnection(self, *args, **kwargs):
"""
Open a new connection to our LDAP server
*IMPORTANT*
This is different to re-opening on the same dirsrv, as bugs in pyldap
mean that ldap.set_option doesn't take effect! You need to use this
to allow some of the start TLS options to work!
"""
server = DirSrv(verbose=self.verbose)
args_instance[SER_HOST] = self.host
args_instance[SER_PORT] = self.port
if self.sslport is not None:
args_instance[SER_SECURE_PORT] = self.sslport
args_instance[SER_SERVERID_PROP] = self.serverid
args_standalone = args_instance.copy()
server.allocate(args_standalone)
server.open(*args, **kwargs)
return server
def list(self, all=False, serverid=None):
"""
        Returns a list of dictionaries. For a created instance that is on
        the local file system (e.g. <prefix>/etc/dirsrv/slapd-*), a file
        describing its properties exists
        (environment): <prefix>/etc/sysconfig/dirsrv-<serverid> or
        $HOME/.dirsrv/dirsrv-<serverid>
A dictionary is created with the following properties:
CONF_SERVER_DIR
CONF_SERVERBIN_DIR
CONF_CONFIG_DIR
CONF_INST_DIR
CONF_RUN_DIR
CONF_DS_ROOT
CONF_PRODUCT_NAME
If all=True it builds a list of dictionaries for all created
instances. Else (default), the list will only contain the
dictionary of the calling instance
        @param all - True or False. Default is False
        @param serverid - The name of the instance to retrieve or None for
                          the current instance
        @return - list of dictionaries, each of them containing instance
                  properties
        @raise IOError - if the file containing the properties cannot be
                         found or read
"""
def test_and_set(prop, propname, variable, value):
'''
            If variable is 'propname', add the propname:value pair to the
            'prop' dictionary and return 1, else return 0
'''
if variable == propname:
prop[propname] = value
return 1
return 0
def _parse_configfile(filename=None, serverid=None):
'''
            This method reads 'filename' and builds a dictionary with the
            CONF_* properties
'''
if not filename:
raise IOError('filename is mandatory')
if not os.path.isfile(filename) or \
not os.access(filename, os.R_OK):
raise IOError('invalid file name or rights: %s' % filename)
prop = {}
prop[CONF_SERVER_ID] = serverid
prop[SER_SERVERID_PROP] = serverid
prop[SER_DEPLOYED_DIR] = self.prefix
myfile = open(filename, 'r')
for line in myfile:
# retrieve the value in line::
# <PROPNAME>=<string> [';' export <PROPNAME>]
# skip comment lines
if line.startswith('#'):
continue
# skip lines without assignment
if '=' not in line:
continue
value = line.split(';', 1)[0]
# skip lines without assignment
if '=' not in value:
continue
variable = value.split('=', 1)[0]
value = value.split('=', 1)[1]
value = value.strip(' \t\n')
for property in (CONF_SERVER_DIR,
CONF_SERVERBIN_DIR,
CONF_CONFIG_DIR,
CONF_INST_DIR,
CONF_RUN_DIR,
CONF_DS_ROOT,
CONF_PRODUCT_NAME):
if test_and_set(prop, property, variable, value):
break
# Now, we have passed the sysconfig environment file.
# read in and parse the dse.ldif to determine our SER_* values.
# probably should use path join?
dsefile = '%s/dse.ldif' % prop[CONF_CONFIG_DIR]
if os.path.exists(dsefile):
ldifconn = LDIFConn(dsefile)
configentry = ldifconn.get(DN_CONFIG)
for key in args_dse_keys:
prop[key] = configentry.getValue(args_dse_keys[key])
# SER_HOST (host) nsslapd-localhost
# SER_PORT (port) nsslapd-port
# SER_SECURE_PORT (sslport) nsslapd-secureport
# SER_ROOT_DN (binddn) nsslapd-rootdn
# SER_ROOT_PW (bindpw) We can't do this
# SER_CREATION_SUFFIX (creation_suffix)
# nsslapd-defaultnamingcontext
# SER_USER_ID (userid) nsslapd-localuser
# SER_SERVERID_PROP (serverid) Already have this
# SER_GROUP_ID (groupid) ???
# SER_DEPLOYED_DIR (prefix) Already provided to for
# discovery
# SER_BACKUP_INST_DIR (backupdir) nsslapd-bakdir <<-- maybe?
# We need to convert these two to int
# because other routines get confused if we don't
for intkey in [SER_PORT, SER_SECURE_PORT]:
if prop[intkey] is not None:
prop[intkey] = int(prop[intkey])
return prop
def search_dir(instances, pattern, stop_value=None):
'''
            It searches all the files matching pattern.
            If there is no stop_value, it adds the properties found in
            each file to 'instances'.
            Else it searches for the specific stop_value (instance's serverid)
to add only its properties in the 'instances'
@param instances - list of dictionary containing the instances
properties
@param pattern - pattern to find the files containing the
properties
@param stop_value - serverid value if we are looking only for
one specific instance
@return True or False - If stop_value is None it returns False.
If stop_value is specified, it returns
True if it added the property
dictionary in instances. Or False if it
did not find it.
'''
added = False
for instance in glob.glob(pattern):
serverid = os.path.basename(instance)[len(DEFAULT_ENV_HEAD):]
# skip removed instance and admin server entry
if '.removed' in serverid or 'dirsrv-admin' in instance:
continue
# it is found, store its properties in the list
if stop_value:
if stop_value == serverid:
instances.append(_parse_configfile(instance, serverid))
added = True
break
else:
# this is not the searched value, continue
continue
else:
# we are not looking for a specific value, just add it
instances.append(_parse_configfile(instance, serverid))
return added
# Retrieves all instances under '/etc/sysconfig' and '/etc/dirsrv'
# Instances/Environment are
#
# file: /etc/sysconfig/dirsrv-<serverid> (env)
# inst: /etc/dirsrv/slapd-<serverid> (conf)
#
# or
#
# file: $HOME/.dirsrv/dirsrv-<serverid> (env)
# inst: <prefix>/etc/dirsrv/slapd-<serverid> (conf)
#
# Don't need a default value now since it's set in init.
prefix = self.prefix
if serverid is None and hasattr(self, 'serverid'):
serverid = self.serverid
# first identify the directories we will scan
sysconfig_head = self.ds_paths.initconfig_dir
privconfig_head = os.path.join(os.getenv('HOME'), ENV_LOCAL_DIR)
if not os.path.isdir(sysconfig_head):
privconfig_head = None
if self.verbose:
self.log.info("dir (sys) : %s" % sysconfig_head)
if privconfig_head and self.verbose:
self.log.info("dir (priv): %s" % privconfig_head)
# list of the found instances
instances = []
# now prepare the list of instances properties
if not all:
# easy case we just look for the current instance
# we have two location to retrieve the self.serverid
# privconfig_head and sysconfig_head
# first check the private repository
if privconfig_head:
pattern = "%s*" % os.path.join(privconfig_head,
DEFAULT_ENV_HEAD)
found = search_dir(instances, pattern, serverid)
if self.verbose and len(instances) > 0:
self.log.info("List from %s" % privconfig_head)
for instance in instances:
self.log.info("list instance %r\n" % instance)
if found:
assert len(instances) == 1
else:
assert len(instances) == 0
else:
found = False
# second, if not already found, search the system repository
if not found:
pattern = "%s*" % os.path.join(sysconfig_head,
DEFAULT_ENV_HEAD)
search_dir(instances, pattern, serverid)
if self.verbose and len(instances) > 0:
self.log.info("List from %s" % privconfig_head)
for instance in instances:
self.log.info("list instance %r\n" % instance)
else:
# all instances must be retrieved
if privconfig_head:
pattern = "%s*" % os.path.join(privconfig_head,
DEFAULT_ENV_HEAD)
search_dir(instances, pattern)
if self.verbose and len(instances) > 0:
self.log.info("List from %s" % privconfig_head)
for instance in instances:
self.log.info("list instance %r\n" % instance)
pattern = "%s*" % os.path.join(sysconfig_head, DEFAULT_ENV_HEAD)
search_dir(instances, pattern)
if self.verbose and len(instances) > 0:
self.log.info("List from %s" % privconfig_head)
for instance in instances:
self.log.info("list instance %r\n" % instance)
return instances
def _createDirsrv(self):
"""Create a new instance of directory server
@param self - containing the set properties
SER_HOST (host)
SER_PORT (port)
SER_SECURE_PORT (sslport)
SER_ROOT_DN (binddn)
SER_ROOT_PW (bindpw)
SER_CREATION_SUFFIX (creation_suffix)
SER_USER_ID (userid)
SER_SERVERID_PROP (serverid)
SER_GROUP_ID (groupid)
SER_DEPLOYED_DIR (prefix)
SER_BACKUP_INST_DIR (backupdir)
SER_REALM (krb5_realm)
@return None
@raise None
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, CMD_PATH_SETUP_DS)
if not os.path.isfile(prog):
log.error("Can't find file: %r, removing extension" % prog)
prog = prog[:-3]
# Create and extract a service keytab
args = {SER_HOST: self.host,
SER_PORT: self.port,
SER_SECURE_PORT: self.sslport,
SER_ROOT_DN: self.binddn,
SER_ROOT_PW: self.bindpw,
SER_CREATION_SUFFIX: self.creation_suffix,
SER_USER_ID: self.userid,
SER_SERVERID_PROP: self.serverid,
SER_GROUP_ID: self.groupid,
SER_DEPLOYED_DIR: self.prefix,
SER_BACKUP_INST_DIR: self.backupdir,
SER_STRICT_HOSTNAME_CHECKING: self.strict_hostname}
if self.inst_scripts is not None:
args[SER_INST_SCRIPTS_ENABLED] = self.inst_scripts
content = formatInfData(args)
result = DirSrvTools.runInfProg(prog, content, self.verbose,
prefix=self.prefix)
if result != 0:
raise Exception('Failed to run setup-ds.pl')
if self.realm is not None:
# This may conflict in some tests, we may need to use /etc/host
# aliases or we may need to use server id
self.krb5_realm.create_principal(principal='ldap/%s' % self.host)
ktab = '%s/ldap.keytab' % (self.ds_paths.config_dir)
self.krb5_realm.create_keytab(principal='ldap/%s' % self.host, keytab=ktab)
with open('%s/dirsrv-%s' % (self.ds_paths.initconfig_dir, self.serverid), 'a') as sfile:
sfile.write("\nKRB5_KTNAME=%s/etc/dirsrv/slapd-%s/"
"ldap.keytab\nexport KRB5_KTNAME\n" %
(self.prefix, self.serverid))
self.restart()
# Restart the instance
def _createPythonDirsrv(self, version):
"""
Create a new dirsrv instance based on the new python installer, rather
than setup-ds.pl
version represents the config default and sample entry version to use.
"""
from lib389.instance.setup import SetupDs
from lib389.instance.options import General2Base, Slapd2Base
# Import the new setup ds library.
sds = SetupDs(verbose=self.verbose, dryrun=False, log=self.log)
# Configure the options.
general_options = General2Base(self.log)
general_options.set('strict_host_checking', False)
general_options.verify()
general = general_options.collect()
slapd_options = Slapd2Base(self.log)
slapd_options.set('instance_name', self.serverid)
slapd_options.set('port', self.port)
slapd_options.set('secure_port', self.sslport)
slapd_options.set('root_password', self.bindpw)
slapd_options.set('root_dn', self.binddn)
slapd_options.set('defaults', version)
slapd_options.verify()
slapd = slapd_options.collect()
# In order to work by "default" for tests, we need to create a backend.
userroot = {
'cn': 'userRoot',
'nsslapd-suffix': self.creation_suffix,
BACKEND_SAMPLE_ENTRIES: version,
}
backends = [userroot,]
# Go!
sds.create_from_args(general, slapd, backends, None)
if self.realm is not None:
# This may conflict in some tests, we may need to use /etc/host
# aliases or we may need to use server id
self.krb5_realm.create_principal(principal='ldap/%s' % self.host)
ktab = '%s/ldap.keytab' % (self.ds_paths.config_dir)
self.krb5_realm.create_keytab(principal='ldap/%s' % self.host, keytab=ktab)
with open('%s/dirsrv-%s' % (self.ds_paths.initconfig_dir, self.serverid), 'a') as sfile:
sfile.write("\nKRB5_KTNAME=%s/etc/dirsrv/slapd-%s/"
"ldap.keytab\nexport KRB5_KTNAME\n" %
(self.prefix, self.serverid))
self.restart()
def create(self, pyinstall=False, version=INSTALL_LATEST_CONFIG):
"""
        Creates an instance with the parameters set in dirsrv
The state change from DIRSRV_STATE_ALLOCATED ->
DIRSRV_STATE_OFFLINE
@param - self
@return - None
        @raise ValueError - if 'serverid' is missing or if an instance
                            with the same 'serverid' already exists
"""
# check that DirSrv was in DIRSRV_STATE_ALLOCATED state
self.log.debug("Server is in state %s" % self.state)
if self.state != DIRSRV_STATE_ALLOCATED:
raise ValueError("invalid state for calling create: %s" %
self.state)
if self.exists():
raise ValueError("Error it already exists the instance (%s)" %
self.list()[0][CONF_INST_DIR])
if not self.serverid:
raise ValueError("SER_SERVERID_PROP is missing, " +
"it is required to create an instance")
# Time to create the instance and retrieve the effective sroot
if (not self.ds_paths.perl_enabled or pyinstall):
self._createPythonDirsrv(version)
else:
self._createDirsrv()
# Retrieve sroot from the sys/priv config file
assert(self.exists())
self.sroot = self.list()[0][CONF_SERVER_DIR]
# Now the instance is created but DirSrv is not yet connected to it
self.state = DIRSRV_STATE_OFFLINE
def _deleteDirsrv(self):
'''
        Deletes the instance with the parameters set in dirsrv
The state changes -> DIRSRV_STATE_ALLOCATED
@param self
@return None
@raise None
'''
# Grab all the instances now, before we potentially remove the last one
insts = self.list(all=True)
if self.state == DIRSRV_STATE_ONLINE:
self.close()
if not self.exists():
raise ValueError("Error can not find instance %s[%s:%d]" %
(self.serverid, self.host, self.port))
# Now time to remove the instance
prog = os.path.join(self.ds_paths.sbin_dir, CMD_PATH_REMOVE_DS)
if (not self.prefix or self.prefix == '/') and os.geteuid() != 0:
raise ValueError("Error: without prefix deployment it is required to be root user")
cmd = "%s -i %s%s" % (prog, DEFAULT_INST_HEAD, self.serverid)
self.log.debug("running: %s " % cmd)
try:
os.system(cmd)
except:
log.exception("error executing %r" % cmd)
# If this was the last instance being deleted, remove the DEFAULT_USER
# if lib389 created the default user
if os.getuid() == 0:
            # Only the root user could have added the entry
if len(insts) == 1:
# No more instances (this was the last one)
if pwd.getpwnam(DEFAULT_USER).pw_gecos == DEFAULT_USER_COMMENT:
# We created this user, so we will delete it
cmd = ['/usr/sbin/userdel', DEFAULT_USER]
try:
subprocess.call(cmd)
except subprocess.CalledProcessError as e:
log.exception('Failed to delete default user ' +
'(%s): error %s' % (DEFAULT_USER,
e.output))
self.state = DIRSRV_STATE_ALLOCATED
def delete(self, pyinstall=False):
# Time to create the instance and retrieve the effective sroot
if (not self.ds_paths.perl_enabled or pyinstall):
from lib389.instance.remove import remove_ds_instance
remove_ds_instance(self)
else:
self._deleteDirsrv()
# Now, we are still an allocated ds object so we can be re-installed
self.state = DIRSRV_STATE_ALLOCATED
def open(self, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
usercert=None, userkey=None):
'''
It opens a ldap bound connection to dirsrv so that online
administrative tasks are possible. It binds with the binddn
property, then it initializes various fields from DirSrv
(via __initPart2)
The state changes -> DIRSRV_STATE_ONLINE
@param self
        @param saslmethod - None, GSSAPI, or EXTERNAL
@param sasltoken - The ldap.sasl token type to bind with.
@param certdir - Certificate directory for TLS
@return None
@raise LDAPError
'''
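        # Illustrative calls (comment-only sketch; the certificate directory
        # path below is an assumption for the example, not a requirement):
        #
        #   ds.open()                                   # simple bind as binddn
        #   ds.open(saslmethod='gssapi')                # Kerberos/GSSAPI bind
        #   ds.open(certdir='/etc/dirsrv/slapd-standalone1', starttls=True)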
##################
# WARNING: While you have a python ldap connection open some settings like
# ldap.set_option MAY NOT WORK AS YOU EXPECT.
# There are cases (especially CACERT/USERCERTS) where when one connection
# is open set_option SILENTLY fails!!!!
#
# You MAY need to set post_open=False in your DirSrv start/restart instance!
##################
uri = self.toLDAPURL()
if certdir is None and self.isLocal:
certdir = self.get_cert_dir()
log.debug("Using dirsrv ca certificate %s" % certdir)
if userkey is not None:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts AT ALL.
ldap.set_option(ldap.OPT_X_TLS_KEYFILE, ensure_str(userkey))
log.debug("Using user private key %s" % userkey)
if usercert is not None:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts AT ALL.
ldap.set_option(ldap.OPT_X_TLS_CERTFILE, ensure_str(usercert))
log.debug("Using user certificate %s" % usercert)
if certdir is not None:
"""
We have a certificate directory, so lets start up TLS negotiations
"""
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts AT ALL.
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, ensure_str(certdir))
log.debug("Using external ca certificate %s" % certdir)
if certdir or starttls:
try:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts on reused (ie restart)
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
log.debug("Using certificate policy %s" % reqcert)
log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s" % reqcert)
except ldap.LDAPError as e:
log.fatal('TLS negotiation failed: %s' % str(e))
raise e
## NOW INIT THIS. This MUST be after all the ldap.OPT set above,
# so that we inherit the settings correctly!!!!
if self.verbose:
self.log.info('open(): Connecting to uri %s' % uri)
if hasattr(ldap, 'PYLDAP_VERSION') and MAJOR >= 3:
super(DirSrv, self).__init__(uri, bytes_mode=False, trace_level=TRACE_LEVEL)
else:
super(DirSrv, self).__init__(uri, trace_level=TRACE_LEVEL)
if starttls and not uri.startswith('ldaps'):
self.start_tls_s()
if saslmethod and saslmethod.lower() == 'gssapi':
"""
Perform kerberos/gssapi authentication
"""
try:
sasl_auth = ldap.sasl.gssapi("")
self.sasl_interactive_bind_s("", sasl_auth)
except ldap.LOCAL_ERROR as e:
# No Ticket - ultimately invalid credentials
log.debug("Error: No Ticket (%s)" % str(e))
raise ldap.INVALID_CREDENTIALS
except ldap.LDAPError as e:
log.debug("SASL/GSSAPI Bind Failed: %s" % str(e))
raise e
elif saslmethod == 'EXTERNAL':
            # Bind with SASL EXTERNAL (for example, a client TLS certificate).
sasl_auth = ldap.sasl.external()
self.sasl_interactive_bind_s("", sasl_auth)
elif saslmethod and sasltoken is not None:
# Just pass the sasltoken in!
self.sasl_interactive_bind_s("", sasltoken)
elif saslmethod:
# Unknown or unsupported method
log.debug('Unsupported SASL method: %s' % saslmethod)
raise ldap.UNWILLING_TO_PERFORM
elif self.can_autobind():
# Connect via ldapi, and autobind.
# do nothing: the bind is complete.
if self.verbose:
log.info("open(): Using root autobind ...")
sasl_auth = ldap.sasl.external()
self.sasl_interactive_bind_s("", sasl_auth)
else:
"""
Do a simple bind
"""
try:
self.simple_bind_s(ensure_str(self.binddn), self.bindpw)
except ldap.SERVER_DOWN as e:
# TODO add server info in exception
log.debug("Cannot connect to %r" % uri)
raise e
except ldap.LDAPError as e:
log.debug("Error: Failed to authenticate: %s", str(e))
raise e
"""
Authenticated, now finish the initialization
"""
if self.verbose:
log.info("open(): bound as %s" % self.binddn)
if not connOnly:
self.__initPart2()
self.state = DIRSRV_STATE_ONLINE
def close(self):
'''
It closes connection to dirsrv. Online administrative tasks are no
longer possible.
The state changes from DIRSRV_STATE_ONLINE -> DIRSRV_STATE_OFFLINE
@param self
@return None
@raise ValueError - if the instance has not the right state
'''
# check that DirSrv was in DIRSRV_STATE_ONLINE state
if self.state == DIRSRV_STATE_ONLINE:
# Don't raise an error. Just move the state and return
self.unbind_s()
self.state = DIRSRV_STATE_OFFLINE
def start(self, timeout=120, post_open=True):
'''
It starts an instance and rebind it. Its final state after rebind
(open) is DIRSRV_STATE_ONLINE
@param self
@param timeout (in sec) to wait for successful start
@return None
@raise None
'''
if self.status() is True:
return
if self.with_systemd() and not self.containerised:
# Do systemd things here ...
subprocess.check_call(["/usr/bin/systemctl",
"start",
"dirsrv@%s" % self.serverid])
else:
# Start the process.
# Wait for it to terminate
# This means the server is probably ready to go ....
env = {}
if self.has_asan():
log.error("NOTICE: Starting instance with ASAN options")
log.error("This is probably not what you want. Please contact support.")
log.error("ASAN options will be copied from your environment")
env['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer"
env['ASAN_OPTIONS'] = "symbolize=1 detect_deadlocks=1 log_path=%s/ns-slapd-%s.asan" % (self.ds_paths.run_dir, self.serverid)
env.update(os.environ)
subprocess.check_call(["%s/ns-slapd" % self.get_sbin_dir(),
"-D",
self.ds_paths.config_dir,
"-i",
self.ds_paths.pid_file], env=env)
count = timeout
pid = pid_from_file(self.ds_paths.pid_file)
while (pid is None) and count > 0:
count -= 1
time.sleep(1)
pid = pid_from_file(self.ds_paths.pid_file)
if pid == 0 or pid is None:
raise ValueError
# Wait
while not pid_exists(pid) and count > 0:
# It looks like DS changes the value in here at some point ...
# It's probably a DS bug, but if we "keep checking" the file, eventually
# we get the main server pid, and it's ready to go.
pid = pid_from_file(self.ds_paths.pid_file)
time.sleep(1)
count -= 1
if not pid_exists(pid):
raise Exception("Failed to start DS")
if post_open:
self.open()
def stop(self, timeout=120):
'''
It stops an instance.
It changes the state -> DIRSRV_STATE_OFFLINE
@param self
@param timeout (in sec) to wait for successful stop
@return None
@raise None
'''
if self.status() is False:
return
if self.with_systemd() and not self.containerised:
# Do systemd things here ...
subprocess.check_call(["/usr/bin/systemctl",
"stop",
"dirsrv@%s" % self.serverid])
else:
# TODO: Make the pid path in the files things
# TODO: use the status call instead!!!!
count = timeout
pid = pid_from_file(self.ds_paths.pid_file)
if pid == 0 or pid is None:
raise ValueError
os.kill(pid, signal.SIGTERM)
# Wait
while pid_exists(pid) and count > 0:
time.sleep(1)
count -= 1
if pid_exists(pid):
os.kill(pid, signal.SIGKILL)
self.state = DIRSRV_STATE_OFFLINE
def status(self):
"""
Determine if an instance is running or not.
Will update the self.state parameter.
"""
if self.with_systemd() and not self.containerised:
# Do systemd things here ...
rc = subprocess.call(["/usr/bin/systemctl",
"is-active", "--quiet",
"dirsrv@%s" % self.serverid])
if rc == 0:
return True
# This .... probably will mess something up
# self.state = DIRSRV_STATE_RUNNING
self.state = DIRSRV_STATE_OFFLINE
return False
else:
# TODO: Make the pid path in the files things
# TODO: use the status call instead!!!!
pid = pid_from_file(self.ds_paths.pid_file)
if pid is None:
# No pidfile yet ...
self.state = DIRSRV_STATE_OFFLINE
return False
if pid == 0:
self.state = DIRSRV_STATE_OFFLINE
raise ValueError
# Wait
if not pid_exists(pid):
self.state = DIRSRV_STATE_OFFLINE
return False
return True
def restart(self, timeout=120, post_open=True):
'''
It restarts an instance and rebind it. Its final state after rebind
(open) is DIRSRV_STATE_ONLINE.
@param self
@param timeout (in sec) to wait for successful stop
@return None
@raise None
'''
self.stop(timeout)
time.sleep(1)
self.start(timeout, post_open)
def _infoBackupFS(self):
"""
Return the information to retrieve the backup file of a given
instance
It returns:
- Directory name containing the backup
(e.g. /tmp/slapd-standalone.bck)
- The pattern of the backup files
(e.g. /tmp/slapd-standalone.bck/backup*.tar.gz)
"""
backup_dir = "%s/slapd-%s.bck" % (self.backupdir, self.serverid)
backup_pattern = os.path.join(backup_dir, "backup*.tar.gz")
return backup_dir, backup_pattern
def clearBackupFS(self, backup_file=None):
"""
Remove a backup_file or all backup up of a given instance
@param backup_file - optional
@return None
@raise None
"""
if backup_file:
if os.path.isfile(backup_file):
try:
os.remove(backup_file)
except:
log.info("clearBackupFS: fail to remove %s" % backup_file)
pass
else:
backup_dir, backup_pattern = self._infoBackupFS()
list_backup_files = glob.glob(backup_pattern)
for f in list_backup_files:
try:
os.remove(f)
except:
log.info("clearBackupFS: fail to remove %s" % backup_file)
pass
def checkBackupFS(self):
"""
        If a backup file exists, it is returned,
        else None is returned
@param None
@return file name of the first backup. None if there is no backup
@raise None
"""
backup_dir, backup_pattern = self._infoBackupFS()
list_backup_files = glob.glob(backup_pattern)
if not list_backup_files:
return None
else:
# returns the first found backup
return list_backup_files[0]
def backupFS(self):
"""
Saves the files of an instance under:
/tmp/slapd-<instance_name>.bck/backup_HHMMSS.tar.gz
and return the archive file name.
        If such a file already exists, it is assumed to be a valid backup
        and its name is returned
self.sroot : root of the instance (e.g. /usr/lib64/dirsrv)
self.inst : instance name
(e.g. standalone for /etc/dirsrv/slapd-standalone)
self.confdir : root of the instance config (e.g. /etc/dirsrv)
        self.dbdir: directory where the database is stored
                    (e.g. /var/lib/dirsrv/slapd-standalone/db)
        self.changelogdir: directory where the changelog is stored
                           (e.g. /var/lib/dirsrv/slapd-master/changelogdb)
@param None
@return file name of the backup
@raise none
"""
# First check it if already exists a backup file
backup_dir, backup_pattern = self._infoBackupFS()
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
        # make the backup directory accessible for anybody so that any user
        # can run the tests even if a backup created by somebody else exists
os.chmod(backup_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# Forget this: Just make a new backup!
# backup_file = self.checkBackupFS()
# if backup_file:
# return backup_file
# goes under the directory where the DS is deployed
listFilesToBackup = []
here = os.getcwd()
if self.prefix:
os.chdir("%s/" % self.prefix)
prefix_pattern = "%s/" % self.prefix
else:
os.chdir("/")
prefix_pattern = None
# build the list of directories to scan
# THIS MUST BE FIXED, No guarantee of sroot!
instroot = "%s/slapd-%s" % (self.sroot, self.serverid)
ldir = [instroot]
if hasattr(self, 'confdir'):
ldir.append(self.confdir)
if hasattr(self, 'dbdir'):
ldir.append(self.dbdir)
if hasattr(self, 'changelogdir'):
ldir.append(self.changelogdir)
if hasattr(self, 'errlog'):
ldir.append(os.path.dirname(self.errlog))
if hasattr(self, 'accesslog') and \
os.path.dirname(self.accesslog) not in ldir:
ldir.append(os.path.dirname(self.accesslog))
if hasattr(self, 'auditlog') and os.path.dirname(self.auditlog) \
not in ldir:
ldir.append(os.path.dirname(self.auditlog))
# now scan the directory list to find the files to backup
for dirToBackup in ldir:
for root, dirs, files in os.walk(dirToBackup):
for b_dir in dirs:
name = os.path.join(root, b_dir)
log.debug("backupFS b_dir = %s (%s) [name=%s]" %
(b_dir, self.prefix, name))
if prefix_pattern:
name = re.sub(prefix_pattern, '', name)
if os.path.isdir(name):
listFilesToBackup.append(name)
log.debug("backupFS add = %s (%s)" %
(name, self.prefix))
for file in files:
name = os.path.join(root, file)
if prefix_pattern:
name = re.sub(prefix_pattern, '', name)
if os.path.isfile(name):
listFilesToBackup.append(name)
log.debug("backupFS add = %s (%s)" %
(name, self.prefix))
# create the archive
name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
backup_file = os.path.join(backup_dir, name)
tar = tarfile.open(backup_file, "w:gz")
for name in listFilesToBackup:
tar.add(name)
tar.close()
log.info("backupFS: archive done : %s" % backup_file)
# return to the directory where we were
os.chdir(here)
return backup_file
def restoreFS(self, backup_file):
"""
Restore a directory from a backup file
@param backup_file - file name of the backup
@return None
@raise ValueError - if backup_file invalid file name
"""
# First check the archive exists
if backup_file is None:
log.warning("Unable to restore the instance (missing backup)")
raise ValueError("Unable to restore the instance (missing backup)")
if not os.path.isfile(backup_file):
log.warning("Unable to restore the instance (%s is not a file)" %
backup_file)
raise ValueError("Unable to restore the instance " +
"(%s is not a file)" % backup_file)
#
# Second do some clean up
#
# previous db (it may exists new db files not in the backup)
log.debug("restoreFS: remove subtree %s/*" % os.path.dirname(self.dbdir))
for root, dirs, files in os.walk(os.path.dirname(self.dbdir)):
for d in dirs:
if d not in ("bak", "ldif"):
log.debug("restoreFS: before restore remove directory" +
" %s/%s" % (root, d))
shutil.rmtree("%s/%s" % (root, d))
# previous error/access logs
log.debug("restoreFS: remove error logs %s" % self.errlog)
for f in glob.glob("%s*" % self.errlog):
log.debug("restoreFS: before restore remove file %s" % (f))
os.remove(f)
log.debug("restoreFS: remove access logs %s" % self.accesslog)
for f in glob.glob("%s*" % self.accesslog):
log.debug("restoreFS: before restore remove file %s" % (f))
os.remove(f)
log.debug("restoreFS: remove audit logs %s" % self.accesslog)
for f in glob.glob("%s*" % self.auditlog):
log.debug("restoreFS: before restore remove file %s" % (f))
os.remove(f)
# Then restore from the directory where DS was deployed
here = os.getcwd()
if self.prefix:
prefix_pattern = "%s/" % self.prefix
os.chdir(prefix_pattern)
else:
prefix_pattern = "/"
os.chdir(prefix_pattern)
tar = tarfile.open(backup_file)
for member in tar.getmembers():
if os.path.isfile(member.name):
#
                # restore only writable files
                # (restoring everything might be preferable; this will be
                # easy to enhance later)
if os.access(member.name, os.W_OK):
log.debug("restoreFS: restored %s" % member.name)
tar.extract(member.name)
else:
log.debug("restoreFS: not restored %s (no write access)" %
member.name)
else:
log.debug("restoreFS: restored %s" % member.name)
tar.extract(member.name)
tar.close()
#
# Now be safe, triggers a recovery at restart
#
guardian_file = os.path.join(self.dbdir, "guardian")
if os.path.isfile(guardian_file):
try:
log.debug("restoreFS: remove %s" % guardian_file)
os.remove(guardian_file)
except:
log.warning("restoreFS: fail to remove %s" % guardian_file)
pass
os.chdir(here)
def exists(self):
'''
Check if an instance exists.
It checks that both exists:
- configuration directory (<prefix>/etc/dirsrv/slapd-<servid>)
- environment file (/etc/sysconfig/dirsrv-<serverid> or
$HOME/.dirsrv/dirsrv-<serverid>)
@param None
        @return True or False if the instance exists or not
@raise None
'''
return len(self.list()) == 1
def toLDAPURL(self):
"""Return the uri ldap[s]://host:[ssl]port."""
host = self.host
if self.ldapi_enabled == 'on' and self.ldapi_socket is not None:
return "ldapi://%s" % (ldapurl.ldapUrlEscape(ensure_str(ldapi_socket)))
elif self.ldapuri:
return self.ldapuri
elif self.sslport:
return "ldaps://%s:%d/" % (ensure_str(host), self.sslport)
else:
return "ldap://%s:%d/" % (ensure_str(host), self.port)
def can_autobind(self):
"""Check if autobind/LDAPI is enabled."""
return self.ldapi_enabled == 'on' and self.ldapi_socket is not None and self.ldapi_autobind == 'on'
def getServerId(self):
"""Return the server identifier."""
return self.serverid
def get_ldif_dir(self):
"""Return the server instance ldif directory."""
return self.ds_paths.ldif_dir
def get_bak_dir(self):
"""Return the server instance ldif directory."""
return self.ds_paths.backup_dir
def get_local_state_dir(self):
return self.ds_paths.local_state_dir
def get_config_dir(self):
return self.ds_paths.config_dir
def get_cert_dir(self):
return self.ds_paths.cert_dir
def get_sysconf_dir(self):
return self.ds_paths.sysconf_dir
def get_initconfig_dir(self):
return self.ds_paths.initconfig_dir
def get_sbin_dir(self):
return self.ds_paths.sbin_dir
def get_bin_dir(self):
return self.ds_paths.bin_dir
def get_plugin_dir(self):
return self.ds_paths.plugin_dir
def get_tmp_dir(self):
return self.ds_paths.tmp_dir
def has_asan(self):
return self.ds_paths.asan_enabled
def with_systemd(self):
return self.ds_paths.with_systemd
#
# Get entries
#
def getEntry(self, *args, **kwargs):
"""Wrapper around SimpleLDAPObject.search. It is common to just get
one entry.
@param - entry dn
@param - search scope, in ldap.SCOPE_BASE (default),
ldap.SCOPE_SUB, ldap.SCOPE_ONE
@param filterstr - filterstr, default '(objectClass=*)' from
SimpleLDAPObject
@param attrlist - list of attributes to retrieve. eg ['cn', 'uid']
        @param attrsonly - default None from SimpleLDAPObject
eg. getEntry(dn, scope, filter, attributes)
XXX This cannot return None
"""
if self.verbose:
log.debug("Retrieving entry with %r" % [args])
if len(args) == 1 and 'scope' not in kwargs:
args += (ldap.SCOPE_BASE, )
res = self.search(*args, **kwargs)
restype, obj = self.result(res)
# TODO: why not test restype?
if not obj:
raise NoSuchEntryError("no such entry for %r" % [args])
if self.verbose:
log.info("Retrieved entry %s" % obj)
if isinstance(obj, Entry):
return obj
else: # assume list/tuple
if obj[0] is None:
raise NoSuchEntryError("Entry is None")
return obj[0]
def _test_entry(self, dn, scope=ldap.SCOPE_BASE):
try:
entry = self.getEntry(dn, scope)
log.info("Found entry %s" % entry)
return entry
except NoSuchEntryError:
log.exception("Entry %s was added successfully, but I cannot " +
"search it" % dn)
raise MissingEntryError("Entry %s was added successfully, but " +
"I cannot search it" % dn)
def __wrapmethods(self):
"""This wraps all methods of SimpleLDAPObject, so that we can intercept
the methods that deal with entries. Instead of using a raw list of
tuples of lists of hashes of arrays as the entry object, we want to
wrap entries in an Entry class that provides some useful methods"""
for name in dir(self.__class__.__bases__[0]):
attr = getattr(self, name)
if isinstance(attr, collections.Callable):
setattr(self, name, wrapper(attr, name))
def addLDIF(self, input_file, cont=False):
class LDIFAdder(ldif.LDIFParser):
def __init__(self, input_file, conn, cont=False,
ignored_attr_types=None, max_entries=0,
process_url_schemes=None
):
myfile = input_file
if isinstance(input_file, six.string_types):
myfile = open(input_file, "r")
self.conn = conn
self.cont = cont
ldif.LDIFParser.__init__(self, myfile, ignored_attr_types,
max_entries, process_url_schemes)
self.parse()
if isinstance(input_file, six.string_types):
myfile.close()
def handle(self, dn, entry):
if not dn:
dn = ''
newentry = Entry((dn, entry))
try:
self.conn.add_s(newentry)
except ldap.LDAPError as e:
if not self.cont:
raise e
log.exception("Error: could not add entry %s" % dn)
adder = LDIFAdder(input_file, self, cont)
def getDBStats(self, suffix, bename=''):
if bename:
dn = ','.join(("cn=monitor,cn=%s" % bename, DN_LDBM))
else:
entries_backend = self.backend.list(suffix=suffix)
dn = "cn=monitor," + entries_backend[0].dn
dbmondn = "cn=monitor," + DN_LDBM
dbdbdn = "cn=database,cn=monitor," + DN_LDBM
try:
# entrycache and dncache stats
ent = self.getEntry(dn, ldap.SCOPE_BASE)
monent = self.getEntry(dbmondn, ldap.SCOPE_BASE)
dbdbent = self.getEntry(dbdbdn, ldap.SCOPE_BASE)
ret = "cache available ratio count unitsize\n"
mecs = ent.maxentrycachesize or "0"
cecs = ent.currententrycachesize or "0"
rem = int(mecs) - int(cecs)
ratio = ent.entrycachehitratio or "0"
ratio = int(ratio)
count = ent.currententrycachecount or "0"
count = int(count)
if count:
size = int(cecs) / count
else:
size = 0
ret += "entry % 11d % 3d % 8d % 5d" % (rem, ratio, count, size)
if ent.maxdncachesize:
mdcs = ent.maxdncachesize or "0"
cdcs = ent.currentdncachesize or "0"
rem = int(mdcs) - int(cdcs)
dct = ent.dncachetries or "0"
tries = int(dct)
if tries:
ratio = (100 * int(ent.dncachehits)) / tries
else:
ratio = 0
count = ent.currentdncachecount or "0"
count = int(count)
if count:
size = int(cdcs) / count
else:
size = 0
ret += "\ndn % 11d % 3d % 8d % 5d" % (
rem, ratio, count, size)
if ent.hasAttr('entrycache-hashtables'):
ret += "\n\n" + ent.getValue('entrycache-hashtables')
# global db stats
ret += "\n\nglobal db stats"
            dbattrs = ('dbcachehits dbcachetries dbcachehitratio '
                       'dbcachepagein dbcachepageout dbcacheroevict '
                       'dbcacherwevict').split(' ')
cols = {'dbcachehits': [len('cachehits'), 'cachehits'],
'dbcachetries': [10, 'cachetries'],
'dbcachehitratio': [5, 'ratio'],
'dbcachepagein': [6, 'pagein'],
'dbcachepageout': [7, 'pageout'],
'dbcacheroevict': [7, 'roevict'],
'dbcacherwevict': [7, 'rwevict']}
dbrec = {}
for attr, vals in monent.iterAttrs():
if attr.startswith('dbcache'):
val = vals[0]
dbrec[attr] = val
vallen = len(val)
if vallen > cols[attr][0]:
cols[attr][0] = vallen
# construct the format string based on the field widths
fmtstr = ''
ret += "\n"
for attr in dbattrs:
fmtstr += ' %%(%s)%ds' % (attr, cols[attr][0])
ret += ' %*s' % tuple(cols[attr])
ret += "\n" + (fmtstr % dbrec)
# other db stats
skips = {'nsslapd-db-cache-hit': 'nsslapd-db-cache-hit',
'nsslapd-db-cache-try': 'nsslapd-db-cache-try',
'nsslapd-db-page-write-rate':
'nsslapd-db-page-write-rate',
'nsslapd-db-page-read-rate': 'nsslapd-db-page-read-rate',
'nsslapd-db-page-ro-evict-rate':
'nsslapd-db-page-ro-evict-rate',
'nsslapd-db-page-rw-evict-rate':
'nsslapd-db-page-rw-evict-rate'}
hline = '' # header line
vline = '' # val line
for attr, vals in dbdbent.iterAttrs():
if attr in skips:
continue
if attr.startswith('nsslapd-db-'):
short = attr.replace('nsslapd-db-', '')
val = vals[0]
width = max(len(short), len(val))
if len(hline) + width > 70:
ret += "\n" + hline + "\n" + vline
hline = vline = ''
hline += ' %*s' % (width, short)
vline += ' %*s' % (width, val)
# per file db stats
ret += "\n\nper file stats"
# key is number
# data is dict - key is attr name without the number -
# val is the attr val
dbrec = {}
dbattrs = ['dbfilename', 'dbfilecachehit',
'dbfilecachemiss', 'dbfilepagein', 'dbfilepageout']
# cols maps dbattr name to column header and width
cols = {'dbfilename': [len('dbfilename'), 'dbfilename'],
'dbfilecachehit': [9, 'cachehits'],
'dbfilecachemiss': [11, 'cachemisses'],
'dbfilepagein': [6, 'pagein'],
'dbfilepageout': [7, 'pageout']}
for attr, vals in ent.iterAttrs():
match = RE_DBMONATTR.match(attr)
if match:
name = match.group(1)
num = match.group(2)
val = vals[0]
if name == 'dbfilename':
val = val.split('/')[-1]
dbrec.setdefault(num, {})[name] = val
vallen = len(val)
if vallen > cols[name][0]:
cols[name][0] = vallen
match = RE_DBMONATTRSUN.match(attr)
if match:
name = match.group(1)
if name == 'entrycache':
continue
num = match.group(2)
val = vals[0]
if name == 'dbfilename':
val = val.split('/')[-1]
dbrec.setdefault(num, {})[name] = val
vallen = len(val)
if vallen > cols[name][0]:
cols[name][0] = vallen
# construct the format string based on the field widths
fmtstr = ''
ret += "\n"
for attr in dbattrs:
fmtstr += ' %%(%s)%ds' % (attr, cols[attr][0])
ret += ' %*s' % tuple(cols[attr])
for dbf in six.itervalues(dbrec):
ret += "\n" + (fmtstr % dbf)
return ret
except Exception as e:
print ("caught exception", str(e))
return ''
def waitForEntry(self, dn, timeout=7200, attr='', quiet=True):
scope = ldap.SCOPE_BASE
filt = "(objectclass=*)"
attrlist = []
if attr:
filt = "(%s=*)" % attr
attrlist.append(attr)
timeout += int(time.time())
if isinstance(dn, Entry):
dn = dn.dn
# wait for entry and/or attr to show up
if not quiet:
sys.stdout.write("Waiting for %s %s:%s " % (self, dn, attr))
sys.stdout.flush()
entry = None
while not entry and int(time.time()) < timeout:
try:
entry = self.getEntry(dn, scope, filt, attrlist)
except NoSuchEntryError:
pass # found entry, but no attr
except ldap.NO_SUCH_OBJECT:
pass # no entry yet
except ldap.LDAPError as e: # badness
print("\nError reading entry", dn, e)
break
if not entry:
if not quiet:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
if not entry and int(time.time()) > timeout:
print("\nwaitForEntry timeout for %s for %s" % (self, dn))
elif entry:
if not quiet:
print("\nThe waited for entry is:", entry)
else:
print("\nError: could not read entry %s from %s" % (dn, self))
return entry
def setupChainingIntermediate(self):
confdn = ','.join(("cn=config", DN_CHAIN))
try:
self.modify_s(confdn, [(ldap.MOD_ADD, 'nsTransmittedControl',
['2.16.840.1.113730.3.4.12',
'1.3.6.1.4.1.1466.29539.12'])])
except ldap.TYPE_OR_VALUE_EXISTS:
log.error("chaining backend config already has the required ctrls")
def setupChainingMux(self, suffix, isIntermediate, binddn, bindpw, urls):
self.addSuffix(suffix, binddn, bindpw, urls)
if isIntermediate:
self.setupChainingIntermediate()
def setupChainingFarm(self, suffix, binddn, bindpw):
# step 1 - create the bind dn to use as the proxy
self.setupBindDN(binddn, bindpw)
self.addSuffix(suffix) # step 2 - create the suffix
# step 3 - add the proxy ACI to the suffix
try:
acival = ("(targetattr = \"*\")(version 3.0; acl \"Proxied " +
"authorization for database links\"; allow (proxy) " +
"userdn = \"ldap:///%s\";)" % binddn)
self.modify_s(suffix, [(ldap.MOD_ADD, 'aci', [acival])])
except ldap.TYPE_OR_VALUE_EXISTS:
log.error("proxy aci already exists in suffix %s for %s" % (
suffix, binddn))
def setupChaining(self, to, suffix, isIntermediate):
"""Setup chaining from self to to - self is the mux, to is the farm
if isIntermediate is set, this server will chain requests from another
server to to
"""
bindcn = "chaining user"
binddn = "cn=%s,cn=config" % bindcn
bindpw = "chaining"
to.setupChainingFarm(suffix, binddn, bindpw)
self.setupChainingMux(
suffix, isIntermediate, binddn, bindpw, to.toLDAPURL())
def enableChainOnUpdate(self, suffix, bename):
# first, get the mapping tree entry to modify
mtent = self.mappingtree.list(suffix=suffix)
dn = mtent.dn
# next, get the path of the replication plugin
e_plugin = self.getEntry(
"cn=Multimaster Replication Plugin,cn=plugins,cn=config",
attrlist=['nsslapd-pluginPath'])
path = e_plugin.getValue('nsslapd-pluginPath')
mod = [(ldap.MOD_REPLACE, MT_PROPNAME_TO_ATTRNAME[MT_STATE],
MT_STATE_VAL_BACKEND),
(ldap.MOD_ADD, MT_PROPNAME_TO_ATTRNAME[MT_BACKEND], bename),
(ldap.MOD_ADD, MT_PROPNAME_TO_ATTRNAME[MT_CHAIN_PATH], path),
(ldap.MOD_ADD, MT_PROPNAME_TO_ATTRNAME[MT_CHAIN_FCT],
MT_CHAIN_UPDATE_VAL_ON_UPDATE)]
try:
self.modify_s(dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
print("chainOnUpdate already enabled for %s" % suffix)
def setupConsumerChainOnUpdate(self, suffix, isIntermediate, binddn,
bindpw, urls, beargs=None):
beargs = beargs or {}
# suffix should already exist
# we need to create a chaining backend
if 'nsCheckLocalACI' not in beargs:
beargs['nsCheckLocalACI'] = 'on' # enable local db aci eval.
chainbe = self.setupBackend(suffix, binddn, bindpw, urls, beargs)
# do the stuff for intermediate chains
if isIntermediate:
self.setupChainingIntermediate()
# enable the chain on update
return self.enableChainOnUpdate(suffix, chainbe)
def setupBindDN(self, binddn, bindpw, attrs=None):
""" Return - eventually creating - a person entry with the given dn
and pwd.
binddn can be a lib389.Entry
"""
try:
assert binddn
if isinstance(binddn, Entry):
assert binddn.dn
binddn = binddn.dn
except AssertionError:
raise AssertionError("Error: entry dn should be set!" % binddn)
ent = Entry(binddn)
ent.setValues('objectclass', "top", "person")
ent.setValues('userpassword', bindpw)
ent.setValues('sn', "bind dn pseudo user")
ent.setValues('cn', "bind dn pseudo user")
# support for uid
attribute, value = binddn.split(",")[0].split("=", 1)
if attribute == 'uid':
ent.setValues('objectclass', "top", "person", 'inetOrgPerson')
ent.setValues('uid', value)
if attrs:
ent.update(attrs)
try:
self.add_s(ent)
except ldap.ALREADY_EXISTS:
log.warn("Entry %s already exists" % binddn)
try:
entry = self._test_entry(binddn, ldap.SCOPE_BASE)
return entry
except MissingEntryError:
log.exception("This entry should exist!")
raise
def setupWinSyncAgmt(self, args, entry):
if 'winsync' not in args:
return
suffix = args['suffix']
entry.setValues("objectclass", "nsDSWindowsReplicationAgreement")
entry.setValues("nsds7WindowsReplicaSubtree",
args.get("win_subtree",
"cn=users," + suffix))
entry.setValues("nsds7DirectoryReplicaSubtree",
args.get("ds_subtree",
"ou=People," + suffix))
entry.setValues(
"nsds7NewWinUserSyncEnabled", args.get('newwinusers', 'true'))
entry.setValues(
"nsds7NewWinGroupSyncEnabled", args.get('newwingroups', 'true'))
windomain = ''
if 'windomain' in args:
windomain = args['windomain']
else:
windomain = '.'.join(ldap.explode_dn(suffix, 1))
entry.setValues("nsds7WindowsDomain", windomain)
if 'interval' in args:
entry.setValues("winSyncInterval", args['interval'])
if 'onewaysync' in args:
if args['onewaysync'].lower() == 'fromwindows' or \
args['onewaysync'].lower() == 'towindows':
entry.setValues("oneWaySync", args['onewaysync'])
else:
raise Exception("Error: invalid value %s for oneWaySync: " +
"must be fromWindows or toWindows"
% args['onewaysync'])
# args - DirSrv consumer (repoth), suffix, binddn, bindpw, timeout
# also need an auto_init argument
def createAgreement(self, consumer, args, cn_format=r'meTo_%s:%s',
description_format=r'me to %s:%s'):
"""Create (and return) a replication agreement from self to consumer.
- self is the supplier,
- consumer is a DirSrv object (consumer can be a master)
- cn_format - use this string to format the agreement name
consumer:
* a DirSrv object if chaining
* an object with attributes: host, port, sslport, __str__
args = {
'suffix': "dc=example,dc=com",
'binddn': "cn=replrepl,cn=config",
'bindpw': "replrepl",
'bindmethod': 'simple',
            'log' : True,
'timeout': 120
}
self.suffixes is of the form {
'o=suffix1': 'ldaps://consumer.example.com:636',
'o=suffix2': 'ldap://consumer.example.net:3890'
}
"""
suffix = args['suffix']
if not suffix:
# This is a mandatory parameter of the command... it fails
log.warning("createAgreement: suffix is missing")
return None
# get the RA binddn
binddn = args.get('binddn')
if not binddn:
binddn = defaultProperties.get(REPLICATION_BIND_DN, None)
if not binddn:
                # weird, internal error: we could not retrieve the default
                # replication bind DN. This replica agreement will fail
                # to update the consumer until the property is set
log.warning("createAgreement: binddn not provided and " +
"default value unavailable")
pass
# get the RA binddn password
bindpw = args.get('bindpw')
if not bindpw:
bindpw = defaultProperties.get(REPLICATION_BIND_PW, None)
if not bindpw:
                # weird, internal error: we could not retrieve the default
                # replication bind DN password. This replica agreement
                # will fail to update the consumer until the property is set
log.warning("createAgreement: bindpw not provided and " +
"default value unavailable")
pass
# get the RA bind method
bindmethod = args.get('bindmethod')
if not bindmethod:
bindmethod = defaultProperties.get(REPLICATION_BIND_METHOD, None)
if not bindmethod:
                # weird, internal error: we could not retrieve the default
                # replication bind method. This replica agreement will
                # fail to update the consumer until the property is set
log.warning("createAgreement: bindmethod not provided and " +
"default value unavailable")
pass
nsuffix = normalizeDN(suffix)
othhost, othport, othsslport = (
consumer.host, consumer.port, consumer.sslport)
othport = othsslport or othport
# adding agreement to previously created replica
# eventually setting self.suffixes dict.
if nsuffix not in self.suffixes:
replica_entries = self.replica.list(suffix)
if not replica_entries:
raise NoSuchEntryError(
"Error: no replica set up for suffix " + suffix)
replent = replica_entries[0]
self.suffixes[nsuffix] = {
'dn': replent.dn,
'type': int(replent.nsds5replicatype)
}
# define agreement entry
cn = cn_format % (othhost, othport)
dn_agreement = "cn=%s,%s" % (cn, self.suffixes[nsuffix]['dn'])
try:
entry = self.getEntry(dn_agreement, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
entry = None
if entry:
log.warn("Agreement exists:", dn_agreement)
self.suffixes.setdefault(nsuffix, {})[str(consumer)] = dn_agreement
return dn_agreement
if (nsuffix in self.agmt) and (consumer in self.agmt[nsuffix]):
log.warn("Agreement exists:", dn_agreement)
return dn_agreement
# In a separate function in this scope?
entry = Entry(dn_agreement)
entry.update({
'objectclass': ["top", "nsds5replicationagreement"],
'cn': cn,
'nsds5replicahost': othhost,
'nsds5replicatimeout': str(args.get('timeout', 120)),
'nsds5replicabinddn': binddn,
'nsds5replicacredentials': bindpw,
'nsds5replicabindmethod': bindmethod,
'nsds5replicaroot': nsuffix,
'nsds5replicaupdateschedule': '0000-2359 0123456',
'description': description_format % (othhost, othport)
})
if 'starttls' in args:
entry.setValues('nsds5replicatransportinfo', 'TLS')
entry.setValues('nsds5replicaport', str(othport))
elif othsslport:
entry.setValues('nsds5replicatransportinfo', 'SSL')
entry.setValues('nsds5replicaport', str(othsslport))
else:
entry.setValues('nsds5replicatransportinfo', 'LDAP')
entry.setValues('nsds5replicaport', str(othport))
if 'fractional' in args:
entry.setValues('nsDS5ReplicatedAttributeList', args['fractional'])
if 'auto_init' in args:
entry.setValues('nsds5BeginReplicaRefresh', 'start')
if 'fractional' in args:
entry.setValues('nsDS5ReplicatedAttributeList', args['fractional'])
if 'stripattrs' in args:
entry.setValues('nsds5ReplicaStripAttrs', args['stripattrs'])
if 'winsync' in args: # state it clearly!
self.setupWinSyncAgmt(args, entry)
try:
log.debug("Adding replica agreement: [%s]" % entry)
self.add_s(entry)
except:
# TODO check please!
raise
entry = self.waitForEntry(dn_agreement)
if entry:
self.suffixes.setdefault(nsuffix, {})[str(consumer)] = dn_agreement
# More verbose but shows what's going on
if 'chain' in args:
chain_args = {
'suffix': suffix,
'binddn': binddn,
'bindpw': bindpw
}
# Work on `self` aka producer
if self.suffixes[nsuffix]['type'] == MASTER_TYPE:
self.setupChainingFarm(**chain_args)
# Work on `consumer`
# TODO - is it really required?
if consumer.suffixes[nsuffix]['type'] == LEAF_TYPE:
chain_args.update({
'isIntermediate': 0,
'urls': self.toLDAPURL(),
'args': args['chainargs']
})
consumer.setupConsumerChainOnUpdate(**chain_args)
elif consumer.suffixes[nsuffix]['type'] == HUB_TYPE:
chain_args.update({
'isIntermediate': 1,
'urls': self.toLDAPURL(),
'args': args['chainargs']
})
consumer.setupConsumerChainOnUpdate(**chain_args)
self.agmt.setdefault(nsuffix, {})[consumer] = dn_agreement
return dn_agreement
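    # A minimal usage sketch for createAgreement (illustrative only: it
    # assumes `supplier` and `consumer` are already-opened DirSrv instances
    # with a replica configured for the suffix, and the DN/password shown
    # are placeholders):
    #
    #   agmt_dn = supplier.createAgreement(consumer, {
    #       'suffix': "dc=example,dc=com",
    #       'binddn': "cn=replrepl,cn=config",
    #       'bindpw': "replrepl",
    #       'bindmethod': 'simple',
    #       'timeout': 120,
    #   })
    #   supplier.startReplication(agmt_dn)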
# moved to Replica
def setupReplica(self, args):
"""Deprecated, use replica.add
"""
return self.replica.add(**args)
def startReplication_async(self, agmtdn):
return self.replica.start_async(agmtdn)
def checkReplInit(self, agmtdn):
return self.replica.check_init(agmtdn)
def waitForReplInit(self, agmtdn):
return self.replica.wait_init(agmtdn)
def startReplication(self, agmtdn):
return self.replica.start_and_wait(agmtdn)
def testReplication(self, suffix, *replicas):
'''
Make a "dummy" update on the the replicated suffix, and check
all the provided replicas to see if they received the update.
@param suffix - the replicated suffix we want to check
@param *replicas - DirSrv instance, DirSrv instance, ...
@return True of False if all servers are replicating correctly
@raise None
'''
test_value = ('test replication from ' + self.serverid + ' to ' +
replicas[0].serverid + ': ' + str(int(time.time())))
self.modify_s(suffix, [(ldap.MOD_REPLACE, 'description', test_value)])
for replica in replicas:
loop = 0
replicated = False
while loop <= 30:
try:
entry = replica.getEntry(suffix, ldap.SCOPE_BASE,
"(objectclass=*)")
if entry.hasValue('description', test_value):
replicated = True
break
except ldap.LDAPError as e:
log.fatal('testReplication() failed to modify (%s), error (%s)' % (suffix, str(e)))
return False
loop += 1
time.sleep(2)
if not replicated:
log.fatal('Replication is not in sync with replica server (%s)'
% replica.serverid)
return False
return True
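    # Hedged example of a replication health check (assumes `master`,
    # `consumer1` and `consumer2` are connected DirSrv instances and the
    # suffix is already replicated between them):
    #
    #   if not master.testReplication("dc=example,dc=com", consumer1, consumer2):
    #       log.error("replication out of sync")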
def replicaSetupAll(self, repArgs):
"""setup everything needed to enable replication for a given suffix.
1- eventually create the suffix
2- enable replication logging
3- create changelog
4- create replica user
repArgs is a dict with the following fields:
{
suffix - suffix to set up for replication (eventually create)
optional fields and their default values
bename - name of backend corresponding to suffix, otherwise
                    it will use the *first* backend found (isn't that
                    dangerous?)
parent - parent suffix if suffix is a sub-suffix - default is
undef
ro - put database in read only mode - default is read write
type - replica type (MASTER_TYPE, HUB_TYPE, LEAF_TYPE) -
default is master
legacy - make this replica a legacy consumer - default is no
binddn - bind DN of the replication manager user - default is
REPLBINDDN
bindpw - bind password of the repl manager - default is
REPLBINDPW
log - if true, replication logging is turned on - default false
id - the replica ID - default is an auto incremented number
}
TODO: passing the repArgs as an object or as a **repArgs could be
            a better documentation choice
eg. replicaSetupAll(self, suffix, type=MASTER_TYPE,
log=False, ...)
"""
repArgs.setdefault('type', MASTER_TYPE)
user = repArgs.get('binddn'), repArgs.get('bindpw')
# eventually create the suffix (Eg. o=userRoot)
# TODO should I check the addSuffix output as it doesn't raise
self.addSuffix(repArgs['suffix'])
if 'bename' not in repArgs:
entries_backend = self.backend.list(suffix=repArgs['suffix'])
# just use first one
repArgs['bename'] = entries_backend[0].cn
if repArgs.get('log', False):
self.enableReplLogging()
# enable changelog for master and hub
if repArgs['type'] != LEAF_TYPE:
self.replica.changelog()
# create replica user without timeout and expiration issues
try:
attrs = list(user)
attrs.append({
'nsIdleTimeout': '0',
'passwordExpirationTime': '20381010000000Z'
})
self.setupBindDN(*attrs)
except ldap.ALREADY_EXISTS:
log.warn("User already exists: %r " % user)
# setup replica
# map old style args to new style replica args
if repArgs['type'] == MASTER_TYPE:
repArgs['role'] = ReplicaRole.MASTER
elif repArgs['type'] == LEAF_TYPE:
repArgs['role'] = ReplicaRole.CONSUMER
else:
repArgs['role'] = ReplicaRole.HUB
repArgs['rid'] = repArgs['id']
# remove invalid arguments from replica.add
for invalid_arg in 'type id bename log'.split():
if invalid_arg in repArgs:
del repArgs[invalid_arg]
ret = self.replica.add(**repArgs)
if 'legacy' in repArgs:
self.setupLegacyConsumer(*user)
return ret
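    # Sketch of a typical replicaSetupAll() call (values are illustrative;
    # MASTER_TYPE is the module-level replica type constant):
    #
    #   inst.replicaSetupAll({
    #       'suffix': "dc=example,dc=com",
    #       'type': MASTER_TYPE,
    #       'binddn': "cn=replrepl,cn=config",
    #       'bindpw': "replrepl",
    #       'log': True,
    #       'id': 1,
    #   })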
def subtreePwdPolicy(self, basedn, pwdpolicy, **pwdargs):
args = {'basedn': basedn, 'escdn': escapeDNValue(
normalizeDN(basedn))}
condn = "cn=nsPwPolicyContainer,%(basedn)s" % args
poldn = ("cn=cn\\=nsPwPolicyEntry\\,%(escdn)s,cn=nsPwPolicyContainer" +
",%(basedn)s" % args)
temdn = ("cn=cn\\=nsPwTemplateEntry\\,%(escdn)s,cn=nsPwPolicyContain" +
"er,%(basedn)s" % args)
cosdn = "cn=nsPwPolicy_cos,%(basedn)s" % args
conent = Entry(condn)
conent.setValues('objectclass', 'nsContainer')
polent = Entry(poldn)
polent.setValues('objectclass', ['ldapsubentry', 'passwordpolicy'])
tement = Entry(temdn)
tement.setValues('objectclass', ['extensibleObject',
'costemplate', 'ldapsubentry'])
tement.setValues('cosPriority', '1')
tement.setValues('pwdpolicysubentry', poldn)
cosent = Entry(cosdn)
cosent.setValues('objectclass', ['ldapsubentry',
'cosSuperDefinition', 'cosPointerDefinition'])
cosent.setValues('cosTemplateDn', temdn)
cosent.setValues(
'cosAttribute', 'pwdpolicysubentry default operational-default')
for ent in (conent, polent, tement, cosent):
try:
self.add_s(ent)
if self.verbose:
print("created subtree pwpolicy entry", ent.dn)
except ldap.ALREADY_EXISTS:
print("subtree pwpolicy entry", ent.dn,
"already exists - skipping")
self.setPwdPolicy({'nsslapd-pwpolicy-local': 'on'})
self.setDNPwdPolicy(poldn, pwdpolicy, **pwdargs)
def userPwdPolicy(self, user, pwdpolicy, **pwdargs):
ary = ldap.explode_dn(user)
par = ','.join(ary[1:])
escuser = escapeDNValue(normalizeDN(user))
args = {'par': par, 'udn': user, 'escudn': escuser}
condn = "cn=nsPwPolicyContainer,%(par)s" % args
poldn = ("cn=cn\\=nsPwPolicyEntry\\,%(escudn)s,cn=nsPwPolicyCont" +
"ainer,%(par)s" % args)
conent = Entry(condn)
conent.setValues('objectclass', 'nsContainer')
polent = Entry(poldn)
polent.setValues('objectclass', ['ldapsubentry', 'passwordpolicy'])
for ent in (conent, polent):
try:
self.add_s(ent)
if self.verbose:
print("created user pwpolicy entry", ent.dn)
except ldap.ALREADY_EXISTS:
print("user pwpolicy entry", ent.dn,
"already exists - skipping")
mod = [(ldap.MOD_REPLACE, 'pwdpolicysubentry', poldn)]
self.modify_s(user, mod)
self.setPwdPolicy({'nsslapd-pwpolicy-local': 'on'})
self.setDNPwdPolicy(poldn, pwdpolicy, **pwdargs)
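    # Illustrative password-policy calls (the policy attributes are standard
    # 389-ds password policy settings; the DNs are placeholders):
    #
    #   inst.subtreePwdPolicy("ou=people,dc=example,dc=com",
    #                         {'passwordMustChange': 'on'})
    #   inst.userPwdPolicy("uid=jdoe,ou=people,dc=example,dc=com",
    #                      {'passwordMinLength': '12'})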
def setPwdPolicy(self, pwdpolicy, **pwdargs):
self.setDNPwdPolicy(DN_CONFIG, pwdpolicy, **pwdargs)
def setDNPwdPolicy(self, dn, pwdpolicy, **pwdargs):
"""input is dict of attr/vals"""
mods = []
for (attr, val) in six.iteritems(pwdpolicy):
mods.append((ldap.MOD_REPLACE, attr, str(val)))
if pwdargs:
for (attr, val) in six.iteritems(pwdargs):
mods.append((ldap.MOD_REPLACE, attr, str(val)))
self.modify_s(dn, mods)
# Moved to config
# replaced by loglevel
def enableReplLogging(self):
"""Enable logging of replication stuff (1<<13)"""
val = LOG_REPLICA
return self.config.loglevel([val])
def disableReplLogging(self):
return self.config.loglevel()
def setLogLevel(self, *vals):
"""Set nsslapd-errorlog-level and return its value."""
return self.config.loglevel(vals)
def setAccessLogLevel(self, *vals):
"""Set nsslapd-accesslog-level and return its value."""
return self.config.loglevel(vals, service='access')
def setAccessLogBuffering(self, state):
"""Set nsslapd-accesslog-logbuffering - state is True or False"""
return self.config.logbuffering(state)
def configSSL(self, secport=636, secargs=None):
"""Configure SSL support into cn=encryption,cn=config.
secargs is a dict like {
'nsSSLPersonalitySSL': 'Server-Cert'
}
XXX moved to brooker.Config
"""
return self.config.enable_ssl(secport, secargs)
def getDir(self, filename, dirtype):
"""
@param filename - the name of the test script calling this function
@param dirtype - Either DATA_DIR and TMP_DIR are the allowed values
@return - absolute path of the dirsrvtests data directory, or 'None'
on error
Return the shared data/tmp directory relative to the ticket filename.
The caller should always use "__file__" as the argument to this
function.
Get the script name from the filename that was provided:
'ds/dirsrvtests/tickets/ticket_#####_test.py' -->
'ticket_#####_test.py'
Get the full path to the filename, and convert it to the data directory
'ds/dirsrvtests/tickets/ticket_#####_test.py' -->
'ds/dirsrvtests/data/'
        'ds/dirsrvtests/suites/dynamic-plugins/dynamic-plugins_test.py' -->
'/ds/dirsrvtests/data/'
"""
dir_path = None
if os.path.exists(filename):
filename = os.path.abspath(filename)
if '/suites/' in filename:
idx = filename.find('/suites/')
elif '/tickets/' in filename:
idx = filename.find('/tickets/')
elif '/stress/' in filename:
idx = filename.find('/stress/')
else:
# Unknown script location
return None
if dirtype == TMP_DIR:
dir_path = filename[:idx] + '/tmp/'
elif dirtype == DATA_DIR:
dir_path = filename[:idx] + '/data/'
else:
raise ValueError("Invalid directory type (%s), acceptable" +
" values are DATA_DIR and TMP_DIR" % dirtype)
return dir_path
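    # Usage sketch: from within a test script, resolve the shared data
    # directory relative to the calling file (DATA_DIR is the module
    # constant accepted by getDir):
    #
    #   data_dir = inst.getDir(__file__, DATA_DIR)
    #   ldif_path = os.path.join(data_dir, 'example.ldif')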
def clearTmpDir(self, filename):
"""
@param filename - the name of the test script calling this function
@return - nothing
Clear the contents of the "tmp" dir, but leave the README file in
place.
"""
if os.path.exists(filename):
filename = os.path.abspath(filename)
if '/suites/' in filename:
idx = filename.find('/suites/')
elif '/tickets/' in filename:
idx = filename.find('/tickets/')
else:
# Unknown script location
return
dir_path = filename[:idx] + '/tmp/'
if dir_path:
filelist = [tmpfile for tmpfile in os.listdir(dir_path)
if tmpfile != 'README']
for tmpfile in filelist:
tmpfile = os.path.abspath(dir_path + tmpfile)
if os.path.isdir(tmpfile):
# Remove directory and all of its contents
shutil.rmtree(tmpfile)
else:
os.remove(tmpfile)
return
log.fatal('Failed to clear tmp directory (%s)' % filename)
def upgrade(self, upgradeMode):
"""
@param upgradeMode - the upgrade is either "online" or "offline"
"""
if upgradeMode == 'online':
online = True
else:
online = False
DirSrvTools.runUpgrade(self.prefix, online)
#
# The following are the functions to perform offline scripts(when the
# server is stopped)
#
def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt,
import_file):
"""
@param bename - The backend name of the database to import
@param suffixes - List/tuple of suffixes to import
@param excludeSuffixes - List/tuple of suffixes to exclude from import
@param encrypt - Perform attribute encryption
@param input_file - File to import: file
@return - True if import succeeded
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd')
if self.status():
log.error("ldif2db: Can not operate while directory server is running")
return False
if not bename and not suffixes:
log.error("ldif2db: backend name or suffix missing")
return False
if not os.path.isfile(import_file):
log.error("ldif2db: Can't find file: %s" % import_file)
return False
cmd = [
prog,
'ldif2db',
'-D', self.get_config_dir(),
'-i', import_file,
]
if bename:
cmd.append('-n')
cmd.append(bename)
if suffixes:
for suffix in suffixes:
cmd.append('-s')
cmd.append(suffix)
if excludeSuffixes:
for excludeSuffix in excludeSuffixes:
cmd.append('-x')
cmd.append(excludeSuffix)
if encrypt:
cmd.append('-E')
result = subprocess.check_output(cmd)
u_result = ensure_str(result)
log.debug("ldif2db output: BEGIN")
for line in u_result.split("\n"):
log.debug(line)
log.debug("ldif2db output: END")
return True
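    # Offline import sketch (the server must be stopped first; the backend
    # name and LDIF path are placeholders):
    #
    #   inst.stop()
    #   inst.ldif2db('userRoot', None, None, None, '/tmp/example.ldif')
    #   inst.start()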
def db2ldif(self, bename, suffixes, excludeSuffixes, encrypt, repl_data,
outputfile):
"""
@param bename - The backend name of the database to export
@param suffixes - List/tuple of suffixes to export
@param excludeSuffixes - List/tuple of suffixes to exclude from export
@param encrypt - Perform attribute encryption
@param repl_data - Export the replication data
@param outputfile - The filename for the exported LDIF
@return - True if export succeeded
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd')
if self.status():
log.error("db2ldif: Can not operate while directory server is running")
return False
if not bename and not suffixes:
log.error("db2ldif: backend name or suffix missing")
return False
cmd = [
prog,
'db2ldif',
'-D', self.get_config_dir()
]
if bename:
cmd.append('-n')
cmd.append(bename)
if suffixes:
for suffix in suffixes:
cmd.append('-s')
cmd.append(suffix)
if excludeSuffixes:
for excludeSuffix in excludeSuffixes:
cmd.append('-x')
cmd.append(excludeSuffix)
if encrypt:
cmd.append('-E')
if repl_data:
cmd.append('-r')
if outputfile:
cmd.append('-a')
cmd.append(outputfile)
result = subprocess.check_output(cmd)
u_result = ensure_str(result)
log.debug("db2ldif output: BEGIN")
for line in u_result.split("\n"):
log.debug(line)
log.debug("db2ldif output: END")
return True
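    # Offline export sketch, the mirror image of ldif2db (again the server
    # must be stopped; paths are placeholders):
    #
    #   inst.stop()
    #   inst.db2ldif('userRoot', None, None, None, False, '/tmp/export.ldif')
    #   inst.start()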
def bak2db(self, archive_dir, bename=None):
"""
@param archive_dir - The directory containing the backup
@param bename - The backend name to restore
@return - True if the restore succeeded
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd')
if self.status():
log.error("bak2db: Can not operate while directory server is running")
return False
if not archive_dir:
log.error("bak2db: backup directory missing")
return False
result = subprocess.check_output([
prog,
'archive2db',
'-a', archive_dir,
'-D', self.get_config_dir()
])
u_result = ensure_str(result)
log.debug("bak2db output: BEGIN")
for line in u_result.split("\n"):
log.debug(line)
log.debug("bak2db output: END")
return True
def db2bak(self, archive_dir):
"""
@param archive_dir - The directory to write the backup to
@return - True if the backup succeeded
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd')
if self.status():
log.error("db2bak: Can not operate while directory server is running")
return False
if not archive_dir:
log.error("db2bak: archive directory missing")
return False
result = subprocess.check_output([
prog,
'db2archive',
'-a', archive_dir,
'-D', self.get_config_dir()
])
u_result = ensure_str(result)
log.debug("db2bak output: BEGIN")
for line in u_result.split("\n"):
log.debug(line)
log.debug("db2bak output: END")
return True
def db2index(self, bename=None, suffixes=None, attrs=None, vlvTag=None):
"""
@param bename - The backend name to reindex
@param suffixes - List/tuple of suffixes to reindex
@param attrs - List/tuple of the attributes to index
@param vlvTag - The VLV index name to index
@return - True if reindexing succeeded
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.sbin_dir, DB2INDEX)
if not bename and not suffixes:
log.error("db2index: missing required backend name or suffix")
return False
cmd = '%s -Z %s' % (prog, self.serverid)
if bename:
cmd = cmd + ' -n %s' % bename
if suffixes:
for suffix in suffixes:
cmd = cmd + ' -s %s' % suffix
if attrs:
for attr in attrs:
cmd = cmd + ' -t %s' % attr
if vlvTag:
cmd = cmd + ' -T %s' % vlvTag
self.stop(timeout=10)
log.info('Running script: %s' % cmd)
result = True
try:
os.system(cmd)
except:
log.error("db2index: error executing %s" % cmd)
result = False
self.start(timeout=10)
return result
def dbscan(self, bename=None, index=None, key=None, width=None, isRaw=False):
"""
@param bename - The backend name to scan
@param index - index name (e.g., cn or cn.db) to scan
        @param key - index key to dump (an entry id when scanning id2entry)
@param width - entry truncate size (bytes)
@param isRaw - dump as raw data
@return - dumped string
"""
DirSrvTools.lib389User(user=DEFAULT_USER)
prog = os.path.join(self.ds_paths.bin_dir, DBSCAN)
if not bename:
log.error("dbscan: missing required backend name")
return False
if not index:
log.error("dbscan: missing required index name")
return False
elif '.db' in index:
indexfile = os.path.join(self.dbdir, bename, index)
else:
indexfile = os.path.join(self.dbdir, bename, index + '.db')
option = ''
if 'id2entry' in index:
if key and key.isdigit():
option = ' -K %s' % key
else:
if key:
option = ' -k %s' % key
if width:
option = option + ' -t %d' % width
if isRaw:
option = option + ' -R'
cmd = '%s -f %s' % (prog, indexfile)
if len(option) > 0:
cmd = cmd + option
self.stop(timeout=10)
log.info('Running script: %s' % cmd)
proc = Popen(cmd.split(), stdout=PIPE)
outs = ''
try:
outs = proc.communicate()
except OSError as e:
log.exception('dbscan: error executing (%s): error %d - %s' %
(cmd, e.errno, e.strerror))
raise e
self.start(timeout=10)
log.info('Output from ' + cmd)
log.info(outs)
return outs
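    # dbscan usage sketch (dumps the `cn` index of a hypothetical `userRoot`
    # backend; the method stops and restarts the server itself):
    #
    #   output = inst.dbscan(bename='userRoot', index='cn', key='jdoe')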
def searchAccessLog(self, pattern):
"""
Search all the access logs
"""
return DirSrvTools.searchFile(self.accesslog + "*", pattern)
def searchAuditLog(self, pattern):
"""
Search all the audit logs
"""
time.sleep(1)
return DirSrvTools.searchFile(self.auditlog + "*", pattern)
def searchErrorsLog(self, pattern):
"""
Search all the error logs
"""
time.sleep(1)
return DirSrvTools.searchFile(self.errlog + "*", pattern)
def detectDisorderlyShutdown(self):
"""
Search the current errors log for a disorderly shutdown message
"""
time.sleep(1)
return DirSrvTools.searchFile(self.errlog, DISORDERLY_SHUTDOWN)
def deleteLog(self, logtype, restart=True):
"""
Delete all the logs for this log type.
"""
if restart:
self.stop()
for log in glob.glob(logtype + "*"):
if os.path.isfile(log):
os.remove(log)
if restart:
self.start()
def deleteAccessLogs(self, restart=True):
"""
Delete all the access logs.
"""
self.deleteLog(self.accesslog, restart)
def deleteAuditLogs(self, restart=True):
"""
Delete all the audit logs.
"""
self.deleteLog(self.auditlog, restart)
def deleteErrorLogs(self, restart=True):
"""
Delete all the error logs.
"""
self.deleteLog(self.errlog, restart)
def deleteAllLogs(self, restart=True):
"""
Delete all the logs.
"""
self.stop()
self.deleteAccessLogs(restart=False)
self.deleteErrorLogs(restart=False)
self.deleteAuditLogs(restart=False)
self.start()
def get_effective_rights(self, sourcedn, base=DEFAULT_SUFFIX,
scope=ldap.SCOPE_SUBTREE, *args, **kwargs):
"""
Conduct a search on effective rights for some object (sourcedn)
against a filter.
For arguments to this function, please see LDAPObject.search_s.
For example:
@param sourcedn - DN of entry to check
@param base - Base DN of the suffix to check
@param scope - search scope
@param args -
@param kwargs -
@return - ldap result
LDAPObject.search_s(base, scope[, filterstr='(objectClass=*)'
[, attrlist=None[, attrsonly=0]]]) -> list|None
        The sourcedn is the object that is having its rights checked against
all objects matched by filterstr
If sourcedn is '', anonymous is checked.
If you set targetattrs to "*" you will see ALL possible attributes for
all possible objectclasses on the object.
If you set targetattrs to "+" you will see operation attributes only.
If you set targetattrs to "*@objectclass" you will only see the
attributes from that class.
You will want to look at entryLevelRights and attributeLevelRights in
the result.
entryLevelRights:
* a - add
* d - delete
* n - rename
* v - view
attributeLevelRights
* r - read
* s - search
* w - write to the attribute (add / replace)
* o - obliterate (Delete the attribute)
* c - Compare the attributes directory side
* W - self write the attribute
* O - self obliterate
"""
# Is there a better way to do this check?
if not (MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7)):
raise Exception("UNSUPPORTED EXTENDED OPERATION ON THIS VERSION " +
"OF PYTHON")
ldap_result = None
# This may not be thread safe. Is there a better way to do this?
try:
gerc = GetEffectiveRightsControl(True, authzId='dn:' +
sourcedn.encode('UTF-8'))
sctrl = [gerc]
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
# ldap_result = self.search_s(base, scope, *args, **kwargs)
res = self.search(base, scope, *args, **kwargs)
restype, ldap_result = self.result(res)
finally:
self.set_option(ldap.OPT_SERVER_CONTROLS, [])
return ldap_result
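    # Effective-rights sketch (what may uid=jdoe do on person entries under
    # the default suffix; the DN and filter are placeholders):
    #
    #   result = inst.get_effective_rights(
    #       "uid=jdoe,ou=people,dc=example,dc=com",
    #       base=DEFAULT_SUFFIX,
    #       filterstr="(objectClass=person)",
    #       attrlist=["*"])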
# Is there a better name for this function?
def dereference(self, deref, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE,
*args, **kwargs):
"""
Perform a search which dereferences values from attributes such as
member or unique member.
For arguments to this function, please see LDAPObject.search_s. For
example:
@param deref - Dereference query
@param base - Base DN of the suffix to check
@param scope - search scope
@param args -
@param kwargs -
@return - ldap result
LDAPObject.search_s(base, scope[, filterstr='(objectClass=*)'
[, attrlist=None[, attrsonly=0]]]) -> list|None
A deref query is of the format:
"<attribute to derference>:<deref attr1>,<deref attr2>..."
"uniqueMember:dn,objectClass"
This will return the dn's and objectClasses of the dereferenced members
of the group.
"""
if not (MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7)):
raise Exception("UNSUPPORTED EXTENDED OPERATION ON THIS VERSION " +
" OF PYTHON")
# This may not be thread safe. Is there a better way to do this?
try:
drc = DereferenceControl(True, deref=deref.encode('UTF-8'))
sctrl = [drc]
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
# ldap_result = self.search_s(base, scope, *args, **kwargs)
res = self.search(base, scope, *args, **kwargs)
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, _, _ = \
self.result4(res, add_ctrls=1,
resp_ctrl_classes={CONTROL_DEREF:
DereferenceControl})
finally:
self.set_option(ldap.OPT_SERVER_CONTROLS, [])
return resp_data, decoded_resp_ctrls
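    # Dereference sketch (returns the dn and objectClass of every
    # uniqueMember of the matched group; the group DN is a placeholder):
    #
    #   entries, ctrls = inst.dereference(
    #       "uniqueMember:dn,objectClass",
    #       base="ou=groups,dc=example,dc=com",
    #       filterstr="(cn=admins)")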
def buildLDIF(self, num, ldif_file, suffix='dc=example,dc=com', pyinstall=False):
"""Generate a simple ldif file using the dbgen.pl script, and set the
ownership and permissions to match the user that the server runs as.
@param num - number of entries to create
@param ldif_file - ldif file name(including the path)
@suffix - DN of the parent entry in the ldif file
@return - nothing
@raise - OSError
"""
if (not self.ds_paths.perl_enabled or pyinstall):
raise Exception("Perl tools disabled on this system. Try dbgen py module.")
else:
try:
os.system('%s -s %s -n %d -o %s' % (os.path.join(self.ds_paths.bin_dir, 'dbgen.pl'), suffix, num, ldif_file))
os.chmod(ldif_file, 0o644)
if os.getuid() == 0:
# root user - chown the ldif to the server user
uid = pwd.getpwnam(self.userid).pw_uid
gid = grp.getgrnam(self.userid).gr_gid
os.chown(ldif_file, uid, gid)
except OSError as e:
log.exception('Failed to create ldif file (%s): error %d - %s' %
(ldif_file, e.errno, e.strerror))
raise e
def getConsumerMaxCSN(self, replica_entry):
"""
Attempt to get the consumer's maxcsn from its database
"""
host = replica_entry.getValue(AGMT_HOST)
port = replica_entry.getValue(AGMT_PORT)
suffix = replica_entry.getValue(REPL_ROOT)
error_msg = "Unavailable"
# Open a connection to the consumer
consumer = DirSrv(verbose=self.verbose)
args_instance[SER_HOST] = host
args_instance[SER_PORT] = int(port)
args_instance[SER_ROOT_DN] = self.binddn
args_instance[SER_ROOT_PW] = self.bindpw
args_standalone = args_instance.copy()
consumer.allocate(args_standalone)
try:
consumer.open()
except ldap.LDAPError as e:
self.log.debug('Connection to consumer (%s:%s) failed, error: %s' %
(host, port, str(e)))
return error_msg
# Get the replica id from supplier to compare to the consumer's rid
try:
replica_entries = self.replica.list(suffix)
if not replica_entries:
# Error
consumer.close()
return None
rid = replica_entries[0].getValue(REPL_ID)
except:
# Error
consumer.close()
return None
# Search for the tombstone RUV entry
try:
entry = consumer.search_s(suffix, ldap.SCOPE_SUBTREE,
REPLICA_RUV_FILTER, ['nsds50ruv'])
consumer.close()
if not entry:
# Error out?
return error_msg
elements = entry[0].getValues('nsds50ruv')
for ruv in elements:
if ('replica %s ' % rid) in ruv:
ruv_parts = ruv.split()
if len(ruv_parts) == 5:
return ruv_parts[4]
else:
return error_msg
return error_msg
except:
# Search failed, but return 0
consumer.close()
return error_msg
def getReplAgmtStatus(self, agmt_entry):
'''
Return the status message, if consumer is not in synch raise an
exception
'''
agmt_maxcsn = None
suffix = agmt_entry.getValue(REPL_ROOT)
agmt_name = agmt_entry.getValue('cn')
status = "Unknown"
rc = -1
try:
entry = self.search_s(suffix, ldap.SCOPE_SUBTREE,
REPLICA_RUV_FILTER, [AGMT_MAXCSN])
except:
return status
'''
There could be many agmts maxcsn attributes, find ours
agmtMaxcsn: <suffix>;<agmt name>;<host>;<port>;<consumer rid>;<maxcsn>
        dc=example,dc=com;test_agmt;localhost;389;4;56536858000100010000
or if the consumer is not reachable:
dc=example,dc=com;test_agmt;localhost;389;unavailable
'''
maxcsns = entry[0].getValues(AGMT_MAXCSN)
for csn in maxcsns:
comps = csn.split(';')
if agmt_name == comps[1]:
# same replica, get maxcsn
if len(comps) < 6:
return "Consumer unavailable"
else:
agmt_maxcsn = comps[5]
if agmt_maxcsn:
con_maxcsn = self.getConsumerMaxCSN(agmt_entry)
if con_maxcsn:
if agmt_maxcsn == con_maxcsn:
status = "In Synchronization"
rc = 0
else:
                    # Not in sync - attempt to discover the cause
repl_msg = "Unknown"
if agmt_entry.getValue(AGMT_UPDATE_IN_PROGRESS) == 'TRUE':
# Replication is on going - this is normal
repl_msg = "Replication still in progress"
elif "Can't Contact LDAP" in \
agmt_entry.getValue(AGMT_UPDATE_STATUS):
# Consumer is down
repl_msg = "Consumer can not be contacted"
status = ("Not in Synchronization: supplier " +
"(%s) consumer (%s) Reason(%s)" %
(agmt_maxcsn, con_maxcsn, repl_msg))
if rc != 0:
raise ValueError(status)
return status
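    # Agreement status sketch (the agreement entries would normally come
    # from a cn=config search; getReplAgmtStatus raises ValueError when the
    # consumer is out of sync):
    #
    #   agmts = inst.search_s("cn=config", ldap.SCOPE_SUBTREE,
    #                         "(objectclass=nsds5replicationagreement)")
    #   for agmt in agmts:
    #       print(inst.getReplAgmtStatus(agmt))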
# This could be made to delete by filter ....
def delete_branch_s(self, basedn, scope):
ents = self.search_s(basedn, scope)
for ent in ents:
self.log.debug("Delete entry children %s" % (ent.dn))
self.delete_s(ent.dn)
| Ilias95/lib389 | lib389/__init__.py | Python | gpl-3.0 | 123,763 | 0.000525 |
#!/usr/bin/env python
#coding:utf8
class Solution(object):
def permuteUnique(self, nums):
length = len(nums)
if length == 0:
return [[]]
rlists = [[nums[0]]]
for i in range(1, length):
tlists = []
for L in rlists:
v = nums[i]
for j in range(i + 1):
lcopy = L[::]
lcopy.insert(j, v)
tlists.append(lcopy)
rlists = tlists
d = {}
for L in rlists:
d[tuple(L)] = True
return map(lambda x: list(x), d.keys())
def main():
nums = [1, 1, 2]
s = Solution()
rlists = s.permuteUnique(nums)
for L in rlists:
print L
if __name__ == '__main__':
main()
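# The solution above builds permutations incrementally: each new element is
# inserted at every possible position of every partial permutation, and the
# final dict keyed by tuple(permutation) drops duplicates caused by repeated
# values.  A short trace for nums = [1, 1, 2]:
#   after the first 1:   [[1]]
#   after the second 1:  [[1, 1], [1, 1]]          (duplicates kept for now)
#   after the 2:         six lists such as [2, 1, 1], [1, 2, 1], [1, 1, 2], ...
#   dedup via the dict:  [2, 1, 1], [1, 2, 1], [1, 1, 2]
# Note the file targets Python 2 (`print L`, map() returning a list).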
| matrix65537/lab | leetcode/permutations/permutation2.py | Python | mit | 785 | 0.003822 |
#!/usr/bin/env python3
"""
A Module containing the classes which generate schema files from JSON.
"""
import json
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
class SchemaGenerator(object):
def __init__(self, jsontext, header=None):
self.data = json.loads(jsontext)
self.header = header
self.macroMap = {}
self.macroMapIndex = {}
if self.data['oidMacros']:
self.__mapMacros()
def __mapMacros(self):
if not self.data['oidMacros']:
return
macros = self.data['oidMacros']
# Find the root
for mac in macros:
if '.' in macros[mac]:
self.macroMap[mac] = macros[mac]
break
if not self.macroMap:
return
while len(macros) != len(self.macroMap):
for mac in macros:
if ':' not in macros[mac]:
continue
oid = macros[mac]
parent, index = oid.split(':')
if parent in self.macroMap:
self.macroMap[mac] = self.macroMap[parent] + '.' + index
self.macroMapIndex[mac] = 1
def __compare_defs(self, m1, m2):
n1 = int(m1[1].split(':')[1])
n2 = int(m2[1].split(':')[1])
return n1 - n2
def __get_macro_order(self, macros, parent):
children = [(k, v) for k, v in list(macros.items()) if parent in v]
items = [parent]
for k, v in sorted(children, key=cmp_to_key(self.__compare_defs)):
items.extend(self.__get_macro_order(macros, k))
return items
def generate_schema(self):
"""Function that generates the schema and returns it as a string"""
self.outString = ''
self.outString += self.header if self.header else ""
if len(self.outString):
self.outString += "\n"
if len(self.data['oidMacros']) > 0:
macros = self.data['oidMacros']
root = ''
for definition in macros:
if '.' in macros[definition]:
root = definition
break
order = self.__get_macro_order(macros, root)
for oid in order:
self.outString += "objectIdentifier {:15} {}\n".format(
oid, macros[oid])
self.outString += '\n'
for attr in self.data['attributeTypes']:
attr_str = "attributetype ( {} NAME ".format(attr['oid'])
if len(attr['names']) > 1:
namestring = ''
for name in attr['names']:
namestring += "'{}' ".format(name)
attr_str += "( {})".format(namestring)
elif len(attr['names']) == 1:
attr_str += "'{}'".format(attr['names'][0])
else:
print("Invalid attribute data. Doesn't define a name")
if 'desc' in attr:
attr_str += "\n\tDESC '{}'".format(attr['desc'])
if 'equality' in attr:
attr_str += "\n\tEQUALITY {}".format(attr['equality'])
if 'substr' in attr:
attr_str += "\n\tSUBSTR {}".format(attr['substr'])
if 'syntax' in attr:
attr_str += "\n\tSYNTAX {}".format(attr['syntax'])
if 'ordering' in attr:
attr_str += "\n\tORDERING {}".format(attr['ordering'])
if 'x_origin' in attr:
attr_str += "\n\tX-ORIGIN '{}'".format(attr['x_origin'])
attr_str += " )\n\n"
self.outString += attr_str
for obc in self.data['objectClasses']:
obc_str = "objectclass ( {} NAME ".format(obc['oid'])
if len(obc['names']) > 1:
namestring = ''
for name in obc['names']:
namestring += "'{}' ".format(name)
obc_str += "( {})".format(namestring)
elif len(obc['names']) == 1:
obc_str += "'{}'".format(obc['names'][0])
else:
print("Invalid objectclass data. Doesn't define a name")
if 'desc' in obc:
obc_str += "\n\tDESC '{}'".format(obc['desc'])
if 'sup' in obc:
sup = " $ ".join(obc['sup'])
obc_str += "\n\tSUP ( {} )".format(sup)
obc_str += "\n\t{}".format(obc['kind'])
if 'must' in obc:
must = " $ ".join(obc['must'])
obc_str += "\n\tMUST ( {} )".format(must)
if 'may' in obc:
may = " $ ".join(obc['may'])
obc_str += "\n\tMAY ( {} )".format(may)
if 'x_origin' in obc:
obc_str += "\n\tX-ORIGIN '{}'".format(obc['x_origin'])
obc_str += " )\n\n"
self.outString += obc_str
return self.outString.strip()
def _getOID(self, model):
oid = model['oid']
if oid.replace('.','').isdigit():
return oid
oid = self.macroMap[oid] + '.' + str(self.macroMapIndex[oid])
self.macroMapIndex[model['oid']] += 1
return oid
def generate_ldif(self):
"""Function which generates the OpenDJ LDIF format schema string."""
self.outString = ''
self.outString += self.header if self.header else ""
if len(self.outString):
self.outString += "\n"
self.outString += "dn: cn=schema\nobjectClass: top\nobjectClass: " \
+ "ldapSubentry\nobjectClass: subschema\ncn: schema\n"
for attr in self.data['attributeTypes']:
attr_str = "attributeTypes: ( {} NAME ".format(self._getOID(attr))
if len(attr['names']) > 1:
namestring = ''
for name in attr['names']:
namestring += "'{}' ".format(name)
attr_str += "( {})".format(namestring)
elif len(attr['names']) == 1:
attr_str += "'{}'".format(attr['names'][0])
else:
print("Invalid attribute data. Doesn't define a name")
if 'desc' in attr:
attr_str += "\n DESC '{}'".format(attr['desc'])
if 'equality' in attr:
attr_str += "\n EQUALITY {}".format(attr['equality'])
if 'substr' in attr:
attr_str += "\n SUBSTR {}".format(attr['substr'])
if 'syntax' in attr:
attr_str += "\n SYNTAX {}".format(attr['syntax'])
if 'ordering' in attr:
attr_str += "\n ORDERING {}".format(attr['ordering'])
if 'x_origin' in attr:
attr_str += "\n X-ORIGIN '{}'".format(attr['x_origin'])
attr_str += " )\n"
self.outString += attr_str
for obc in self.data['objectClasses']:
obc_str = "objectClasses: ( {} NAME ".format(self._getOID(obc))
if len(obc['names']) > 1:
namestring = ''
for name in obc['names']:
namestring += "'{}' ".format(name)
obc_str += "( {})".format(namestring)
elif len(obc['names']) == 1:
obc_str += "'{}'".format(obc['names'][0])
else:
print("Invalid objectclass data. Doesn't define a name")
if 'desc' in obc:
obc_str += "\n DESC '{}'".format(obc['desc'])
if 'sup' in obc:
sup = " $ ".join(obc['sup'])
obc_str += "\n SUP ( {} )".format(sup)
obc_str += "\n {}".format(obc['kind'])
if 'must' in obc:
must = " $ ".join(obc['must'])
obc_str += "\n MUST ( {} )".format(must)
if 'may' in obc:
may = " $ ".join(obc['may'])
obc_str += "\n MAY ( {} )".format(may)
if 'x_origin' in obc:
obc_str += "\n X-ORIGIN '{}'".format(obc['x_origin'])
obc_str += " )\n"
self.outString += obc_str
# Remove excess spaces and a new line at the end of the file
self.outString = self.outString.strip() + '\n\n'
return self.outString
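# A minimal usage sketch (the JSON layout mirrors what the class reads:
# top-level "oidMacros", "attributeTypes" and "objectClasses" keys; the OIDs,
# macro names and attribute names below are purely illustrative):
#
#   jsontext = json.dumps({
#       "oidMacros": {"exampleRoot": "1.3.6.1.4.1.99999",
#                     "exampleAttrs": "exampleRoot:3",
#                     "exampleClasses": "exampleRoot:4"},
#       "attributeTypes": [{"oid": "exampleAttrs", "names": ["exampleAttr"],
#                           "desc": "demo attribute",
#                           "equality": "caseIgnoreMatch",
#                           "syntax": "1.3.6.1.4.1.1466.115.121.1.15"}],
#       "objectClasses": [{"oid": "exampleClasses", "names": ["exampleClass"],
#                          "kind": "AUXILIARY", "may": ["exampleAttr"]}]
#   })
#   gen = SchemaGenerator(jsontext)
#   print(gen.generate_schema())   # OpenLDAP-style schema text
#   print(gen.generate_ldif())     # OpenDJ cn=schema LDIF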
| GluuFederation/community-edition-setup | schema/generator.py | Python | mit | 8,810 | 0.000908 |
import itertools
import logging
import time
from _sha256 import sha256
from collections import defaultdict, OrderedDict, deque
from functools import partial
from typing import Tuple, List, Set, Optional, Dict, Iterable, Callable
from orderedset._orderedset import OrderedSet
from sortedcontainers import SortedList
from common.exceptions import PlenumValueError, LogicError
from common.serializers.serialization import state_roots_serializer, invalid_index_serializer, serialize_msg_for_signing
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.constants import POOL_LEDGER_ID, SEQ_NO_DB_LABEL, AUDIT_LEDGER_ID, TXN_TYPE, \
LAST_SENT_PP_STORE_LABEL, AUDIT_TXN_PP_SEQ_NO, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PRIMARIES, AUDIT_TXN_DIGEST, \
PREPREPARE, PREPARE, COMMIT, DOMAIN_LEDGER_ID, TS_LABEL, AUDIT_TXN_NODE_REG, CONFIG_LEDGER_ID
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.exceptions import SuspiciousNode, InvalidClientMessageException, SuspiciousPrePrepare, \
UnknownIdentifier
from plenum.common.ledger import Ledger
from plenum.common.messages.internal_messages import RequestPropagates, BackupSetupLastOrdered, \
RaisedSuspicion, ViewChangeStarted, NewViewCheckpointsApplied, MissingMessage, CheckpointStabilized, \
ReAppliedInNewView, NewViewAccepted, CatchupCheckpointsApplied, MasterReorderedAfterVC
from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit, Reject, ThreePhaseKey, Ordered, \
OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.metrics_collector import MetricsName, MetricsCollector, NullMetricsCollector
from plenum.common.request import Request
from plenum.common.router import Subscription
from plenum.common.stashing_router import PROCESS
from plenum.common.timer import TimerService, RepeatingTimer
from plenum.common.txn_util import get_payload_digest, get_payload_data, get_seq_no, get_txn_time
from plenum.common.types import f
from plenum.common.util import compare_3PC_keys, updateNamedTuple, SortedDict, getMaxFailures, mostCommonElement, \
get_utc_epoch, max_3PC_key, reasonForClientFromException
from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.batch_id import BatchID
from plenum.server.consensus.metrics_decorator import measure_consensus_time
from plenum.server.consensus.ordering_service_msg_validator import OrderingServiceMsgValidator
from plenum.server.consensus.primary_selector import PrimariesSelector
from plenum.server.consensus.utils import replica_name_to_node_name, get_original_viewno, preprepare_to_batch_id
from plenum.server.replica_helper import PP_APPLY_REJECT_WRONG, PP_APPLY_WRONG_DIGEST, PP_APPLY_WRONG_STATE, \
PP_APPLY_ROOT_HASH_MISMATCH, PP_APPLY_HOOK_ERROR, PP_SUB_SEQ_NO_WRONG, PP_NOT_FINAL, PP_APPLY_AUDIT_HASH_MISMATCH, \
PP_REQUEST_ALREADY_ORDERED, PP_CHECK_NOT_FROM_PRIMARY, PP_CHECK_TO_PRIMARY, PP_CHECK_DUPLICATE, \
PP_CHECK_INCORRECT_POOL_STATE_ROOT, PP_CHECK_OLD, PP_CHECK_REQUEST_NOT_FINALIZED, PP_CHECK_NOT_NEXT, \
PP_CHECK_WRONG_TIME, Stats, OrderedTracker, TPCStat, generateName, PP_WRONG_PRIMARIES
from plenum.server.replica_freshness_checker import FreshnessChecker
from plenum.server.replica_helper import replica_batch_digest
from plenum.server.replica_validator_enums import STASH_VIEW_3PC, STASH_CATCH_UP, STASH_WAITING_FIRST_BATCH_IN_VIEW
from plenum.server.request_handlers.ledgers_freeze.ledger_freeze_helper import StaticLedgersFreezeHelper
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
logger = getlogger()
class OrderingService:
def __init__(self,
data: ConsensusSharedData,
timer: TimerService,
bus: InternalBus,
network: ExternalBus,
write_manager: WriteRequestManager,
bls_bft_replica: BlsBftReplica,
freshness_checker: FreshnessChecker,
stasher=None,
get_current_time: Optional[Callable[[], float]] = None,
get_time_for_3pc_batch: Optional[Callable[[], int]] = None,
metrics: MetricsCollector = NullMetricsCollector()):
self.metrics = metrics
self._data = data
self._requests = self._data.requests
self._timer = timer
self._bus = bus
self._network = network
self._write_manager = write_manager
self._name = self._data.name
# TODO: We shouldn't use get_utc_epoch here, time needs to be under full control through TimerService
self.get_time_for_3pc_batch = get_time_for_3pc_batch or get_utc_epoch
        # Flag set by the node when it has chosen new primaries and needs to send a batch
self.primaries_batch_needed = False
self._config = getConfig()
# TODO: Change just to self._stasher = stasher
self._stasher = stasher
self._subscription = Subscription()
self._validator = OrderingServiceMsgValidator(self._data)
self.get_current_time = get_current_time or self._timer.get_current_time
self._out_of_order_repeater = RepeatingTimer(self._timer,
self._config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL,
self._process_stashed_out_of_order_commits,
active=False)
"""
Maps from legacy replica code
"""
self._state_root_serializer = state_roots_serializer
# Keeps a map of PRE-PREPAREs which did not satisfy timestamp
# criteria, they can be accepted if >f PREPAREs are encountered.
# This is emptied on view change. With each PRE-PREPARE, a flag is
# stored which indicates whether there are sufficient acceptable
# PREPAREs or not
self.pre_prepares_stashed_for_incorrect_time = {}
# Time of the last PRE-PREPARE which satisfied all validation rules
# (time, digest, roots were all correct). This time is not to be
# reverted even if the PRE-PREPAREs are not ordered. This implies that
# the next primary would have seen all accepted PRE-PREPAREs or another
# view change will happen
self.last_accepted_pre_prepare_time = None
# PRE-PREPAREs timestamps stored by non primary replica to check
# obsolescence of incoming PrePrepares. Pre-prepares with the same
# 3PC key are not merged since we need to keep incoming timestamps
# for each new PP from every nodes separately.
# Dictionary:
# key: Tuple[pp.viewNo, pp.seqNo]
# value: Dict[Tuple[PrePrepare, sender], timestamp]
self.pre_prepare_tss = defaultdict(dict)
# PRE-PREPAREs that are waiting to be processed but do not have the
# corresponding request finalised. Happens when replica has not been
# forwarded the request by the node but is getting 3 phase messages.
# The value is a list since a malicious entry might send PRE-PREPARE
# with a different digest and since we dont have the request finalised
        # yet, we store all PRE-PREPAREs
# type: List[Tuple[PrePrepare, str, Set[Tuple[str, int]]]]
self.prePreparesPendingFinReqs = []
# PrePrepares waiting for previous PrePrepares, key being tuple of view
# number and pre-prepare sequence numbers and value being tuple of
# PrePrepare and sender
# TODO: Since pp_seq_no will start from 1 in each view, the comparator
# of SortedDict needs to change
self.prePreparesPendingPrevPP = SortedDict(lambda k: (k[0], k[1]))
# PREPAREs that are stored by non primary replica for which it has not
# got any PRE-PREPARE. Dictionary that stores a tuple of view no and
# prepare sequence number as key and a deque of PREPAREs as value.
# This deque is attempted to be flushed on receiving every
# PRE-PREPARE request.
self.preparesWaitingForPrePrepare = {}
# Defines if there was a batch after last catchup
self.first_batch_after_catchup = False
self._lastPrePrepareSeqNo = self._data.low_watermark # type: int
# COMMITs that are stored for which there are no PRE-PREPARE or PREPARE
# received
self.commitsWaitingForPrepare = {}
# type: Dict[Tuple[int, int], deque]
# Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
# element tuple with elements viewNo, pre-prepare seqNo and value
# is the received PRE-PREPARE
self.prePrepares = SortedDict(lambda k: (k[0], k[1]))
# type: Dict[Tuple[int, int], PrePrepare]
        # Dictionary to keep track of which replica was primary during each
# view. Key is the view no and value is the name of the primary
# replica during that view
self.primary_names = OrderedDict() # type: OrderedDict[int, str]
# Did we log a message about getting request while absence of primary
self.warned_no_primary = False
self.requestQueues = {} # type: Dict[int, OrderedSet]
self.stats = Stats(TPCStat)
self.batches = OrderedDict() # type: OrderedDict[Tuple[int, int]]
self.l_bls_bft_replica = bls_bft_replica
# Set of tuples to keep track of ordered requests. Each tuple is
# (viewNo, ppSeqNo).
self.ordered = OrderedTracker()
self.lastBatchCreated = self.get_current_time()
# Commits which are not being ordered since commits with lower
# sequence numbers have not been ordered yet. Key is the
# viewNo and value a map of pre-prepare sequence number to commit
# type: Dict[int,Dict[int,Commit]]
self.stashed_out_of_order_commits = {}
self._freshness_checker = freshness_checker
self._skip_send_3pc_ts = None
self._subscription.subscribe(self._stasher, PrePrepare, self.process_preprepare)
self._subscription.subscribe(self._stasher, Prepare, self.process_prepare)
self._subscription.subscribe(self._stasher, Commit, self.process_commit)
self._subscription.subscribe(self._stasher, NewViewCheckpointsApplied, self.process_new_view_checkpoints_applied)
self._subscription.subscribe(self._stasher, OldViewPrePrepareRequest, self.process_old_view_preprepare_request)
self._subscription.subscribe(self._stasher, OldViewPrePrepareReply, self.process_old_view_preprepare_reply)
self._subscription.subscribe(self._bus, ViewChangeStarted, self.process_view_change_started)
self._subscription.subscribe(self._bus, CheckpointStabilized, self.process_checkpoint_stabilized)
self._subscription.subscribe(self._bus, NewViewAccepted, self.process_new_view_accepted)
self._subscription.subscribe(self._bus, CatchupCheckpointsApplied, self.process_catchup_checkpoints_applied)
# Dict to keep PrePrepares from old view to be re-ordered in the new view
# key is (viewNo, ppSeqNo, ppDigest) tuple, and value is PrePrepare
self.old_view_preprepares = {}
# TODO: find a better place for calling this setter
if self.is_master:
self._write_manager.node_reg_handler.set_internal_bus(self._bus)
def cleanup(self):
self._subscription.unsubscribe_all()
def __repr__(self):
return self.name
@measure_consensus_time(MetricsName.PROCESS_PREPARE_TIME,
MetricsName.BACKUP_PROCESS_PREPARE_TIME)
def process_prepare(self, prepare: Prepare, sender: str):
"""
Validate and process the PREPARE specified.
If validation is successful, create a COMMIT and broadcast it.
:param prepare: a PREPARE msg
:param sender: name of the node that sent the PREPARE
"""
result, reason = self._validate(prepare)
if result != PROCESS:
return result, reason
key = (prepare.viewNo, prepare.ppSeqNo)
logger.debug("{} received PREPARE{} from {}".format(self, key, sender))
# TODO move this try/except up higher
try:
if self._validate_prepare(prepare, sender):
self._add_to_prepares(prepare, sender)
self.stats.inc(TPCStat.PrepareRcvd)
logger.debug("{} processed incoming PREPARE {}".format(
self, (prepare.viewNo, prepare.ppSeqNo)))
else:
# TODO let's have isValidPrepare throw an exception that gets
# handled and possibly logged higher
logger.trace("{} cannot process incoming PREPARE".format(self))
except SuspiciousNode as ex:
self.report_suspicious_node(ex)
return None, None
def _validate_prepare(self, prepare: Prepare, sender: str) -> bool:
"""
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
"""
key = (prepare.viewNo, prepare.ppSeqNo)
primaryStatus = self._is_primary_for_msg(prepare)
ppReq = self.get_preprepare(*key)
# If a non primary replica and receiving a PREPARE request before a
# PRE-PREPARE request, then proceed
# PREPARE should not be sent from primary
if self._is_msg_from_primary(prepare, sender):
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare))
return False
# If non primary replica
if primaryStatus is False:
if self.prepares.hasPrepareFrom(prepare, sender):
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare))
return False
# If PRE-PREPARE not received for the PREPARE, might be slow
# network
if not ppReq:
self._enqueue_prepare(prepare, sender)
self.l_setup_last_ordered_for_non_master()
return False
# If primary replica
if primaryStatus is True:
if self.prepares.hasPrepareFrom(prepare, sender):
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare))
return False
# If PRE-PREPARE was not sent for this PREPARE, certainly
# malicious behavior unless this is re-ordering after view change where a new Primary may not have a
# PrePrepare from old view
elif not ppReq:
if prepare.ppSeqNo <= self._data.prev_view_prepare_cert:
self._enqueue_prepare(prepare, sender)
else:
self.report_suspicious_node(SuspiciousNode(
sender, Suspicions.UNKNOWN_PR_SENT, prepare))
return False
if primaryStatus is None and not ppReq:
self._enqueue_prepare(prepare, sender)
self.l_setup_last_ordered_for_non_master()
return False
if prepare.digest != ppReq.digest:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare))
return False
elif prepare.stateRootHash != ppReq.stateRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
prepare))
return False
elif prepare.txnRootHash != ppReq.txnRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
prepare))
return False
elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
prepare))
return False
# BLS multi-sig:
self.l_bls_bft_replica.validate_prepare(prepare, sender)
return True
"""Method from legacy code"""
def _enqueue_prepare(self, pMsg: Prepare, sender: str):
key = (pMsg.viewNo, pMsg.ppSeqNo)
logger.debug("{} queueing prepare due to unavailability of PRE-PREPARE. "
"Prepare {} for key {} from {}".format(self, pMsg, key, sender))
if key not in self.preparesWaitingForPrePrepare:
self.preparesWaitingForPrePrepare[key] = deque()
self.preparesWaitingForPrePrepare[key].append((pMsg, sender))
if key not in self.pre_prepares_stashed_for_incorrect_time:
if self.is_master or self.last_ordered_3pc[1] != 0:
self._request_pre_prepare_for_prepare(key)
else:
self._process_stashed_pre_prepare_for_time_if_possible(key)
def _process_stashed_pre_prepare_for_time_if_possible(
self, key: Tuple[int, int]):
"""
Check if any PRE-PREPAREs that were stashed since their time was not
acceptable, can now be accepted since enough PREPAREs are received
"""
logger.debug('{} going to process stashed PRE-PREPAREs with '
'incorrect times'.format(self))
q = self._data.quorums.f
if len(self.preparesWaitingForPrePrepare[key]) > q:
times = [pr.ppTime for (pr, _) in
self.preparesWaitingForPrePrepare[key]]
most_common_time, freq = mostCommonElement(times)
if self._data.quorums.timestamp.is_reached(freq):
logger.debug('{} found sufficient PREPAREs for the '
'PRE-PREPARE{}'.format(self, key))
stashed_pp = self.pre_prepares_stashed_for_incorrect_time
pp, sender, done = stashed_pp[key]
if done:
logger.debug('{} already processed PRE-PREPARE{}'.format(self, key))
return True
# True is set since that will indicate to `is_pre_prepare_time_acceptable`
# that sufficient PREPAREs are received
stashed_pp[key] = (pp, sender, True)
self._network.process_incoming(pp, sender)
return True
return False
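    # Illustration of the rule above (the numbers are an example only; the
    # real thresholds come from self._data.quorums): with N = 4 nodes and
    # f = 1, a PRE-PREPARE stashed for an unacceptable timestamp is
    # re-processed once more than f PREPAREs are waiting and at least
    # f + 1 = 2 of them carry the same ppTime, since at least one of those
    # must come from an honest node that accepted the time.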
def _request_pre_prepare_for_prepare(self, three_pc_key) -> bool:
"""
Check if has an acceptable PRE_PREPARE already stashed, if not then
check count of PREPAREs, make sure >f consistent PREPAREs are found,
store the acceptable PREPARE state (digest, roots) for verification of
the received PRE-PREPARE
"""
if three_pc_key in self.prePreparesPendingPrevPP:
logger.debug('{} not requesting a PRE-PREPARE since already found '
'stashed for {}'.format(self, three_pc_key))
return False
if len(
self.preparesWaitingForPrePrepare[three_pc_key]) < self._data.quorums.prepare.value:
logger.debug(
'{} not requesting a PRE-PREPARE because does not have'
' sufficient PREPAREs for {}'.format(
self, three_pc_key))
return False
digest, state_root, txn_root, _ = \
self._get_acceptable_stashed_prepare_state(three_pc_key)
# Choose a better data structure for `prePreparesPendingFinReqs`
pre_prepares = [pp for pp, _, _ in self.prePreparesPendingFinReqs
if (pp.viewNo, pp.ppSeqNo) == three_pc_key]
if pre_prepares:
if [pp for pp in pre_prepares if (pp.digest, pp.stateRootHash, pp.txnRootHash) == (digest, state_root, txn_root)]:
logger.debug('{} not requesting a PRE-PREPARE since already '
'found stashed for {}'.format(self, three_pc_key))
return False
self._request_pre_prepare(three_pc_key,
stash_data=(digest, state_root, txn_root))
return True
def _get_acceptable_stashed_prepare_state(self, three_pc_key):
prepares = {s: (m.digest, m.stateRootHash, m.txnRootHash) for m, s in
self.preparesWaitingForPrePrepare[three_pc_key]}
acceptable, freq = mostCommonElement(prepares.values())
return (*acceptable, {s for s, state in prepares.items()
if state == acceptable})
def _is_primary_for_msg(self, msg) -> Optional[bool]:
"""
Return whether this replica is primary if the request's view number is
equal this replica's view number and primary has been selected for
the current view.
Return None otherwise.
:param msg: message
"""
return self._data.is_primary if self._is_msg_for_current_view(msg) \
else self._is_primary_in_view(msg.viewNo)
def _is_primary_in_view(self, viewNo: int) -> Optional[bool]:
"""
Return whether this replica was primary in the given view
"""
if viewNo not in self.primary_names:
return False
return self.primary_names[viewNo] == self.name
@measure_consensus_time(MetricsName.PROCESS_COMMIT_TIME,
MetricsName.BACKUP_PROCESS_COMMIT_TIME)
def process_commit(self, commit: Commit, sender: str):
"""
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
"""
result, reason = self._validate(commit)
if result != PROCESS:
return result, reason
logger.debug("{} received COMMIT{} from {}".format(self, (commit.viewNo, commit.ppSeqNo), sender))
if self._validate_commit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self._add_to_commits(commit, sender)
logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo)))
return result, reason
def _validate_commit(self, commit: Commit, sender: str) -> bool:
"""
Return whether the COMMIT specified is valid.
:param commit: the COMMIT to validate
:return: True if `request` is valid, False otherwise
"""
key = (commit.viewNo, commit.ppSeqNo)
if not self._has_prepared(key):
self._enqueue_commit(commit, sender)
return False
if self.commits.hasCommitFrom(commit, sender):
self.report_suspicious_node(SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit))
return False
# BLS multi-sig (call it for non-ordered only to avoid redundant validations):
why_not = None
if not self._validator.has_already_ordered(commit.viewNo, commit.ppSeqNo):
pre_prepare = self.get_preprepare(commit.viewNo, commit.ppSeqNo)
why_not = self.l_bls_bft_replica.validate_commit(commit, sender, pre_prepare)
if why_not == BlsBftReplica.CM_BLS_SIG_WRONG:
logger.warning("{} discard Commit message from {}:{}".format(self, sender, commit))
self.report_suspicious_node(SuspiciousNode(sender,
Suspicions.CM_BLS_SIG_WRONG,
commit))
return False
elif why_not is not None:
logger.warning("Unknown error code returned for bls commit "
"validation {}".format(why_not))
return True
def _enqueue_commit(self, request: Commit, sender: str):
key = (request.viewNo, request.ppSeqNo)
logger.debug("{} - Queueing commit due to unavailability of PREPARE. "
"Request {} with key {} from {}".format(self, request, key, sender))
if key not in self.commitsWaitingForPrepare:
self.commitsWaitingForPrepare[key] = deque()
self.commitsWaitingForPrepare[key].append((request, sender))
@measure_consensus_time(MetricsName.PROCESS_PREPREPARE_TIME,
MetricsName.BACKUP_PROCESS_PREPREPARE_TIME)
def process_preprepare(self, pre_prepare: PrePrepare, sender: str):
"""
Validate and process provided PRE-PREPARE, create and
broadcast PREPARE for it.
:param pre_prepare: message
:param sender: name of the node that sent this message
"""
pp_key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
# the same PrePrepare might come here multiple times
if (pp_key and (pre_prepare.auditTxnRootHash, sender) not in self.pre_prepare_tss[pp_key]):
# TODO more clean solution would be to set timestamps
# earlier (e.g. in zstack)
self.pre_prepare_tss[pp_key][pre_prepare.auditTxnRootHash, sender] = self.get_time_for_3pc_batch()
result, reason = self._validate(pre_prepare)
if result != PROCESS:
return result, reason
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
logger.debug("{} received PRE-PREPARE{} from {}".format(self, key, sender))
# TODO: should we still do it?
# Converting each req_idrs from list to tuple
req_idrs = {f.REQ_IDR.nm: [key for key in pre_prepare.reqIdr]}
pre_prepare = updateNamedTuple(pre_prepare, **req_idrs)
def report_suspicious(reason):
ex = SuspiciousNode(sender, reason, pre_prepare)
self.report_suspicious_node(ex)
why_not = self._can_process_pre_prepare(pre_prepare, sender)
if why_not is None:
why_not_applied = \
self._process_valid_preprepare(pre_prepare, sender)
if why_not_applied is not None:
if why_not_applied == PP_APPLY_REJECT_WRONG:
report_suspicious(Suspicions.PPR_REJECT_WRONG)
elif why_not_applied == PP_APPLY_WRONG_STATE:
report_suspicious(Suspicions.PPR_STATE_WRONG)
elif why_not_applied == PP_APPLY_ROOT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_TXN_WRONG)
elif why_not_applied == PP_APPLY_HOOK_ERROR:
report_suspicious(Suspicions.PPR_PLUGIN_EXCEPTION)
elif why_not_applied == PP_SUB_SEQ_NO_WRONG:
report_suspicious(Suspicions.PPR_SUB_SEQ_NO_WRONG)
elif why_not_applied == PP_NOT_FINAL:
# this is fine, just wait for another
return None, None
elif why_not_applied == PP_APPLY_AUDIT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG)
elif why_not_applied == PP_REQUEST_ALREADY_ORDERED:
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
elif why_not_applied == PP_WRONG_PRIMARIES:
report_suspicious(Suspicions.PPR_WITH_WRONG_PRIMARIES)
elif why_not == PP_APPLY_WRONG_DIGEST:
report_suspicious(Suspicions.PPR_DIGEST_WRONG)
elif why_not == PP_CHECK_NOT_FROM_PRIMARY:
report_suspicious(Suspicions.PPR_FRM_NON_PRIMARY)
elif why_not == PP_CHECK_TO_PRIMARY:
report_suspicious(Suspicions.PPR_TO_PRIMARY)
elif why_not == PP_CHECK_DUPLICATE:
report_suspicious(Suspicions.DUPLICATE_PPR_SENT)
elif why_not == PP_CHECK_INCORRECT_POOL_STATE_ROOT:
report_suspicious(Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG)
elif why_not == PP_CHECK_OLD:
logger.info("PRE-PREPARE {} has ppSeqNo lower "
"then the latest one - ignoring it".format(key))
elif why_not == PP_CHECK_REQUEST_NOT_FINALIZED:
absents = set()
non_fin = set()
non_fin_payload = set()
for key in pre_prepare.reqIdr:
req = self._requests.get(key)
if req is None:
absents.add(key)
elif not req.finalised:
non_fin.add(key)
non_fin_payload.add(req.request.payload_digest)
absent_str = ', '.join(str(key) for key in absents)
non_fin_str = ', '.join(
'{} ({} : {})'.format(str(key),
str(len(self._requests[key].propagates)),
', '.join(self._requests[key].propagates.keys())) for key in non_fin)
logger.warning(
"{} found requests in the incoming pp, of {} ledger, that are not finalized. "
"{} of them don't have propagates: [{}]. "
"{} of them don't have enough propagates: [{}].".format(self, pre_prepare.ledgerId,
len(absents), absent_str,
len(non_fin), non_fin_str))
def signal_suspicious(req):
logger.info("Request digest {} already ordered. Discard {} "
"from {}".format(req, pre_prepare, sender))
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
# checking for payload digest is more effective
for payload_key in non_fin_payload:
if self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_payload_digest(payload_key) != (None, None):
signal_suspicious(payload_key)
return None, None
# for absents we can only check full digest
for full_key in absents:
if self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_full_digest(full_key) is not None:
signal_suspicious(full_key)
return None, None
bad_reqs = absents | non_fin
self._enqueue_pre_prepare(pre_prepare, sender, bad_reqs)
# TODO: An optimisation might be to not request PROPAGATEs
# if some PROPAGATEs are present or a client request is
# present and sufficient PREPAREs and PRE-PREPARE are present,
# then the digest can be compared but this is expensive as the
# PREPARE and PRE-PREPARE contain a combined digest
if self._config.PROPAGATE_REQUEST_ENABLED:
self._schedule(partial(self._request_propagates_if_needed, bad_reqs, pre_prepare),
self._config.PROPAGATE_REQUEST_DELAY)
elif why_not == PP_CHECK_NOT_NEXT:
pp_view_no = pre_prepare.viewNo
pp_seq_no = pre_prepare.ppSeqNo
_, last_pp_seq_no = self.__last_pp_3pc
if self.is_master or self.last_ordered_3pc[1] != 0:
seq_frm = last_pp_seq_no + 1
seq_to = pp_seq_no - 1
if seq_to >= seq_frm >= pp_seq_no - self._config.CHK_FREQ + 1:
logger.warning(
"{} missing PRE-PREPAREs from {} to {}, "
"going to request".format(self, seq_frm, seq_to))
self._request_missing_three_phase_messages(
pp_view_no, seq_frm, seq_to)
self._enqueue_pre_prepare(pre_prepare, sender)
self.l_setup_last_ordered_for_non_master()
elif why_not == PP_CHECK_WRONG_TIME:
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
item = (pre_prepare, sender, False)
self.pre_prepares_stashed_for_incorrect_time[key] = item
report_suspicious(Suspicions.PPR_TIME_WRONG)
elif why_not == BlsBftReplica.PPR_BLS_MULTISIG_WRONG:
report_suspicious(Suspicions.PPR_BLS_MULTISIG_WRONG)
else:
logger.warning("Unknown PRE-PREPARE check status: {}".format(why_not))
return None, None
@property
def view_no(self):
return self._data.view_no
@property
def sent_preprepares(self):
return self._data.sent_preprepares
@property
def prepares(self):
return self._data.prepares
@property
def commits(self):
return self._data.commits
@property
def requested_pre_prepares(self):
return self._data.requested_pre_prepares
@property
def last_ordered_3pc(self):
return self._data.last_ordered_3pc
@last_ordered_3pc.setter
def last_ordered_3pc(self, lo_tuple):
self._data.last_ordered_3pc = lo_tuple
pp_seq_no = lo_tuple[1]
if pp_seq_no > self.lastPrePrepareSeqNo:
self.lastPrePrepareSeqNo = pp_seq_no
logger.info('{} set last ordered as {}'.format(self, lo_tuple))
@property
def last_preprepare(self):
last_3pc = (0, 0)
lastPp = None
if self.sent_preprepares:
(v, s), pp = self.sent_preprepares.peekitem(-1)
last_3pc = (v, s)
lastPp = pp
if self.prePrepares:
(v, s), pp = self.prePrepares.peekitem(-1)
if compare_3PC_keys(last_3pc, (v, s)) > 0:
lastPp = pp
return lastPp
@property
def __last_pp_3pc(self):
last_pp = self.last_preprepare
if not last_pp:
return self.last_ordered_3pc
last_3pc = (last_pp.viewNo, last_pp.ppSeqNo)
if compare_3PC_keys(self.last_ordered_3pc, last_3pc) > 0:
return last_3pc
return self.last_ordered_3pc
@property
def db_manager(self):
return self._write_manager.database_manager
@property
def is_master(self):
return self._data.is_master
@property
def primary_name(self):
"""
Name of the primary replica of this replica's instance
:return: Returns name if primary is known, None otherwise
"""
return self._data.primary_name
@property
def name(self):
return self._data.name
@name.setter
def name(self, n):
self._data._name = n
@property
def f(self):
return getMaxFailures(self._data.total_nodes)
def gc(self, till3PCKey):
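        # Clean up 3PC collections (pre-prepares, prepares, commits, batches,
        # timestamps) and free request keys for all 3PC keys that are not
        # newer than `till3PCKey`.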
logger.info("{} cleaning up till {}".format(self, till3PCKey))
tpcKeys = set()
reqKeys = set()
for key3PC, pp in itertools.chain(
self.sent_preprepares.items(),
self.prePrepares.items()
):
if compare_3PC_keys(till3PCKey, key3PC) <= 0:
tpcKeys.add(key3PC)
for reqKey in pp.reqIdr:
reqKeys.add(reqKey)
for key3PC, pp_dict in self.pre_prepare_tss.items():
if compare_3PC_keys(till3PCKey, key3PC) <= 0:
tpcKeys.add(key3PC)
# TODO INDY-1983: was found that it adds additional
# requests to clean, need to explore why
# for (pp, _) in pp_dict:
# for reqKey in pp.reqIdr:
# reqKeys.add(reqKey)
logger.trace("{} found {} 3-phase keys to clean".
format(self, len(tpcKeys)))
logger.trace("{} found {} request keys to clean".
format(self, len(reqKeys)))
self.old_view_preprepares = {k: v for k, v in self.old_view_preprepares.items()
if compare_3PC_keys(till3PCKey, (k[0], k[1])) > 0}
to_clean_up = (
self.pre_prepare_tss,
self.sent_preprepares,
self.prePrepares,
self.prepares,
self.commits,
self.batches,
self.pre_prepares_stashed_for_incorrect_time,
)
for request_key in tpcKeys:
pp = self.get_preprepare(*request_key)
if pp:
self._clear_batch(self.get_preprepare(*request_key))
for coll in to_clean_up:
coll.pop(request_key, None)
for request_key in reqKeys:
self._requests.ordered_by_replica(request_key)
self._requests.free(request_key)
for ledger_id, keys in self.requestQueues.items():
if request_key in keys:
self.discard_req_key(ledger_id, request_key)
logger.trace('{} freed request {} from previous checkpoints'.format(self, request_key))
# ToDo: do we need ordered messages there?
self.ordered.clear_below_view(self.view_no - 1)
# BLS multi-sig:
self.l_bls_bft_replica.gc(till3PCKey)
def discard_req_key(self, ledger_id, req_key):
self.requestQueues[ledger_id].discard(req_key)
def _clear_prev_view_pre_prepares(self):
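        # Drop stashed PRE-PREPAREs that belong to views older than the current one.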
to_remove = []
for idx, (pp, _, _) in enumerate(self.prePreparesPendingFinReqs):
if pp.viewNo < self.view_no:
to_remove.insert(0, idx)
for idx in to_remove:
self.prePreparesPendingFinReqs.pop(idx)
for (v, p) in list(self.prePreparesPendingPrevPP.keys()):
if v < self.view_no:
self.prePreparesPendingPrevPP.pop((v, p))
def report_suspicious_node(self, ex: SuspiciousNode):
logger.debug("{} raised suspicion on node {} for {}; suspicion code "
"is {}".format(self, ex.node, ex.reason, ex.code))
self._bus.send(RaisedSuspicion(inst_id=self._data.inst_id,
ex=ex))
def _validate(self, msg):
return self._validator.validate(msg)
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str):
"""
Decide whether this replica is eligible to process a PRE-PREPARE.
:param pre_prepare: a PRE-PREPARE msg to process
:param sender: the name of the node that sent the PRE-PREPARE msg
"""
if self._validator.has_already_ordered(pre_prepare.viewNo, pre_prepare.ppSeqNo):
return None
digest = self.generate_pp_digest(pre_prepare.reqIdr,
get_original_viewno(pre_prepare),
pre_prepare.ppTime)
if digest != pre_prepare.digest:
return PP_APPLY_WRONG_DIGEST
# PRE-PREPARE should not be sent from non primary
if not self._is_msg_from_primary(pre_prepare, sender):
return PP_CHECK_NOT_FROM_PRIMARY
# Already has a PRE-PREPARE with same 3 phase key
if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
return PP_CHECK_DUPLICATE
if not self._is_pre_prepare_time_acceptable(pre_prepare, sender):
return PP_CHECK_WRONG_TIME
if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
self.__last_pp_3pc) > 0:
return PP_CHECK_OLD # ignore old pre-prepare
if self._non_finalised_reqs(pre_prepare.reqIdr):
return PP_CHECK_REQUEST_NOT_FINALIZED
if not self._is_next_pre_prepare(pre_prepare.viewNo,
pre_prepare.ppSeqNo):
return PP_CHECK_NOT_NEXT
if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
pre_prepare.poolStateRootHash != self.get_state_root_hash(POOL_LEDGER_ID):
return PP_CHECK_INCORRECT_POOL_STATE_ROOT
# BLS multi-sig:
status = self.l_bls_bft_replica.validate_pre_prepare(pre_prepare,
sender)
if status is not None:
return status
return None
def _schedule(self, func, delay):
self._timer.schedule(delay, func)
def _process_valid_preprepare(self, pre_prepare: PrePrepare, sender: str):
why_not_applied = None
        # apply and validate the applied PrePrepare if it has not been ordered yet
if not self._validator.has_already_ordered(pre_prepare.viewNo, pre_prepare.ppSeqNo):
why_not_applied = self._apply_and_validate_applied_pre_prepare(pre_prepare, sender)
if why_not_applied is not None:
return why_not_applied
# add to PrePrepares
if self._data.is_primary:
self._add_to_sent_pre_prepares(pre_prepare)
else:
self._add_to_pre_prepares(pre_prepare)
# we may have stashed Prepares and Commits if this is PrePrepare from old view (re-ordering phase)
self._dequeue_prepares(pre_prepare.viewNo, pre_prepare.ppSeqNo)
self._dequeue_commits(pre_prepare.viewNo, pre_prepare.ppSeqNo)
return None
def _apply_and_validate_applied_pre_prepare(self, pre_prepare: PrePrepare, sender: str):
self.first_batch_after_catchup = False
old_state_root = self.get_state_root_hash(pre_prepare.ledgerId, to_str=False)
old_txn_root = self.get_txn_root_hash(pre_prepare.ledgerId)
if self.is_master:
logger.debug('{} state root before processing {} is {}, {}'.format(
self,
pre_prepare,
old_state_root,
old_txn_root))
# 1. APPLY
reqs, invalid_indices, rejects, suspicious = self._apply_pre_prepare(pre_prepare)
# 2. CHECK IF MORE CHUNKS NEED TO BE APPLIED FURTHER BEFORE VALIDATION
if pre_prepare.sub_seq_no != 0:
return PP_SUB_SEQ_NO_WRONG
if not pre_prepare.final:
return PP_NOT_FINAL
# 3. VALIDATE APPLIED
invalid_from_pp = invalid_index_serializer.deserialize(pre_prepare.discarded)
if suspicious:
why_not_applied = PP_REQUEST_ALREADY_ORDERED
else:
why_not_applied = self._validate_applied_pre_prepare(pre_prepare,
invalid_indices, invalid_from_pp)
# 4. IF NOT VALID AFTER APPLYING - REVERT
if why_not_applied is not None:
if self.is_master:
self._revert(pre_prepare.ledgerId,
old_state_root,
len(pre_prepare.reqIdr) - len(invalid_indices))
return why_not_applied
# 5. TRACK APPLIED
if rejects:
for reject in rejects:
self._network.send(reject)
if self.is_master:
# BLS multi-sig:
self.l_bls_bft_replica.process_pre_prepare(pre_prepare, sender)
logger.trace("{} saved shared multi signature for root".format(self, old_state_root))
if not self.is_master:
self.db_manager.get_store(LAST_SENT_PP_STORE_LABEL).store_last_sent_pp_seq_no(
self._data.inst_id, pre_prepare.ppSeqNo)
self._track_batches(pre_prepare, old_state_root)
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
logger.debug("{} processed incoming PRE-PREPARE{}".format(self, key),
extra={"tags": ["processing"]})
return None
def _enqueue_pre_prepare(self, pre_prepare: PrePrepare, sender: str,
nonFinReqs: Set = None):
if nonFinReqs:
logger.info("{} - Queueing pre-prepares due to unavailability of finalised "
"requests. PrePrepare {} from {}".format(self, pre_prepare, sender))
self.prePreparesPendingFinReqs.append((pre_prepare, sender, nonFinReqs))
else:
            # Possible exploit: a malicious party could send an invalid
            # pre-prepare and overwrite the correct one?
logger.info("Queueing pre-prepares due to unavailability of previous pre-prepares. {} from {}".
format(pre_prepare, sender))
self.prePreparesPendingPrevPP[pre_prepare.viewNo, pre_prepare.ppSeqNo] = (pre_prepare, sender)
def _request_propagates_if_needed(self, bad_reqs: list, pre_prepare: PrePrepare):
if any(pre_prepare is pended[0] for pended in self.prePreparesPendingFinReqs):
self._bus.send(RequestPropagates(bad_reqs))
def _request_missing_three_phase_messages(self, view_no: int, seq_frm: int, seq_to: int) -> None:
for pp_seq_no in range(seq_frm, seq_to + 1):
key = (view_no, pp_seq_no)
self._request_pre_prepare(key)
self._request_prepare(key)
self._request_commit(key)
def _request_three_phase_msg(self, three_pc_key: Tuple[int, int],
msg_type: str,
recipients: Optional[List[str]] = None,
stash_data: Optional[Tuple[str, str, str]] = None):
self._bus.send(MissingMessage(msg_type,
three_pc_key,
self._data.inst_id,
recipients,
stash_data))
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None):
"""
Request preprepare
"""
if not self._config.PRE_PREPARE_REQUEST_ENABLED:
return
recipients = [replica_name_to_node_name(self.primary_name)]
self._request_three_phase_msg(three_pc_key,
PREPREPARE,
recipients,
stash_data)
def _request_prepare(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None,
stash_data: Optional[Tuple[str, str, str]] = None):
"""
        Request prepare
"""
if not self._config.PREPARE_REQUEST_ENABLED:
return
if recipients is None:
recipients = self._network.connecteds.copy()
primary_node_name = replica_name_to_node_name(self.primary_name)
if primary_node_name in recipients:
recipients.remove(primary_node_name)
return self._request_three_phase_msg(three_pc_key, PREPARE, recipients, stash_data)
def _request_commit(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None):
"""
Request commit
"""
if not self._config.COMMIT_REQUEST_ENABLED:
return
if recipients is None:
recipients = self._network.connecteds.copy()
self._request_three_phase_msg(three_pc_key, COMMIT, recipients)
"""Method from legacy code"""
def l_setup_last_ordered_for_non_master(self):
"""
Since last ordered view_no and pp_seq_no are only communicated for
master instance, backup instances use this method for restoring
`last_ordered_3pc`
:return:
"""
if not self.is_master and self.first_batch_after_catchup and \
not self._data.is_primary:
# If not master instance choose last ordered seq no to be 1 less
# the lowest prepared certificate in this view
lowest_prepared = self.l_get_lowest_probable_prepared_certificate_in_view(
self.view_no)
if lowest_prepared is not None:
                # right after catch-up last_ordered_3pc[1] is 0;
                # it should be changed to lowest_prepared - 1
                logger.info('{} Setting last ordered for non-master as {}'.
                            format(self, (self.view_no, lowest_prepared - 1)))
                self.last_ordered_3pc = (self.view_no, lowest_prepared - 1)
self._bus.send(BackupSetupLastOrdered(inst_id=self._data.inst_id))
self.first_batch_after_catchup = False
def get_state_root_hash(self, ledger_id: str, to_str=True, committed=False):
return self.db_manager.get_state_root_hash(ledger_id, to_str, committed) \
if self.is_master \
else None
def get_txn_root_hash(self, ledger_id: str, to_str=True):
return self.db_manager.get_txn_root_hash(ledger_id, to_str) \
if self.is_master \
else None
def _is_msg_from_primary(self, msg, sender: str) -> bool:
"""
        Return whether this message was sent by the primary replica of the
        view the message belongs to.
        :param msg: the 3PC message being checked
        :param sender: name of the node that sent the message
        :return: True if the sender is that view's primary, False otherwise
"""
if self._is_msg_for_current_view(msg):
return self.primary_name == sender
try:
return self.primary_names[msg.viewNo] == sender
except KeyError:
return False
def _is_msg_for_current_view(self, msg):
"""
Return whether this request's view number is equal to the current view
number of this replica.
"""
viewNo = getattr(msg, "viewNo", None)
return viewNo == self.view_no
def _is_pre_prepare_time_correct(self, pp: PrePrepare, sender: str) -> bool:
"""
        Check that this PRE-PREPARE is not older than the last accepted
        PRE-PREPARE (equality is allowed since several batches may be sent
        within the same second) and that its time is within an acceptable
        range of the local clock's UTC time.
:param pp:
:return:
"""
tpcKey = (pp.viewNo, pp.ppSeqNo)
if (self.last_accepted_pre_prepare_time and
pp.ppTime < self.last_accepted_pre_prepare_time):
return False
elif ((tpcKey not in self.pre_prepare_tss) or
((pp.auditTxnRootHash, sender) not in self.pre_prepare_tss[tpcKey])):
return False
else:
return (
abs(pp.ppTime - self.pre_prepare_tss[tpcKey][pp.auditTxnRootHash, sender]) <=
self._config.ACCEPTABLE_DEVIATION_PREPREPARE_SECS
)
def _is_pre_prepare_time_acceptable(self, pp: PrePrepare, sender: str) -> bool:
"""
        Return whether the time in the PRE-PREPARE is acceptable. Can return
        True even if the time is not acceptable, provided sufficient PREPAREs
        are found to support the PRE-PREPARE
:param pp:
:return:
"""
key = (pp.viewNo, pp.ppSeqNo)
if key in self.requested_pre_prepares:
# Special case for requested PrePrepares
return True
correct = self._is_pre_prepare_time_correct(pp, sender)
if not correct:
if key in self.pre_prepares_stashed_for_incorrect_time and \
self.pre_prepares_stashed_for_incorrect_time[key][-1]:
logger.debug('{} marking time as correct for {}'.format(self, pp))
correct = True
else:
logger.warning('{} found {} to have incorrect time.'.format(self, pp))
return correct
def _non_finalised_reqs(self, reqKeys: List[Tuple[str, int]]):
"""
        Return the keys of requests which are not finalised, i.e. for
        which there are not enough PROPAGATEs
"""
# TODO: fix comment, write tests
return {key for key in reqKeys if key not in self._requests}
# return {key for key in reqKeys if not self._requests.is_finalised(key)}
"""Method from legacy code"""
def _is_next_pre_prepare(self, view_no: int, pp_seq_no: int):
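        # A PRE-PREPARE is "next" if it is the very first one (pp_seq_no == 1)
        # or directly follows the last known PRE-PREPARE / last ordered 3PC key.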
if pp_seq_no == 1:
# First PRE-PREPARE
return True
(last_pp_view_no, last_pp_seq_no) = self.__last_pp_3pc
return pp_seq_no - last_pp_seq_no == 1
def _apply_pre_prepare(self, pre_prepare: PrePrepare):
"""
Applies (but not commits) requests of the PrePrepare
to the ledger and state
"""
reqs = []
idx = 0
rejects = []
invalid_indices = []
suspicious = False
# 1. apply each request
for req_key in pre_prepare.reqIdr:
req = self._requests[req_key].request
try:
self._process_req_during_batch(req,
pre_prepare.ppTime)
except InvalidClientMessageException as ex:
logger.warning('{} encountered exception {} while processing {}, '
'will reject'.format(self, ex, req))
rejects.append((req.key, Reject(req.identifier, req.reqId,
reasonForClientFromException(ex), ex.code)))
invalid_indices.append(idx)
except SuspiciousPrePrepare:
suspicious = True
invalid_indices.append(idx)
finally:
reqs.append(req)
idx += 1
# 2. call callback for the applied batch
if self.is_master:
three_pc_batch = ThreePcBatch.from_pre_prepare(
pre_prepare,
state_root=self.get_state_root_hash(pre_prepare.ledgerId, to_str=False),
txn_root=self.get_txn_root_hash(pre_prepare.ledgerId, to_str=False),
valid_digests=self._get_valid_req_ids_from_all_requests(reqs, invalid_indices)
)
self.post_batch_creation(three_pc_batch)
return reqs, invalid_indices, rejects, suspicious
def _get_valid_req_ids_from_all_requests(self, reqs, invalid_indices):
return [req.key for idx, req in enumerate(reqs) if idx not in invalid_indices]
def _validate_applied_pre_prepare(self, pre_prepare: PrePrepare,
invalid_indices, invalid_from_pp) -> Optional[int]:
if len(invalid_indices) != len(invalid_from_pp):
return PP_APPLY_REJECT_WRONG
if self.is_master:
if pre_prepare.stateRootHash != self.get_state_root_hash(pre_prepare.ledgerId):
return PP_APPLY_WRONG_STATE
if pre_prepare.txnRootHash != self.get_txn_root_hash(pre_prepare.ledgerId):
return PP_APPLY_ROOT_HASH_MISMATCH
# TODO: move this kind of validation to batch handlers
if f.AUDIT_TXN_ROOT_HASH.nm in pre_prepare and pre_prepare.auditTxnRootHash != self.get_txn_root_hash(AUDIT_LEDGER_ID):
return PP_APPLY_AUDIT_HASH_MISMATCH
return None
"""Method from legacy code"""
def l_get_lowest_probable_prepared_certificate_in_view(
self, view_no) -> Optional[int]:
"""
        Return the lowest pp_seq_no of the view that can be prepared,
        choosing from unprocessed PRE-PREPAREs and PREPAREs.
        """
        # TODO: Naive implementation, we don't need to iterate over the complete
        # data structures, fix this later
seq_no_pp = SortedList() # pp_seq_no of PRE-PREPAREs
# pp_seq_no of PREPAREs with count of PREPAREs for each
seq_no_p = set()
for (v, p) in self.prePreparesPendingPrevPP:
if v == view_no:
seq_no_pp.add(p)
if v > view_no:
break
for (v, p), pr in self.preparesWaitingForPrePrepare.items():
if v == view_no and len(pr) >= self._data.quorums.prepare.value:
seq_no_p.add(p)
for n in seq_no_pp:
if n in seq_no_p:
return n
return None
def _revert(self, ledgerId, stateRootHash, reqCount):
# A batch should only be reverted if all batches that came after it
# have been reverted
ledger = self.db_manager.get_ledger(ledgerId)
state = self.db_manager.get_state(ledgerId)
logger.info('{} reverting {} txns and state root from {} to {} for ledger {}'
.format(self, reqCount, Ledger.hashToStr(state.headHash),
Ledger.hashToStr(stateRootHash), ledgerId))
state.revertToHead(stateRootHash)
ledger.discardTxns(reqCount)
self.post_batch_rejection(ledgerId)
def _track_batches(self, pp: PrePrepare, prevStateRootHash):
# pp.discarded indicates the index from where the discarded requests
# starts hence the count of accepted requests, prevStateRoot is
# tracked to revert this PRE-PREPARE
logger.trace('{} tracking batch for {} with state root {}'.format(self, pp, prevStateRootHash))
if self.is_master:
self.metrics.add_event(MetricsName.THREE_PC_BATCH_SIZE, len(pp.reqIdr))
else:
self.metrics.add_event(MetricsName.BACKUP_THREE_PC_BATCH_SIZE, len(pp.reqIdr))
self.batches[(pp.viewNo, pp.ppSeqNo)] = [pp.ledgerId, pp.discarded,
pp.ppTime, prevStateRootHash, len(pp.reqIdr)]
@property
def lastPrePrepareSeqNo(self):
return self._lastPrePrepareSeqNo
@lastPrePrepareSeqNo.setter
def lastPrePrepareSeqNo(self, n):
"""
        This sets _lastPrePrepareSeqNo only to values greater than its previous
        value. To forcefully override it, as in the case of `revert`,
        set `self._lastPrePrepareSeqNo` directly
"""
if n > self._lastPrePrepareSeqNo:
self._lastPrePrepareSeqNo = n
else:
logger.debug(
'{} cannot set lastPrePrepareSeqNo to {} as its '
'already {}'.format(
self, n, self._lastPrePrepareSeqNo))
def _add_to_pre_prepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
# ToDo:
self.prePrepares[key] = pp
self._preprepare_batch(pp)
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self.stats.inc(TPCStat.PrePrepareRcvd)
self.try_prepare(pp)
def _add_to_sent_pre_prepares(self, pp: PrePrepare) -> None:
self.sent_preprepares[pp.viewNo, pp.ppSeqNo] = pp
self._preprepare_batch(pp)
self.lastPrePrepareSeqNo = pp.ppSeqNo
def _dequeue_prepares(self, viewNo: int, ppSeqNo: int):
key = (viewNo, ppSeqNo)
if key in self.preparesWaitingForPrePrepare:
i = 0
# Keys of pending prepares that will be processed below
while self.preparesWaitingForPrePrepare[key]:
prepare, sender = self.preparesWaitingForPrePrepare[
key].popleft()
logger.debug("{} popping stashed PREPARE{}".format(self, key))
self._network.process_incoming(prepare, sender)
i += 1
self.preparesWaitingForPrePrepare.pop(key)
logger.debug("{} processed {} PREPAREs waiting for PRE-PREPARE for"
" view no {} and seq no {}".format(self, i, viewNo, ppSeqNo))
def _dequeue_commits(self, viewNo: int, ppSeqNo: int):
key = (viewNo, ppSeqNo)
if key in self.commitsWaitingForPrepare:
if not self._has_prepared(key):
logger.debug('{} has not pre-prepared {}, will dequeue the '
'COMMITs later'.format(self, key))
return
i = 0
            # Keys of pending commits that will be processed below
while self.commitsWaitingForPrepare[key]:
commit, sender = self.commitsWaitingForPrepare[
key].popleft()
logger.debug("{} popping stashed COMMIT{}".format(self, key))
self._network.process_incoming(commit, sender)
i += 1
self.commitsWaitingForPrepare.pop(key)
logger.debug("{} processed {} COMMITs waiting for PREPARE for"
" view no {} and seq no {}".format(self, i, viewNo, ppSeqNo))
def try_prepare(self, pp: PrePrepare):
"""
Try to send the Prepare message if the PrePrepare message is ready to
be passed into the Prepare phase.
"""
rv, msg = self._can_prepare(pp)
if rv:
self._do_prepare(pp)
else:
logger.debug("{} cannot send PREPARE since {}".format(self, msg))
def _can_prepare(self, ppReq) -> (bool, str):
"""
Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes
"""
if self.prepares.hasPrepareFrom(ppReq, self.name):
return False, 'has already sent PREPARE for {}'.format(ppReq)
return True, ''
@measure_consensus_time(MetricsName.SEND_PREPARE_TIME,
MetricsName.BACKUP_SEND_PREPARE_TIME)
def _do_prepare(self, pp: PrePrepare):
logger.debug("{} Sending PREPARE{} at {}".format(self, (pp.viewNo, pp.ppSeqNo), self.get_current_time()))
params = [self._data.inst_id,
pp.viewNo,
pp.ppSeqNo,
pp.ppTime,
pp.digest,
pp.stateRootHash,
pp.txnRootHash]
if f.AUDIT_TXN_ROOT_HASH.nm in pp:
params.append(pp.auditTxnRootHash)
# BLS multi-sig:
params = self.l_bls_bft_replica.update_prepare(params, pp.ledgerId)
prepare = Prepare(*params)
self._send(prepare, stat=TPCStat.PrepareSent)
self._add_to_prepares(prepare, self.name)
def _has_prepared(self, key):
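        # A 3PC key counts as "prepared" here if we know its PRE-PREPARE and
        # either have PREPAREs for it (received or stashed) or sent the
        # PRE-PREPARE ourselves as primary.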
if not self.get_preprepare(*key):
return False
if ((key not in self.prepares and key not in self.sent_preprepares) and
(key not in self.preparesWaitingForPrePrepare)):
return False
return True
def get_preprepare(self, viewNo, ppSeqNo):
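        # Look up the PRE-PREPARE for this 3PC key, checking the ones we sent
        # as primary first, then the ones received from the primary.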
key = (viewNo, ppSeqNo)
if key in self.sent_preprepares:
return self.sent_preprepares[key]
if key in self.prePrepares:
return self.prePrepares[key]
return None
def _add_to_prepares(self, prepare: Prepare, sender: str):
"""
Add the specified PREPARE to this replica's list of received
PREPAREs and try sending COMMIT
:param prepare: the PREPARE to add to the list
"""
# BLS multi-sig:
self.l_bls_bft_replica.process_prepare(prepare, sender)
self.prepares.addVote(prepare, sender)
self._dequeue_commits(prepare.viewNo, prepare.ppSeqNo)
self._try_commit(prepare)
def _try_commit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self._can_commit(prepare)
if rv:
pp = self.get_preprepare(prepare.viewNo, prepare.ppSeqNo)
self._prepare_batch(pp)
self._do_commit(prepare)
else:
logger.debug("{} cannot send COMMIT since {}".format(self, reason))
@measure_consensus_time(MetricsName.SEND_COMMIT_TIME,
MetricsName.BACKUP_SEND_COMMIT_TIME)
def _do_commit(self, p: Prepare):
"""
Create a commit message from the given Prepare message and trigger the
commit phase
:param p: the prepare message
"""
key_3pc = (p.viewNo, p.ppSeqNo)
logger.debug("{} Sending COMMIT{} at {}".format(self, key_3pc, self.get_current_time()))
params = [
self._data.inst_id, p.viewNo, p.ppSeqNo
]
# BLS multi-sig:
if p.stateRootHash is not None:
pre_prepare = self.get_preprepare(*key_3pc)
params = self.l_bls_bft_replica.update_commit(params, pre_prepare)
commit = Commit(*params)
self._send(commit, stat=TPCStat.CommitSent)
self._add_to_commits(commit, self.name)
def _add_to_commits(self, commit: Commit, sender: str):
"""
Add the specified COMMIT to this replica's list of received
commit requests.
:param commit: the COMMIT to add to the list
:param sender: the name of the node that sent the COMMIT
"""
# BLS multi-sig:
self.l_bls_bft_replica.process_commit(commit, sender)
self.commits.addVote(commit, sender)
self._try_order(commit)
def _try_order(self, commit: Commit):
"""
Try to order if the Commit message is ready to be ordered.
"""
if self._validator.has_already_ordered(commit.viewNo, commit.ppSeqNo):
self._try_finish_reordering_after_vc(commit.ppSeqNo)
canOrder, reason = self._can_order(commit)
if canOrder:
logger.trace("{} returning request to node".format(self))
self._do_order(commit)
else:
logger.trace("{} cannot return request to node: {}".format(self, reason))
return canOrder
def _try_finish_reordering_after_vc(self, pp_seq_no):
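        # On the master, once the batch right after the previous view's prepare
        # certificate is reached, signal that re-ordering after the view change
        # is complete and release messages stashed while waiting for the first
        # batch in the new view.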
if self.is_master and self._data.prev_view_prepare_cert + 1 == pp_seq_no:
self._bus.send(MasterReorderedAfterVC())
self._stasher.process_all_stashed(STASH_WAITING_FIRST_BATCH_IN_VIEW)
def _do_order(self, commit: Commit):
key = (commit.viewNo, commit.ppSeqNo)
logger.debug("{} ordering COMMIT {}".format(self, key))
return self._order_3pc_key(key)
@measure_consensus_time(MetricsName.ORDER_3PC_BATCH_TIME,
MetricsName.BACKUP_ORDER_3PC_BATCH_TIME)
def _order_3pc_key(self, key):
pp = self.get_preprepare(*key)
if pp is None:
raise ValueError(
"{} no PrePrepare with a 'key' {} found".format(self, key)
)
self._freshness_checker.update_freshness(ledger_id=pp.ledgerId,
ts=pp.ppTime)
# warning: we use uncommitted state here. So, if this batch contains LEDGERS FREEZE txn we don't consider it.
frozen_ledgers = StaticLedgersFreezeHelper.get_frozen_ledgers(self.db_manager.get_state(CONFIG_LEDGER_ID))
self._freshness_checker.remove_ledgers(frozen_ledgers.keys())
self._data.last_batch_timestamp = pp.ppTime
self._add_to_ordered(*key)
invalid_indices = invalid_index_serializer.deserialize(pp.discarded)
invalid_reqIdr = []
valid_reqIdr = []
for ind, reqIdr in enumerate(pp.reqIdr):
if ind in invalid_indices:
invalid_reqIdr.append(reqIdr)
else:
valid_reqIdr.append(reqIdr)
self._requests.ordered_by_replica(reqIdr)
original_view_no = get_original_viewno(pp)
# TODO: Replace Ordered msg by ThreePcBatch
ordered = Ordered(
self._data.inst_id,
pp.viewNo,
valid_reqIdr,
invalid_reqIdr,
pp.ppSeqNo,
pp.ppTime,
pp.ledgerId,
pp.stateRootHash,
pp.txnRootHash,
pp.auditTxnRootHash if f.AUDIT_TXN_ROOT_HASH.nm in pp else None,
self._get_primaries_for_ordered(pp),
self._get_node_reg_for_ordered(pp),
original_view_no,
pp.digest,
)
self._discard_ordered_req_keys(pp)
# BLS multi-sig:
self.l_bls_bft_replica.process_order(key, self._data.quorums, pp)
self._bus.send(ordered)
ordered_msg = "{} ordered batch request, view no {}, ppSeqNo {}, ledger {}, " \
"state root {}, txn root {}, audit root {}".format(self, pp.viewNo, pp.ppSeqNo, pp.ledgerId,
pp.stateRootHash, pp.txnRootHash,
pp.auditTxnRootHash)
logger.debug("{}, requests ordered {}, discarded {}".
format(ordered_msg, valid_reqIdr, invalid_reqIdr))
logger.info("{}, requests ordered {}, discarded {}".
format(ordered_msg, len(valid_reqIdr), len(invalid_reqIdr)))
if self.is_master:
self.metrics.add_event(MetricsName.ORDERED_BATCH_SIZE, len(valid_reqIdr) + len(invalid_reqIdr))
self.metrics.add_event(MetricsName.ORDERED_BATCH_INVALID_COUNT, len(invalid_reqIdr))
else:
self.metrics.add_event(MetricsName.BACKUP_ORDERED_BATCH_SIZE, len(valid_reqIdr))
# do it after Ordered msg is sent
self._try_finish_reordering_after_vc(key[1])
return True
def _add_to_ordered(self, view_no: int, pp_seq_no: int):
self.ordered.add(view_no, pp_seq_no)
self.last_ordered_3pc = (view_no, pp_seq_no)
def _get_primaries_for_ordered(self, pp):
txn_primaries = self._get_from_audit_for_ordered(pp, AUDIT_TXN_PRIMARIES)
if txn_primaries is None:
# TODO: it's possible to get into this case if we have txns being ordered after catch-up is finished
# when we have no batches applied (uncommitted txns are reverted when catchup is started)
# Re-applying of batches will be done in apply_stashed_reqs in node.py,
# but as we need to fill primaries field in Ordered, we have to emulate what NodeRegHandler would do here
# TODO: fix this by getting rid of Ordered msg and using ThreePcBatch instead
txn_primaries = self._write_manager.primary_reg_handler.primaries_selector.select_primaries(self.view_no)
return txn_primaries
def _get_node_reg_for_ordered(self, pp):
txn_node_reg = self._get_from_audit_for_ordered(pp, AUDIT_TXN_NODE_REG)
if txn_node_reg is None:
# TODO: it's possible to get into this case if we have txns being ordered after catch-up is finished
# when we have no batches applied (uncommitted txns are reverted when catchup is started)
# Re-applying of batches will be done in apply_stashed_reqs in node.py,
# but as we need to fill node_reg field in Ordered, we have to emulate what NodeRegHandler would do here
# TODO: fix this by getting rid of Ordered msg and using ThreePcBatch instead
txn_node_reg = list(self._write_manager.node_reg_handler.uncommitted_node_reg)
return txn_node_reg
def _get_from_audit_for_ordered(self, pp, field):
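        # Find the uncommitted audit txn matching this PRE-PREPARE's view_no and
        # pp_seq_no and return the requested field; an int value is treated as a
        # back-reference (delta in seq_no) to the audit txn that holds the data.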
if not self.is_master:
return []
ledger = self.db_manager.get_ledger(AUDIT_LEDGER_ID)
for index, txn in enumerate(ledger.get_uncommitted_txns()):
payload_data = get_payload_data(txn)
pp_view_no = get_original_viewno(pp)
if pp.ppSeqNo == payload_data[AUDIT_TXN_PP_SEQ_NO] and \
pp_view_no == payload_data[AUDIT_TXN_VIEW_NO]:
txn_data = payload_data.get(field)
if isinstance(txn_data, Iterable):
return txn_data
elif isinstance(txn_data, int):
last_txn_seq_no = get_seq_no(txn) - txn_data
return get_payload_data(
ledger.get_by_seq_no_uncommitted(last_txn_seq_no)).get(field)
break
return None
def _discard_ordered_req_keys(self, pp: PrePrepare):
for k in pp.reqIdr:
            # Using discard since the key may not be present: in the case of the
            # primary, the key was popped out while creating the PRE-PREPARE.
            # Or, in the case of a node catching up, it will not validate
            # PRE-PREPAREs or PREPAREs but will only validate the number of COMMITs
            # and their consistency with the PRE-PREPARE and PREPAREs
self.discard_req_key(pp.ledgerId, k)
def _can_order(self, commit: Commit) -> Tuple[bool, Optional[str]]:
"""
Return whether the specified commitRequest can be returned to the node.
Decision criteria:
        - If exactly n-f COMMITs have been received, return the request to the node
        - If fewer than n-f COMMITs, there is probably no consensus on the
        request yet; don't return the request to the node
        - If more than n-f, the request has already been returned to the node;
        don't return it again
:param commit: the COMMIT
"""
quorum = self._data.quorums.commit.value
if not self.commits.hasQuorum(commit, quorum):
return False, "no quorum ({}): {} commits where f is {}". \
format(quorum, commit, self.f)
key = (commit.viewNo, commit.ppSeqNo)
if self._validator.has_already_ordered(*key):
return False, "already ordered"
if commit.ppSeqNo > 1 and not self._all_prev_ordered(commit):
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if viewNo not in self.stashed_out_of_order_commits:
self.stashed_out_of_order_commits[viewNo] = {}
self.stashed_out_of_order_commits[viewNo][ppSeqNo] = commit
self._out_of_order_repeater.start()
return False, "stashing {} since out of order". \
format(commit)
return True, None
def _process_stashed_out_of_order_commits(self):
# This method is called periodically to check for any commits that
# were stashed due to lack of commits before them and orders them if it
# can
if not self.can_order_commits():
return
logger.debug('{} trying to order from out of order commits. '
'Len(stashed_out_of_order_commits) == {}'
.format(self, len(self.stashed_out_of_order_commits)))
if self.last_ordered_3pc:
lastOrdered = self.last_ordered_3pc
vToRemove = set()
for v in self.stashed_out_of_order_commits:
if v < lastOrdered[0]:
logger.debug(
"{} found commits {} from previous view {}"
" that were not ordered but last ordered"
" is {}".format(
self, self.stashed_out_of_order_commits[v], v, lastOrdered))
vToRemove.add(v)
continue
pToRemove = set()
for p, commit in self.stashed_out_of_order_commits[v].items():
if (v, p) in self.ordered or \
self._validator.has_already_ordered(*(commit.viewNo, commit.ppSeqNo)):
pToRemove.add(p)
continue
if (v == lastOrdered[0] and lastOrdered == (v, p - 1)) or \
(v > lastOrdered[0] and self._is_lowest_commit_in_view(commit)):
logger.debug("{} ordering stashed commit {}".format(self, commit))
if self._try_order(commit):
lastOrdered = (v, p)
pToRemove.add(p)
for p in pToRemove:
del self.stashed_out_of_order_commits[v][p]
if not self.stashed_out_of_order_commits[v]:
vToRemove.add(v)
for v in vToRemove:
del self.stashed_out_of_order_commits[v]
if not self.stashed_out_of_order_commits:
self._out_of_order_repeater.stop()
else:
            logger.debug('{} last_ordered_3pc is False. '
'Len(stashed_out_of_order_commits) == {}'
.format(self, len(self.stashed_out_of_order_commits)))
def _is_lowest_commit_in_view(self, commit):
view_no = commit.viewNo
if view_no > self.view_no:
logger.debug('{} encountered {} which belongs to a later view'.format(self, commit))
return False
return commit.ppSeqNo == 1
def _all_prev_ordered(self, commit: Commit):
"""
Return True if all previous COMMITs have been ordered
"""
# TODO: This method does a lot of work, choose correct data
# structures to make it efficient.
viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
if self.last_ordered_3pc[1] == ppSeqNo - 1:
# Last ordered was in same view as this COMMIT
return True
# if some PREPAREs/COMMITs were completely missed in the same view
toCheck = set()
toCheck.update(set(self.sent_preprepares.keys()))
toCheck.update(set(self.prePrepares.keys()))
toCheck.update(set(self.prepares.keys()))
toCheck.update(set(self.commits.keys()))
for (v, p) in toCheck:
if v < viewNo and (v, p) not in self.ordered:
# Have commits from previous view that are unordered.
return False
if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
# If unordered commits are found with lower ppSeqNo then this
# cannot be ordered.
return False
return True
def _can_commit(self, prepare: Prepare) -> (bool, str):
"""
Return whether the specified PREPARE can proceed to the Commit
step.
Decision criteria:
        - If this replica has received exactly n-f-1 PREPAREs, send a COMMIT.
        - If fewer than n-f-1 PREPAREs, there is probably no consensus on
        the request yet; don't commit
        - If more than n-f-1, a COMMIT has already been sent; don't commit again
:param prepare: the PREPARE
"""
quorum = self._data.quorums.prepare.value
if not self.prepares.hasQuorum(prepare, quorum):
return False, 'does not have prepare quorum for {}'.format(prepare)
if self._has_committed(prepare):
return False, 'has already sent COMMIT for {}'.format(prepare)
return True, ''
def _has_committed(self, request) -> bool:
return self.commits.hasCommitFrom(ThreePhaseKey(
request.viewNo, request.ppSeqNo), self.name)
def _is_the_last_old_preprepare(self, pp_seq_no):
return self._data.prev_view_prepare_cert == pp_seq_no
def post_batch_creation(self, three_pc_batch: ThreePcBatch):
"""
        A batch of requests has been created and applied, but not yet
        committed to the ledger and state.
        :param three_pc_batch: the applied batch
        :return:
"""
ledger_id = three_pc_batch.ledger_id
if self._write_manager.is_valid_ledger_id(ledger_id):
self._write_manager.post_apply_batch(three_pc_batch)
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
def post_batch_rejection(self, ledger_id):
"""
        A batch of requests for the given ledger has been rejected.
        :param ledger_id: id of the ledger whose batch was rejected
:return:
"""
if self._write_manager.is_valid_ledger_id(ledger_id):
self._write_manager.post_batch_rejected(ledger_id)
else:
logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
def _ledger_id_for_request(self, request: Request):
if request.operation.get(TXN_TYPE) is None:
raise ValueError(
"{} TXN_TYPE is not defined for request {}".format(self, request)
)
typ = request.operation[TXN_TYPE]
return self._write_manager.type_to_ledger_id[typ]
def _do_dynamic_validation(self, request: Request, req_pp_time: int):
"""
State based validation
"""
# Digest validation
# TODO implicit caller's context: request is processed by (master) replica
# as part of PrePrepare 3PC batch
ledger_id, seq_no = self.db_manager.get_store(SEQ_NO_DB_LABEL).get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to order already ordered request')
ledger = self.db_manager.get_ledger(self._ledger_id_for_request(request))
for txn in ledger.uncommittedTxns:
if get_payload_digest(txn) == request.payload_digest:
raise SuspiciousPrePrepare('Trying to order already ordered request')
# TAA validation
# For now, we need to call taa_validation not from dynamic_validation because
# req_pp_time is required
self._write_manager.do_taa_validation(request, req_pp_time, self._config)
self._write_manager.dynamic_validation(request, req_pp_time)
@measure_consensus_time(MetricsName.REQUEST_PROCESSING_TIME,
MetricsName.BACKUP_REQUEST_PROCESSING_TIME)
def _process_req_during_batch(self,
req: Request,
cons_time: int):
"""
        This method performs dynamic validation and applies the request.
        Any error raised during validation is propagated to the caller
"""
if self.is_master:
self._do_dynamic_validation(req, cons_time)
self._write_manager.apply_request(req, cons_time)
# ToDo: Maybe we should remove this,
# because we have the same one in replica's validator
def can_send_3pc_batch(self):
if not self._data.is_primary:
return False
if not self._data.is_participating:
return False
if not self.is_master and not self._data.master_reordered_after_vc:
return False
if self._data.waiting_for_new_view:
return False
if self._data.prev_view_prepare_cert > self._lastPrePrepareSeqNo:
return False
# do not send new 3PC batches in a new view until the first batch is ordered
if self.view_no > 0 and self._lastPrePrepareSeqNo > self._data.prev_view_prepare_cert \
and self.last_ordered_3pc[1] < self._data.prev_view_prepare_cert + 1:
return False
if self.view_no < self.last_ordered_3pc[0]:
return False
if self.view_no == self.last_ordered_3pc[0]:
if self._lastPrePrepareSeqNo < self.last_ordered_3pc[1]:
return False
# This check is done for current view only to simplify logic and avoid
# edge cases between views, especially taking into account that we need
# to send a batch in new view as soon as possible
if self._config.Max3PCBatchesInFlight is not None:
batches_in_flight = self._lastPrePrepareSeqNo - self.last_ordered_3pc[1]
if batches_in_flight >= self._config.Max3PCBatchesInFlight:
if self._can_log_skip_send_3pc():
logger.info("{} not creating new batch because there already {} in flight out of {} allowed".
format(self.name, batches_in_flight, self._config.Max3PCBatchesInFlight))
return False
self._skip_send_3pc_ts = None
return True
def _can_log_skip_send_3pc(self):
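        # Rate-limit the "not creating new batch" log message to at most once
        # per Max3PCBatchWait seconds.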
current_time = time.perf_counter()
if self._skip_send_3pc_ts is None:
self._skip_send_3pc_ts = current_time
return True
if current_time - self._skip_send_3pc_ts > self._config.Max3PCBatchWait:
self._skip_send_3pc_ts = current_time
return True
return False
def can_order_commits(self):
if self._data.is_participating:
return True
if self._data.is_synced and self._data.legacy_vc_in_progress:
return True
return False
def dequeue_pre_prepares(self):
"""
        Dequeue any received PRE-PREPAREs that were stashed because some of
        their requests were not finalised or because the replica was missing
        earlier PRE-PREPAREs
:return:
"""
ppsReady = []
# Check if any requests have become finalised belonging to any stashed
# PRE-PREPAREs.
for i, (pp, sender, reqIds) in enumerate(
self.prePreparesPendingFinReqs):
finalised = set()
for r in reqIds:
if self._requests.is_finalised(r):
finalised.add(r)
diff = reqIds.difference(finalised)
# All requests become finalised
if not diff:
ppsReady.append(i)
self.prePreparesPendingFinReqs[i] = (pp, sender, diff)
for i in sorted(ppsReady, reverse=True):
pp, sender, _ = self.prePreparesPendingFinReqs.pop(i)
self.prePreparesPendingPrevPP[pp.viewNo, pp.ppSeqNo] = (pp, sender)
r = 0
while self.prePreparesPendingPrevPP and self._can_dequeue_pre_prepare(
*self.prePreparesPendingPrevPP.iloc[0]):
_, (pp, sender) = self.prePreparesPendingPrevPP.popitem(last=False)
if not self._can_pp_seq_no_be_in_view(pp.viewNo, pp.ppSeqNo):
self._discard(pp, "Pre-Prepare from a previous view",
logger.debug)
continue
logger.info("{} popping stashed PREPREPARE{} "
"from sender {}".format(self, (pp.viewNo, pp.ppSeqNo), sender))
self._network.process_incoming(pp, sender)
r += 1
return r
def _can_dequeue_pre_prepare(self, view_no: int, pp_seq_no: int):
return self._is_next_pre_prepare(view_no, pp_seq_no) or compare_3PC_keys(
(view_no, pp_seq_no), self.last_ordered_3pc) >= 0
# TODO: Convert this into a free function?
def _discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
"""
Discard a message and log a reason using the specified `logMethod`.
:param msg: the message to discard
:param reason: the reason why this message is being discarded
:param logMethod: the logging function to be used
:param cliOutput: if truthy, informs a CLI that the logged msg should
be printed
"""
reason = "" if not reason else " because {}".format(reason)
logMethod("{} discarding message {}{}".format(self, msg, reason),
extra={"cli": cliOutput})
def _can_pp_seq_no_be_in_view(self, view_no, pp_seq_no):
"""
        Check whether the `pp_seq_no` could have been in view `view_no`.
        Returns False when the `pp_seq_no` belongs to a later view than
        `view_no`, otherwise returns True
:return:
"""
if view_no > self.view_no:
raise PlenumValueError(
'view_no', view_no,
"<= current view_no {}".format(self.view_no),
prefix=self
)
return view_no == self.view_no or (view_no < self.view_no and self._data.legacy_last_prepared_before_view_change and
compare_3PC_keys((view_no, pp_seq_no),
self._data.legacy_last_prepared_before_view_change) >= 0)
def send_3pc_batch(self):
if not self.can_send_3pc_batch():
return 0
sent_batches = set()
# 1. send 3PC batches with requests for every ledger
self._send_3pc_batches_for_ledgers(sent_batches)
# 2. for every ledger we haven't just sent a 3PC batch check if it's not fresh enough,
# and send an empty 3PC batch to update the state if needed
self._send_3pc_freshness_batch(sent_batches)
# 3. send 3PC batch if new primaries elected
self.l_send_3pc_primaries_batch(sent_batches)
# 4. update ts of last sent 3PC batch
if len(sent_batches) > 0:
self.lastBatchCreated = self.get_current_time()
return len(sent_batches)
def l_send_3pc_primaries_batch(self, sent_batches):
        # As we've selected new primaries, we need to send a 3PC batch
        # so that these primaries can be saved in the audit ledger
if not sent_batches and self.primaries_batch_needed:
logger.debug("Sending a 3PC batch to propagate newly selected primaries")
self.primaries_batch_needed = False
sent_batches.add(self._do_send_3pc_batch(ledger_id=DOMAIN_LEDGER_ID))
def _send_3pc_freshness_batch(self, sent_batches):
if not self._config.UPDATE_STATE_FRESHNESS:
return
if not self.is_master:
return
# Update freshness for all outdated ledgers sequentially without any waits
# TODO: Consider sending every next update in Max3PCBatchWait only
outdated_ledgers = self._freshness_checker.check_freshness(self.get_time_for_3pc_batch())
for ledger_id, ts in outdated_ledgers.items():
if ledger_id in sent_batches:
logger.debug("Ledger {} is not updated for {} seconds, "
"but a 3PC for this ledger has been just sent".format(ledger_id, ts))
continue
logger.info("Ledger {} is not updated for {} seconds, "
"so its freshness state is going to be updated now".format(ledger_id, ts))
sent_batches.add(
self._do_send_3pc_batch(ledger_id=ledger_id))
def _send_3pc_batches_for_ledgers(self, sent_batches):
# TODO: Consider sending every next update in Max3PCBatchWait only
for ledger_id, q in self.requestQueues.items():
if len(q) == 0:
continue
queue_full = len(q) >= self._config.Max3PCBatchSize
timeout = self.lastBatchCreated + self._config.Max3PCBatchWait < self.get_current_time()
if not queue_full and not timeout:
continue
sent_batches.add(
self._do_send_3pc_batch(ledger_id=ledger_id))
def _do_send_3pc_batch(self, ledger_id):
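        # Create a PRE-PREPARE for the given ledger, broadcast it and start
        # tracking the batch so that it can be reverted if it is not ordered.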
oldStateRootHash = self.get_state_root_hash(ledger_id, to_str=False)
pre_prepare = self.create_3pc_batch(ledger_id)
self.send_pre_prepare(pre_prepare)
if not self.is_master:
self.db_manager.get_store(LAST_SENT_PP_STORE_LABEL).store_last_sent_pp_seq_no(
self._data.inst_id, pre_prepare.ppSeqNo)
self._track_batches(pre_prepare, oldStateRootHash)
return ledger_id
@measure_consensus_time(MetricsName.CREATE_3PC_BATCH_TIME,
MetricsName.BACKUP_CREATE_3PC_BATCH_TIME)
def create_3pc_batch(self, ledger_id):
pp_seq_no = self.lastPrePrepareSeqNo + 1
pool_state_root_hash = self.get_state_root_hash(POOL_LEDGER_ID)
logger.debug("{} creating batch {} for ledger {} with state root {}".format(
self, pp_seq_no, ledger_id,
self.get_state_root_hash(ledger_id, to_str=False)))
if self.last_accepted_pre_prepare_time is None:
last_ordered_ts = self._get_last_timestamp_from_state(ledger_id)
if last_ordered_ts:
self.last_accepted_pre_prepare_time = last_ordered_ts
        # DO NOT REMOVE `view_no` argument, it is used while replaying
# tm = self.utc_epoch
tm = self._get_utc_epoch_for_preprepare(self._data.inst_id, self.view_no,
pp_seq_no)
reqs, invalid_indices, rejects = self._consume_req_queue_for_pre_prepare(
ledger_id, tm, self.view_no, pp_seq_no)
req_ids = [req.digest for req in reqs]
digest = self.generate_pp_digest(req_ids, self.view_no, tm)
if self.is_master:
three_pc_batch = ThreePcBatch(
ledger_id=ledger_id,
inst_id=self._data.inst_id,
view_no=self.view_no,
pp_seq_no=pp_seq_no,
pp_time=tm,
state_root=self.get_state_root_hash(ledger_id, to_str=False),
txn_root=self.get_txn_root_hash(ledger_id, to_str=False),
valid_digests=self._get_valid_req_ids_from_all_requests(reqs, invalid_indices),
pp_digest=digest,
original_view_no=self.view_no,
)
self.post_batch_creation(three_pc_batch)
state_root_hash = self.get_state_root_hash(ledger_id)
audit_txn_root_hash = self.get_txn_root_hash(AUDIT_LEDGER_ID)
# TODO: for now default value for fields sub_seq_no is 0 and for final is True
params = [
self._data.inst_id,
self.view_no,
pp_seq_no,
tm,
req_ids,
invalid_index_serializer.serialize(invalid_indices, toBytes=False),
digest,
ledger_id,
state_root_hash,
self.get_txn_root_hash(ledger_id),
0,
True,
pool_state_root_hash,
audit_txn_root_hash,
]
# BLS multi-sig:
params = self.l_bls_bft_replica.update_pre_prepare(params, ledger_id)
pre_prepare = PrePrepare(*params)
logger.trace('{} created a PRE-PREPARE with {} requests for ledger {}'.format(self, len(reqs), ledger_id))
self.last_accepted_pre_prepare_time = tm
if self.is_master and rejects:
for reject in rejects:
self._network.send(reject)
self._add_to_sent_pre_prepares(pre_prepare)
return pre_prepare
def _get_last_timestamp_from_state(self, ledger_id):
if ledger_id == DOMAIN_LEDGER_ID:
ts_store = self.db_manager.get_store(TS_LABEL)
if ts_store:
last_timestamp = ts_store.get_last_key()
if last_timestamp:
last_timestamp = int(last_timestamp.decode())
logger.debug("Last ordered timestamp from store is : {}".format(last_timestamp))
return last_timestamp
return None
# This is to enable replaying, inst_id, view_no and pp_seq_no are used
# while replaying
def _get_utc_epoch_for_preprepare(self, inst_id, view_no, pp_seq_no):
tm = self.get_time_for_3pc_batch()
if self.last_accepted_pre_prepare_time and \
tm < self.last_accepted_pre_prepare_time:
tm = self.last_accepted_pre_prepare_time
return tm
def _consume_req_queue_for_pre_prepare(self, ledger_id, tm,
view_no, pp_seq_no):
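        # Pop up to Max3PCBatchSize finalised requests from the ledger's queue,
        # apply them, and collect the indices of invalid requests together with
        # the Reject messages to be sent; requests raising SuspiciousPrePrepare
        # are dropped from the batch entirely.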
reqs = []
rejects = []
invalid_indices = []
idx = 0
while len(reqs) < self._config.Max3PCBatchSize \
and self.requestQueues[ledger_id]:
key = self.requestQueues[ledger_id].pop(0)
if key in self._requests:
fin_req = self._requests[key].finalised
malicious_req = False
try:
self._process_req_during_batch(fin_req,
tm)
except (
InvalidClientMessageException
) as ex:
logger.warning('{} encountered exception {} while processing {}, '
'will reject'.format(self, ex, fin_req))
rejects.append((fin_req.key, Reject(fin_req.identifier, fin_req.reqId,
reasonForClientFromException(ex), ex.code)))
invalid_indices.append(idx)
except SuspiciousPrePrepare:
malicious_req = True
finally:
if not malicious_req:
reqs.append(fin_req)
if not malicious_req:
idx += 1
else:
logger.debug('{} found {} in its request queue but the '
'corresponding request was removed'.format(self, key))
return reqs, invalid_indices, rejects
@measure_consensus_time(MetricsName.SEND_PREPREPARE_TIME,
MetricsName.BACKUP_SEND_PREPREPARE_TIME)
def send_pre_prepare(self, ppReq: PrePrepare):
key = (ppReq.viewNo, ppReq.ppSeqNo)
logger.debug("{} sending PRE-PREPARE{}".format(self, key))
self._send(ppReq, stat=TPCStat.PrePrepareSent)
def _send(self, msg, dst=None, stat=None) -> None:
"""
Send a message to the node on which this replica resides.
:param stat:
        :param dst: destination of the message (sends to all recipients if None)
:param msg: the message to send
"""
if stat:
self.stats.inc(stat)
self._network.send(msg, dst=dst)
def revert_unordered_batches(self):
"""
Revert changes to ledger (uncommitted) and state made by any requests
that have not been ordered.
"""
i = 0
for key in sorted(self.batches.keys(), reverse=True):
if compare_3PC_keys(self.last_ordered_3pc, key) > 0:
ledger_id, discarded, _, prevStateRoot, len_reqIdr = self.batches.pop(key)
discarded = invalid_index_serializer.deserialize(discarded)
logger.debug('{} reverting 3PC key {}'.format(self, key))
self._revert(ledger_id, prevStateRoot, len_reqIdr - len(discarded))
pre_prepare = self.get_preprepare(*key)
if pre_prepare:
for req_id in pre_prepare.reqIdr:
self.requestQueues[ledger_id].add(req_id)
self._lastPrePrepareSeqNo -= 1
i += 1
else:
break
last_txn = self.db_manager.get_ledger(AUDIT_LEDGER_ID).get_last_committed_txn()
self.last_accepted_pre_prepare_time = None if last_txn is None else get_txn_time(last_txn)
logger.info('{} reverted {} batches before starting catch up'.format(self, i))
return i
def l_last_prepared_certificate_in_view(self) -> Optional[Tuple[int, int]]:
        # Pick the latest 3PC key in the view that has a quorum of PREPAREs
        # (i.e. the last prepared certificate).
# TODO: Consider stashed messages too?
if not self.is_master:
raise LogicError("{} is not a master".format(self))
keys = []
quorum = self._data.quorums.prepare.value
for key in self.prepares.keys():
if self.prepares.hasQuorum(ThreePhaseKey(*key), quorum):
keys.append(key)
return max_3PC_key(keys) if keys else None
def _caught_up_till_3pc(self, last_caught_up_3PC):
self.last_ordered_3pc = last_caught_up_3PC
self._remove_till_caught_up_3pc(last_caught_up_3PC)
# Get all pre-prepares and prepares since the latest stable checkpoint
audit_ledger = self.db_manager.get_ledger(AUDIT_LEDGER_ID)
last_txn = audit_ledger.get_last_txn()
if not last_txn:
return
self._data.last_batch_timestamp = get_txn_time(last_txn)
to = get_payload_data(last_txn)[AUDIT_TXN_PP_SEQ_NO]
frm = to - to % self._config.CHK_FREQ + 1
try:
batch_ids = [
BatchID(
view_no=get_payload_data(txn)[AUDIT_TXN_VIEW_NO],
pp_view_no=get_payload_data(txn)[AUDIT_TXN_VIEW_NO],
pp_seq_no=get_payload_data(txn)[AUDIT_TXN_PP_SEQ_NO],
pp_digest=get_payload_data(txn)[AUDIT_TXN_DIGEST]
)
for _, txn in audit_ledger.getAllTxn(frm=frm, to=to)
]
self._data.preprepared.extend(batch_ids)
self._data.prepared.extend(batch_ids)
except KeyError as e:
logger.warning(
'Pre-Prepared/Prepared not restored as Audit TXN is missing needed fields: {}'.format(e)
)
def catchup_clear_for_backup(self):
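        # After catch-up a non-primary backup drops all collected 3PC messages
        # and its prepared/pre-prepared shared data, and marks the next batch
        # as the first one after catch-up.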
if not self._data.is_primary:
self.sent_preprepares.clear()
self.prePrepares.clear()
self.prepares.clear()
self.commits.clear()
self._data.prepared.clear()
self._data.preprepared.clear()
self.first_batch_after_catchup = True
def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
"""
Remove any 3 phase messages till the last ordered key and also remove
any corresponding request keys
"""
outdated_pre_prepares = {}
for key, pp in self.prePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
for key, pp in self.sent_preprepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
logger.trace('{} going to remove messages for {} 3PC keys'.format(
self, len(outdated_pre_prepares)))
for key, pp in outdated_pre_prepares.items():
self.batches.pop(key, None)
self.sent_preprepares.pop(key, None)
self.prePrepares.pop(key, None)
self.prepares.pop(key, None)
self.commits.pop(key, None)
self._discard_ordered_req_keys(pp)
self._clear_batch(pp)
def get_sent_preprepare(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
return self.sent_preprepares.get(key)
def get_sent_prepare(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
if key in self.prepares:
prepare = self.prepares[key].msg
if self.prepares.hasPrepareFrom(prepare, self.name):
return prepare
return None
def get_sent_commit(self, viewNo, ppSeqNo):
key = (viewNo, ppSeqNo)
if key in self.commits:
commit = self.commits[key].msg
if self.commits.hasCommitFrom(commit, self.name):
return commit
return None
@staticmethod
def generate_pp_digest(req_digests, original_view_no, pp_time):
return sha256(serialize_msg_for_signing([original_view_no, pp_time, *req_digests])).hexdigest()
def replica_batch_digest(self, reqs):
return replica_batch_digest(reqs)
def _clear_all_3pc_msgs(self):
# Clear the 3PC log
self.batches.clear()
self.prePrepares.clear()
self.prepares.clear()
self.commits.clear()
self.pre_prepare_tss.clear()
self.prePreparesPendingFinReqs.clear()
self.prePreparesPendingPrevPP.clear()
self.sent_preprepares.clear()
def process_view_change_started(self, msg: ViewChangeStarted):
# 1. update shared data
self._data.preprepared = []
self._data.prepared = []
# 2. save existing PrePrepares
self._update_old_view_preprepares(itertools.chain(self.prePrepares.values(), self.sent_preprepares.values()))
# 3. revert unordered transactions
if self.is_master:
self.revert_unordered_batches()
# 4. clear all 3pc messages
self._clear_all_3pc_msgs()
# 5. clear ordered from previous view
self.ordered.clear_below_view(msg.view_no)
return PROCESS, None
def process_new_view_accepted(self, msg: NewViewAccepted):
self._setup_for_non_master_after_view_change(msg.view_no)
def _setup_for_non_master_after_view_change(self, current_view):
if not self.is_master:
for v in list(self.stashed_out_of_order_commits.keys()):
if v < current_view:
self.stashed_out_of_order_commits.pop(v)
def process_catchup_checkpoints_applied(self, msg: CatchupCheckpointsApplied):
if compare_3PC_keys(msg.master_last_ordered,
msg.last_caught_up_3PC) > 0:
if self.is_master:
self._caught_up_till_3pc(msg.last_caught_up_3PC)
else:
self.first_batch_after_catchup = True
self.catchup_clear_for_backup()
self._clear_prev_view_pre_prepares()
self._stasher.process_all_stashed(STASH_CATCH_UP)
self._stasher.process_all_stashed(STASH_WAITING_FIRST_BATCH_IN_VIEW)
self._finish_master_reordering()
def _update_old_view_preprepares(self, pre_prepares: List[PrePrepare]):
for pp in pre_prepares:
view_no = get_original_viewno(pp)
self.old_view_preprepares[(view_no, pp.ppSeqNo, pp.digest)] = pp
def process_new_view_checkpoints_applied(self, msg: NewViewCheckpointsApplied):
result, reason = self._validate(msg)
if result != PROCESS:
return result, reason
logger.info("{} processing {}".format(self, msg))
missing_batches = []
if self.is_master:
# apply PrePrepares from NewView that we have
# request missing PrePrepares from NewView
for batch_id in msg.batches:
pp = self.old_view_preprepares.get((batch_id.pp_view_no, batch_id.pp_seq_no, batch_id.pp_digest))
if pp is None:
missing_batches.append(batch_id)
else:
self._process_pre_prepare_from_old_view(pp)
if len(msg.batches) == 0:
self._finish_master_reordering()
if missing_batches:
self._request_old_view_pre_prepares(missing_batches)
self.primaries_batch_needed = True
if not missing_batches:
self._reapplied_in_new_view()
def process_old_view_preprepare_request(self, msg: OldViewPrePrepareRequest, sender):
result, reason = self._validate(msg)
if result != PROCESS:
return result, reason
old_view_pps = []
for batch_id in msg.batch_ids:
batch_id = BatchID(*batch_id)
pp = self.old_view_preprepares.get((batch_id.pp_view_no, batch_id.pp_seq_no, batch_id.pp_digest))
if pp is not None:
old_view_pps.append(pp)
rep = OldViewPrePrepareReply(self._data.inst_id, old_view_pps)
self._send(rep, dst=[replica_name_to_node_name(sender)])
def process_old_view_preprepare_reply(self, msg: OldViewPrePrepareReply, sender):
result, reason = self._validate(msg)
if result != PROCESS:
return result, reason
for pp_dict in msg.preprepares:
try:
pp = PrePrepare(**pp_dict)
if self._data.new_view is None or \
preprepare_to_batch_id(pp) not in self._data.new_view.batches:
logger.info("Skipped useless PrePrepare {} from {}".format(pp, sender))
continue
self._process_pre_prepare_from_old_view(pp)
except Exception as ex:
# TODO: catch more specific error here
logger.error("Invalid PrePrepare in {}: {}".format(msg, ex))
if self._data.prev_view_prepare_cert and self._data.prev_view_prepare_cert <= self.lastPrePrepareSeqNo:
self._reapplied_in_new_view()
def _request_old_view_pre_prepares(self, batches):
old_pp_req = OldViewPrePrepareRequest(self._data.inst_id, batches)
self._send(old_pp_req)
def _process_pre_prepare_from_old_view(self, pp):
new_pp = updateNamedTuple(pp, viewNo=self.view_no, originalViewNo=get_original_viewno(pp))
# PrePrepare is accepted from the current Primary only
sender = generateName(self._data.primary_name, self._data.inst_id)
self.process_preprepare(new_pp, sender)
return PROCESS, None
def _reapplied_in_new_view(self):
self._stasher.process_all_stashed(STASH_VIEW_3PC)
self._bus.send(ReAppliedInNewView())
def process_checkpoint_stabilized(self, msg: CheckpointStabilized):
self.gc(msg.last_stable_3pc)
def _preprepare_batch(self, pp: PrePrepare):
"""
After a PrePrepare has been validated, it is placed into the _preprepared list
"""
batch_id = preprepare_to_batch_id(pp)
if batch_id not in self._data.preprepared:
self._data.preprepared.append(batch_id)
def _prepare_batch(self, pp: PrePrepare):
"""
After a prepared certificate for the PrePrepare has been collected,
it is added to the _prepared list
"""
batch_id = preprepare_to_batch_id(pp)
if batch_id not in self._data.prepared:
self._data.prepared.append(batch_id)
def _clear_batch(self, pp: PrePrepare):
"""
When the 3PC batch has been processed, it is removed from the _preprepared and _prepared lists
"""
batch_id = preprepare_to_batch_id(pp)
if batch_id in self._data.preprepared:
self._data.preprepared.remove(batch_id)
if batch_id in self._data.prepared:
self._data.prepared.remove(batch_id)
def _finish_master_reordering(self):
if not self.is_master:
self._data.master_reordered_after_vc = True
| evernym/plenum | plenum/server/consensus/ordering_service.py | Python | apache-2.0 | 109,275 | 0.00205 |
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import uuid
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from novaclient.v1_1 import floating_ip_pools
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkClientTestCase(test.APITestCase):
def test_networkclient_no_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.nova.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
def test_networkclient_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.neutron.SecurityGroupManager)
def test_networkclient_neutron_with_nova_security_group(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions().AndReturn({'extensions': []})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
class NetworkApiNovaTestBase(test.APITestCase):
def setUp(self):
super(NetworkApiNovaTestBase, self).setUp()
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
class NetworkApiNovaSecurityGroupTests(NetworkApiNovaTestBase):
def test_server_update_security_groups(self):
all_secgroups = self.security_groups.list()
added_secgroup = all_secgroups[2]
rm_secgroup = all_secgroups[0]
cur_secgroups_raw = [{'id': sg.id, 'name': sg.name,
'rules': []}
for sg in all_secgroups[0:2]]
cur_secgroups_ret = {'security_groups': cur_secgroups_raw}
new_sg_ids = [sg.id for sg in all_secgroups[1:3]]
instance_id = self.servers.first().id
novaclient = self.stub_novaclient()
novaclient.security_groups = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.client = self.mox.CreateMockAnything()
novaclient.security_groups.list().AndReturn(all_secgroups)
url = '/servers/%s/os-security-groups' % instance_id
novaclient.client.get(url).AndReturn((200, cur_secgroups_ret))
novaclient.servers.add_security_group(instance_id, added_secgroup.name)
novaclient.servers.remove_security_group(instance_id, rm_secgroup.name)
self.mox.ReplayAll()
api.network.server_update_security_groups(
self.request, instance_id, new_sg_ids)
class NetworkApiNovaFloatingIpTests(NetworkApiNovaTestBase):
def test_floating_ip_pools_list(self):
pool_names = ['pool1', 'pool2']
pools = [floating_ip_pools.FloatingIPPool(
None, {'name': pool}) for pool in pool_names]
novaclient = self.stub_novaclient()
novaclient.floating_ip_pools = self.mox.CreateMockAnything()
novaclient.floating_ip_pools.list().AndReturn(pools)
self.mox.ReplayAll()
ret = api.network.floating_ip_pools_list(self.request)
self.assertEqual(pool_names, [p.name for p in ret])
def test_floating_ip_list(self):
fips = self.api_floating_ips.list()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(fips)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_list(self.request)
for r, e in zip(ret, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(e, attr), getattr(r, attr))
self.assertEqual(e.instance_id, r.port_id)
exp_instance_type = 'compute' if e.instance_id else None
self.assertEqual(exp_instance_type, r.instance_type)
def test_floating_ip_get(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(fip.id).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip.id)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertEqual(fip.instance_id, ret.port_id)
self.assertEqual(fip.instance_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
def test_floating_ip_allocate(self):
pool_name = 'fip_pool'
fip = [fip for fip in self.api_floating_ips.list()
if not fip.instance_id][0]
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create(pool=pool_name).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_allocate(self.request, pool_name)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertIsNone(ret.port_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_release(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(fip.id)
self.mox.ReplayAll()
api.network.tenant_floating_ip_release(self.request, fip.id)
def test_floating_ip_associate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip)
novaclient.servers.add_floating_ip(server.id, floating_ip.ip) \
.AndReturn(server)
self.mox.ReplayAll()
api.network.floating_ip_associate(self.request,
floating_ip.id,
server.id)
def test_floating_ip_disassociate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip)
novaclient.servers.remove_floating_ip(server.id, floating_ip.ip) \
.AndReturn(server)
self.mox.ReplayAll()
api.network.floating_ip_disassociate(self.request,
floating_ip.id)
def test_floating_ip_target_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list().AndReturn(servers)
self.mox.ReplayAll()
targets = api.network.floating_ip_target_list(self.request)
for target, server in zip(targets, servers):
self.assertEqual(server.id, target.id)
self.assertEqual('%s (%s)' % (server.name, server.id), target.name)
def test_floating_ip_target_get_by_instance(self):
self.mox.ReplayAll()
instance_id = self.servers.first().id
ret = api.network.floating_ip_target_get_by_instance(self.request,
instance_id)
self.assertEqual(instance_id, ret)
class NetworkApiNeutronTestBase(test.APITestCase):
def setUp(self):
super(NetworkApiNeutronTestBase, self).setUp()
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.qclient = self.stub_neutronclient()
class NetworkApiNeutronTests(NetworkApiNeutronTestBase):
def _get_expected_addresses(self, server, no_fip_expected=True):
server_ports = self.ports.filter(device_id=server.id)
addresses = collections.defaultdict(list)
for p in server_ports:
net_name = self.networks.get(id=p['network_id']).name
for ip in p.fixed_ips:
addresses[net_name].append(
{'version': 4,
'addr': ip['ip_address'],
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'fixed'})
if no_fip_expected:
continue
fips = self.q_floating_ips.filter(port_id=p['id'])
if not fips:
continue
# Only one FIP should match.
fip = fips[0]
addresses[net_name].append(
{'version': 4,
'addr': fip.floating_ip_address,
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'floating'})
return addresses
def _check_server_address(self, res_server_data, no_fip_expected=False):
expected_addresses = self._get_expected_addresses(res_server_data,
no_fip_expected)
self.assertEqual(len(expected_addresses),
len(res_server_data.addresses))
for net, addresses in expected_addresses.items():
self.assertIn(net, res_server_data.addresses)
self.assertEqual(addresses, res_server_data.addresses[net])
def _test_servers_update_addresses(self, router_enabled=True):
tenant_id = self.request.user.tenant_id
servers = copy.deepcopy(self.servers.list())
server_ids = [server.id for server in servers]
server_ports = [p for p in self.api_ports.list()
if p['device_id'] in server_ids]
server_port_ids = [p['id'] for p in server_ports]
if router_enabled:
assoc_fips = [fip for fip in self.api_q_floating_ips.list()
if fip['port_id'] in server_port_ids]
server_network_ids = [p['network_id'] for p in server_ports]
server_networks = [net for net in self.api_networks.list()
if net['id'] in server_network_ids]
self.qclient.list_ports(device_id=server_ids) \
.AndReturn({'ports': server_ports})
if router_enabled:
self.qclient.list_floatingips(tenant_id=tenant_id,
port_id=server_port_ids) \
.AndReturn({'floatingips': assoc_fips})
self.qclient.list_ports(tenant_id=tenant_id) \
.AndReturn({'ports': self.api_ports.list()})
self.qclient.list_networks(id=set(server_network_ids)) \
.AndReturn({'networks': server_networks})
self.qclient.list_subnets() \
.AndReturn({'subnets': self.api_subnets.list()})
self.mox.ReplayAll()
api.network.servers_update_addresses(self.request, servers)
self.assertEqual(self.servers.count(), len(servers))
self.assertEqual([server.id for server in self.servers.list()],
[server.id for server in servers])
no_fip_expected = not router_enabled
# server[0] has one fixed IP and one floating IP
# if the router extension is enabled.
self._check_server_address(servers[0], no_fip_expected)
# The expected addresses are computed above; also examine the result manually once.
addrs = servers[0].addresses['net1']
if router_enabled:
self.assertEqual(2, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('floating', addrs[1]['OS-EXT-IPS:type'])
else:
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[1] has one fixed IP.
self._check_server_address(servers[1], no_fip_expected)
# manual check.
addrs = servers[1].addresses['net2']
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[2] has no corresponding ports in neutron_data,
# so it should be an empty dict.
self.assertFalse(servers[2].addresses)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_servers_update_addresses(self):
self._test_servers_update_addresses()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_servers_update_addresses_router_disabled(self):
self._test_servers_update_addresses(router_enabled=False)
class NetworkApiNeutronSecurityGroupTests(NetworkApiNeutronTestBase):
def setUp(self):
super(NetworkApiNeutronSecurityGroupTests, self).setUp()
self.qclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
self.sg_dict = dict([(sg['id'], sg['name']) for sg
in self.api_q_secgroups.list()])
def _cmp_sg_rule(self, exprule, retrule):
self.assertEqual(exprule['id'], retrule.id)
self.assertEqual(exprule['security_group_id'],
retrule.parent_group_id)
self.assertEqual(exprule['direction'],
retrule.direction)
self.assertEqual(exprule['ethertype'],
retrule.ethertype)
self.assertEqual(exprule['port_range_min'],
retrule.from_port)
self.assertEqual(exprule['port_range_max'],
retrule.to_port,)
if (exprule['remote_ip_prefix'] is None and
exprule['remote_group_id'] is None):
expcidr = ('::/0' if exprule['ethertype'] == 'IPv6'
else '0.0.0.0/0')
else:
expcidr = exprule['remote_ip_prefix']
self.assertEqual(expcidr, retrule.ip_range.get('cidr'))
self.assertEqual(self.sg_dict.get(exprule['remote_group_id']),
retrule.group.get('name'))
def _cmp_sg(self, exp_sg, ret_sg):
self.assertEqual(exp_sg['id'], ret_sg.id)
self.assertEqual(exp_sg['name'], ret_sg.name)
exp_rules = exp_sg['security_group_rules']
self.assertEqual(len(exp_rules), len(ret_sg.rules))
for (exprule, retrule) in itertools.izip(exp_rules, ret_sg.rules):
self._cmp_sg_rule(exprule, retrule)
def test_security_group_list(self):
sgs = self.api_q_secgroups.list()
tenant_id = self.request.user.tenant_id
# use deepcopy to ensure self.api_q_secgroups is not modified.
self.qclient.list_security_groups(tenant_id=tenant_id) \
.AndReturn({'security_groups': copy.deepcopy(sgs)})
self.mox.ReplayAll()
rets = api.network.security_group_list(self.request)
self.assertEqual(len(sgs), len(rets))
for (exp, ret) in itertools.izip(sgs, rets):
self._cmp_sg(exp, ret)
def test_security_group_get(self):
secgroup = self.api_q_secgroups.first()
sg_ids = set([secgroup['id']] +
[rule['remote_group_id'] for rule
in secgroup['security_group_rules']
if rule['remote_group_id']])
related_sgs = [sg for sg in self.api_q_secgroups.list()
if sg['id'] in sg_ids]
# use deepcopy to ensure self.api_q_secgroups is not modified.
self.qclient.show_security_group(secgroup['id']) \
.AndReturn({'security_group': copy.deepcopy(secgroup)})
self.qclient.list_security_groups(id=sg_ids, fields=['id', 'name']) \
.AndReturn({'security_groups': related_sgs})
self.mox.ReplayAll()
ret = api.network.security_group_get(self.request, secgroup['id'])
self._cmp_sg(secgroup, ret)
def test_security_group_create(self):
secgroup = self.api_q_secgroups.list()[1]
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description'],
'tenant_id': self.request.user.project_id}}
self.qclient.create_security_group(body) \
.AndReturn({'security_group': copy.deepcopy(secgroup)})
self.mox.ReplayAll()
ret = api.network.security_group_create(self.request, secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
def test_security_group_update(self):
secgroup = self.api_q_secgroups.list()[1]
secgroup = copy.deepcopy(secgroup)
secgroup['name'] = 'newname'
secgroup['description'] = 'new description'
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description']}}
self.qclient.update_security_group(secgroup['id'], body) \
.AndReturn({'security_group': secgroup})
self.mox.ReplayAll()
ret = api.network.security_group_update(self.request,
secgroup['id'],
secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
def test_security_group_delete(self):
secgroup = self.api_q_secgroups.first()
self.qclient.delete_security_group(secgroup['id'])
self.mox.ReplayAll()
api.network.security_group_delete(self.request, secgroup['id'])
def test_security_group_rule_create(self):
sg_rule = [r for r in self.api_q_secgroup_rules.list()
if r['protocol'] == 'tcp' and r['remote_ip_prefix']][0]
sg_id = sg_rule['security_group_id']
secgroup = [sg for sg in self.api_q_secgroups.list()
if sg['id'] == sg_id][0]
post_rule = copy.deepcopy(sg_rule)
del post_rule['id']
del post_rule['tenant_id']
post_body = {'security_group_rule': post_rule}
self.qclient.create_security_group_rule(post_body) \
.AndReturn({'security_group_rule': copy.deepcopy(sg_rule)})
self.qclient.list_security_groups(id=set([sg_id]),
fields=['id', 'name']) \
.AndReturn({'security_groups': [copy.deepcopy(secgroup)]})
self.mox.ReplayAll()
ret = api.network.security_group_rule_create(
self.request, sg_rule['security_group_id'],
sg_rule['direction'], sg_rule['ethertype'], sg_rule['protocol'],
sg_rule['port_range_min'], sg_rule['port_range_max'],
sg_rule['remote_ip_prefix'], sg_rule['remote_group_id'])
self._cmp_sg_rule(sg_rule, ret)
def test_security_group_rule_delete(self):
sg_rule = self.api_q_secgroup_rules.first()
self.qclient.delete_security_group_rule(sg_rule['id'])
self.mox.ReplayAll()
api.network.security_group_rule_delete(self.request, sg_rule['id'])
def _get_instance(self, cur_sg_ids):
instance_port = [p for p in self.api_ports.list()
if p['device_owner'].startswith('compute:')][0]
instance_id = instance_port['device_id']
# Emulate an instance with two ports
instance_ports = []
for _i in range(2):
p = copy.deepcopy(instance_port)
p['id'] = str(uuid.uuid4())
p['security_groups'] = cur_sg_ids
instance_ports.append(p)
return (instance_id, instance_ports)
def test_server_security_groups(self):
cur_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports(device_id=instance_id) \
.AndReturn({'ports': instance_ports})
secgroups = copy.deepcopy(self.api_q_secgroups.list())
self.qclient.list_security_groups(id=set(cur_sg_ids)) \
.AndReturn({'security_groups': secgroups})
self.mox.ReplayAll()
api.network.server_security_groups(self.request, instance_id)
def test_server_update_security_groups(self):
cur_sg_ids = [self.api_q_secgroups.first()['id']]
new_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports(device_id=instance_id) \
.AndReturn({'ports': instance_ports})
for p in instance_ports:
body = {'port': {'security_groups': new_sg_ids}}
self.qclient.update_port(p['id'], body=body).AndReturn({'port': p})
self.mox.ReplayAll()
api.network.server_update_security_groups(
self.request, instance_id, new_sg_ids)
def test_security_group_backend(self):
self.mox.ReplayAll()
self.assertEqual('neutron',
api.network.security_group_backend(self.request))
class NetworkApiNeutronFloatingIpTests(NetworkApiNeutronTestBase):
def setUp(self):
super(NetworkApiNeutronFloatingIpTests, self).setUp()
self.qclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_floating_ip_supported(self):
self.mox.ReplayAll()
self.assertTrue(api.network.floating_ip_supported(self.request))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_floating_ip_supported_false(self):
self.mox.ReplayAll()
self.assertFalse(api.network.floating_ip_supported(self.request))
def test_floating_ip_pools_list(self):
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks(**search_opts) \
.AndReturn({'networks': ext_nets})
self.mox.ReplayAll()
rets = api.network.floating_ip_pools_list(self.request)
for attr in ['id', 'name']:
self.assertEqual([p[attr] for p in ext_nets],
[getattr(p, attr) for p in rets])
def test_floating_ip_list(self):
fips = self.api_q_floating_ips.list()
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_floatingips(**filters) \
.AndReturn({'floatingips': fips})
self.qclient.list_ports(**filters) \
.AndReturn({'ports': self.api_ports.list()})
self.mox.ReplayAll()
rets = api.network.tenant_floating_ip_list(self.request)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(exp[attr], getattr(ret, attr))
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_list_all_tenants(self):
fips = self.api_q_floating_ips.list()
self.qclient.list_floatingips().AndReturn({'floatingips': fips})
self.qclient.list_ports().AndReturn({'ports': self.api_ports.list()})
self.mox.ReplayAll()
# The all_tenants option for the floating IP list is specific to api.neutron,
# so we call api.neutron.FloatingIpManager directly; NetworkClient is not
# actually needed in this test.
# However, setUp() in the base class sets up mox to expect a call to
# api.base.is_service_enabled(), so we still instantiate NetworkClient
# (even though we don't use it) to keep mox.VerifyAll from complaining.
api.network.NetworkClient(self.request)
fip_manager = api.neutron.FloatingIpManager(self.request)
rets = fip_manager.list(all_tenants=True)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(getattr(ret, attr), exp[attr])
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def _test_floating_ip_get_associated(self, assoc_port, exp_instance_type):
fip = self.api_q_floating_ips.list()[1]
self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip})
self.qclient.show_port(assoc_port['id']) \
.AndReturn({'port': assoc_port})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertEqual(assoc_port['device_id'], ret.instance_id)
self.assertEqual(exp_instance_type, ret.instance_type)
def test_floating_ip_get_associated(self):
assoc_port = self.api_ports.list()[1]
self._test_floating_ip_get_associated(assoc_port, 'compute')
def test_floating_ip_get_associated_with_loadbalancer_vip(self):
assoc_port = copy.deepcopy(self.api_ports.list()[1])
assoc_port['device_owner'] = 'neutron:LOADBALANCER'
assoc_port['device_id'] = str(uuid.uuid4())
assoc_port['name'] = 'vip-' + str(uuid.uuid4())
self._test_floating_ip_get_associated(assoc_port, 'loadbalancer')
def test_floating_ip_get_unassociated(self):
fip = self.api_q_floating_ips.list()[0]
self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_allocate(self):
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
ext_net = ext_nets[0]
fip = self.api_q_floating_ips.first()
self.qclient.create_floatingip(
{'floatingip': {'floating_network_id': ext_net['id'],
'tenant_id': self.request.user.project_id}}) \
.AndReturn({'floatingip': fip})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_allocate(self.request,
ext_net['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_release(self):
fip = self.api_q_floating_ips.first()
self.qclient.delete_floatingip(fip['id'])
self.mox.ReplayAll()
api.network.tenant_floating_ip_release(self.request, fip['id'])
def test_floating_ip_associate(self):
fip = self.api_q_floating_ips.list()[1]
assoc_port = self.api_ports.list()[1]
ip_address = assoc_port['fixed_ips'][0]['ip_address']
target_id = '%s_%s' % (assoc_port['id'], ip_address)
params = {'port_id': assoc_port['id'],
'fixed_ip_address': ip_address}
self.qclient.update_floatingip(fip['id'],
{'floatingip': params})
self.mox.ReplayAll()
api.network.floating_ip_associate(self.request, fip['id'], target_id)
def test_floating_ip_disassociate(self):
fip = self.api_q_floating_ips.list()[1]
self.qclient.update_floatingip(fip['id'],
{'floatingip': {'port_id': None}})
self.mox.ReplayAll()
api.network.floating_ip_disassociate(self.request, fip['id'])
def _get_target_id(self, port):
param = {'id': port['id'],
'addr': port['fixed_ips'][0]['ip_address']}
return '%(id)s_%(addr)s' % param
def _get_target_name(self, port):
param = {'svrid': port['device_id'],
'addr': port['fixed_ips'][0]['ip_address']}
return 'server_%(svrid)s: %(addr)s' % param
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_lb': True})
def test_floating_ip_target_list(self):
ports = self.api_ports.list()
# Port on the first subnet is connected to a router
# attached to an external network in neutron_data.
subnet_id = self.subnets.first().id
target_ports = [(self._get_target_id(p),
self._get_target_name(p)) for p in ports
if (not p['device_owner'].startswith('network:') and
subnet_id in [ip['subnet_id']
for ip in p['fixed_ips']])]
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_ports(**filters).AndReturn({'ports': ports})
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
search_opts = {'project_id': self.request.user.tenant_id}
novaclient.servers.list(True, search_opts).AndReturn(servers)
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks(**search_opts) \
.AndReturn({'networks': ext_nets})
self.qclient.list_routers().AndReturn({'routers':
self.api_routers.list()})
self.qclient.list_vips().AndReturn({'vips': self.vips.list()})
self.mox.ReplayAll()
rets = api.network.floating_ip_target_list(self.request)
self.assertEqual(len(target_ports), len(rets))
for ret, exp in zip(rets, target_ports):
self.assertEqual(exp[0], ret.id)
self.assertEqual(exp[1], ret.name)
def test_floating_ip_target_get_by_instance(self):
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == '1']
search_opts = {'device_id': '1'}
self.qclient.list_ports(**search_opts).AndReturn({'ports': candidates})
self.mox.ReplayAll()
ret = api.network.floating_ip_target_get_by_instance(self.request, '1')
self.assertEqual(self._get_target_id(candidates[0]), ret)
def test_target_floating_ip_port_by_instance(self):
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == '1']
search_opts = {'device_id': '1'}
self.qclient.list_ports(**search_opts).AndReturn({'ports': candidates})
self.mox.ReplayAll()
ret = api.network.floating_ip_target_list_by_instance(self.request,
'1')
self.assertEqual(self._get_target_id(candidates[0]), ret[0])
self.assertEqual(len(candidates), len(ret))
def test_floating_ip_target_get_by_instance_with_preloaded_target(self):
target_list = [{'name': 'name11', 'id': 'id11', 'instance_id': 'vm1'},
{'name': 'name21', 'id': 'id21', 'instance_id': 'vm2'},
{'name': 'name22', 'id': 'id22', 'instance_id': 'vm2'}]
self.mox.ReplayAll()
ret = api.network.floating_ip_target_get_by_instance(
self.request, 'vm2', target_list)
self.assertEqual('id21', ret)
def test_target_floating_ip_port_by_instance_with_preloaded_target(self):
target_list = [{'name': 'name11', 'id': 'id11', 'instance_id': 'vm1'},
{'name': 'name21', 'id': 'id21', 'instance_id': 'vm2'},
{'name': 'name22', 'id': 'id22', 'instance_id': 'vm2'}]
self.mox.ReplayAll()
ret = api.network.floating_ip_target_list_by_instance(
self.request, 'vm2', target_list)
self.assertEqual(['id21', 'id22'], ret)
| orbitfp7/horizon | openstack_dashboard/test/api_tests/network_tests.py | Python | apache-2.0 | 34,720 | 0 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom neural network layers.
Low-level primitives such as custom convolution with custom initialization.
"""
import math
import numpy as np
import tensorflow as tf
NCHW, NHWC = 'NCHW', 'NHWC'
DATA_FORMAT_ORDER = {
'channels_first': NCHW,
'channels_last': NHWC
}
def smart_shape(x):
s, t = x.shape, tf.shape(x)
return [t[i] if s[i].value is None else s[i] for i in range(len(s))]
def to_nchw(x):
return tf.transpose(x, [0, 3, 1, 2])
def to_nhwc(x):
return tf.transpose(x, [0, 2, 3, 1])
def torus_pad(x, w, order=NCHW):
if w < 1:
return x
if order == NCHW:
y = tf.concat([x[:, :, -w:], x, x[:, :, :w]], axis=2)
y = tf.concat([y[:, :, :, -w:], y, y[:, :, :, :w]], axis=3)
else:
y = tf.concat([x[:, -w:], x, x[:, :w]], axis=1)
y = tf.concat([y[:, :, -w:], y, y[:, :, :w]], axis=2)
return y
def downscale2d(x, n=2, order=NCHW):
"""Box downscaling.
Args:
x: 4D tensor.
n: integer scale.
order: NCHW or NHWC.
Returns:
4D tensor down scaled by a factor n.
"""
if n <= 1:
return x
if order == NCHW:
return tf.nn.avg_pool(x, [1, 1, n, n], [1, 1, n, n], 'VALID', 'NCHW')
else:
return tf.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID', 'NHWC')
def upscale2d(x, n=2, order=NCHW):
"""Box upscaling (also called nearest neighbors).
Args:
x: 4D tensor.
n: integer scale (must be a power of 2).
order: NCHW or NHWC.
Returns:
4D tensor up scaled by a factor n.
"""
if n == 1:
return x
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1], ts[2], 1, ts[3], 1])
x = tf.tile(x, [1, 1, 1, n, 1, n])
x = tf.reshape(x, [-1, s[1], ts[2] * n, ts[3] * n])
else:
x = tf.reshape(x, [-1, ts[1], 1, ts[2], 1, s[3]])
x = tf.tile(x, [1, 1, n, 1, n, 1])
x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3]])
return x
def remove_details2d(x, n=2):
"""Remove box details by upscaling a downscaled image.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
4D tensor image with removed details of size nxn.
"""
if n == 1:
return x
return upscale2d(downscale2d(x, n), n)
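# Illustrative sketch (not part of the original module): downscale2d shrinks H
# and W by a factor n with box averaging, and upscale2d repeats each pixel n
# times. A quick graph-mode check in NHWC order, assuming a TF1-style session
# is available:
def _demo_box_scaling(n=2):
    x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])  # NHWC
    down = downscale2d(x, n, order=NHWC)  # shape (1, 2, 2, 1), box averages
    up = upscale2d(down, n, order=NHWC)   # shape (1, 4, 4, 1), nearest neighbour
    with tf.Session() as sess:
        down_v, up_v = sess.run([down, up])
    # The top-left 2x2 box of x holds [0, 1, 4, 5], so its box average is 2.5.
    assert down_v.shape == (1, 2, 2, 1) and np.isclose(down_v[0, 0, 0, 0], 2.5)
    assert up_v.shape == (1, 4, 4, 1)
    return down_v, up_v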
def bicubic_downscale2d(x, n=2, order=NCHW):
"""Downscale x by a factor of n, using dense bicubic weights.
Args:
x: 4D tensor.
n: integer scale (must be a power of 2).
order: NCHW or NHWC.
Returns:
4D tensor down scaled by a factor n.
"""
def kernel_weight(x):
"""https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic"""
x = abs(x)
if x <= 1:
return 1.5 * x ** 3 - 2.5 * x ** 2 + 1
elif 1 < x < 2:
return - 0.5 * x ** 3 + 2.5 * x ** 2 - 4 * x + 2
else:
return 0
def kernel():
k1d = np.array([kernel_weight((x + 0.5) / n) for x in range(-2 * n, 2 * n)])
k1d /= k1d.sum()
k2d = np.outer(k1d, k1d.T).astype('f')
return tf.constant(k2d.reshape((4 * n, 4 * n, 1, 1)))
if order == NHWC:
x = to_nchw(x)
y = tf.pad(x, [[0, 0], [0, 0], [2 * n - 1, 2 * n], [2 * n - 1, 2 * n]], mode='REFLECT')
s, ts = y.shape, tf.shape(y)
y = tf.reshape(y, [ts[0] * s[1], 1, ts[2], ts[3]])
y = tf.nn.conv2d(y, filter=kernel(), strides=[1, 1, n, n], padding='VALID', data_format='NCHW')
y = tf.reshape(y, [ts[0], s[1], tf.shape(y)[2], tf.shape(y)[3]])
return y if order == NCHW else to_nhwc(y)
def space_to_channels(x, n=2, order=NCHW):
"""Reshape image tensor by moving space to channels.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
Reshaped 4D tensor image of shape (N, C * n**2, H // n, W // n).
"""
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1], ts[2] // n, n, ts[3] // n, n])
x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
x = tf.reshape(x, [-1, s[1] * (n ** 2), ts[2] // n, ts[3] // n])
else:
x = tf.reshape(x, [-1, ts[1] // n, n, ts[2] // n, n, s[3]])
x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
x = tf.reshape(x, [-1, ts[1] // n, ts[2] // n, s[3] * (n ** 2)])
return x
def channels_to_space(x, n=2, order=NCHW):
"""Reshape image tensor by moving channels to space.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
Reshaped 4D tensor image of shape (N, C // n**2, H * n, W * n).
"""
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1] // (n ** 2), n, n, ts[2], ts[3]])
x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
x = tf.reshape(x, [-1, s[1] // (n ** 2), ts[2] * n, ts[3] * n])
elif order == NHWC:
x = tf.reshape(x, [-1, ts[1], ts[2], n, n, s[3] // (n ** 2)])
x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3] // (n ** 2)])
else:
assert 0, 'Only supporting NCHW and NHWC.'
return x
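# Illustrative sketch (not part of the original module): space_to_channels and
# channels_to_space are exact inverses, trading a factor n of spatial size for
# a factor n**2 of channels. Round-trip check in NCHW order, assuming a
# TF1-style session is available:
def _demo_space_channels_roundtrip(n=2):
    x = tf.random_normal([2, 3, 8, 8])        # NCHW
    packed = space_to_channels(x, n)          # (2, 12, 4, 4)
    unpacked = channels_to_space(packed, n)   # (2, 3, 8, 8)
    with tf.Session() as sess:
        x_v, packed_v, unpacked_v = sess.run([x, packed, unpacked])
    assert packed_v.shape == (2, 12, 4, 4)
    assert np.allclose(x_v, unpacked_v)       # lossless round trip
    return packed_v.shape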
class HeNormalInitializer(tf.initializers.random_normal):
def __init__(self, slope, dtype=tf.float32):
self.slope = slope
self.dtype = dtype
def get_config(self):
return dict(slope=self.slope, dtype=self.dtype.name)
def __call__(self, shape, dtype=None, partition_info=None):
del partition_info
if dtype is None:
dtype = self.dtype
std = np.sqrt(2) * tf.rsqrt((1. + self.slope ** 2) *
tf.cast(tf.reduce_prod(shape[:-1]),
tf.float32))
return tf.random_normal(shape, stddev=std, dtype=dtype)
def blend_resolution(lores, hires, alpha):
"""Blend two images.
Args:
lores: 4D tensor in NCHW, low resolution image.
hires: 4D tensor in NCHW, high resolution image.
alpha: scalar tensor in [0, 1], 0 produces the low resolution, 1 the high one.
Returns:
4D tensor in NCHW of blended images.
"""
return lores + alpha * (hires - lores)
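# Illustrative sketch (not part of the original module): blend_resolution is a
# plain linear interpolation, lores + alpha * (hires - lores), so alpha=0 gives
# the low-resolution input and alpha=1 the high-resolution one. Minimal check,
# assuming a TF1-style session is available:
def _demo_blend_resolution():
    lo = tf.zeros([1, 3, 4, 4])
    hi = tf.ones([1, 3, 4, 4])
    mid = blend_resolution(lo, hi, tf.constant(0.5))
    with tf.Session() as sess:
        mid_v = sess.run(mid)
    assert np.allclose(mid_v, 0.5)  # exactly half way between the two inputs
    return mid_v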
class SingleUpdate:
COLLECTION = 'SINGLE_UPDATE'
@classmethod
def get_update(cls, variable):
for v, u in tf.get_collection(cls.COLLECTION):
if v == variable:
return u
return None
@classmethod
def register_update(cls, variable, update):
assert cls.get_update(variable) is None
tf.add_to_collection(cls.COLLECTION, (variable, update))
return update
class Conv2DSpectralNorm(tf.layers.Conv2D):
def build(self, input_shape):
was_built = self.built
tf.layers.Conv2D.build(self, input_shape)
self.built = was_built
shape = self.kernel.shape.as_list()
self.u = self.add_variable(name='u', shape=[1, shape[-1]], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(),
trainable=False)
self.built = True
def call(self, inputs):
shape = self.kernel.shape.as_list()
kernel = self.kernel
if self.data_format == 'channels_first':
kernel = tf.transpose(kernel, [0, 2, 3, 1])
kernel = tf.reshape(kernel, [-1, shape[-1]])
u = self.u
v_ = tf.nn.l2_normalize(tf.matmul(u, kernel, transpose_b=True))
u_ = tf.nn.l2_normalize(tf.matmul(v_, kernel))
sigma = tf.squeeze(tf.matmul(tf.matmul(v_, kernel), u_, transpose_b=True))
if SingleUpdate.get_update(u) is None:
self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
outputs = self._convolution_op(inputs, self.kernel / sigma)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias, data_format=DATA_FORMAT_ORDER[self.data_format])
if self.activation is None:
return outputs
return self.activation(outputs)
def conv2d_spectral_norm(x, filters, kernel_size, strides=1, padding='same',
activation=None, data_format='channels_last', **kwargs):
layer = Conv2DSpectralNorm(filters, kernel_size, strides, padding,
activation=activation,
data_format=data_format, **kwargs)
return layer.apply(x)
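# Illustrative sketch (not part of the original module): the spectral-norm
# layers in this module reshape the kernel to a 2D matrix with output channels
# last, keep a persistent vector u, and run one power-iteration step per
# forward pass to estimate the largest singular value sigma before dividing
# the kernel by it. The same estimate in isolation, using numpy only:
def _power_iteration_sigma(kernel_2d, steps=50):
    """Estimate the largest singular value of a 2D array by power iteration."""
    u = np.random.randn(1, kernel_2d.shape[1]).astype('f')
    v = None
    for _ in range(steps):
        v = np.dot(u, kernel_2d.T)
        v /= np.linalg.norm(v) + 1e-12
        u = np.dot(v, kernel_2d)
        u /= np.linalg.norm(u) + 1e-12
    return np.dot(np.dot(v, kernel_2d), u.T).item()
# With enough steps the estimate approaches the exact value, e.g.
# np.linalg.svd(w, compute_uv=False)[0] for a random 2D matrix w.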
class DenseSpectralNorm(tf.layers.Dense):
"""Spectral Norm version of tf.layers.Dense."""
def build(self, input_shape):
self.u = self.add_variable(name='u', shape=[1, self.units], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(),
trainable=False)
return tf.layers.Dense.build(self, input_shape)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
u = self.u
v_ = tf.nn.l2_normalize(tf.matmul(u, self.kernel, transpose_b=True))
u_ = tf.nn.l2_normalize(tf.matmul(v_, self.kernel))
sigma = tf.squeeze(tf.matmul(tf.matmul(v_, self.kernel), u_, transpose_b=True))
if SingleUpdate.get_update(u) is None:
self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
outputs = tf.matmul(inputs, self.kernel / sigma)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def dense_spectral_norm(inputs, units, activation=None, **kwargs):
"""Spectral Norm version of tf.layers.dense."""
layer = DenseSpectralNorm(units, activation, **kwargs)
return layer.apply(inputs)
class DenseSpectralNormCustom(tf.layers.Dense):
"""Spectral Norm version of tf.layers.Dense."""
def build(self, input_shape):
shape = [input_shape[-1], self.units]
self.u = self.add_variable(name='u', shape=[1, shape[0]], dtype=tf.float32, trainable=False)
self.v = self.add_variable(name='v', shape=[shape[1], 1], dtype=tf.float32, trainable=False)
return tf.layers.Dense.build(self, input_shape)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
u, v = self.u, self.v
v_ = tf.nn.l2_normalize(tf.reshape(tf.matmul(u, self.kernel), v.shape))
u_ = tf.nn.l2_normalize(tf.reshape(tf.matmul(self.kernel, v), u.shape))
sigma = tf.matmul(tf.matmul(u, self.kernel), v)[0, 0]
if SingleUpdate.get_update(u) is None:
self.add_update(SingleUpdate.register_update(u, tf.assign(u, u_)))
self.add_update(SingleUpdate.register_update(v, tf.assign(v, v_)))
outputs = tf.matmul(inputs, self.kernel / sigma)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def dense_spectral_norm_custom(inputs, units, activation=None, **kwargs):
"""Spectral Norm version of tf.layers.dense."""
layer = DenseSpectralNormCustom(units, activation, **kwargs)
return layer.apply(inputs)
def kaiming_scale(shape, activation):
activation_slope = {
tf.nn.relu: 0,
tf.nn.leaky_relu: 0.2
}
slope = activation_slope.get(activation, 1)
fanin = np.prod(shape[:-1])
return np.sqrt(2. / ((1 + slope ** 2) * fanin))
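# Worked example (not part of the original module): for a 3x3 kernel with 64
# input channels and a ReLU activation, fan-in = 3 * 3 * 64 = 576, so the
# runtime scale is sqrt(2 / 576) ~= 0.059. The *Scaled layers below multiply
# their unit-variance kernels by this value at call time (the "equalized
# learning rate" idea used in progressive-GAN-style training).
def _demo_kaiming_scale():
    scale = kaiming_scale([3, 3, 64, 128], tf.nn.relu)
    assert np.isclose(scale, np.sqrt(2.0 / 576))
    return scale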
class DenseScaled(tf.layers.Dense):
def call(self, inputs):
scale = kaiming_scale(self.kernel.get_shape().as_list(), self.activation)
if hasattr(self, 'gain'):
scale *= self.gain
outputs = tf.matmul(inputs, self.kernel * scale)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is None:
return outputs
return self.activation(outputs)
def set_gain(self, gain):
self.gain = gain
class Conv2DScaled(tf.layers.Conv2D):
def call(self, inputs):
scale = kaiming_scale(self.kernel.get_shape().as_list(), self.activation)
if hasattr(self, 'gain'):
scale *= self.gain
outputs = self._convolution_op(inputs, self.kernel * scale)
assert self.rank == 2
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias, DATA_FORMAT_ORDER[self.data_format])
if self.activation is None:
return outputs
return self.activation(outputs)
def set_gain(self, gain):
self.gain = gain
def conv2d_scaled(x, filters, kernel_size, strides=1, padding='same',
activation=None, gain=1, data_format='channels_first', **kwargs):
layer = Conv2DScaled(filters, kernel_size, strides, padding,
activation=activation,
data_format=data_format,
kernel_initializer=tf.initializers.random_normal(stddev=1.), **kwargs)
layer.set_gain(gain)
return layer.apply(x)
def dense_scaled(x, filters, activation=tf.nn.leaky_relu, gain=1, **kwargs):
layer = DenseScaled(filters,
activation=activation,
kernel_initializer=tf.initializers.random_normal(stddev=1.),
**kwargs)
layer.set_gain(gain)
return layer.apply(x)
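# Usage sketch (not part of the original module): a minimal block built from
# the scaled layers above, in channels_first format (NCHW convolutions
# generally assume a GPU). The layer sizes are arbitrary example values.
def _demo_scaled_block(x):
    """x: 4D NCHW tensor, e.g. tf.zeros([1, 3, 32, 32])."""
    y = conv2d_scaled(x, filters=64, kernel_size=3, activation=tf.nn.leaky_relu)
    y = conv2d_scaled(y, filters=64, kernel_size=3, activation=tf.nn.leaky_relu)
    y = downscale2d(y)                   # halve the spatial resolution
    y = tf.layers.flatten(y)
    return dense_scaled(y, 1, activation=None)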
def channel_norm(x):
"""Channel normalization.
Args:
x: nD tensor with channels in dimension 1.
Returns:
nD tensor with normalized channels.
"""
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), [1], keepdims=True) + 1e-8)
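# Illustrative sketch (not part of the original module): channel_norm rescales
# every spatial position so that its channel vector has (approximately) unit
# root-mean-square, as in the pixel-wise normalization used by progressive
# GANs. Quick check, assuming a TF1-style session is available:
def _demo_channel_norm():
    x = tf.random_normal([2, 8, 4, 4])   # NCHW, channels in dimension 1
    y = channel_norm(x)
    rms = tf.sqrt(tf.reduce_mean(tf.square(y), axis=1))
    with tf.Session() as sess:
        rms_v = sess.run(rms)
    assert np.allclose(rms_v, 1.0, atol=1e-3)
    return rms_v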
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: nD tensor for which to compute standard deviation average.
Returns:
a scalar, the mean standard deviation of variable x.
"""
mean = tf.reduce_mean(x, 0, keepdims=True)
vals = tf.sqrt(tf.reduce_mean(tf.squared_difference(x, mean), 0) + 1e-8)
vals = tf.reduce_mean(vals)
return vals
def scalar_concat(x, scalar):
"""Concatenate a scalar to a 4D tensor as an extra channel.
Args:
x: 4D image tensor in NCHW format.
scalar: a scalar to concatenate to the tensor.
Returns:
a 4D tensor with one extra channel containing the value scalar at
every position.
"""
s = tf.shape(x)
return tf.concat([x, tf.ones([s[0], 1, s[2], s[3]]) * scalar], axis=1)
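# Usage sketch (not part of the original module): a common way to combine the
# two helpers above in a discriminator is to compute the batch-wide standard
# deviation once and append it to the feature map as an extra constant
# channel. This pairing is an assumption about intended use, not taken from
# the original training code.
def _demo_minibatch_stddev_feature(x):
    """x: 4D NCHW feature tensor; returns x with one extra channel."""
    stat = minibatch_mean_stddev(x)   # scalar, identical for every sample
    return scalar_concat(x, stat)     # (N, C + 1, H, W)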
class ClassBiasScale(tf.layers.Layer):
"""For a class c, return x*gamma[c] + beta[c]"""
def __init__(self, nclass, name=None, trainable=True, **kwargs):
super(ClassBiasScale, self).__init__(
name=name, trainable=trainable, **kwargs)
self.nclass = nclass
self.gamma = None
self.beta = None
def build(self, input_shape):
self.beta = self.add_variable(name='beta', shape=[self.nclass, input_shape[1]], dtype=tf.float32,
initializer=tf.initializers.zeros, trainable=True)
self.gamma = self.add_variable(name='gamma', shape=[self.nclass, input_shape[1]], dtype=tf.float32,
initializer=tf.initializers.zeros, trainable=True)
self.built = True
def call(self, inputs, labels):
ndims = len(inputs.get_shape())
with tf.colocate_with(self.beta):
beta = tf.gather(self.beta, labels)
with tf.colocate_with(self.gamma):
gamma = tf.gather(self.gamma, labels)
gamma = tf.nn.sigmoid(gamma)
reshape = [tf.shape(inputs)[0], inputs.shape[1]] + [1] * (ndims - 2)
return inputs * tf.reshape(gamma, reshape) + tf.reshape(beta, reshape)
def compute_output_shape(self, input_shape):
return input_shape
def conv2d_mono(x, kernel, order=NCHW):
"""2D convolution using the same filter for every channel.
:param x: 4D input tensor of the images.
:param kernel: 2D input tensor of the convolution to apply.
:param order: enum {NCHW, NHWC}, the format of the input tensor.
:return: a 4D output tensor resulting from the convolution.
"""
y = x if order == NCHW else tf.transpose(x, [0, 3, 1, 2])
s = smart_shape(y)
y = tf.reshape(y, [s[0] * s[1], 1, s[2], s[3]])
y = tf.nn.conv2d(y, kernel[:, :, None, None], [1] * 4, 'VALID', data_format=NCHW)
t = smart_shape(y)
y = tf.reshape(y, [s[0], s[1], t[2], t[3]])
return y if order == NCHW else tf.transpose(y, [0, 2, 3, 1])
def class_bias_scale(inputs, labels, nclass):
"""For a class c, return x*gamma[c] + beta[c]"""
layer = ClassBiasScale(nclass)
return layer.apply(inputs, labels)
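# Usage sketch (not part of the original module): class_bias_scale applies a
# per-class affine transform, x * sigmoid(gamma[label]) + beta[label], similar
# in spirit to the affine part of conditional normalization. The sizes below
# are arbitrary example values.
def _demo_class_bias_scale():
    x = tf.random_normal([4, 16, 8, 8])         # NCHW features
    labels = tf.constant([0, 3, 3, 9])          # one class id per sample
    y = class_bias_scale(x, labels, nclass=10)  # same shape as x
    # gamma and beta start at zero, so initially y == 0.5 * x.
    return y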
def blur_kernel_area(radius):
"""Compute an area blurring kernel.
:param radius: float in [0, inf), the radius of the area to blur over.
:return: a 2D convolution kernel.
"""
radius = max(radius, 1e-8)
cr = 1 + round(math.ceil(radius))
m = np.ones((cr, cr), 'f')
m[-1] *= (radius + 2 - cr)
m[:, -1] *= (radius + 2 - cr)
m = np.concatenate([m[::-1], m[1:]], axis=0)
m = np.concatenate([m[:, ::-1], m[:, 1:]], axis=1)
return m / m.sum()
def blur_apply(x, kernel, order=NCHW):
h, w = kernel.shape[0], kernel.shape[1]
if order == NCHW:
x = tf.pad(x, [[0] * 2, [0] * 2, [h // 2] * 2, [w // 2] * 2], 'REFLECT')
else:
x = tf.pad(x, [[0] * 2, [h // 2] * 2, [w // 2] * 2, [0] * 2], 'REFLECT')
return conv2d_mono(x, kernel, order)
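# Illustrative sketch (not part of the original module): blur_kernel_area
# builds a normalized box kernel and blur_apply convolves it with every
# channel independently (via conv2d_mono) after reflect padding, so the
# spatial size is preserved. For radius=1 the kernel is a uniform 3x3 box.
def _demo_blur():
    k = blur_kernel_area(1.0)             # numpy kernel of shape (3, 3)
    assert k.shape == (3, 3) and np.isclose(k.sum(), 1.0)
    x = tf.random_normal([1, 16, 16, 3])  # NHWC image batch
    return blur_apply(x, k, order=NHWC)   # same spatial size as x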
| google-research/lag | libml/layers.py | Python | apache-2.0 | 18,151 | 0.001598 |