repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k)
---|---|---|---|---|---|---|---|---
DoubleNegativeVisualEffects/cortex | test/IECoreHoudini/FromHoudiniPointsConverter.py | Python | bsd-3-clause | 39,834 | 0.052945 |
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
return box
def createTorus( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
return torus
def createPoints( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
points = geo.createNode( "scatter" )
facet.setInput( 0, box )
points.setInput( 0, facet )
return points
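# A typical round trip with the helpers above (illustrative sketch only,
# not executed by the test runner):
#   points = self.createPoints()
#   result = IECoreHoudini.FromHoudiniPointsConverter( points ).convert()
#   # result should be an IECore.PointsPrimitive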
# creates a converter
def testCreateConverter( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniPointsConverter( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
return converter
# creates a converter via the factory
def testFactory( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
self.assertEqual( converter, None )
self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.PointsPrimitive ] )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# performs geometry conversion
def testDoConversion( self ) :
converter = self.testCreateConverter()
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
def testConvertFromHOMGeo( self ) :
geo = self.createPoints().geometry()
converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.failUnless( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.PointsPrimitive )
self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# convert a mesh
def testConvertMesh( self ) :
torus = self.createTorus()
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
bbox = result.bound()
self.assertEqual( bbox.min.x, -1.5 )
self.assertEqual( bbox.max.x, 1.5 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test prim/vertex attributes
def testConvertPrimVertAttributes( self ) :
torus = self.createTorus()
geo = torus.parent()
# add point normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add primitive colour attributes
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_f3 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attrib
ovnicraft/openerp-restaurant | website_sale/models/product_characteristics.py | Python | agpl-3.0 | 3,642 | 0.006315 |
from openerp.osv import osv, fields
class attributes(osv.Model):
_name = "product.attribute"
def _get_float_max(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
cr.execute("""
SELECT attribute_id, MAX(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_float_min(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
cr.execute("""
SELECT attribute_id, MIN(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_min_max(self, cr, uid, ids, context=None):
result = {}
for value in self.pool.get('product.attribute.line').browse(cr, uid, ids, context=context):
if value.type == 'float':
result[value.attribute_id.id] = True
return result.keys()
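# Note: _get_min_max serves as the store trigger for the function fields
# below: when 'value' or 'attribute_id' changes on a product.attribute.line,
# the attribute ids it returns have float_max/float_min recomputed.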
_columns = {
'name': fields.char('Name', translate=True, required=True),
'type': fields.selection([('distinct', 'Textual Value'), ('float', 'Numeric Value')], "Type", required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values'),
'attr_product_ids': fields.one2many('product.attribute.line', 'attribute_id', 'Products'),
'float_max': fields.function(_get_float_max, type='float', string="Max", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'float_min': fields.function(_get_float_min, type='float', string="Min", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'visible': fields.boolean('Display Filter on Website'),
}
_defaults = {
'type': 'distinct',
'visible': True,
}
class attributes_value(osv.Model):
_name = "product.attribute.value"
_columns = {
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'atr_product_ids': fields.one2many('product.attribute.line', 'value_id', 'Products'),
}
class attributes_product(osv.Model):
_name = "product.attribute.line"
_order = 'attribute_id, value_id, value'
_columns = {
'value': fields.float('Numeric Value'),
'value_id': fields.many2one('product.attribute.value', 'Textual Value'),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'product_tmpl_id': fields.many2one('product.template', 'Product', required=True),
'type': fields.related('attribute_id', 'type', type='selection',
selection=[('distinct', 'Distinct'), ('float', 'Float')], string='Type'),
}
def onchange_attribute_id(self, cr, uid, ids, attribute_id, context=None):
attribute = self.pool.get('product.attribute').browse(cr, uid, attribute_id, context=context)
return {'value': {'type': attribute.type, 'value_id': False, 'value': ''}}
class product_template(osv.Model):
_inherit = "product.template"
_columns = {
'attribute_lines': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product attributes'),
}
nathawes/swift | utils/build_swift/tests/build_swift/test_shell.py | Python | apache-2.0 | 26,680 | 0.000037 |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import collections
import sys
import unittest
from build_swift import shell
import six
from six import StringIO
from .. import utils
try:
# Python 3.4
from pathlib import Path
except ImportError:
pass
try:
# Python 3.3
from unittest import mock
from unittest.mock import patch, mock_open, MagicMock
except ImportError:
mock, mock_open = None, None
class MagicMock(object):
def __init__(self, *args, **kwargs):
pass
def _id(obj):
return obj
def patch(*args, **kwargs):
return _id
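# With the fallbacks above, @patch degrades to a no-op decorator and
# MagicMock to an inert stub when unittest.mock is unavailable, so the
# @utils.requires_module-guarded tests below are skipped rather than
# failing at import time.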
# -----------------------------------------------------------------------------
# Constants
_OPEN_NAME = '{}.open'.format(six.moves.builtins.__name__)
# -----------------------------------------------------------------------------
# Test Cases
class TestHelpers(unittest.TestCase):
"""Unit tests for the helper functions defined in the build_swift.shell
module.
"""
# -------------------------------------------------------------------------
# _flatmap
def test_flatmap(self):
def duplicate(x):
return [x, x]
result = shell._flatmap(duplicate, [1, 2, 3])
self.assertIsInstance(result, collections.Iterable)
self.assertEqual(list(result), [1, 1, 2, 2, 3, 3])
# -------------------------------------------------------------------------
# _convert_pathlib_path
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('build_swift.shell.Path', None)
def test_convert_pathlib_path_pathlib_not_imported(self):
path = Path('/path/to/file.txt')
self.assertEqual(shell._convert_pathlib_path(path), path)
@utils.requires_module('pathlib')
def test_convert_pathlib_path(self):
path = Path('/path/to/file.txt')
self.assertEqual(shell._convert_pathlib_path(''), '')
self.assertEqual(
shell._convert_pathlib_path(path),
six.text_type(path))
# -------------------------------------------------------------------------
# _get_stream_file
def test_get_stream_file(self):
self.assertEqual(shell._get_stream_file(shell.PIPE), sys.stdout)
self.assertEqual(shell._get_stream_file(shell.STDOUT), sys.stdout)
self.assertEqual(shell._get_stream_file(sys.stdout), sys.stdout)
self.assertEqual(shell._get_stream_file(sys.stderr), sys.stderr)
def test_get_stream_file_raises_devnull(self):
with self.assertRaises(ValueError):
shell._get_stream_file(shell.DEVNULL)
# -------------------------------------------------------------------------
# _echo_command
@utils.requires_module('unittest.mock')
def test_echo_command(self):
test_command = ['sudo', 'rm', '-rf', '/tmp/*']
mock_stream = MagicMock()
shell._echo_command(test_command, mock_stream)
mock_stream.write.assert_called_with(
'>>> {}\n'.format(shell.quote(test_command)))
assert(mock_stream.flush.called)
@utils.requires_module('unittest.mock')
def test_echo_command_custom_prefix(self):
mock_stream = MagicMock()
shell._echo_command('ls', mock_stream, prefix='$ ')
mock_stream.write.assert_called_with('$ ls\n')
assert(mock_stream.flush.called)
# -------------------------------------------------------------------------
# _normalize_args
def test_normalize_args_splits_basestring(self):
command = 'rm -rf /Applications/Xcode.app'
self.assertEqual(
shell._normalize_args(command),
['rm', '-rf', '/Applications/Xcode.app'])
def test_normalize_args_list_str(self):
command = ['rm', '-rf', '/Applications/Xcode.app']
self.assertEqual(shell._normalize_args(command), command)
def test_normalize_args_converts_wrappers(self):
sudo = shell.wraps('sudo')
rm = shell.wraps('rm')
command = [sudo, rm, '-rf', '/Applications/Xcode.app']
self.assertEqual(
shell._normalize_args(command),
['sudo', 'rm', '-rf', '/Applications/Xcode.app'])
def test_normalize_args_converts_complex_wrapper_commands(self):
sudo_rm_rf = shell.wraps('sudo rm -rf')
command = [sudo_rm_rf, '/Applications/Xcode.app']
self.assertEqual(
shell._normalize_args(command),
['sudo', 'rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_accepts_single_wrapper_arg(self):
rm_xcode = shell.wraps(['rm', '-rf', Path('/Applications/Xcode.app')])
self.assertEqual(
shell._normalize_args(rm_xcode),
['rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_converts_pathlib_path(self):
command = ['rm', '-rf', Path('/Applications/Xcode.app')]
self.assertEqual(
shell._normalize_args(command),
['rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_converts_pathlib_path_in_wrapper_commands(self):
rm_xcode = shell.wraps(['rm', '-rf', Path('/Applications/Xcode.app')])
self.assertEqual(
shell._normalize_args([rm_xcode]),
['rm', '-rf', '/Applications/Xcode.app'])
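# Taken together (sketch): shell.wraps builds a command wrapper, and
# _normalize_args flattens wrappers, plain strings and pathlib Paths into a
# single argv list such as ['sudo', 'rm', '-rf', '/Applications/Xcode.app'].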
class TestDecorators(unittest.TestCase):
"""Unit tests for the decorators defined in the build_swift.shell module
used to backport or add functionality to the subprocess wrappers.
"""
# -------------------------------------------------------------------------
# _backport_devnull
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
@patch('build_swift.shell._PY_VERSION', (3, 2))
def test_backport_devnull_stdout_kwarg(self, mock_open):
mock_file = MagicMock()
mock_open.return_value.__enter__.return_value = mock_file
@shell._backport_devnull
def func(command, **kwargs):
self.assertEqual(kwargs['stdout'], mock_file)
func('', stdout=shell.DEVNULL)
assert(mock_open.return_value.__enter__.called)
assert(mock_open.return_value.__exit__.called)
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
@patch('build_swift.shell._PY_VERSION', (3, 2))
def test_backport_devnull_stderr_kwarg(self, mock_open):
mock_file = MagicMock()
mock_open.return_value.__enter__.return_value = mock_file
@shell._backport_devnull
def func(command, **kwargs):
self.assertEqual(kwargs['stderr'], mock_file)
func('', stderr=shell.DEVNULL)
assert(mock_open.return_value.__enter__.called)
assert(mock_open.return_value.__exit__.called)
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
def test_backport_devnull_does_not_open(self, mock_open):
@shell._backport_devnull
def func(command):
pass
func('')
mock_open.return_value.__enter__.assert_not_called()
mock_open.return_value.__exit__.assert_not_called()
@utils.requires_module('unittest.mock')
@patch('build_swift.shell._PY_VERSION', (3, 3))
def test_backport_devnull_noop_starting_with_python_3_3(self):
def func():
pass
self.assertEqual(shell._backport_devnull(func), func)
# -------------------------------------------------------------------------
# _normalize_command
def test_normalize_command_basestring_command_noop(self):
test_command = 'touch test.txt'
@shell._normalize_command
def func(command):
self.assertE
ptroja/spark2014 | testsuite/gnatprove/tests/intro/test.py | Python | gpl-3.0 | 65 | 0.030769 |
from test_support import *
prove_all(no_fail=True, steps = 400)
awni/tensorflow | tensorflow/python/kernel_tests/softmax_op_test.py | Python | apache-2.0 | 4,506 | 0.006214 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
class SoftmaxTest(tf.test.TestCase):
def _npSoftmax(self, features, log=False):
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features -
np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
softmax = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
if log:
return np.log(softmax)
else:
return softmax
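# Worked example of the formula above: for a single row [1., 2.],
# e = exp([1, 2] - 2) = [0.36788, 1.0] and sum(e) = 1.36788, so
# softmax = [0.26894, 0.73106] and log softmax = [-1.31326, -0.31326].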
def _testSoftmax(self, np_features, log=False, use_gpu=False):
np_softmax = self._npSoftmax(np_features, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = tf.nn.log_softmax(np_features)
else:
tf_softmax = tf.nn.softmax(np_features)
out = tf_softmax.eval()
self.assertAllClose(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in each
# batch element.
self.assertAllClose(np.ones(out.shape[0]),
np.sum(out, axis=1))
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5, atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5, atol=1.e-5)
def testShapeMismatch(self):
with self.assertRaises(ValueError):
tf.nn.softmax([0., 1., 2., 3.])
with self.assertRaises(ValueError):
tf.nn.log_softmax([0., 1., 2., 3.])
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32
else:
type = np.float64
max = np.finfo(type).max
features = np.array(
[[1., 1., 1., 1.],
[max, 1., 2., 3.]]).astype(type)
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = tf.nn.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5, atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testEmpty(self):
with self.test_session():
x = tf.constant([[]], shape=[0, 3])
self.assertEqual(0, tf.size(x).eval())
expected_y = np.array([]).reshape(0, 3)
np.testing.assert_array_equal(expected_y, tf.nn.softmax(x).eval())
if __name__ == "__main__":
tf.test.main()
hecanjog/pattern.studies | orc/hat.py | Python | cc0-1.0 | 364 | 0.013736 |
from pippi import dsp
from hcj import snds, fx
hat = snds.load('mc303/hat2.wav')
def make(length, i):
#h = dsp.bln(length / 4, dsp.rand(6000, 8000), dsp.rand(9000, 16000))
#h = dsp.amp(h, dsp.rand(0.5, 1))
#h = dsp.env(h, 'phasor')
h = hat
h = dsp.fill(h, length, silence=True)
if dsp.rand() > 0.5:
h = fx.penv(h)
return h
weahwww/python | bytes_to_str.py | Python | gpl-2.0 | 351 | 0.008547 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
def python3xBytesToStr():
f = open("BytesToStr.txt", "wb")
zhcnBytes = b'\x63\x68\x61\x72\x43\x6f\x64\x65\x41\x74'
zhcnUnicodeStr = zhcnBytes.decode('gbk')
print(zhcnUnicodeStr)
f.write(zhcnUnicodeStr.encode('utf-8'))
f.close()
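# Note: the byte string above is plain ASCII ("charCodeAt"), so the
# gbk-decode / utf-8-encode round trip leaves the bytes unchanged; the
# conversion only matters for genuinely multi-byte gbk input.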
if __name__ == "__main__":
python3xBytesToStr()
milki/morph | morph/tests/test_patternchain.py | Python | bsd-2-clause | 3,569 | 0.001121 |
# test_patternchain.py -- Tests for Pattern Chains
"""Tests for Pattern Chain objects"""
from morph import (
pattern,
patternchain
)
from morph.pattern import (
LiteralPattern,
NumericCounterPattern,
)
from morph.patternchain import (
generateFullReplaceChain,
PatternChain,
FilePatternChain,
)
from morph.errors import (
PatternModeError
)
from morph.tests import TestCase
class PatternChainTestCase(TestCase):
def testGenFullReplace(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
litpat = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
numcountpat = NumericCounterPattern(1, 3)
self.assertEqual(PatternChain([litpat, numcountpat]), chain)
def testStr(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
self.assertEqual("\tLiteral (replace, abc_)\n"
"\tNumericCounter (append, 1, 1, 3)\n",
str(chain))
def testAppendApply(self):
appendPat0 = LiteralPattern('abc')
appendPat1 = LiteralPattern('123')
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['fileabc123'],
chain.apply_to_strings(['file']))
self.assertEqual(['file0abc123', 'file1abc123', 'file2abc123'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
def testReplaceApply(self):
appendPat0 = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
appendPat1 = NumericCounterPattern(1, 2)
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['abc_01'],
chain.apply_to_strings(['file']))
chain.reset()
self.assertEqual(['abc_01', 'abc_02', 'abc_03'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
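# In replace mode (the behaviour asserted above): the literal pattern
# discards the original name and the counter appends, so every input maps
# to 'abc_<counter>' regardless of the source string.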
class FilePatternChainTestCase(TestCase):
def testApply(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual(
['file1.5', 'file2', 'file3', 'file4', 'file5'],
chain.apply_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testMap(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual(
[(None, 'file1.5'),
('file2', 'file2'),
('file3', 'file3'),
('file4', 'file4'),
(None, 'file5'),
('file0', None),
('file1', None)],
chain.map_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testStr(self):
chain = FilePatternChain()
chain.insert_file('file5', 4)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual("\t('insert', 'file5', 4)\n"
"\t('insert', 'file1.5', 2)\n"
"\t('delete', 0)\n"
"\t('move', 0, 2)\n"
"\t('delete', 2)\n",
str(chain))
kanishka-linux/kawaii-player | kawaii_player/hls_webkit/netmon_webkit.py | Python | gpl-3.0 | 3,417 | 0.009072 |
"""
Copyright (C) 2017 kanishka-linux kanishka.linux@gmail.com
This file is part of hlspy.
hlspy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
hlspy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with hlspy. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from PyQt5 import QtCore, QtNetwork
from PyQt5.QtNetwork import QNetworkAccessManager
from PyQt5.QtCore import pyqtSignal
class NetManager(QNetworkAccessManager):
netS = pyqtSignal(str)
def __init__(
self, parent=None, url=None, print_request=None, block_request=None,
default_block=None, select_request=None, get_link=None):
super(NetManager, self).__init__()
self.url = url
self.print_request = print_request
if block_request:
self.block_request = block_request.split(',')
else:
self.block_request = []
self.default_block = default_block
self.select_request = select_request
self.get_link = get_link
def createRequest(self, op, request, device = None ):
global block_list
try:
urlLnk = (request.url().toString())
except UnicodeEncodeError:
urlLnk = (request.url().path())
if self.get_link:
if self.get_link in urlLnk:
self.netS.emit(urlLnk)
lower_case = urlLnk.lower()
lst = []
if self.default_block:
lst = [
"doubleclick.net", 'adnxs', r"||youtube-nocookie.com/gen_204?",
r"youtube.com###watch-branded-actions", "imagemapurl",
"b.scorecardresearch.com", "rightstuff.com", "scarywater.net",
"popup.js", "banner.htm", "_tribalfusion",
"||n4403ad.doubleclick.net^$third-party",
".googlesyndication.com", "graphics.js", "fonts.googleapis.com/css",
"s0.2mdn.net", "server.cpmstar.com", "||banzai/banner.$subdocument",
"@@||anime-source.com^$document", "/pagead2.", "frugal.gif",
"jriver_banner.png", "show_ads.js",
'##a[href^="http://billing.frugalusenet.com/"]',
"http://jriver.com/video.html", "||animenewsnetwork.com^*.aframe?",
"||contextweb.com^$third-party", ".gutter", ".iab", 'revcontent',
".ads", "ads.", ".bebi", "mgid"
]
if self.block_request:
lst = lst + self.block_request
block = False
for l in lst:
if lower_case.find(l) != -1:
block = True
break
if (self.select_request and self.select_request in urlLnk) or self.print_request:
print(urlLnk)
if block:
return QNetworkAccessManager.createRequest(self, QNetworkAccessManager.GetOperation, QtNetwork.QNetworkRequest(QtCore.QUrl()))
else:
return QNetworkAccessManager.createRequest(self, op, request, device)
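# Wiring sketch (hypothetical consumer, not part of this file): a QtWebKit
# page would route its requests through this manager via
#   page.setNetworkAccessManager(NetManager(block_request='ads.'))
# letting createRequest above emit netS for get_link matches and return a
# blank request for blocked URLs.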
sih4sing5hong5/hue7jip8 | 試驗/test教育部臺灣閩南語字詞頻調查工作.py | Python | mit | 539 | 0 |
import io
from django.core.management import call_command
from django.test.testcases import TestCase
from 臺灣言語服務.models import 訓練過渡格式
class KIPsu試驗(TestCase):
@classmethod
def setUpClass(cls):
with io.StringIO() as tshogoo:
call_command('教育部臺灣閩南語字詞頻調查工作', stderr=tshogoo)
print(tshogoo.getvalue()[:1000])
return super().setUpClass()
def test數量(self):
self.assertGreater(訓練過渡格式.資料數量(), 50000)
konradko/cnav-bot | cnavbot/services/pi2go.py | Python | mit | 4,857 | 0 |
import time
import logging
from cnavbot import settings
logger = logging.getLogger()
class Driver(object):
def __init__(self, *args, **kwargs):
self.driver = kwargs.pop('driver', settings.BOT_DRIVER)
class Motors(Driver):
def __init__(self, speed=None, *args, **kwargs):
super(Motors, self).__init__(*args, **kwargs)
# 'speed' binds to the named parameter and never reaches kwargs, so read
# it directly; kwargs.pop('speed', ...) always returned the default
self.speed = speed if speed is not None else settings.BOT_DEFAULT_SPEED
self.validate_speed(self.speed)
logger.info('Speed set to {}'.format(self.speed))
@staticmethod
def validate_speed(speed):
if not (1 <= speed <= 100):
raise Exception(
"Invalid speed value '{}', must be between 1 an 100".format(
speed
)
)
def forward(self, steps=None):
"""Sets both motors to go forward"""
logger.debug('Going forward')
self.driver.forward(self.speed)
if steps:
self.keep_running(steps)
def reverse(self, steps=None):
"""Sets both motors to reverse"""
logger.debug('Reversing')
self.driver.reverse(self.speed)
if steps:
self.keep_running(steps)
def left(self, steps=None):
"""Sets motors to turn opposite directions for left spin"""
logger.debug('Spinning left')
self.driver.spinLeft(self.speed)
if steps:
self.keep_running(steps)
def right(self, steps=None):
"""Sets motors to turn opposite directions for right spin"""
logger.debug('Spinning right')
self.driver.spinRight(self.speed)
if steps:
self.keep_running(steps)
def keep_running(self, steps):
logger.debug('Keeping running for {} steps'.format(steps))
time.sleep(0.1 * steps)
self.stop()
def stop(self):
logger.debug('Stopping')
self.driver.stop()
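# Usage sketch for the Motors API above (assumes settings.BOT_DRIVER is a
# configured pi2go-style driver object):
#   motors = Motors(speed=50)
#   motors.forward(steps=10)  # runs for ~1s (0.1s per step), then stops
#   motors.left()             # spins until stop() is called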
class Lights(Driver):
led_numbers = (1, 2, 3, 4)
def validate_led_number(self, led_number):
if led_number not in self.led_numbers:
raise Exception(
"Invalid led number '{}', must be in {}".format(
led_number,
self.led_numbers
)
)
def set_led_rbg(self, led_number, red, blue, green):
"""Spins right specified number of steps"""
self.validate_led_number(led_number)
logger.debug('Setting LED {} to red: {}, green: {}, blue: {}'.format(
led_number, red, green, blue
))
self.driver.setLED(led_number, red, green, blue)
def set_all_leds_rbg(self, red, blue, green):
"""Spins right specified number of steps"""
for led_number in self.led_numbers:
self.driver.setLED(led_number, red, green, blue)
class ObstacleSensor(Driver):
def __init__(self, *args, **kwargs):
super(ObstacleSensor, self).__init__(*args, **kwargs)
self.max_distance = kwargs.pop(
'max_distance', settings.BOT_DEFAULT_MAX_DISTANCE
)
logger.info('Max distance set to {}'.format(self.max_distance))
def left(self):
"""Returns true if there is an obstacle to the left"""
obstacle = self.driver.irLeft()
logger.debug('Left obstacle: {}'.format(obstacle))
return obstacle
def right(self):
"""Returns true if there is an obstacle to the right"""
obstacle = self.driver.irRight()
logger.debug('Right obstacle: {}'.format(obstacle))
return obstacle
def front(self):
"""Returns true if there is an obstacle in front"""
obstacle = self.driver.irCentre()
logger.debug('Front obstacle: {}'.format(obstacle))
return obstacle
def front_close(self):
front_close = self.distance() <= self.max_distance
logger.debug('Front obstacle close: {}'.format(front_close))
return front_close
def distance(self):
"""
Returns the distance in cm to the nearest reflecting object
in front of the bot
"""
distance = self.driver.getDistance()
logger.debug('Distance: {}'.format(distance))
return distance
def any(self):
"""Returns true if there is any obstacle"""
any_obstacle = self.driver.irAll()
logger.debug('Any obstacle: {}'.format(any_obstacle))
return any_obstacle
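# Sketch of a minimal avoidance loop built from the classes above
# (illustrative only):
#   motors, sensor = Motors(), ObstacleSensor()
#   motors.forward()
#   if sensor.front_close():
#       motors.stop()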
class LineSensor(Driver):
def left(self):
"""Returns True if left line sensor detected dark line"""
left = not self.driver.irLeftLine()
logger.debug('Left line detected: {}'.format(left))
return left
def right(self):
"""Returns True if right line sensor detected dark line"""
right = not self.driver.irRightLine()
logger.debug('Right line detected: {}'.format(right))
return right
Paul-Ezell/cinder-1 | cinder/volume/drivers/glusterfs.py | Python | apache-2.0 | 17,430 | 0 |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import stat
import warnings
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers import remotefs as remotefs_drv
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('glusterfs_shares_config',
default='/etc/cinder/glusterfs_shares',
help='File with the list of available gluster shares'),
cfg.StrOpt('glusterfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for gluster shares.'),
]
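# The shares config referenced above is expected to list one gluster share
# per line, e.g. (hypothetical host and volume names):
#   gluster-host:/cinder-volumes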
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
driver.ExtendVD):
"""Gluster based cinder driver.
Creates a file on a Gluster share and uses it as a block device on the hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains
or the snapshot .info file simultaneously.
"""
driver_volume_type = 'glusterfs'
driver_prefix = 'glusterfs'
volume_backend_name = 'GlusterFS'
VERSION = '1.3.0'
def __init__(self, execute=processutils.execute, *args, **kwargs):
self._remotefsclient = None
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
self.base = getattr(self.configuration,
'glusterfs_mount_point_base',
CONF.glusterfs_mount_point_base)
self._remotefsclient = remotefs_brick.RemoteFsClient(
'glusterfs', root_helper, execute,
glusterfs_mount_point_base=self.base)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.GlusterfsException(
_('mount.glusterfs is not installed'))
else:
raise
self._refresh_mounts()
def _unmount_shares(self):
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
command = ['umount', mount_path]
try:
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
LOG.info(_LI("%s is already umounted"), share)
else:
LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
def _refresh_mounts(self):
try:
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
exc.stderr)
else:
raise
self._ensure_shares_mounted()
def _qemu_img_info(self, path, volume_name):
return super(GlusterfsDriver, self)._qemu_img_info_base(
path, volume_name, self.configuration.glusterfs_mount_point_base)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def _local_volume_dir(self, volume):
hashed = self._get_hash_str(volume['provider_location'])
path = '%s/%s' % (self.configuration.glusterfs_mount_point_base,
hashed)
return path
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(GlusterfsDriver, self)._update_volume_stats()
data = self._stats
global_capacity = data['total_capacity_gb']
global_free = data['free_capacity_gb']
thin_enabled = self.configuration.nas_volume_prov_type == 'thin'
if thin_enabled:
provisioned_capacity = self._get_provisioned_capacity()
else:
provisioned_capacity = round(global_capacity - global_free, 2)
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
data['thin_provisioning_support'] = thin_enabled
data['thick_provisioning_support'] = not thin_enabled
self._stats = data
@remotefs_drv.locked_volume_id_operation
def create_volume(self, volume):
"""Creates a volume."""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path,
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.nas_volume_prov_type == 'thin':
out_format = 'qcow2'
else:
ou
rafallo/p2c | settings.py | Python | mit | 760 | 0.003947 |
# -*- coding: utf-8 -*-
import os, tempfile
PROJECT_ROOT = os.path.dirname(__file__)
TMP_DIR = tempfile.mkdtemp()
DOWNLOAD_DIR = os.path.join(TMP_DIR, "download")
LOG_DIR = os.path.join(TMP_DIR, "logs")
try:
os.makedirs(DOWNLOAD_DIR)
except OSError:
pass
try:
os.makedirs(LOG_DIR)
except OSError:
pass
TEST_DIR = os.path.join(PROJECT_ROOT, "test_data")
try:
os.makedirs(TEST_DIR)
except OSError:
pass
STORAGE_PATH = os.path.join(TMP_DIR, "download", "configuration.json")
START_PORT = 6841
END_PORT = 6851
SUPPORTED_MOVIE_EXTENSIONS = (
"mp4", "avi", "mkv", "ogv", "ogg", "mpeg", "flv", "wmv")
SUPPORTED_SUBTITLE_EXTENSIONS = ("txt", "srt")
DOWNLOAD_PIECE_SIZE = 1024 * 1024 * 5
# for 1MB piece length
PRIORITY_INTERVAL = 2
asommer70/photolandia | photolandia/urls.py | Python | mit | 678 | 0.00295 |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('albums/', include('albums.urls')),
path('photos/', include('photos.urls')),
path('admin/', admin.site.urls),
path('login/', auth_views.login, name='login'),
path('logout/', auth_views.logout, {'template_name': 'account/logout.html'}, name='logout'),
path('api/login', views.api_login, name="api_login"),
path('', views.IndexView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
codingforentrepreneurs/digital-marketplace | src/products/migrations/0012_auto_20151120_0434.py | Python | mit | 630 | 0.001587 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.files.storage
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0011_auto_20151120_0137'),
]
operations = [
migrations.AlterField(
model_name='product',
name='media',
field=models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/Users/jmitch/Desktop/dm/static_cdn/protected'), null=True, upload_to=products.models.download_media_location, blank=True),
),
]
laterpay/rubberjack-cli | tests/test_cli.py | Python | mit | 6,380 | 0.003135 |
import boto
import mock
import moto
import tempfile
import unittest
from click.testing import CliRunner
from rubberjackcli.click import rubberjack
class CLITests(unittest.TestCase):
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy(self, cav, ue):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promote(self, ue, de):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('sys.exit')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promoting_same_version(self, ue, de, se):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
self.assertTrue(se.called)
@moto.mock_s3_deprecated
def test_sigv4(self):
CliRunner().invoke(rubberjack, ['--sigv4-host', 'foo', 'deploy'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--environment', 'wibble', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_without_updating_the_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--no-update-environment', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 0, "update_environment was called, but it shouldn't")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_bucket(self, ue, cav):
bucket_name = 'rbbrjck-test'
s3 = boto.connect_s3()
s3.create_bucket(bucket_name)
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['--bucket', bucket_name, 'deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
_, cav_kwargs = cav.call_args
self.assertEqual(bucket_name, cav_kwargs['s3_bucket'])
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
def test_promote_to_custom_environment(self, de, ue):
CUSTOM_TO_ENVIRONMENT = "loremipsum"
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': CUSTOM_TO_ENVIRONMENT,
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
result = CliRunner().invoke(rubberjack, ['promote', '--to-environment', CUSTOM_TO_ENVIRONMENT], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
bhupennewalkar1337/erpnext | erpnext/buying/doctype/request_for_quotation/request_for_quotation.py | Python | gpl-3.0 | 8,686 | 0.023947 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, random_string, cint
from frappe.utils.user import get_user_fullname
from frappe.utils.print_format import download_pdf
from frappe.desk.form.load import get_attachments
from frappe.core.doctype.communication.email import make
from erpnext.accounts.party import get_party_account_currency, get_party_details
from erpnext.stock.doctype.material_request.material_request import set_missing_values
from erpnext.controllers.buying_controller import BuyingController
STANDARD_USERS = ("Guest", "Administrator")
class RequestforQuotation(BuyingController):
def validate(self):
self.validate_duplicate_supplier()
self.validate_common()
self.update_email_id()
def validate_duplicate_supplier(self):
supplier_list = [d.supplier for d in self.suppliers]
if len(supplier_list) != len(set(supplier_list)):
frappe.throw(_("Same supplier has been entered multiple times"))
def validate_common(self):
pc = frappe.get_doc('Purchase Common')
pc.validate_for_items(self)
def update_email_id(self):
for rfq_supplier in self.suppliers:
if not rfq_supplier.email_id:
rfq_supplier.email_id = frappe.db.get_value("Contact", rfq_supplier.contact, "email_id")
def validate_email_id(self, args):
if not args.email_id:
frappe.throw(_("Row {0}: For supplier {0} email id is required to send email").format(args.idx, args.supplier))
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def send_to_supplier(self):
for rfq_supplier in self.suppliers:
if rfq_supplier.send_email:
self.validate_email_id(rfq_supplier)
# make new user if required
update_password_link = self.update_supplier_contact(rfq_supplier, self.get_link())
self.update_supplier_part_no(rfq_supplier)
self.supplier_rfq_mail(rfq_supplier, update_password_link, self.get_link())
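# Per-supplier flow (recap of send_to_supplier above): validate the email
# id, ensure a portal user and contact exist, fill in supplier part
# numbers, then mail the RFQ link via supplier_rfq_mail below.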
def get_link(self):
# RFQ link for supplier portal
return get_url("/rfq/" + self.name)
def update_supplier_part_no(self, args):
self.vendor = args.supplier
for item in self.items:
item.supplier_part_no = frappe.db.get_value('Item Supplier',
{'parent': item.item_code, 'supplier': args.supplier}, 'supplier_part_no')
def update_supplier_contact(self, rfq_supplier, link):
'''Create a new user for the supplier if not set in contact'''
update_password_link = ''
if frappe.db.exists("User", rfq_supplier.email_id):
user = frappe.get_doc("User", rfq_supplier.email_id)
else:
user, update_password_link = self.create_user(rfq_supplier, link)
self.update_contact_of_supplier(rfq_supplier, user)
return update_password_link
def update_contact_of_supplier(self, rfq_supplier, user):
if rfq_supplier.contact:
contact = frappe.get_doc("Contact", rfq_supplier.contact)
else:
contact = frappe.new_doc("Contact")
contact.first_name = rfq_supplier.supplier_name or rfq_supplier.supplier
contact.supplier = rfq_supplier.supplier
if not contact.email_id and not contact.user:
contact.email_id = user.name
contact.user = user.name
contact.save(ignore_permissions=True)
def create_user(self, rfq_supplier, link):
user = frappe.get_doc({
'doctype': 'User',
'send_welcome_email': 0,
'email': rfq_supplier.email_id,
'first_name': rfq_supplier.supplier_name or rfq_supplier.supplier,
'user_type': 'Website User',
'redirect_url': link
})
user.save(ignore_permissions=True)
update_password_link = user.reset_password()
return user, update_password_link
def supplier_rfq_mail(self, data, update_password_link, rfq_link):
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'update_password_link': update_password_link,
'message': frappe.render_template(self.message_for_supplier, data.as_dict()),
'rfq_link': rfq_link,
'user_fullname': full_name
}
subject = _("Request for Quotation")
template = "templates/emails/request_for_quotation.html"
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
message = frappe.get_template(template).render(args)
attachments = self.get_attachments()
self.send_email(data, sender, subject, message, attachments)
def send_email(self, data, sender, subject, message, attachments):
make(subject = subject, content=message,recipients=data.email_id,
sender=sender,attachments = attachments, send_email=True,
doctype=self.doctype, name=self.name)["name"]
frappe.msgprint(_("Email sent to supplier {0}").format(data.supplier))
def get_attachments(self):
attachments = [d.name for d in get_attachments(self.doctype, self.name)]
attachments.append(frappe.attach_print(self.doctype, self.name, doc=self))
return attachments
@frappe.whitelist()
def send_supplier_emails(rfq_name):
check_portal_enabled('Request for Quotation')
rfq = frappe.get_doc("Request for Quotation", rfq_name)
if rfq.docstatus==1:
rfq.send_to_supplier()
def check_portal_enabled(reference_doctype):
if not frappe.db.get_value('Portal Menu Item',
{'reference_doctype': reference_doctype}, 'enabled'):
frappe.throw(_("Request for Quotation is disabled to access from portal, for more check portal settings."))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["show_sidebar"] = True
return list_context
# This method is used to make supplier quotation from material request form.
@frappe.whitelist()
def make_supplier_quotation(source_name, for_supplier, target_doc=None):
def postprocess(source, target_doc):
target_doc.supplier = for_supplier
args = get_party_details(for_supplier, party_type="Supplier", ignore_permissions=True)
target_doc.currency = args.currency or get_party_account_currency('Supplier', for_supplier, source.company)
target_doc.buying_price_list = args.buying_price_list or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Request for Quotation", source_name, {
"Request for Quotation": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Quotation Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "request_for_quotation_item",
"parent": "request_for_quotation"
},
}
}, target_doc, postprocess)
return doclist
# This method is used to make supplier quotation from supplier's portal.
@frappe.whitelist()
def create_supplier_quotation(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
try:
sq_doc = frappe.get_doc({
"doctype": "Supplier Quotation",
"supplier": doc.get('supplier'),
"terms": doc.get("terms"),
"company": doc.get("company"),
"currency": doc.get('currency') or get_party_account_currency('Supplier', doc.get('supplier'), doc.get('company')),
"buying_price_list": doc.get('buying_price_list') or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
})
add_items(sq_doc, doc.get('supplier'), doc.get('items'))
sq_doc.flags.ignore_permissions = True
sq_doc.run_method("set_missing_values")
sq_doc.save()
frappe.msgprint(_("Supplier Quotation {0} created").format(sq_doc.name))
return sq_doc.name
except Exception:
return None
def add_items(sq_doc, supplier, items):
for data in items:
if data.get("qty") > 0:
if isinstance(data, dict):
data = frappe._dict(data)
create_rfq_items(sq_doc, supplier, data)
def create_rfq_items(sq_doc, supplier, data):
sq_doc.append('items', {
"item_code": data.item_code,
"item_name": data.item_name,
"description": data.description,
"qty": data.qty,
"rate": data.rate,
"supplier_part_no": frappe.db.get_value("Item Supplier", {'parent': data.item_code, 'supplier': supplier}, "supplier
gwpy/gwsumm | gwsumm/triggers.py | Python | gpl-3.0 | 14,721 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Read and store transient event triggers
"""
import warnings
from urllib.parse import urlparse
from astropy.table import vstack as vstack_tables
from lal.utils import CacheEntry
from ligo.lw import lsctables
from glue.lal import Cache
from gwpy.io.cache import cache_segments
from gwpy.table import (EventTable, filters as table_filters)
from gwpy.table.filter import parse_column_filters
from gwpy.table.io.pycbc import filter_empty_files as filter_pycbc_live_files
from gwpy.segments import (DataQualityFlag, SegmentList)
import gwtrigfind
from . import globalv
from .utils import (re_cchar, vprint, safe_eval)
from .config import GWSummConfigParser
from .channels import get_channel
# build list of default keyword arguments for reading ETGs
ETG_READ_KW = {
'cwb': {
'format': 'root',
'treename': 'waveburst',
},
'daily_ihope': {
'format': 'ligolw',
'tablename': 'sngl_inspiral',
'use_numpy_dtypes': True,
},
'daily_ahope': {
'format': 'ligolw',
'tablename': 'sngl_inspiral',
'use_numpy_dtypes': True,
},
'dmt_omega': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'dmt_wsearch': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'kleinewelle': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'kw': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'omega': {
'format': 'ascii',
},
'omegadq': {
'format': 'ascii',
},
'omicron': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'pycbc_live': {
'format': 'hdf5.pycbc_live',
'timecolumn': 'end_time',
'extended_metadata': False,
},
}
# set default for all LIGO_LW
for name in lsctables.TableByName:
ETG_READ_KW[name] = {
'format': 'ligolw',
'tablename': name,
'use_numpy_dtypes': True,
}
def get_etg_table(etg):
"""Find which table should be used for the given etg
Parameters
----------
etg : `str`
name of Event Trigger Generator for which to query
Returns
-------
table : `type`, subclass of `~ligo.lw.table.Table`
LIGO_LW table registered to the given ETG
Raises
------
KeyError
if the ETG is not registered
"""
try:
kw_ = get_etg_read_kwargs(etg)
form = kw_['format']
tablename = kw_['tablename']
except KeyError as e:
e.args = ('No LIGO_LW table registered to etg %r' % etg,)
raise
if form == 'ligolw':
return lsctables.TableByName[tablename]
raise KeyError("No LIGO_LW table registered to etg %r" % etg)
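# Usage sketch (assuming standard ligo.lw table names): the 'omicron' entry in
# ETG_READ_KW uses format 'ligolw' with tablename 'sngl_burst', so
# get_etg_table('omicron') returns lsctables.TableByName['sngl_burst'].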
def get_triggers(channel, etg, segments, config=GWSummConfigParser(),
cache=None, columns=None, format=None, query=True,
nproc=1, ligolwtable=None, filter=None,
timecolumn=None, verbose=False, return_=True):
"""Read a table of transient event triggers for a given channel.
"""
key = '%s,%s' % (str(channel), etg.lower())
# convert input segments to a segmentlist (for convenience)
if isinstance(segments, DataQualityFlag):
segments = segments.active
segments = SegmentList(segments)
# get read keywords for this etg
read_kw = get_etg_read_kwargs(etg, config=config, exclude=[])
read_kw['verbose'] = verbose
# extract columns (using function keyword if given)
if columns:
read_kw['columns'] = columns
    columns = read_kw.pop('columns', None)
# override with user options
if format:
read_kw['format'] = format
elif not read_kw.get('format', None):
read_kw['format'] = etg.lower()
if timecolumn:
read_kw['timecolumn'] = timecolumn
elif columns is not None and 'time' in columns:
read_kw['timecolumn'] = 'time'
# replace columns keyword
if read_kw['format'].startswith('ascii.'):
read_kw['include_names'] = columns
else:
read_kw['columns'] = columns
# parse filters
if filter:
read_kw['selection'].extend(parse_column_filters(filter))
# read segments from global memory
try:
havesegs = globalv.TRIGGERS[key].meta['segments']
except KeyError:
new = segments
else:
new = segments - havesegs
# read new triggers
if query and abs(new) != 0:
ntrigs = 0
vprint(" Grabbing %s triggers for %s" % (etg, str(channel)))
# -- setup ----------
# get find/read kwargs
trigfindkwargs = dict(
(k[9:], read_kw.pop(k)) for k in list(read_kw) if
k.startswith('trigfind-'))
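        # e.g. a 'trigfind-ext' entry in read_kw becomes trigfindkwargs['ext']
        # (sketch; k[9:] strips the 9-character 'trigfind-' prefix)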
trigfindetg = trigfindkwargs.pop('etg', etg)
# customise kwargs for this ETG
if etg.lower().replace('-', '_') in ['pycbc_live']:
read_kw['ifo'] = get_channel(channel).ifo
if etg.lower() in ['kw', 'kleinewelle']:
read_kw['selection'].append('channel == "%s"' % channel)
if etg.lower() in ['cwb'] and 'root' not in read_kw['format']:
read_kw.pop('treename')
# filter on segments
if 'timecolumn' in read_kw:
read_kw['selection'].append((
read_kw['timecolumn'], table_filters.in_segmentlist, new))
# -- read -----------
# if single file
if cache is not None and len(cache) == 1:
trigs = read_cache(cache, new, etg, nproc=nproc, **read_kw)
if trigs is not None:
add_triggers(trigs, key)
ntrigs += len(trigs)
# otherwise, loop over segments
else:
for segment in new:
# find trigger files
if cache is None and not etg.lower() == 'hacr':
try:
segcache = gwtrigfind.find_trigger_files(
str(channel), trigfindetg, segment[0], segment[1],
**trigfindkwargs)
except ValueError as e:
warnings.warn("Caught %s: %s"
% (type(e).__name__, str(e)))
continue
elif cache is not None:
segcache = cache
# read table
if etg.lower() == 'hacr':
from gwpy.table.io.hacr import get_hacr_triggers
trigs = get_hacr_triggers(channel, segment[0], segment[1],
columns=columns)
trigs.meta['segments'] = SegmentList([segment])
else:
trigs = read_cache(segcache, SegmentList([segment]), etg,
nproc=nproc, **read_kw)
# record triggers
if trigs is not None:
# add metadata
add_triggers(trigs, key)
ntrigs += len(trigs)
vprint(".")
vprint(" | %d events read\n" % ntrigs)
# if asked to read triggers, but didn't actually read any,
# create an empty table so that subsequent calls don't raise KeyErrors
if query and key not in globalv.TRIGGERS:
# find LIGO_LW table
|
JWDebelius/scikit-bio
|
skbio/parse/sequences/tests/__init__.py
|
Python
|
bsd-3-clause
| 378 | 0 |
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
|
jeetsukumaran/pstrudel
|
test/scripts/calc-tree-unlabeled-symmetric-difference.py
|
Python
|
gpl-2.0
| 1,420 | 0.003521 |
#! /usr/bin/env python
import sys
import math
import collections
import dendropy
def count_set_size_difference(v1, v2):
c1 = collections.Counter(v1)
c2 = collections.Counter(v2)
counted_matched = c1 & c2
matched = sorted(list(counted_matched.elements()))
counted_diffs = (c1 - c2) + (c2 - c1)
unmatched = sorted(list(counted_diffs.elements()))
diff = len(unmatched)
return diff
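# Worked example (sketch): v1 = [1, 2, 2, 3] and v2 = [2, 3, 3] share the
# multiset {2, 3}; the unmatched elements are [1, 2, 3], so the difference is 3.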
def count_subtree_leaf_set_sizes(tree):
internal_nodes = tree.internal_nodes()
    subtree_leaf_set_sizes = {}
for nd in internal_nodes:
leaf_count = 0
for leaf in nd.leaf_iter():
leaf_count += 1
if nd.taxon is not None:
label = nd.taxon.label
else:
label = nd.label
subtree_leaf_set_sizes[label] = leaf_count
return sorted(subtree_leaf_set_sizes.values())
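# Sketch: for a balanced four-leaf tree ((A,B),(C,D)), the internal nodes span
# 2, 2 and 4 leaves respectively, so this returns [2, 2, 4].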
def main():
trees = dendropy.TreeList.get_from_path(sys.argv[1], sys.argv[2])
for tree in trees:
tree.subtree_leaf_set_sizes = count_subtree_leaf_set_sizes(tree)
for tidx1 in range(len(trees)):
for tidx2 in range(len(trees)):
sys.stdout.write("{}\t{}\t{}\n".format(
tidx1,
tidx2,
count_set_size_difference(
trees[tidx1].subtree_leaf_set_sizes,
trees[tidx2].subtree_leaf_set_sizes)))
if __name__ == "__main__":
main()
|
burito/PyUI
|
pyui/themeBase.py
|
Python
|
lgpl-2.1
| 7,045 | 0.013343 |
# PyUI
# Copyright (C) 2001-2002 Sean C. Riley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Pyui Themes.
Themes are a method of customizing the drawing of widgets in a pyui GUI.
This module keeps NO state for the drawable objects - it just draws them on demand
from the widgets themselves, which hold all the state.
The constants for the theme objects live in pyui/locals.py
Themes have a default font that is used for any widgets that don't specify a font.
"""
import locals
import pyui
from pyui.desktop import getRenderer, getDesktop
class ThemeBase:
"""Theme objects (like buttons) are drawn through methods for each type of widget.
It returns the rect that the object draw fit into.
The rect passed in should always be in window coordinates.
"""
def __init__(self, renderer, fontFace="times", fontSize=12, fontFlags=0):
self.renderer = renderer
self.desktop = getDesktop()
self.fgColor = renderer.packColor(255,255,255)
self.bgColor = renderer.packColor(0,0,0)
# setup default font
self.defaultFontFace = fontFace
self.defaultFontSize = fontSize
self.defaultFontFlags = fontFlags
self.defaultFont = renderer.createFont(fontFace, fontSize, fontFlags)
#(self.defaultTextWidth, self.defaultTextHeight) = renderer.getTextSize("M", self.defaultFont)
self.defaultTextHeight = fontSize*2
# setup widget offsets
self.frameBorderLeft = 4
self.frameBorderRight = 4
self.frameBorderTop = int(fontSize *2.2)
self.frameBorderBottom = 4
self.tabsHeight = int(fontSize * 1.3)
self.scrollerSize = 10
### Information about the theme..
def getFrameBorderTop(self):
return self.frameBorderTop
def getFrameBorderLeft(self):
return self.frameBorderLeft
def getFrameBorderRight(self):
return self.frameBorderRight
def getFrameBorderBottom(self):
return self.frameBorderBottom
def getTabsHeight(self):
return self.tabsHeight
def getScrollerSize(self):
return self.scrollerSize
def getFgColor(self):
return self.fgColor
def getBgColor(self):
return self.bgColor
### mouse cursor functions
def setArrowCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_POINTER)
def setResizeCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_RESIZE)
def setButtonCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_HAND)
def setWaitCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_WAIT)
def setMovingCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_DRAG)
#####################################################################
###
### Utility drawing functions not specific to any widgets
###
#####################################################################
def draw3DRect(self, rect, color, reverse, thick=1):
"""Draw a 3D rectangle
"""
(r,g,b,a) = self.renderer.unpackColor(color)
a=255
colorLo = self.renderer.packColor(0,0,0,255)
colorHi = self.renderer.packColor(255- r/4, 255-g/4, 255-b/4, a)
if reverse:
(colorLo, colorHi) = (colorHi, colorLo)
(x,y,w,h) = rect
if w < 2 or h < 2:
return
self.renderer.drawRect( colorHi, (x, y, w-thick, thick) )
self.renderer.drawRect( colorHi, (x, y+thick, thick, h-thick) )
if w > 2 and h > 2:
self.renderer.drawRect( color, (x+thick, y+thick, w-thick*2, h-thick*2) )
self.renderer.drawRect( colorLo, (x+thick, y+h-thick, w-thick, thick) )
self.renderer.drawRect( colorLo, (x+w-thick, y+thick, thick, h-thick*2) )
def drawOutlineRect(self, rect, color, thick=1):
(x,y,w,h) = rect
self.renderer.drawRect(color, (x,y,w,thick))
self.renderer.drawRect(color, (x,y+thick,thick,h-2*thick))
self.renderer.drawRect(color, (x+w-thick,y+thick,thick,h-2*thick))
self.renderer.drawRect(color, (x,y+h-thick,w,thick))
def drawGradient(self, rect, vertical, c1, c2):
if vertical:
self.renderer.drawGradient(rect, c1, c2, c1, c2)
else:
self.renderer.drawGradient(rect, c1, c1, c2, c2)
#####################################################################
###
### Widgets specific drawing functions.
### These are the methods for actual themes to implement.
###
#####################################################################
def drawButton(self, rect, title, hasFocus, status, enabled, font=None, shadow=0,fgColor=0, bgColor=0,roColor=0):
return rect
def drawImageButton(self, rect, filename, title, hasFocus, status):
        return rect
def drawLabel(self, rect, title, color = None, font = None, shadow=0, align=0 ):
return rect
def drawCheckBox(self, rect, text, checkState):
return rect
def drawSliderBar(self, rect, range, position, BARWIDTH=8):
return rect
def drawEdit(self, rect, text, hasFocus, caretPos, selectPos):
return rect
def drawSplitter(self, rect):
return rect
def drawToolTip(self, text, rect):
return rect
# scroll bar methods
def drawScrollBack(self, rect):
return rect
def drawScrollButtonUp(self, rect):
return rect
def drawScrollButtonDown(self, rect):
return rect
def drawScrollBar(self, rect):
return rect
# tabbed panel methods
def drawTabItem(self, rect, title, active):
return rect
def drawTabHeader(self, rect):
return rect
# menu methods
def drawMenuBar(self, rect):
return rect
def drawMenuBarItem(self, rect, title, selected):
return rect
def drawMenu(self, rect):
return rect
def drawMenuItem(self, rect, title, selected, icon = None):
return rect
# list box methods
def drawListBox(self, rect):
return rect
def drawListBoxItem(self, rect, title, selected, color):
return rect
# frame methods
def drawFrame(self, rect, title):
return rect
|
sile16/python-isilon-api
|
isilon/__init__.py
|
Python
|
mit
| 500 | 0.018 |
import session
import namespace
import platform
from .exceptions import ObjectNotFound, APIError, ConnectionError, IsilonLibraryError
class API(object):
'''Implements higher level functionality to interface with an Isilon cluster'''
def __init__(self, *args, **kwargs):
self.session = session.Session(*args, **kwargs)
self.namespace = namespace.Namespace(self.session)
        self.platform = platform.Platform(self.session)
|
regionbibliotekhalland/digitalasagor
|
progressdlg.py
|
Python
|
gpl-3.0
| 4,102 | 0.015115 |
# Copyright 2013 Regionbibliotek Halland
#
# This file is part of Digitala sagor.
#
# Digitala sagor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Digitala sagor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Digitala sagor. If not, see <http://www.gnu.org/licenses/>.
import Tkinter as tki
import ttk
from language import lang
import language as lng
import dialog
import thread
class _ProgressDialog(tki.Frame):
"""A Frame intended to be placed in a Toplevel object.
This class will load the images of a datamodel, visualize the progress and
then close the Toplevel window. It will not be possible to abort the
load operation by closing the Toplevel.
"""
def __init__(self, toplevel, datamodel):
"""Initiate and make a pending call to _load()
Arguments
toplevel -- Toplevel object in which this Frame will be placed
datamodel -- datamodel in which to load the images
"""
tki.Frame.__init__(self, toplevel)
self._parent = toplevel
self._datamodel = datamodel
self._progresstext = tki.StringVar()
self.grid()
self.rowconfigure(1, weight = 1)
l = tki.Label(self, textvariable = self._progresstext)
l.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tki.W)
pbar = ttk.Progressbar(self, orient = tki.HORIZONTAL, length = 400, mode = 'determinate', maximum = 1.0)
self._pbar = pbar
pbar.grid(row = 1, column = 0, columnspan = 2, padx = 5, pady = 5, sticky = tki.W + tki.E)
toplevel.after_idle(self._load)
toplevel.protocol("WM_DELETE_WINDOW", self._dontquit)
def _updateBar(self, progress):
"""Callback function to Datamodel.loadImageData
Argument
progress -- current load progress; 0.0 <= progress <= 1.0
"""
self._pbar.config(value = progress)
self._progresstext.set(lang[lng.txtLoadImageProgress].format(progress))
def _load(self):
"""Start the load operation by launching a thread"""
thread.start_new(self._thrLoadImages, (self, None))
def _thrLoadImages(self, dummy, d2):
"""Perform the load operation and make a pending call to _quit
Arguments
dummy -- unused
d2 -- unused
"""
self._datamodel.loadImageData(self._updateBar)
self._pbar.config(value = 1)
self._parent.after_idle(self._quit)
def _dontquit(self):
"""Event handler for WM_DELETE_WINDOW that does nothing"""
pass
def _quit(self):
"""Close the Toplevel object"""
self._parent.destroy()
class DataModelLoader:
"""Display a progress bar while loading a datamodel"""
def __init__(self, root, datamodel):
"""Initiate
Arguments
root -- Tk object
datamodel -- datamodel in which to load the images
"""
self._root = root
self._datamodel = datamodel
def load(self):
"""Load the images in the datamodel while displaying a progress dialog"""
if(self._datamodel.isEmpty()):
return
dlg = dialog.getDlg(self._root, lang[lng.dlgLoadImages])
pd = _ProgressDialog(dlg, self._datamodel)
dialog.showDlg(dlg)
|
mhbu50/frappe
|
frappe/core/doctype/log_settings/test_log_settings.py
|
Python
|
mit
| 182 | 0.010989 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
# import frappe
import unittest
class TestLogSettings(unittest.TestCase):
pass
|
jdmonaco/vmo-feedback-model
|
src/spike_reset.py
|
Python
|
mit
| 3,535 | 0.040181 |
#!/usr/bin/env python
#encoding: utf-8
import numpy as np
from pylab import *
dt=0.01 # msec
tau=40.0 # msec
tmax=1000 # msec
V_spk=-20
V_thres=-50.0
V_reset=-70.0
E_leak=V_reset
R_m=10.0 # MΩ
tt=np.arange(0, tmax, dt) #0:dt:tmax
Nt=len(tt) #length(tt)
V=np.zeros((Nt,))
V2=np.zeros((Nt,))
S=np.zeros((Nt,))
S2=np.zeros((Nt,))
#I0=np.zeros((Nt,))
# Plot characteristics
Vlim=E_leak-10,V_spk+10
# tlim=0,1000 #msec
tlim=200,800 #msec
nrows=4
LW=2
colors=[]
cmap = cm.hsv
# Solved Dayan & Abbott (2001) Ch.5 Eq. 5.12 for I_e using r_isi = 7 Hz:
theta_freq = 7
def I_e(f):
tau_isi = 1000.0/f
return -(1/R_m) * (E_leak + (V_reset - V_thres*exp(tau_isi/tau))/(exp(tau_isi/tau) - 1))
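# Derivation sketch (assumption, from the LIF interspike-interval formula):
# tau_isi = tau*ln((E_leak + R_m*I - V_reset)/(E_leak + R_m*I - V_thres)).
# Writing a = exp(tau_isi/tau) and solving for I gives
#   I = -(1/R_m)*(E_leak + (V_reset - V_thres*a)/(a - 1)),
# which is the expression above; I_e(7) yields the current for 7 Hz firing.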
I_const=I_e(theta_freq) # 2.0578580 # 2.1 # constant current
print 'I_const = %.4f nA'%I_const
Dt=25 # msec: STDP half window
n=int(Dt/dt)
hPlus=1.0*I_const # max height
hMinus=2.0*hPlus
dI=np.r_[np.linspace(0,hPlus,n),0,np.linspace(-hMinus,0,n)]
## first simulation
V[0]=V_reset
for i in xrange(1, Nt): #=2:Nt
V[i]=((tau-dt)/tau)*V[i-1]+(dt/tau)*(E_leak+R_m*I_const)
if V[i]>=V_thres:
V[i]=V_reset
S[i]=1
k=np.nonzero(S>0)[0]
Nspk=len(k)
ioff()
figure(1, figsize=(10.0, 14.7625))
clf()
subplot(nrows,1,1)
plot(tt,V,'k-',lw=LW)
# hold(True)
# plot([[k*dt,k*dt]*Nspk,[V_reset,V_spk],'b-',lw=LW)
title('control')
xlim(tlim)
ylim(Vlim)
## second simulation
T=(k[2]-k[1])*dt # period
Nsuper=5 # number of super-cycle for testing different timing
timeList=np.linspace((-T/2), T/2,Nsuper)
phaseList=np.zeros((Nsuper,))
plot_spikes =True
for i_super in xrange(Nsuper): #=1:Nsuper
k0=k[2]+int(timeList[i_super]/dt)
I=np.zeros((Nt,))
I[k0-n:k0+n+1]=dI
V2[0]=V_reset
S2=np.zeros((Nt,))
for i in xrange(1, Nt): #=2:Nt
V2[i]=((tau-dt)/tau)*V2[i-1]+(dt/tau)*(E_leak+R_m*(I_const+I[i]))
if V2[i]>=V_thres:
V2[i]=V_reset
S2[i]=1
k2=np.nonzero(S2>0)[0]
Nspk2=len(k2)
subplot(nrows,1,2)
color = cmap(i_super/float(Nsuper))
colors.append(color)
plot(tt,V2,'-',zorder=-Nsuper+i_super,lw=LW,c=color)
if plot_spikes:
hold(True)
plot([k2*dt]*2, [V_reset,V_spk], '-',zorder=-Nsuper+i_super,c=color,lw=LW)
title('Adding input')
subplot(nrows,1,3)
plot(tt,I,c=color,lw=LW,zorder=-Nsuper+i_super)
draw()
# Wrap new phase around half-cycles
newphase=(k2[4]-k[4])*2*dt/T
if newphase<-1:
newphase+=2
elif newphase >=1:
newphase-=2
    phaseList[i_super] = newphase
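    # e.g. a raw offset of 1.25 half-cycles wraps to -0.75 (sketch)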
subplot(nrows,1,2)
plot([k*dt]*2, [V_reset,V_spk], 'k-',lw=LW,zorder=-50)
xlim(tlim)
ylim(Vlim)
ylabel('V')
subplot(nrows,1,3)
xlim(tlim)
ylim(-25, 25)
ylabel(r'$I_e$ (pA)')
# plot(timeList/T, phaseList,'o-')
# xlabel('Pulse timing (Period)')
# ylabel('Phase reset (degree)')
# grid(True)
subplot(nrows,2,7)
X=2*timeList/T
Y=phaseList+0.0
# Unwrap phases
jump_ix = np.argmax(np.abs(np.diff(Y)))+1
X = r_[X[jump_ix:]-2, X[:jump_ix]]
Y = r_[Y[jump_ix:], Y[:jump_ix]]
colors = colors[jump_ix:] + colors[:jump_ix]
midX = X[int(Nsuper/2)+1]
for i_super in xrange(Nsuper):
plot(X[i_super],Y[i_super],'o',mec='k',
mfc=colors[i_super],ms=6,mew=1,zorder=i_super)
print X[i_super],Y[i_super]
# p=np.polyfit(x,y,1)
# yp=np.polyval(p,x)
# plot(x,yp,'r-',zorder=0)
# plot(X,Y,'b-',lw=1,zorder=0)
ylabel(r'Phase Reset ($\pi$)')
ax = gca()
ax.set_xticks(linspace(-1, 1, 5))
ax.set_yticks(linspace(-1, 1, 5))
axis('equal')
axis('image')
xlim(midX-1.2, midX+1.2)
ylim(-1.2, 1.2)
ion()
show()
|
gengwg/leetcode
|
142_linked_list_cycle_ii.py
|
Python
|
apache-2.0
| 976 | 0 |
# 142. Linked List Cycle II
# Given a linked list, return the node where the cycle begins.
# If there is no cycle, return null.
#
# Note: Do not modify the linked list.
#
# Follow up:
# Can you solve it without using extra space?
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None or head.next is None:
return None
slow = head.next
fast = head.next.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
break
if fast is None or fast.next is None:
return None
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return slow
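# Usage sketch (hypothetical values): build 1 -> 2 -> 3 -> back to 2, then
#   head = ListNode(1); a = ListNode(2); b = ListNode(3)
#   head.next = a; a.next = b; b.next = a
#   Solution().detectCycle(head) is a  # True: the cycle begins at the '2' node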
|
anryko/ansible
|
lib/ansible/modules/cloud/google/gcpubsub_info.py
|
Python
|
gpl-3.0
| 4,597 | 0.00261 |
#!/usr/bin/python
# Copyright 2016 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub_info
version_added: "2.3"
short_description: List Topics/Subscriptions and Messages from Google PubSub.
description:
- List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
topic/subscription management.
See U(https://cloud.google.com/pubsub/docs) for an overview.
- This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
  - The list state enables the user to list topics or subscriptions in the project. See examples for details.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: False
view:
description:
- Choices are 'topics' or 'subscriptions'
required: True
state:
description:
- list is the only valid option.
required: False
'''
EXAMPLES = '''
## List all Topics in a project
- gcpubsub_info:
view: topics
state: list
## List all Subscriptions in a project
- gcpubsub_info:
view: subscriptions
state: list
## List all Subscriptions for a Topic in a project
- gcpubsub_info:
view: subscriptions
topic: my-topic
state: list
'''
RETURN = '''
subscriptions:
description: List of subscriptions.
returned: When view is set to subscriptions.
type: list
sample: ["mysubscription", "mysubscription2"]
topic:
description: Name of topic. Used to filter subscriptions.
returned: Always
type: str
sample: "mytopic"
topics:
description: List of topics.
returned: When view is set to topics.
type: list
sample: ["mytopic", "mytopic2"]
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
def list_func(data, member='name'):
"""Used for state=list."""
return [getattr(x, member) for x in data]
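# Sketch: list_func(client.list_topics()) -> ['topic-a', 'topic-b']
# (hypothetical names; it extracts the 'name' attribute of each object).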
def main():
module = AnsibleModule(argument_spec=dict(
view=dict(choices=['topics', 'subscriptions'], default='topics'),
topic=dict(required=False),
state=dict(choices=['list'], default='list'),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if module._name == 'gcpubsub_facts':
module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'", version='2.13')
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
CLIENT_MINIMUM_VERSION = '0.22.0'
if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
mod_params = {}
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['view'] = module.params.get('view')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = 'ansible-pubsub-0.1'
json_output = {}
if mod_params['view'] == 'topics':
json_output['topics'] = list_func(pubsub_client.list_topics())
elif mod_params['view'] == 'subscriptions':
if mod_params['topic']:
            t = pubsub_client.topic(mod_params['topic'])
json_output['subscriptions'] = list_func(t.list_subscriptions())
else:
json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
json_output['changed'] = False
json_output.update(mod_params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
lmr/autotest
|
client/shared/base_packages.py
|
Python
|
gpl-2.0
| 45,844 | 0.000284 |
"""
This module defines the BasePackageManager Class which provides an
implementation of the packaging system API providing methods to fetch,
upload and remove packages. Site specific extensions to any of these methods
should inherit this class.
"""
import fcntl
import logging
import os
import re
import shutil
from autotest.client import os_dep
from autotest.client.shared import error, utils
from autotest.client.shared.settings import settings, SettingsError
# the name of the checksum file that stores the packages' checksums
CHECKSUM_FILE = "packages.checksum"
def has_pbzip2():
'''
Check if parallel bzip2 is available on this system.
:return: True if pbzip2 is available, False otherwise
'''
try:
os_dep.command('pbzip2')
except ValueError:
return False
return True
# is parallel bzip2 available for use?
_PBZIP2_AVAILABLE = has_pbzip2()
def parse_ssh_path(repo):
'''
Parse an SSH url
:type repo: string
:param repo: a repo uri like ssh://xx@xx/path/to/
:return: tuple with (host, remote_path)
'''
match = re.search('^ssh://([^/]+)(/.*)$', repo)
if match:
return match.groups()
else:
raise error.PackageUploadError(
"Incorrect SSH path in settings: %s" % repo)
def repo_run_command(repo, cmd, ignore_status=False, cd=True):
"""
Run a command relative to the repo path
    This is basically a utils.run() wrapper that runs from within the repo
    directory when appropriate; parameters such as cmd and ignore_status
    are passed along to it.
:type repo: string
:param repo: a repository url
:type cmd: string
:param cmd: the command to be executed. This is passed along to utils.run()
:type ignore_status: boolean
:param ignore_status: do not raise an exception, no matter what the exit
code of the command is.
:type cd: boolean
    :param cd: whether to change the working directory to the repo directory
before running the specified command.
:return: a CmdResult object or None
:raise CmdError: the exit code of the command execution was not 0
"""
os_dep.command("ssh")
repo = repo.strip()
run_cmd = None
cd_str = ''
if repo.startswith('ssh://'):
username = None
hostline, remote_path = parse_ssh_path(repo)
if cd:
cd_str = 'cd %s && ' % remote_path
if '@' in hostline:
username, host = hostline.split('@')
run_cmd = 'ssh %s@%s "%s%s"' % (username, host, cd_str, cmd)
else:
run_cmd = 'ssh %s "%s%s"' % (hostline, cd_str, cmd)
else:
if cd:
cd_str = 'cd %s && ' % repo
run_cmd = "%s%s" % (cd_str, cmd)
if run_cmd:
return utils.run(run_cmd, ignore_status=ignore_status)
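# Sketch of the composed command (hypothetical host and path):
# repo_run_command('ssh://user@host/var/repo', 'ls') runs
#   ssh user@host "cd /var/repo && ls"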
def create_directory(repo):
'''
Create a directory over at the remote repository
:type repo: string
:param repo: the repo URL containing the remote directory path
:return: a CmdResult object or None
'''
remote_path = repo
if repo.startswith('ssh://'):
_, remote_path = parse_ssh_path(repo)
repo_run_command(repo, 'mkdir -p %s' % remote_path, cd=False)
def check_diskspace(repo, min_free=None):
'''
Check if the remote directory over at the pkg repo has available diskspace
If the amount of free space is not supplied, it is taken from the global
    configuration file, section [PACKAGES], key 'minimum_free_space'. The units
    used are SI, that is, 1 GB = 10**9 bytes.
:type repo: string
:param repo: a remote package repo URL
:type min_free: int
    :param min_free: minimum amount of free space, in GB (10**9 bytes)
:raise error.RepoUnknownError: general repository error condition
:raise error.RepoDiskFullError: repository does not have at least the
requested amount of free disk space.
'''
if min_free is None:
min_free = settings.get_value('PACKAGES', 'minimum_free_space',
type=int, default=1)
try:
df = repo_run_command(repo,
'df -PB %d . | tail -1' % 10 ** 9).stdout.split()
free_space_gb = int(df[3])
except Exception, e:
raise error.RepoUnknownError('Unknown Repo Error: %s' % e)
if free_space_gb < min_free:
raise error.RepoDiskFullError('Not enough disk space available '
'%sg < %sg' % (free_space_gb, min_free))
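# Sketch: 'df -PB 1000000000 .' reports sizes in 10**9-byte blocks, so df[3]
# (the 'Available' column of the last output line) is the free space in GB.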
def check_write(repo):
'''
Checks that the remote repository directory is writable
:type repo: string
:param repo: a remote package repo URL
:raise error.RepoWriteError: repository write error
'''
try:
repo_testfile = '.repo_test_file'
repo_run_command(repo, 'touch %s' % repo_testfile).stdout.strip()
repo_run_command(repo, 'rm ' + repo_testfile)
except error.CmdError:
raise error.RepoWriteError('Unable to write to ' + repo)
def trim_custom_directories(repo, older_than_days=None):
'''
Remove old files from the remote repo directory
The age of the files, if not provided by the older_than_days parameter is
taken from the global configuration file, at section [PACKAGES],
configuration item 'custom_max_age'.
:type repo: string
:param repo: a remote package repo URL
'''
if not repo:
return
if older_than_days is None:
older_than_days = settings.get_value('PACKAGES', 'custom_max_age',
type=int, default=40)
cmd = 'find . -type f -atime +%s -exec rm -f {} \;' % older_than_days
repo_run_command(repo, cmd, ignore_status=True)
class RepositoryFetcher(object):
'''
Base class with common functionality for repository fetchers
'''
url = None
def __init__(self, package_manager, repository_url):
"""
Initializes a new Repository Fetcher
:type package_manager: BasePackageManager instance
:param package_manager: and instance of BasePackageManager class
:type repository_url: string
:param repository_url: The base URL of the repository
"""
self.run_command = package_manager._run_command
self.url = repository_url
self.pkgmgr = package_manager
def install_pkg_setup(self, name, fetch_dir, install):
"""
Install setup for a package based on fetcher type.
:type name: string
:param name: The filename to be munged
:type fetch_dir: string
:param fetch_dir: The destination path to be munged
:type install: boolean
:param install: Whether this is be called from the install path or not
:return: tuple with (name, fetch_dir)
"""
if install:
fetch_dir = os.path.join(fetch_dir, re.sub("/", "_", name))
return (name, fetch_dir)
def fetch_pkg_file(self, filename, dest_path):
"""
Fetch a package file from a package repository.
:type filename: string
:param filename: The filename of the package file to fetch.
:type dest_path: string
:param dest_path: Destination path to download the file to.
:raise PackageFetchError: if the fetch failed
"""
        raise NotImplementedError()
def install_pkg_post(self, filename, fetch_dir,
install_dir, preserve_install_dir=False):
"""
Fetcher specific post install
:type filename: string
:param filename: The filename of the package to install
:type fetch_dir: string
:param fetch_dir: The fetched path of the package
:type install_dir: string
:param install_dir: The path to install the package to
:type preserve_install_dir: boolean
        :param preserve_install_dir: Preserve the install directory
"""
# check to see if the install_dir exists and if it does
# then check to see if the .checksum file is the latest
install_dir_exists = False
try:
self.pkgmgr._run_command("ls %s" % install_dir)
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/f2py/tests/test_callback.py
|
Python
|
bsd-2-clause
| 3,040 | 0 |
from __future__ import division, absolute_import, print_function
import math
import textwrap
from numpy import array
from numpy.testing import run_module_suite, assert_, assert_equal, dec
import util
class TestF77Callback(util.F2PyTest):
code = """
subroutine t(fun,a)
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine func(a)
cf2py intent(in,out) a
integer a
a = a + 11
end
subroutine func0(a)
cf2py intent(out) a
integer a
a = 11
end
subroutine t2(a)
cf2py intent(callback) fun
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine string_callback(callback, a)
external callback
double precision callback
double precision a
character*1 r
cf2py intent(out) a
r = 'r'
a = callback(r)
end
"""
@dec.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
@dec.slow
def test_docstring(self):
expected = """
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
"""
assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip())
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert_(r == 4, repr(r))
r = t(lambda a: 5, fun_extra_args=(6,))
assert_(r == 5, repr(r))
r = t(lambda a: a, fun_extra_args=(6,))
assert_(r == 6, repr(r))
r = t(lambda a: 5 + a, fun_extra_args=(7,))
assert_(r == 12, repr(r))
r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(math.degrees, fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(self.module.func, fun_extra_args=(6,))
assert_(r == 17, repr(r))
r = t(self.module.func0)
assert_(r == 11, repr(r))
r = t(self.module.func0._cpointer)
assert_(r == 11, repr(r))
class A(object):
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert_(r == 7, repr(r))
r = t(a.mth)
assert_(r == 9, repr(r))
def test_string_callback(self):
def callback(code):
if code == 'r':
return 0
else:
return 1
f = getattr(self.module, 'string_callback')
r = f(callback)
assert_(r == 0, repr(r))
if __name__ == "__main__":
run_module_suite()
|
gdsfactory/gdsfactory
|
gdsfactory/tests/test_port_from_csv.py
|
Python
|
mit
| 241 | 0 |
from gdsfactory.port import csv2port
def test_csv2port(data_regression):
import gdsfactory as gf
name = "straight"
csvpath = gf.CONFIG["gdsdir"] / f"{name}.ports"
ports = csv2port(csvpath)
    data_regression.check(ports)
|
dnjohnstone/hyperspy
|
hyperspy/tests/learn/test_mlpca.py
|
Python
|
gpl-3.0
| 2,332 | 0.000429 |
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy.learn.mlpca import mlpca
from hyperspy.signals import Signal1D
@pytest.mark.parametrize("tol", [1e-9, 1e-6])
@pytest.mark.parametrize("max_iter", [100, 500])
def test_mlpca(tol, max_iter):
# Define shape etc.
m = 100 # Dimensionality
n = 101 # Number of samples
r = 3
rng = np.random.RandomState(101)
U = rng.uniform(0, 1, size=(m, r))
V = rng.uniform(0, 10, size=(n, r))
varX = U @ V.T
X = rng.poisson(varX)
rank = r
    # Reconstruction tolerance for the norm check below
    norm_tol = 300
    U, S, V, Sobj = mlpca(X, varX, output_dimension=rank, tol=tol, max_iter=max_iter)
    Xest = U @ np.diag(S) @ V.T
    # Check the low-rank component MSE
    normX = np.linalg.norm(Xest - X)
    assert normX < norm_tol
# Check singular values
S_norm = S / np.sum(S)
np.testing.assert_allclose(S_norm[:rank].sum(), 1.0)
def test_signal():
# Define shape etc.
m = 100 # Dimensionality
n = 101 # Number of samples
r = 3
rng = np.random.RandomState(101)
U = rng.uniform(0, 1, size=(m, r))
V = rng.uniform(0, 10, size=(n, r))
varX = U @ V.T
X = rng.poisson(varX).astype(float)
# Test tolerance
tol = 300
x = X.copy().reshape(10, 10, 101)
s = Signal1D(x)
s.decomposition(algorithm="mlpca", output_dimension=r)
# Check singular values
v = s.get_explained_variance_ratio().data
np.testing.assert_allclose(v[:r].sum(), 1.0)
# Check the low-rank component MSE
Y = s.get_decomposition_model(r).data
normX = np.linalg.norm(Y.reshape(m, n) - X)
assert normX < tol
|
NoMoKeTo/lircpy
|
lircpy/__init__.py
|
Python
|
apache-2.0
| 142 | 0 |
from .lircpy import LircPy
from .exceptions import InvalidResponseError, LircError
__all__ = ['LircPy', 'InvalidResponseError', 'LircError']
|
aykut/django-oscar
|
oscar/apps/order/abstract_models.py
|
Python
|
bsd-3-clause
| 19,620 | 0.007594 |
from itertools import chain
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.db.models import Sum
from django.template import Template, Context
class AbstractOrder(models.Model):
u"""An order"""
number = models.CharField(_("Order number"), max_length=128, db_index=True)
# We track the site that each order is placed within
site = models.ForeignKey('sites.Site')
basket = models.ForeignKey('basket.Basket', null=True, blank=True)
# Orders can be anonymous so we don't always have a customer ID
user = models.ForeignKey(User, related_name='orders', null=True, blank=True)
# Billing address is not always required (eg paying by gift card)
billing_address = models.ForeignKey('order.BillingAddress', null=True, blank=True)
# Total price looks like it could be calculated by adding up the
# prices of the associated lines, but in some circumstances extra
# order-level charges are added and so we need to store it separately
total_incl_tax = models.DecimalField(_("Order total (inc. tax)"), decimal_places=2, max_digits=12)
total_excl_tax = models.DecimalField(_("Order total (excl. tax)"), decimal_places=2, max_digits=12)
# Shipping charges
shipping_incl_tax = models.DecimalField(_("Shipping charge (inc. tax)"), decimal_places=2, max_digits=12, default=0)
shipping_excl_tax = models.DecimalField(_("Shipping charge (excl. tax)"), decimal_places=2, max_digits=12, default=0)
# Not all lines are actually shipped (such as downloads), hence shipping address
# is not mandatory.
shipping_address = models.ForeignKey('order.ShippingAddress', null=True, blank=True)
shipping_method = models.CharField(_("Shipping method"), max_length=128, null=True, blank=True)
# Use this field to indicate that an order is on hold / awaiting payment
status = models.CharField(_("Status"), max_length=100, null=True, blank=True)
# Index added to this field for reporting
date_placed = models.DateTimeField(auto_now_add=True, db_index=True)
@property
def basket_total_incl_tax(self):
u"""Return basket total including tax"""
return self.total_incl_tax - self.shipping_incl_tax
@property
def basket_total_excl_tax(self):
u"""Return basket total excluding tax"""
return self.total_excl_tax - self.shipping_excl_tax
@property
def num_lines(self):
return self.lines.count()
@property
def num_items(self):
u"""
Returns the number of items in this order.
"""
num_items = 0
for line in self.lines.all():
num_items += line.quantity
return num_items
@property
def shipping_status(self):
events = self.shipping_events.all()
if not len(events):
return ''
# Collect all events by event-type
map = {}
for event in events:
event_name = event.event_type.name
if event_name not in map:
map[event_name] = []
map[event_name] = list(chain(map[event_name], event.line_quantities.all()))
# Determine last complete event
status = _("In progress")
for event_name, event_line_quantities in map.items():
if self._is_event_complete(event_line_quantities):
status = event_name
return status
def _is_event_complete(self, event_quantites):
# Form map of line to quantity
map = {}
for event_quantity in event_quantites:
line_id = event_quantity.line_id
map.setdefault(line_id, 0)
map[line_id] += event_quantity.quantity
for line in self.lines.all():
if map[line.id] != line.quantity:
return False
return True
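    # Sketch: with order lines {line 1: qty 3, line 2: qty 1}, an event is
    # complete only if the summed event quantities match those totals exactly.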
class Meta:
abstract = True
ordering = ['-date_placed',]
permissions = (
("can_view", "Can view orders (eg for reporting)"),
)
def __unicode__(self):
return u"#%s" % (self.number,)
class AbstractOrderNote(models.Model):
u"""A note against an order."""
order = models.ForeignKey('order.Order', related_name="notes")
user = models.ForeignKey('auth.User')
message = models.TextField()
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
def __unicode__(self):
return u"'%s' (%s)" % (self.message[0:50], self.user)
class AbstractCommunicationEvent(models.Model):
u"""
An order-level event involving a communication to the customer, such
as an confirmation email being sent."""
order = models.ForeignKey('order.Order', related_name="communication_events")
type = models.ForeignKey('order.CommunicationEventType')
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
def __unicode__(self):
return u"'%s' event for order #%s" % (self.type.name, self.order.number)
class AbstractCommunicationEventType(models.Model):
u"""Communication events are things like 'OrderConfirmationEmailSent'"""
# Code is used in forms
code = models.SlugField(max_length=128)
# Name is the friendly description of an event
name = models.CharField(max_length=255)
# Template content for emails
email_subject_template = models.CharField(max_length=255, blank=True)
email_body_template = models.TextField(blank=True, null=True)
# Template content for SMS messages
    sms_template = models.CharField(max_length=170, blank=True)
def save(self, *args, **kwargs):
if not self.code:
self.code = slugify(self.name)
super(AbstractCommunicationEventType, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = _("Communication event types")
def __unicode__(self):
return self.name
def has_email_templates(self):
        return self.email_subject_template and self.email_body_template
def get_email_subject_for_order(self, order, **kwargs):
return self._merge_template_with_context(self.email_subject_template, order, **kwargs)
def get_email_body_for_order(self, order, **kwargs):
return self._merge_template_with_context(self.email_body_template, order, **kwargs)
def _merge_template_with_context(self, template, order, **kwargs):
ctx = {'order': order}
ctx.update(**kwargs)
return Template(template).render(Context(ctx))
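    # Sketch: with email_subject_template = 'Order {{ order.number }} confirmed'
    # (hypothetical), get_email_subject_for_order(order) renders it through
    # Django's Template/Context machinery with the order in scope.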
class AbstractLine(models.Model):
u"""
A order line (basically a product and a quantity)
Not using a line model as it's difficult to capture and payment
information when it splits across a line.
"""
order = models.ForeignKey('order.Order', related_name='lines')
# We store the partner, their SKU and the title for cases where the product has been
# deleted from the catalogue. We also store the partner name in case the partner
# gets deleted at a later date.
partner = models.ForeignKey('partner.Partner', related_name='order_lines', blank=True, null=True, on_delete=models.SET_NULL)
partner_name = models.CharField(_("Partner name"), max_length=128)
partner_sku = models.CharField(_("Partner SKU"), max_length=128)
title = models.CharField(_("Title"), max_length=255)
# We don't want any hard links between orders and the products table
product = models.ForeignKey('product.Item', on_delete=models.SET_NULL, null=True)
quantity = models.PositiveIntegerField(default=1)
# Price information (these fields are actually redundant as the information
# can be calculated from the LinePrice models
line_price_incl_tax = models.DecimalField(decimal_places=2, max_digits=12)
line_price_excl_tax = models.DecimalField(decimal_places=2, max_digits=12)
# Price information before discounts are applied
|
Yukarumya/Yukarum-Redfoxes
|
taskcluster/taskgraph/task/test.py
|
Python
|
mpl-2.0
| 5,066 | 0.000987 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
from . import transform
from ..util.yaml import load_yaml
logger = logging.getLogger(__name__)
class TestTask(transform.TransformTask):
"""
A task implementing a Gecko test.
"""
@classmethod
def get_inputs(cls, kind, path, config, params, loaded_tasks):
# the kind on which this one depends
if len(config.get('kind-dependencies', [])) != 1:
raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
dep_kind = config['kind-dependencies'][0]
# get build tasks, keyed by build platform
builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
# get the test platforms for those build tasks
test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
# expand the test sets for each of those platforms
test_sets_cfg = load_yaml(path, 'test-sets.yml')
test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
# load the test descriptions
test_descriptions = load_yaml(path, 'tests.yml')
# generate all tests for all test platforms
for test_platform_name, test_platform in test_platforms.iteritems():
for test_name in test_platform['test-names']:
test = copy.deepcopy(test_descriptions[test_name])
test['build-platform'] = test_platform['build-platform']
test['test-platform'] = test_platform_name
test['build-label'] = test_platform['build-label']
test['test-name'] = test_name
if test_platform['nightly']:
test.setdefault('attributes', {})['nightly'] = True
logger.debug("Generating tasks for test {} on platform {}".format(
test_name, test['test-platform']))
yield test
@classmethod
def get_builds_by_platform(cls, dep_kind, loaded_tasks):
"""Find the build tasks on which tests will depend, keyed by
        platform/type. Returns a dictionary mapping build platform to task."""
builds_by_platform = {}
for task in loaded_tasks:
if task.kind != dep_kind:
continue
build_platform = task.attributes.get('build_platform')
build_type = task.attributes.get('build_type')
if not build_platform or not build_type:
continue
platform = "{}/{}".format(build_platform, build_type)
if platform in builds_by_platform:
raise Exception("multiple build jobs for " + platform)
builds_by_platform[platform] = task
return builds_by_platform
@classmethod
def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
"""Get the test platforms for which test tasks should be generated,
based on the available build platforms. Returns a dictionary mapping
test platform to {test-set, build-platform, build-label}."""
test_platforms = {}
for test_platform, cfg in test_platforms_cfg.iteritems():
build_platform = cfg['build-platform']
if build_platform not in builds_by_platform:
logger.warning(
"No build task with platform {}; ignoring test platform {}".format(
build_platform, test_platform))
continue
test_platforms[test_platform] = {
'nightly': builds_by_platform[build_platform].attributes.get('nightly', False),
'build-platform': build_platform,
'build-label': builds_by_platform[build_platform].label,
}
test_platforms[test_platform].update(cfg)
return test_platforms
@classmethod
def expand_tests(cls, test_sets_cfg, test_platforms):
"""Expand the test sets in `test_platforms` out to sets of test names.
Returns a dictionary like `get_test_platforms`, with an additional
`test-names` key for each test platform, containing a set of test
names."""
rv = {}
for test_platform, cfg in test_platforms.iteritems():
test_sets = cfg['test-sets']
            if not set(test_sets) <= set(test_sets_cfg):
raise Exception(
"Test sets {} for test platform {} are not defined".format(
', '.join(test_sets), test_platform))
test_names = set()
for test_set in test_sets:
test_names.update(test_sets_cfg[test_set])
rv[test_platform] = cfg.copy()
rv[test_platform]['test-names'] = test_names
return rv
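    # Sketch: with test_sets_cfg = {'common': ['mochitest', 'reftest']}
    # (hypothetical) and a platform whose test-sets is ['common'], that
    # platform's 'test-names' becomes {'mochitest', 'reftest'}.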
|
fhcrc/taxtastic
|
tests/test_taxonomy.py
|
Python
|
gpl-3.0
| 10,512 | 0 |
#!/usr/bin/env python
import os
from os import path
import logging
import shutil
from sqlalchemy import create_engine
from . import config
from .config import TestBase
import taxtastic
from taxtastic.taxonomy import Taxonomy, TaxonIntegrityError
import taxtastic.ncbi
import taxtastic.utils
log = logging
datadir = config.datadir
echo = False
dbname = config.ncbi_master_db
class TestTaxonomyBase(TestBase):
def setUp(self):
self.engine = create_engine('sqlite:///' + self.dbname, echo=echo)
self.tax = Taxonomy(self.engine)
def tearDown(self):
self.engine.dispose()
class TestAddNode(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddNode, self).setUp()
def tearDown(self):
pass
def test01(self):
self.tax.add_node(
            tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[{'tax_name': 'foo'}],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
        self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test02(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.tax.add_node(
tax_id=new_taxid,
parent_id='1279',
rank='species_group',
names=[{'tax_name': new_taxname}],
children=children,
source_name='foo'
)
lineage = self.tax.lineage(new_taxid)
self.assertTrue(lineage['tax_id'] == new_taxid)
self.assertTrue(lineage['tax_name'] == new_taxname)
for taxid in children:
lineage = self.tax.lineage(taxid)
self.assertTrue(lineage['parent_id'] == new_taxid)
def test03(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.assertRaises(
TaxonIntegrityError,
self.tax.add_node,
tax_id=new_taxid,
parent_id='1279',
rank='genus',
names=[{'tax_name': new_taxname}],
children=children,
source_name='ncbi')
def test04(self):
# existing node
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280',
parent_id='1279',
rank='species',
names=[{'tax_name': 'I already exist'}],
source_name='ncbi'
)
def test05(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},
],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test06(self):
# multiple names, none primary
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo'},
{'tax_name': 'bar'},
],
source_name='ncbi')
def test07(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},
],
source_name='ncbi',
execute=False
)
self.assertRaises(ValueError, self.tax.lineage, '1280_1')
def test08(self):
# test has_node()
self.assertTrue(self.tax.has_node('1280'))
self.assertFalse(self.tax.has_node('foo'))
class TestAddName(TestTaxonomyBase):
"""
test tax.add_node
"""
def count_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ?', (tax_id,))
return result.fetchone()[0]
def count_primary_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ? and is_primary',
(tax_id,))
return result.fetchone()[0]
def primary_name(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select tax_name from names where tax_id = ? and is_primary',
(tax_id,))
val = result.fetchone()
return val[0] if val else None
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddName, self).setUp()
def test_name01(self):
names_before = self.count_names('1280')
self.tax.add_name(tax_id='1280', tax_name='SA', source_name='ncbi')
self.assertEqual(names_before + 1, self.count_names('1280'))
def test_name02(self):
# number of primary names should remain 1
names_before = self.count_names('1280')
self.assertEqual(self.count_primary_names('1280'), 1)
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.tax.add_name(tax_id='1280', tax_name='SA2', is_primary=True,
source_name='ncbi')
self.assertEqual(names_before + 2, self.count_names('1280'))
self.assertEqual(self.count_primary_names('1280'), 1)
def test_name03(self):
# insertion of duplicate row fails
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.assertRaises(
ValueError, self.tax.add_name, tax_id='1280', tax_name='SA',
is_primary=True, source_name='ncbi')
self.assertEqual(self.primary_name('1280'), 'SA')
class TestGetSource(TestTaxonomyBase):
def setUp(self):
self.dbname = dbname
super(TestGetSource, self).setUp()
def test01(self):
self.assertRaises(ValueError, self.tax.get_source)
def test02(self):
self.assertRaises(ValueError, self.tax.get_source, 1, 'ncbi')
def test03(self):
result = self.tax.get_source(source_id=1)
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test04(self):
result = self.tax.get_source(source_name='ncbi')
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test05(self):
self.assertRaises(ValueError, self.tax.get_source, source_id=2)
class TestAddSource(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddSource, self).setUp()
def tearDown(self):
pass
def sources(self):
with self.tax.engine.connect() as con:
result = con.execute('select * from source')
return result.fetchall()
def test01(self):
self.tax.add_source('foo')
self.assertEqual(self.sources()[1], (2, 'foo', None))
def test02(self):
self.tax.add_source('ncbi')
self.assertEqual(
self.sources(),
[(1, 'ncbi', 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip')])
def test__node():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax._node(None) is None
assert tax._node('9106
|
lferr/charm
|
charm/toolbox/policytree.py
|
Python
|
lgpl-3.0
| 6,070 | 0.014333 |
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
    return BinNode(toks[0]) # create a leaf node
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
        # supported operators => OR, AND, and binary comparisons (<, >, <=, >=, ==)
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
def labelDuplicates(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
        if tree is None: return (False, None)  # keep the (bool, list) contract expected by callers
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
            if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
return (False, None)
        return (False, None)  # unreachable for well-formed trees, but keep the tuple contract
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
    tree1 = parser.parse("(1 or 2) and (2 and 3)")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
csengstock/tcpserv
|
tcpserv.py
|
Python
|
lgpl-3.0
| 5,176 | 0.000773 |
# tcpserv
#
# Copyright (c) 2015 Christian Sengstock, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
"""
Simple python socket helper library to implement
stateless tcp-servers.
Usage:
# Interface
>>> from tcpserv import listen, request
# Define server logic by a handler function:
# Gets a request string and returns a response string
>>> def my_handler(request): return "".join(reversed(request))
# Start the server
>>> listen("localhost", 55555, my_handler)
# Make requests
>>> for i in xrange(100):
>>> print request("localhost", 55555, "request %d" % i)
"""
import thread
import socket
import struct
DATA_SIZE_TYPE = "!I" # unsigned 4-byte int, network byte-order
# num of bytes; should always be 4;
# don't know if struct ensures this.
DATA_SIZE_LEN = len(struct.pack(DATA_SIZE_TYPE, 0))
if DATA_SIZE_LEN != 4:
raise ValueError(
"To work on different machines struct <!I> type should have " + \
"4 bytes. This is an implementation error!")
MAX_DATA = 2**(DATA_SIZE_LEN*8) - 1  # largest length representable in the 4-byte header
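# Wire format used by request()/listen() below (both directions): a 4-byte
# big-endian length header followed by the raw payload. A minimal sketch:
#   header = struct.pack(DATA_SIZE_TYPE, len(payload))  # e.g. "\x00\x00\x00\x05" for "hello"
#   sock.sendall(header + payload)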
def listen(host, port, handler):
"""
Listens on "host:port" for requests
and forwards traffic to the handler.
The handler return value is then send
to the client socket. A simple
echo server handler:
    >>> def my_handler(request_string): return request_string
The function blocks forever. Surround
with an appropriate signal handler
to quit the call (e.g., wait for
a KeyboardInterrupt event):
>>> try:
>>> listen("localhost", 55555, my_handler)
>>> except KeyboardInterrupt, e:
>>> pass
Args:
host<str>: Listening host
port<int>: Listening port
handler<function>:
Function 'f(request_string)->response_string'
processing the request.
"""
# Taken from
# http://code.activestate.com/recipes/578247-basic-threaded-python-tcp-server/
# Starts a new handler-thread for each request.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(1)
while 1:
clientsock, addr = sock.accept()
thread.start_new_thread(_server, (clientsock, handler))
def request(host, port, data):
"""
Sends data to server listening on "host:port" and returns
the response.
Args:
host<str>: Server host
port<int>: Server port
data<str>: Request data
Returns<str>:
The response data
"""
if type(data) != str:
raise ValueError("data must be of type <str>")
if len(data) > MAX_DATA:
raise ValueError("request data must have len <= %d", MAX_DATA)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
b4 = struct.pack(DATA_SIZE_TYPE, len(data))
sock.sendall(b4)
sock.sendall(data)
b4 = _recvn(sock, DATA_SIZE_LEN)
n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
data = _recvn(sock, n)
sock.close()
return data
def _recvn(sock, n):
    """
    Reads exactly n bytes from the socket.
    Raises EOFError if the peer closes the connection early; without this
    guard recv() would return '' forever and the loop would spin.
    """
    buf = []
    m = 0
    while m < n:
        pack = sock.recv(n-m)
        if not pack:
            raise EOFError("socket closed after %d of %d bytes" % (m, n))
        m += len(pack)
        buf.append(pack)
    return "".join(buf)
def _server(clientsock, handler):
"""
Reads the request from the client socket
and calls the handler callback to process the data.
Sends back the response (return value of the
handler callback) to the client socket.
"""
b4 = _recvn(clientsock, DATA_SIZE_LEN)
n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
req = _recvn(clientsock, n)
resp = handler(req)
if type(resp) != str:
raise ValueError("handler return value must be of type <str>")
if len(resp) > MAX_DATA:
raise ValueError("handler return value must have len <= %d", MAX_DATA)
b4 = struct.pack(DATA_SIZE_TYPE, len(resp))
clientsock.sendall(b4)
clientsock.sendall(resp)
def _test():
import time
def echo_handler(data):
return data
thread.start_new_thread(listen, ("localhost", 55555, echo_handler))
# listen("localhost", 55555, echo_handler)
time.sleep(1)
print "generating data..."
data = "1"*(2**28)
print "starting communication..."
for i in xrange(1000):
print "request", i
resp = request("localhost", 55555, data)
print "received %.02f KB" % (len(resp)/1000.0)
print "validation..."
assert len(resp) == len(data)
#for j,c in enumerate(data):
# assert(resp[j] == c)
if __name__ == "__main__":
_test()
|
binary-signal/mass-apk-installer
|
mass_apk/__main__.py
|
Python
|
bsd-3-clause
| 154 | 0 |
"""Main entry for mass apk when invoked as python module.
>>> python -m mass_apk
"""
from mass_apk import cli
if __name__ == "__main__":
    cli.main()
|
kikocorreoso/brython
|
www/src/Lib/importlib/_bootstrap.py
|
Python
|
bsd-3-clause
| 39,424 | 0.00038 |
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_bootstrap_external = None
_thread = None # Brython
import _weakref # Brython
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
# Dictionary protected by the global import lock
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
self._lock = _get_module_lock(self._name)
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
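# A minimal usage sketch of the manager above (hypothetical module name;
# assumes _imp and _thread have been injected by importlib._setup(), as
# noted at the top of this module):
#   with _ModuleLockManager('mypkg.mymod'):
#       ...  # the per-module import lock is held here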
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Acquire/release internally the global import lock to protect
_module_locks."""
_imp.acquire_lock()
try:
try:
lock = _module_locks[name]()
except KeyError:
lock = None
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(ref, name=name):
_imp.acquire_lock()
try:
# bpo-31070: Check if another thread created a new lock
# after the previous lock was destroyed
# but before the weakref callback was called.
if _module_locks.get(name) is ref:
del _module_locks[name]
finally:
_imp.release_lock()
_module_locks[name] = _weakref.ref(lock, cb)
finally:
_imp.release_lock()
return lock
def _lock_unlock_module(name):
"""Acquires then releases the module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
"""
lock = _get_module_lock(name)
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
# Typically used by loader classes as a method replacement.
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is dep
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-search-customsearch/azure/cognitiveservices/search/customsearch/models/_models_py3.py
|
Python
|
mit
| 29,187 | 0.000171 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ResponseBase(Model):
"""ResponseBase.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Identifiable
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
"""
_validation = {
'_type': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
}
_subtype_map = {
'_type': {'Identifiable': 'Identifiable'}
}
def __init__(self, **kwargs) -> None:
super(ResponseBase, self).__init__(**kwargs)
self._type = None
class Identifiable(ResponseBase):
"""Defines the identity of a resource.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Response
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
_subtype_map = {
'_type': {'Response': 'Response'}
}
def __init__(self, **kwargs) -> None:
super(Identifiable, self).__init__(**kwargs)
self.id = None
self._type = 'Identifiable'
class Response(Identifiable):
"""Defines a response. All schemas that could be returned at the root of a
response should inherit from this.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchResponse, ErrorResponse, Answer, Thing
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
}
_subtype_map = {
'_type': {'SearchResponse': 'SearchResponse', 'ErrorResponse': 'ErrorResponse', 'Answer': 'Answer', 'Thing': 'Thing'}
}
def __init__(self, **kwargs) -> None:
super(Response, self).__init__(**kwargs)
self.web_search_url = None
self._type = 'Response'
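# Note: msrest selects the concrete model from the '_type' discriminator via
# _subtype_map during deserialization; e.g. an (illustrative) payload carrying
# "_type": "Answer" is materialized as the Answer subclass below.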
class Answer(Response):
"""Answer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchResultsAnswer
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.customsearch.models.Query]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
}
_subtype_map = {
'_type': {'SearchResultsAnswer': 'SearchResultsAnswer'}
}
def __init__(self, **kwargs) -> None:
super(Answer, self).__init__(**kwargs)
self.follow_up_queries = None
self._type = 'Answer'
class Thing(Response):
"""Thing.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CreativeWork
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
}
_subtype_map = {
'_type': {'CreativeWork': 'CreativeWork'}
}
def __init__(self, **kwargs) -> None:
super(Thing, self).__init__(**kwargs)
self.name = None
self.url = None
self.description = None
self.bing_id = None
self._type = 'Thing'
class CreativeWork(Thing):
"""CreativeWork.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: WebPage
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider:
list[~azure.cognitiveservices.search.customsearch.models.Thing]
:ivar text:
:vartype text: str
"""
_validation = {
'_
|
bpeck/tumblr-display
|
src/Drawable.py
|
Python
|
apache-2.0
| 76 | 0.013158 |
class Drawable(object):
def draw(self, display_screen, dT):
pass
|
mmanhertz/elopic
|
tests/test_db.py
|
Python
|
bsd-2-clause
| 3,055 | 0.001309 |
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
from elopic.data.elopicdb import EloPicDB, EloPicDBError
from elopic.logic.elo import INITIAL_ELO_SCORE
from tests.utils import copy_all_files, delete_files_matching_pattern
class TestDatabase(unittest.TestCase):
"""Test cases for the data package"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.picdir = 'testdata/pics1'
copy_all_files(self.picdir, self.tempdir)
self._initDB()
def tearDown(self):
self.db.close()
shutil.rmtree(self.tempdir)
def _initDB(self):
self.db = EloPicDB()
self.db.load_from_disk(self.tempdir)
def _assert_db_matches_dir(self, dir):
expected = self._get_imagepaths_in_dir(dir)
result = self.db.to_list()
        self.assertEqual(len(result), len(expected), 'Number of pictures does not match.')
self.assertListEqual(
[r[0] for r in result],
expected,
'Paths do not match'
)
for r in result:
self.assertEqual(r[1], 0)
self.assertEqual(r[2], INITIAL_ELO_SCORE)
self.assertEqual(r[3], 0)
def _get_imagepaths_in_dir(self, dir):
return [os.path.join(self.tempdir, e) for e in os.listdir(dir) if e.endswith('.jpg')]
def test_load_from_disk_new_folder(self):
self._assert_db_matches_dir(self.tempdir)
def test_load_additional_files(self):
self.db.close()
# delete_files_matching_pattern(self.tempdir, r'^\d+\.jpg$')
copy_all_files('testdata/pics2', self.tempdir)
self._initDB()
self._assert_db_matches_dir(self.tempdir)
@unittest.skip('Support for deleted files is not in yet')
def test_load_deleted_files(self):
self.db.close()
delete_files_matching_pattern(self.tempdir, r'^\d+\.jpg$')
copy_all_files('testdata/pics2', self.tempdir)
self._initDB()
self._assert_db_matches_dir(self.tempdir)
def test_rating(self):
images = self._get_imagepaths_in_dir(self.tempdir)
for path in images:
self.assertEqual(INITIAL_ELO_SCORE, self.db.get_rating(path))
for idx, path in enumerate(images):
self.db.update_rating(path, idx)
for idx, path in enumerate(images):
self.assertEqual(idx, self.db.get_rating(path))
self.assertListEqual(images[:-4:-1], self.db.get_top_x_filepaths_by_rating(3))
def test_headers(self):
expected = [
'ignore',
'path',
'rating',
'seen_count',
]
result = self.db.get_headers()
result.sort()
self.assertEqual(expected, result)
def test_ignore(self):
images = self._get_imagepaths_in_dir(self.tempdir)
self.db.ignore_pictures(images[:3])
self.db.ignore_pictures(images[-1:])
self.maxDiff = None
self.assertListEqual(images[3:-1], [i['path'] for i in self.db.get_all()])
|
the-invoice/nab
|
nwaddrbook/icmp/util.py
|
Python
|
gpl-3.0
| 701 | 0 |
import os
import pwd
import grp
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
if os.getuid() != 0:
# We're not root so, like, whatever dude
return
# Get the uid/gid from the name
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
pwnam = pwd.getpwnam(sudo_user)
running_uid = pwnam.pw_uid
        running_gid = pwnam.pw_gid
else:
running_uid = pwd.getpwnam(uid_name).pw_uid
running_gid = grp.getgrnam(gid_name).gr_gid
    # Remove group privileges
os.setgroups([])
# Try setting the new uid/gid
os.setgid(running_gid)
os.setuid(running_uid)
# Ensure a very conservative umask
os.umask(0o22)
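# A minimal usage sketch (the process must start as root, e.g. under sudo,
# for the drop to have any effect; account names are the defaults above):
# if __name__ == '__main__':
#     drop_privileges()
#     # ... continue running as nobody/nogroup (or as the invoking SUDO_USER)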
|
DMRookie/RoomAI
|
roomai/doudizhu/DouDiZhuPokerAction.py
|
Python
|
mit
| 7,614 | 0.010901 |
#!/bin/python
import os
import roomai.common
import copy
#
# Rank-to-card mapping (see str_to_rank / rank_to_str below):
#   ranks 0..7  -> '3','4','5','6','7','8','9','T' (ten)
#   ranks 8..14 -> 'J','Q','K','A','2','r','R' (the two jokers)
#
class DouDiZhuActionElement:
str_to_rank = {'3':0, '4':1, '5':2, '6':3, '7':4, '8':5, '9':6, 'T':7, 'J':8, 'Q':9, 'K':10, 'A':11, '2':12, 'r':13, 'R':14, 'x':15, 'b':16}
# x means check, b means bid
rank_to_str = {0: '3', 1: '4', 2: '5', 3: '6', 4: '7', 5: '8', 6: '9', 7: 'T', 8: 'J', 9: 'Q', 10: 'K', 11: 'A', 12: '2', 13: 'r', 14: 'R', 15: 'x', 16: 'b'}
total_normal_cards = 15
class DouDiZhuPokerAction(roomai.common.AbstractAction):
"""
"""
def __init__(self):
"""
"""
pass
def __init__(self, masterCards, slaveCards):
self.__masterCards__ = [c for c in masterCards]
self.__slaveCards__ = [c for c in slaveCards]
self.__masterPoints2Count__ = None
self.__slavePoints2Count__ = None
self.__isMasterStraight__ = None
self.__maxMasterPoint__ = None
self.__minMasterPoint__ = None
        self.__pattern__ = None
self.__action2pattern__()
self.__key__ = DouDiZhuPokerAction.__master_slave_cards_to_key__(masterCards, slaveCards)
    def __get_key__(self): return self.__key__
key = property(__get_key__, doc="The key of DouDiZhu Action")
def __get_masterCards__(self): return self.__masterCards__
masterCards = property(__get_masterCards__, doc="The cards act as the master cards")
def __get_slaveCards__(self): return self.__slaveCards__
slaveCards = property(__get_slaveCards__, doc="The cards act as the slave cards")
def __get_masterPoints2Count__(self): return self.__masterPoints2Count__
masterPoints2Count = property(__get_masterPoints2Count__, doc="The count of different points in the masterCards")
def __get_slavePoints2Count__(self): return self.__slavePoints2Count__
slavePoints2Count = property(__get_slavePoints2Count__, doc="The count of different points in the slaveCards")
def __get_isMasterStraight__(self): return self.__isMasterStraight__
isMasterStraight = property(__get_isMasterStraight__, doc="The master cards are straight")
def __get_maxMasterPoint__(self): return self.__maxMasterPoint__
maxMasterPoint = property(__get_maxMasterPoint__, doc="The max point in the master cards")
def __get_minMasterPoint__(self): return self.__minMasterPoint__
minMasterPoint = property(__get_minMasterPoint__, doc="The min point in the master cards")
def __get_pattern__(self): return self.__pattern__
pattern = property(__get_pattern__, doc="The pattern of the action")
@classmethod
def lookup(cls, key):
return AllActions["".join(sorted(key))]
@classmethod
def __master_slave_cards_to_key__(cls, masterCards, slaveCards):
key_int = (masterCards + slaveCards)
key_str = []
for key in key_int:
key_str.append(DouDiZhuActionElement.rank_to_str[key])
key_str.sort()
return "".join(key_str)
def __action2pattern__(self):
self.__masterPoints2Count__ = dict()
for c in self.__masterCards__:
if c in self.__masterPoints2Count__:
self.__masterPoints2Count__[c] += 1
else:
self.__masterPoints2Count__[c] = 1
self.__slavePoints2Count__ = dict()
for c in self.__slaveCards__:
if c in self.__slavePoints2Count__:
self.__slavePoints2Count__[c] += 1
else:
self.__slavePoints2Count__[c] = 1
self.__isMasterStraight__ = 0
num = 0
for v in self.__masterPoints2Count__:
if (v + 1) in self.__masterPoints2Count__ and (v + 1) < DouDiZhuActionElement.str_to_rank["2"]:
num += 1
if num == len(self.__masterPoints2Count__) - 1 and len(self.__masterPoints2Count__) != 1:
self.__isMasterStraight__ = 1
self.__maxMasterPoint__ = -1
self.__minMasterPoint__ = 100
for c in self.__masterPoints2Count__:
if self.__maxMasterPoint__ < c:
self.__maxMasterPoint__ = c
if self.__minMasterPoint__ > c:
self.__minMasterPoint__ = c
########################
## action 2 pattern ####
########################
# is cheat?
if len(self.__masterCards__) == 1 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["x"]:
self.__pattern__ = AllPatterns["i_cheat"]
# is roblord
elif len(self.__masterCards__) == 1 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["b"]:
self.__pattern__ = AllPatterns["i_bid"]
# is twoKings
elif len(self.__masterCards__) == 2 \
and len(self.__masterPoints2Count__) == 2 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]] \
and self.__masterCards__[1] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]]:
self.__pattern__ = AllPatterns["x_rocket"]
else:
## process masterCards
masterPoints = self.__masterPoints2Count__
if len(masterPoints) > 0:
count = masterPoints[self.__masterCards__[0]]
for c in masterPoints:
if masterPoints[c] != count:
self.__pattern__ = AllPatterns["i_invalid"]
if self.__pattern__ == None:
pattern = "p_%d_%d_%d_%d_%d" % (len(self.__masterCards__), len(masterPoints), \
self.__isMasterStraight__, \
len(self.__slaveCards__), 0)
if pattern in AllPatterns:
self.__pattern__= AllPatterns[pattern]
else:
self.__pattern__ = AllPatterns["i_invalid"]
def __deepcopy__(self, memodict={}, newinstance = None):
return self.lookup(self.key)
############## read data ################
AllPatterns = dict()
AllActions = dict()
from roomai.doudizhu import doudizhu_action_data
from roomai.doudizhu import doudizhu_pattern_data
for line in doudizhu_pattern_data:
line = line.replace(" ", "").strip()
line = line.split("#")[0]
if len(line) == 0 or len(line[0].strip()) == 0:
continue
lines = line.split(",")
for i in range(1, len(lines)):
lines[i] = int(lines[i])
AllPatterns[lines[0]] = lines
for line in doudizhu_action_data:
line = line.replace(" ", "").strip()
lines = line.split("\t")
if lines[3] not in AllPatterns:
continue
m = [int(str1) for str1 in lines[1].split(",")]
s = []
if len(lines[2]) > 0:
s = [int(str1) for str1 in lines[2].split(",")]
action = DouDiZhuPokerAction(m, s)
if "b" in line:
b = 0
if action.key != lines[0] or action.pattern[0] != lines[3]:
raise ValueError("%s is wrong. The generated action has key(%s) and pattern(%s)"%(line, action.key,action.pattern[0]))
AllActions[action.key] = action
|
denismakogon/tosca-vcloud-plugin
|
vcloud_plugin_common/__init__.py
|
Python
|
apache-2.0
| 17,925 | 0.000167 |
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from functools import wraps
import yaml
import os
import requests
import time
from pyvcloud import vcloudair
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import taskType
from cloudify import ctx
from cloudify import context
from cloudify import exceptions as cfy_exc
TASK_RECHECK_TIMEOUT = 3
RELOGIN_TIMEOUT = 3
LOGIN_RETRY_NUM = 5
TASK_STATUS_SUCCESS = 'success'
TASK_STATUS_ERROR = 'error'
STATUS_COULD_NOT_BE_CREATED = -1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_DEPLOYED = 2
STATUS_SUSPENDED = 3
STATUS_POWERED_ON = 4
STATUS_POWERED_OFF = 8
STATUS_WAITING_FOR_USER_INPUT = 5
STATUS_UNKNOWN_STATE = 6
STATUS_UNRECOGNIZED_STATE = 7
STATUS_INCONSISTENT_STATE = 9
VCLOUD_STATUS_MAP = {
-1: "Could not be created",
0: "Unresolved",
1: "Resolved",
2: "Deployed",
3: "Suspended",
4: "Powered on",
5: "Waiting for user input",
6: "Unknown state",
7: "Unrecognized state",
8: "Powered off",
9: "Inconsistent state",
10: "Children do not all have the same status",
11: "Upload initiated, OVF descriptor pending",
12: "Upload initiated, copying contents",
13: "Upload initiated , disk contents pending",
14: "Upload has been quarantined",
15: "Upload quarantine period has expired"
}
SUBSCRIPTION_SERVICE_TYPE = 'subscription'
ONDEMAND_SERVICE_TYPE = 'ondemand'
PRIVATE_SERVICE_TYPE = 'vcd'
SESSION_TOKEN = 'session_token'
ORG_URL = 'org_url'
VCLOUD_CONFIG = 'vcloud_config'
def transform_resource_name(res, ctx):
"""
return name as prefix from bootstrap context + resource name
"""
if isinstance(res, basestring):
res = {'name': res}
if not isinstance(res, dict):
raise ValueError("transform_resource_name() expects either string or "
"dict as the first parameter")
pfx = ctx.bootstrap_context.resources_prefix
if not pfx:
return get_mandatory(res, 'name')
name = get_mandatory(res, 'name')
res['name'] = pfx + name
if name.startswith(pfx):
ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it "
"already has this prefix".format(name, pfx))
else:
ctx.logger.info("Transformed resource name '{0}' to '{1}'".format(
name, res['name']))
return res['name']
class Config(object):
"""
load global config
"""
VCLOUD_CONFIG_PATH_ENV_VAR = 'VCLOUD_CONFIG_PATH'
VCLOUD_CONFIG_PATH_DEFAULT = '~/vcloud_config.yaml'
def get(self):
"""
return settings from ~/vcloud_config.yaml
"""
cfg = {}
env_name = self.VCLOUD_CONFIG_PATH_ENV_VAR
default_location_tpl = self.VCLOUD_CONFIG_PATH_DEFAULT
default_location = os.path.expanduser(default_location_tpl)
config_path = os.getenv(env_name, default_location)
try:
with open(config_path) as f:
cfg = yaml.load(f.read())
if not cfg:
cfg = {}
except IOError:
pass
return cfg
class VcloudAirClient(object):
config = Config
def get(self, config=None, *args, **kw):
"""
return new vca client
"""
static_config = self.__class__.config().get()
cfg = {}
cfg.update(static_config)
if config:
cfg.update(config)
return self.connect(cfg)
def connect(self, cfg):
"""
login to instance described in settings
"""
url = cfg.get('url')
username = cfg.get('username')
password = cfg.get('password')
token = cfg.get('token')
service = cfg.get('service')
org_name = cfg.get('org')
service_type = cfg.get('service_type', SUBSCRIPTION_SERVICE_TYPE)
instance = cfg.get('instance')
org_url = cfg.get(ORG_URL, None)
api_version = cfg.get('api_version', '5.6')
        session_token = cfg.get(SESSION_TOKEN)
org_url = cfg.get(ORG_URL)
if not (all([url, token]) or all([url, username, password]) or session_token):
raise cfy_exc.NonRecoverableError(
"Login credentials must be specified.")
        if (service_type == SUBSCRIPTION_SERVICE_TYPE and not (
            service and org_name
        )):
raise cfy_exc.NonRecoverableError(
"vCloud service and vDC must be specified")
if service_type == SUBSCRIPTION_SERVICE_TYPE:
vcloud_air = self._subscription_login(
url, username, password, token, service, org_name,
session_token, org_url)
elif service_type == ONDEMAND_SERVICE_TYPE:
vcloud_air = self._ondemand_login(
url, username, password, token, instance,
session_token, org_url)
# The actual service type for private is 'vcd', but we should accept
# 'private' as well, for user friendliness of inputs
elif service_type in (PRIVATE_SERVICE_TYPE, 'private'):
vcloud_air = self._private_login(
url, username, password, token, org_name, org_url, api_version)
else:
raise cfy_exc.NonRecoverableError(
"Unrecognized service type: {0}".format(service_type))
return vcloud_air
def _subscription_login(self, url, username, password, token, service,
org_name, session_token=None, org_url=None):
"""
login to subscription service
"""
version = '5.6'
logined = False
vdc_logined = False
vca = vcloudair.VCA(
url, username, service_type=SUBSCRIPTION_SERVICE_TYPE,
version=version)
if session_token:
if session_login(vca, org_url, session_token, version):
return vca
else:
raise cfy_exc.NonRecoverableError("Invalid session credentials")
# login with token
if token:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(token=token)
if logined is False:
ctx.logger.info("Login using token failed.")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using token successful.")
break
# outdated token, try login by password
if logined is False and password:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(password)
if logined is False:
ctx.logger.info("Login using password failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using password successful.")
break
# can't login to system at all
if logined is False:
raise cfy_exc.NonRecoverableError("Invalid login credentials")
for _ in range(LOGIN_RETRY_NUM):
vdc_logined = vca.login_to_org(service, org_name)
if vdc_logined is False:
ctx.logger.info("Login to VDC failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login to VDC successful.")
break
# we can login to system,
# but have some troubles with login to organization,
# lets retry later
if vdc_
|
nglrt/virtual_energy_sensor
|
virtual_energy_sensor/loadtrain.py
|
Python
|
mit
| 3,264 | 0.016544 |
import numpy as np
import fnmatch, os
import h5py
class Hdf5Loader():
def loadDirectory(self, dirname):
"""
Loads all hdf5 files in the directory dirname
@param dirname: The directory which contains the files to load
@returns: list of h5py File objects
"""
cachelist=os.listdir(dirname)
testlist=fnmatch.filter(cachelist,'*.hdf5')
for file_ in testlist:
print("Using {0}".format(file_))
files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]
return files
def getDatasets(self, dirname, dataset_list):
"""
        Loads all hdf5 files in a given directory. It extracts all datasets
|
which are specified in :dataset_list and merges the datasets from
all files.
Finally it returns a numpy array for each dataset in the :dataset_list
@param dirname: The directory containing the hdf5 files
@param dataset_list: List of datasets to load
@returns: A list of numpy arrays loaded from the dataset files
"""
files = self.loadDirectory(dirname)
result = []
for dataset_name in dataset_list:
arr = np.concatenate([f[dataset_name] for f in files])
result.append(arr)
return result
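# A minimal usage sketch (directory and dataset names are illustrative):
# loader = Hdf5Loader()
# features, power = loader.getDatasets('traindata/', ['features', 'power'])
# print features.shape, power.shape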
class LoadData():
"""
This class extracts data from features and corresponding powervalues and returns them as array
"""
def __init__(self, sep=";", groundtruth_elements=2, skiprows=1, skipcols=1):
self.sep = sep
self.num_groundtruth_elements = groundtruth_elements
self.skiprows=1
self.skipcols = skipcols
def getFeatureCount(self, file_):
fd = open(file_, 'r')
fd.readline()
count = len(fd.readline().split(self.sep))
return count - self.num_groundtruth_elements
def getFeaturesData(self,csvname):
cols = range(self.skipcols, self.getFeatureCount(csvname))
print cols
log = np.loadtxt(csvname,delimiter=self.sep,skiprows=self.skiprows,usecols=cols)
return log
def getPowerData(self,csvname):
cols = [self.getFeatureCount(csvname)]
power = np.loadtxt(csvname,delimiter=self.sep,skiprows=self.skiprows,usecols=cols)
return power
def load_dir(self, dirname):
"""
Loads all files of a directory to a single feature and power data set
"""
cachelist=os.listdir(dirname)
testlist=fnmatch.filter(cachelist,'*.csv')
testFeatureDataLst = []
testPowerDataLst = []
"""Testdaten laden"""
for file_ in testlist:
testFeatureDataLst.append(self.getFeaturesData(os.path.join(dirname,file_)))
testPowerDataLst.append(self.getPowerData(os.path.join(dirname,file_)))
testFeatureData = np.concatenate(testFeatureDataLst)
testPowerData = np.concatenate(testPowerDataLst)
return testPowerData, testFeatureData
|
75py/Download-Confirm
|
work/generate_extentions_res.py
|
Python
|
apache-2.0
| 1,723 | 0.004643 |
from jinja2 import Environment, FileSystemLoader
data = {
"extensionInfoList": [
{"ext": "apk", "mimeTypes": ["application/vnd.android.package-archive"]}
, {"ext": "zip", "mimeTypes": []}
, {"ext": "tgz", "mimeTypes": []}
, {"ext": "gz", "mimeTypes": []}
, {"ext": "pdf", "mimeTypes": ["application/pdf"]}
]
}
xmlTemplates = [
{
"template": "template.AndroidManifest.xml",
"output": "../app/src/main/AndroidManifest.xml"
},
{
"template": "template.strings_ext.xml",
"output": "../app/src/main/res/values/strings_ext.xml"
},
{
"template": "template.pref_general.xml",
"output": "../app/src/main/res/xml/pref_general.xml"
},
]
javaTemplates = [
{
"template": "template.ConfirmActivity.java",
"output": "../app/src/main/java/com/nagopy/android/downloadconfirm/extension/{}ConfirmActivity.java"
},
{
"template": "template.HookTest.java",
"output": "../app/src/androidTest/java/com/nagopy/and
|
roid/downloadconfirm/extension/{}HookTest.java"
},
]
env = Environment(loader=FileSystemLoader('.'))
for xmlTemplate in xmlTemplates:
template = env.get_template(xmlTemplate['template'])
rendered = template.render(data)
with open(xmlTemplate['output'], 'w') as f:
f.write(rendered)
f.close()
for javaTemplate in javaTemplates:
for extInfo in data['extensionInfoList']:
        template = env.get_template(javaTemplate['template'])
rendered = template.render({'extInfo': extInfo})
with open(javaTemplate['output'].format(extInfo['ext'].capitalize()), 'w') as f:
f.write(rendered)
f.close()
|
weberwang/WeRoBot
|
travis/terryfy/travisparse.py
|
Python
|
mit
| 1,763 | 0.001134 |
""" Parse travis.yml file, partly
"""
import sys
if sys.version_info[0] > 2:
basestring = str
class TravisError(Exception):
pass
def get_yaml_entry(yaml_dict, name):
""" Get entry `name` from dict `yaml_dict`
Parameters
----------
yaml_dict : dict
dict or subdict from parsing .travis.yml file
name : str
key to analyze and return
Returns
-------
entry : None or list
If `name` not in `yaml_dict` return None. If key value is a string
return a single entry list. Otherwise return the key value.
"""
entry = yaml_dict.get(name)
if entry is None:
return None
if isinstance(entry, basestring):
return [entry]
return entry
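# e.g. get_yaml_entry({'env': 'A=1'}, 'env') -> ['A=1']  (a bare string is
# promoted to a single-entry list), and a missing key returns None.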
def get_envs(yaml_dict):
""" Get first env combination from travis yaml dict
Parameters
----------
yaml_dict : dict
dict or subdict from parsing .travis.yml file
Returns
-------
bash_str : str
bash scripting lines as string
"""
env = get_yaml_entry(yaml_dict, 'env')
if env is None:
return ''
# Bare string
if isinstance(env, basestring):
return env + '\n'
# Simple list defining matrix
if isinstance(env, (list, tuple)):
return env[0] + '\n'
    # More complex dict-like things
globals, matrix = [get_yaml_entry(env, name)
for name in ('global', 'matrix')]
if hasattr(matrix, 'keys'):
raise TravisError('Oops, envs too complicated')
lines = []
    if globals is not None:
if matrix is None:
raise TravisError('global section needs matrix section')
lines += globals
    if matrix is not None:
lines.append(matrix[0])
return '\n'.join(lines) + '\n'
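# A sketch of the expected behaviour on a hypothetical parsed .travis.yml:
# >>> get_envs({'env': {'global': ['A=1'], 'matrix': ['B=2', 'B=3']}})
# 'A=1\nB=2\n'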
|
|
zamonia500/PythonTeacherMythenmetz
|
300문제/96.py
|
Python
|
gpl-3.0
| 106 | 0 |
pet = ['dog', 'cat', 'parrot', 'squirrel', 'goldfish']
for animal in pet:
print(animal, len(animal))
|
dsweet04/rekall
|
rekall-core/rekall/plugins/common/__init__.py
|
Python
|
gpl-2.0
| 471 | 0 |
"""Plugins that are not OS-specific"""
# pylint: disable=unused-import
from rekall.plugins.common import address_resolver
from rekall.plugins.common import api
from rekall.plugins.common import bovine
from rekall.plugins.common import efilter_plugins
from rekall.plugins.common import inspection
from rekall.plugins.common import memmap
from rekall.plugins.common import profile_index
|
from rekall.plugins.common import scanners
from rekall.plugins.common import sigscan
|
mbrukman/cloud-launcher
|
apps/cloudera/director/py/rhel6.py
|
Python
|
apache-2.0
| 1,057 | 0 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
#
# Deployment for Cloudera Director using a RHEL 6 image.
#
##########################################################################
import director_base
from gce import *
GCE.setDefaults(
project='curious-lemmings-42',
zone='us-central1-a',
)
resources = director_base.DirectorServer(
sourceImage='rhel-6-latest',
startupScript='../scripts/rhel-6/init.gen.sh')
|
abulte/Flask-Bootstrap-Fanstatic
|
application/__init__.py
|
Python
|
mpl-2.0
| 730 | 0.005487 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Alexandre Bulté <alexandre[at]bulte[dot]net>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from flask import Flask, render_template
from flask_fanstatic import Fanstatic
# configuration
DEBUG = True
FANSTATIC_OPTIONS = {'bottom': True, 'minified': True}
app = Flask(__name__)
app.config.from_object(__name__)
fanstatic = Fanstatic(app)
# define your own resources this way
fanstatic.resource('js/app.js', name='app_js', bottom=True)
@app.route('/')
def index():
return render_template('index.html')
|
natasasdj/OpenWPM
|
analysis/12_images_third-domains2.py
|
Python
|
gpl-3.0
| 19,012 | 0.016148 |
import os
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
from matplotlib.ticker import FuncFormatter
def thousands(x, pos):
if x>=1e9:
return '%.1fB' % (x*1e-9)
elif x>=1e6:
return '%.1fM' % (x*1e-6)
elif x>=1e3:
return '%.1fK' % (x*1e-3)
else:
return x
formatter = FuncFormatter(thousands)
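# e.g. thousands(1500, None) -> '1.5K' and thousands(2.5e6, None) -> '2.5M';
# the FuncFormatter above is attached to axes below to shorten tick labels.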
def ecdf_for_plot(sample):
#x = np.linspace(min(sample), max(sample))
print "sample: ",type(sample)
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
# print ecdf
print "ecdf: ",type(ecdf)
y = ecdf(x)
#print y
print "y: ", type(y)
return (x,y)
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
df.columns = ['respDom_id' if x=='resp_domain' else x for x in df.columns]
query = 'SELECT * FROM Domain_DomainTwoPart'
df_domdom2 = pd.read_sql_query(query,conn)
df=df.merge(df_domdom2,left_on='site_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['site_id2' if x=='domainTwoPart_id' else x for x in df.columns]
df=df.merge(df_domdom2,left_on='respDom_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['respDom_id2' if x=='domainTwoPart_id' else x for x in df.columns]
query = 'SELECT * FROM DomainsTwoPart'
df_dom2 = pd.read_sql_query(query,conn)
df=df.merge(df_dom2, left_on = 'site_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['site_domain2' if x=='domainTwoPart' else x for x in df.columns]
df=df.merge(df_dom2, left_on = 'respDom_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['respDom_domain2' if x=='domainTwoPart' else x for x in df.columns]
query = 'SELECT * FROM Domain2Company'
df_dom2com = pd.read_sql_query(query,conn)
df=df.merge(df_dom2com,left_on='respDom_id2',right_on='domainTwoPart_id',how='left')
df.drop('domainTwoPart_id',axis=1,inplace=True)
query = 'SELECT * FROM Companies'
df_com = pd.read_sql_query(query,conn)
df=df.merge(df_com,left_on='company_id',right_on='id',how='left')
df.drop('id',axis=1,inplace=True)
#conn.close()
df1=df.loc[df['site_id2']==df['respDom_id2']]
df2=df.loc[df['site_id2']!=df['respDom_id2']]
df2.shape[0]/float(df.shape[0]) #0.6757349672921374
# how many sites and links have third-party images
sites = []
links = 0
for site_id in range(1,10001):
if site_id % 100 == 0: print site_id
df3=df2.loc[df2['site_id']==site_id]
df3_size = df3['link_id'].unique().shape[0]
links += df3_size
if df3_size: sites.append(site_id)
len(sites) #8343
# 8343/8965. = 0.9306190741773563
links #912363
912363/964315.
# distinct response domains
df['respDom_id2'].unique().size #29009
df1['respDom_id2'].unique().size #7863
df2['respDom_id2'].unique().size #23235
domains2 = df2[['respDom_id2','respDom_domain2']].groupby(['respDom_id2','respDom_domain2']).size().sort_values(ascending = False).reset_index()
domains2.to_csv('/home/nsarafij/project/OpenWPM/analysis/results/third-domains2_owners',index=False,encoding='utf-8')
# companies
############## considering third-party domains only
# all images: counts per each response domain
domains = df2['respDom_domain2'].value_counts()
total = df2.shape[0]
domains_cum = domains.cumsum()
dom_perc = domains/float(total)
dom_perc_cum = dom_perc.cumsum()
# all images: counts per each company
com = df2['company'].value_counts()
com_cum = com.cumsum()
com_perc = com/df2.shape[0]
com_perc_cum = com_perc.cumsum()
# all images - response domains
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
# cdf of number of third-party images per third-party domains
(x,y) = ecdf_for_plot(domains)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('no of third-party images per domain')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'third-domains2_cdf.png'))
plt.show()
# counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('count of images')
plt.xlim([1,domains.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.xlim([1,domains.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('domain rank')
plt.ylabel('count of all images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_perc_cum.png',format='png')
# top 30 domains - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,domains[0:n],align='center')
plt.xlabel('domains')
plt.ylabel('count of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count_top30.png',format='png')
# top 30 domains - percentages
fig = plt.figure()
plt.bar(x,dom_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of total number of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc_top30.png',format='png')
domcom = df2[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(df2.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_perc_top30.txt'),'w+')
### table domains - companies
for i in range(0,n):
dom = domcom.iloc[i,0]
comp = domcom.iloc[i,1]
perc = domcom.iloc[i,2]
s = str(i+1) + ' & ' + dom + ' & ' + comp + ' & ' + '%.2f' % perc + '\\\\ \\hline'
print s
s = s.encode('UTF-8')
print s
fhand.write(s + '\n')
fhand.close()
### companies
# counts
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
plt.xlim([1,com.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-company_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.xlim([1,com.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.grid(True)
#fig.tight_
|
lucventurini/mikado
|
Mikado/loci/excluded.py
|
Python
|
lgpl-3.0
| 3,468 | 0.003172 |
# coding: utf-8
"""
This module defines a containers that hold transcripts excluded from further consideration.
It is invoked when all transcripts in a locus have a score of 0 and the "purge"
option has been enabled.
"""
from .abstractlocus import Abstractlocus
from ..transcripts import Transcript
class Excluded(Abstractlocus):
"""This is a container of discarded transcripts. It is used only for completeness purposes -
i.e. printing out the discarded transcripts to a separate file.
"""
__name__ = "excluded_transcripts"
def __init__(self, monosublocus_instance=None, configuration=None, logger=None):
"""
Constructor method
:param monosublocus_instance:
:type monosublocus_instance: Mikado.loci_objects.monosublocus.Monosublocus
:param configuration: configuration file
:type configuration: (MikadoConfiguration|DaijinConfiguration)
:param logger: logger instance
:type logger: logging.Logger | None
"""
Abstractlocus.__init__(self, configuration=configuration)
self.splitted = False
self.metrics_calculated = False
self.logger = logger
if isinstance(monosublocus_instance, Transcript):
Abstractlocus.__init__(self, transcript_instance=monosublocus_instance)
elif isinstance(monosublocus_instance, Abstractlocus):
# Add the transcript to the Locus
self.add_monosublocus(monosublocus_instance)
def add_transcript_to_locus(self, transcript, **kwargs):
"""Override of the sublocus method, and reversal to the original
method in the Abstractlocus class.
:param transcript: a transcript to add
:type transcript: Mikado.loci_objects.transcript.Transcript
        :param kwargs: optional arguments are completely ignored by this method.
"""
# Notice that check_in_locus is always set to False.
_ = kwargs
Abstractlocus.add_transcript_to_locus(self, transcript, check_in_locus=False)
def add_monosublocus(self, monosublocus_instance):
"""Wrapper to extract the transcript from the monosubloci and pass it
to the constructor.
:param monosublocus_instance:
:type monosublocus_instance: Mikado.loci_objects.monosublocus.Monosublocus
"""
assert len(monosublocus_instance.transcripts) == 1
for tid in monosublocus_instance.transcripts:
self.add_transcript_to_locus(monosublocus_instance.transcripts[tid])
def __str__(self):
"""This special method is explicitly *not* implemented;
this Locus object is not meant for printing, only for computation!"""
message = """This is a container used for computational purposes only,
it should not be printed out directly!"""
raise NotImplementedError(message)
def filter_and_calculate_scores(self, check_requirements=True):
"""
Suppress the method from the base class
"""
raise NotImplementedError("Scores are not calculated by this class!")
def define_monosubloci(self):
"""
Suppress the method from the base class
"""
raise NotImplementedError("Monosubloci are not calculated by this class!!")
@classmethod
def is_intersecting(cls):
"""Present to fulfill the contract with Abstractlocus, but it only raises a NotImplementedError"""
raise NotImplementedError()
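# A minimal usage sketch (the `mono`, `other_mono` Monosublocus instances and
# `log` logger are hypothetical, not defined in this module):
#   excluded = Excluded(monosublocus_instance=mono, logger=log)
#   excluded.add_monosublocus(other_mono)  # pulls in its single transcript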
|
nioinnovation/safepickle
|
safepickle/types/tests/test_timedelta.py
|
Python
|
apache-2.0
| 509 | 0 |
from unittest import TestCase
from datetime import timedelta
from safepickle.types.timedelta import TimedeltaType
from safepickle.encoding import encode, decode
class TestTimedelta(TestCase):
def test_timedelta(self):
""" Asserts timedelta type is handled as expected
"""
        obj = timedelta(days=1, seconds=2, microseconds=3)
type_ = TimedeltaType()
encoding = type_.encode(obj, encode)
decoding = decode(encoding)
self.assertEqual(obj, decoding)
|
PytLab/catplot
|
tests/edge_3d_test.py
|
Python
|
mit
| 2,499 | 0.001601 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Edge3D.
"""
import unittest
from catplot.grid_components.nodes import Node2D, Node3D
from catplot.grid_components.edges import Edge2D, Edge3D
class Edge3DTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct Grid2DNode correctly.
"""
node1 = Node3D([1.0, 1.0, 1.0], color="#595959", width=1)
node2 = Node3D([0.5, 0.5, 0.5], color="#595959", width=1)
edge = Edge3D(node1, node2, n=10)
ref_x = [1.0,
0.9545454545454546,
0.9090909090909091,
0.8636363636363636,
0.8181818181818181,
0.7727272727272727,
0.7272727272727273,
0.6818181818181819,
0.6363636363636364,
0.5909090909090908,
0.5454545454545454,
0.5]
self.assertListEqual(edge.x.tolist(), ref_x)
self.assertListEqual(edge.y.tolist(), ref_x)
self.assertListEqual(edge.z.tolist(), ref_x)
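        # The reference values above are an even subdivision of the segment
        # from 1.0 to 0.5: with n=10 interior points the edge apparently
        # samples n + 2 = 12 coordinates, reproducible (as a sketch) with
        # numpy.linspace(1.0, 0.5, 12).tolist().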
def test_construction_from2d(self):
""" Make sure we can construc
|
t 3D edge from a 2D edge.
"""
node1 = Node2D([1.0, 1.0])
node2 = Node2D([1.0, 2.0])
edge2d = Edge2D(node1, node2)
edge3d = Edge3D.from2d(edge2d)
self.assertTrue(isinstance(edge3d, Edge3D))
def test_move(self):
""" Test the edge can be moved correctly.
"""
        node1 = Node3D([1.0, 1.0, 1.0], color="#595959", width=1)
node2 = Node3D([0.5, 0.5, 0.5], color="#595959", width=1)
edge = Edge3D(node1, node2, n=10)
edge.move([0.5, 0.5, 0.5])
ref_x = [1.5,
1.4545454545454546,
1.4090909090909092,
1.3636363636363638,
1.3181818181818181,
1.2727272727272727,
1.2272727272727273,
1.1818181818181819,
1.1363636363636362,
1.0909090909090908,
1.0454545454545454,
1.0]
self.assertListEqual(edge.x.tolist(), ref_x)
self.assertListEqual(edge.y.tolist(), ref_x)
self.assertListEqual(edge.z.tolist(), ref_x)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Edge3DTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
mvaled/sentry
|
tests/sentry/similarity/backends/base.py
|
Python
|
bsd-3-clause
| 8,478 | 0.003185 |
from __future__ import absolute_import
import abc
class MinHashIndexBackendTestMixin(object):
__meta__ = abc.ABCMeta
@abc.abstractproperty
def index(self):
pass
def test_basic(self):
self.index.record("example", "1", [("index", "hello world")])
self.index.record("example", "2", [("index", "hello world")])
self.index.record("example", "3", [("index", "jello world")])
self.index.record("example", "4", [("index", "yellow world"), ("index", "mellow world")])
self.index.record("example", "5", [("index", "pizza world")])
# comparison, without thresholding
results = self.index.compare("example", "1", [("index", 0)])
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
assert results[4][0] == "5"
# comparison, low threshold
results = self.index.compare("example", "1", [("index", 6)])
assert len(results) == 4
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
# comparison, high threshold (exact match)
results = self.index.compare("example", "1", [("index", self.index.bands)])
assert len(results) == 2
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
# comparison, candidate limit (with lexicographical collision sort)
results = self.index.compare("example", "1", [("index", 0)], limit=1)
assert len(results) == 1
assert results[0] == ("1", [1.0])
# classification, without thresholding
        results = self.index.classify("example", [("index", 0, "hello world")])
        assert results[0:2] == [("1", [1.0]), ("2", [1.0])]
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
assert results[4][0] == "5"
# classification, low threshold
results = self.index.classify("example", [("index", 6, "hello world")])
assert len(results) == 4
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
# classification, high threshold (exact match)
results = self.index.classify("example", [("index", self.index.bands, "hello world")])
assert len(results) == 2
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
# classification, candidate limit (with lexicographical collision sort)
results = self.index.classify("example", [("index", 0, "hello world")], limit=1)
assert len(results) == 1
assert results[0] == ("1", [1.0])
self.index.delete("example", [("index", "3")])
assert [key for key, _ in self.index.compare("example", "1", [("index", 0)])] == [
"1",
"2",
"4",
"5",
]
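    # A self-contained sketch of the banded MinHash scheme these thresholds
    # exercise (an illustrative assumption -- not this backend's implementation):
    #
    #   def minhash_signature(tokens, seeds):
    #       # one min-hash value per seeded hash function
    #       return [min(hash((seed, t)) for t in tokens) for seed in seeds]
    #
    #   def band_keys(signature, bands):
    #       width = len(signature) // bands
    #       return {(i, tuple(signature[i * width:(i + 1) * width]))
    #               for i in range(bands)}
    #
    #   sig_a = minhash_signature("hello world".split(), range(32))
    #   sig_b = minhash_signature("jello world".split(), range(32))
    #   # a pair is a candidate match when it collides in >= threshold bands
    #   shared_bands = band_keys(sig_a, 16) & band_keys(sig_b, 16)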
def test_multiple_index(self):
self.index.record("example", "1", [("index:a", "hello world"), ("index:b", "hello world")])
self.index.record("example", "2", [("index:a", "hello world"), ("index:b", "hello world")])
self.index.record("example", "3", [("index:a", "hello world"), ("index:b", "pizza world")])
self.index.record("example", "4", [("index:a", "hello world")])
self.index.record("example", "5", [("index:b", "hello world")])
# comparison, without thresholding
results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)])
assert len(results) == 5
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
assert results[4] == ("5", [0.0, 1.0])
# comparison, candidate limit (with lexicographical collision sort)
results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)], limit=4)
assert len(results) == 4
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
# classification, without thresholding
results = self.index.classify(
"example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")]
)
assert len(results) == 5
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
assert results[4] == ("5", [0.0, 1.0])
# classification, with thresholding (low)
results = self.index.classify(
"example",
[
("index:a", self.index.bands, "pizza world"), # no direct hits
("index:b", 8, "pizza world"), # one direct hit
],
)
assert len(results) == 1
assert results[0][0] == "3"
        # this should have a value since it's similar even though it was not
# considered as a candidate for this index
assert results[0][1][0] > 0
assert results[0][1][1] == 1.0
# classification, with thresholding (high)
results = self.index.classify(
"example",
[
("index:a", self.index.bands, "pizza world"), # no direct hits
("index:b", self.index.bands, "hello world"), # 3 direct hits
],
)
assert len(results) == 3
assert results[0][0] == "1" # tie btw first 2 items is broken by lex sort
assert results[0][1][0] > 0
assert results[0][1][1] == 1.0
assert results[1][0] == "2"
assert results[1][1][0] > 0
assert results[1][1][1] == 1.0
assert results[2] == ("5", [0.0, 1.0])
# classification, candidate limit (with lexicographical collision sort)
results = self.index.classify(
"example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")], limit=4
)
assert len(results) == 4
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
# empty query
assert (
self.index.classify("example", [("index:a", 0, "hello world"), ("index:b", 0, "")])
== self.index.compare("example", "4", [("index:a", 0), ("index:b", 0)])
== [("4", [1.0, None]), ("1", [1.0, 0.0]), ("2", [1.0, 0.0]), ("3", [1.0, 0.0])]
)
def test_merge(self):
self.index.record("example", "1", [("index", ["foo", "bar"])])
self.index.record("example", "2", [("index", ["baz"])])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
self.index.merge("example", "1", [("index", "2")])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [0.5])]
# merge into an empty key should act as a move
self.index.merge("example", "2", [("index", "1")])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("2", [0.5])]
def test_flush_scoped(self):
self.index.record("example", "1", [("index", ["foo", "bar"])])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
self.index.flush("example", ["index"])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []
def test_flush_unscoped(self):
self.index.record("example", "1", [("index", ["foo", "bar"]
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/astropy/utils/compat/fractions.py
|
Python
|
mit
| 568 | 0 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles backports of the standard library's `fractions.py`.
The fractions module in 2.6 does not handle being instantiated using a
float and then calculating an approximate fraction based on that.
This functionality is required by the FITS unit format generator,
since the FITS unit format handles only rational, not decimal point,
powers.
"""
from __future__ import absolute_import
import sys
if sys.version_info[:2] == (2, 6):
from ._fractions_py2 import *
else:
from fractions import *
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/cli_options.py
|
Python
|
gpl-3.0
| 2,217 | 0.000903 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 The Spyder development team
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
import optparse
def get_options():
"""
Convert options into commands
return commands, message
"""
parser = optparse.OptionParser(usage="spyder [options] files")
parser.add_option('-l', '--light', action='store_true', default=False,
help="Light version (all add-ons are disabled)")
parser.add_option('--new-instance', action='store_true', default=False,
                      help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)")
parser.add_option('--session', dest="startup_session", default='',
help="Startup session")
parser.add_option('--defaults', dest="reset_to_defaults",
action='store_true', default=False,
help="Reset configuration settings to defaults")
parser.add_option('--reset', dest="reset_session",
action='store_true', default=False,
help="Remove all configuration files!")
parser.add_option('--optimize', action='store_true', default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)")
parser.add_option('-w', '--workdir', dest="working_directory", default=None,
help="Default working directory")
parser.add_option('--show-console', action='store_true', default=False,
help="Do not hide parent console window (Windows)")
parser.add_option('--multithread', dest="multithreaded",
action='store_true', default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)")
parser.add_option('--profile', action='store_true', default=False,
help="Profile mode (internal test, "
"not related with Python profiling)")
options, args = parser.parse_args()
return options, args
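# An illustrative call (hypothetical, not part of this module):
#   options, args = get_options()
#   if options.light:
#       ...  # start Spyder with all add-ons disabled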
|
praveen-pal/edx-platform
|
common/lib/xmodule/xmodule/util/date_utils.py
|
Python
|
agpl-3.0
| 1,275 | 0.000784 |
"""
Convenience methods for working with datetime objects
"""
from datetime import timedelta
from django.utils.translation import ugettext as _
def get_default_time_display(dt, show_timezone=True):
"""
    Converts a datetime to a string representation. This is the default
representation used in Studio and LMS.
It is of the form "Apr 09, 2013 at 16:00" or "Apr 09, 2013 at 16:00 UTC",
depending on the value of show_timezone.
If None is passed in for dt, an empty string will be returned.
The default value of show_timezone is True.
"""
if dt is None:
return u""
timezone = u""
if show_timezone:
if dt.tzinfo is not None:
try:
timezone = u" " + dt.tzinfo.tzname(dt)
except NotImplementedError:
timezone = dt.strftime('%z')
else:
timezone = u" UTC"
return unicode(dt.strftime(u"%b %d, %Y {at} %H:%M{tz}")).format(
at=_(u"at"), tz=timezone).strip()
def almost_same_datetime(dt1, dt2, allowed_delta=timedelta(minutes=1)):
"""
    Returns true if these are within a minute of each other (in case seconds
    saved to the db or timezone aren't the same).
:param dt1:
:param dt2:
"""
return abs(dt1 - dt2) < allowed_delta
|
srluge/SickRage
|
sickbeard/clients/download_station_client.py
|
Python
|
gpl-3.0
| 2,731 | 0.001465 |
# coding=utf-8
# Authors:
# Pedro Jose Pereira Vieito <pvieito@gmail.com> (Twitter: @pvieito)
#
# URL: https://github.com/mr-orange/Sick-Beard
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
# Uses the Synology Download Station API: http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf
import sickbeard
from sickbeard.clients.generic import GenericClient
class DownloadStationAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(DownloadStationAPI, self).__init__('DownloadStation', host, username, password)
self.url = self.host + 'webapi/DownloadStation/task.cgi'
def _get_auth(self):
auth_url = self.host + 'webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=' + self.username + '&passwd=' + self.password + '&session=DownloadStation&format=sid'
try:
self.response = self.session.get(auth_url, verify=False)
self.auth = self.response.json()['data']['sid']
except Exception:
return None
return self.auth
def _add_torrent_uri(self, result):
data = {
'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
'session': 'DownloadStation',
'_sid': self.auth,
'uri': result.url
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
        self._request(method='post', data=data)
return self.response.json()['success']
def _add_torrent_file(self, result):
data = {
'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
            'session': 'DownloadStation',
'_sid': self.auth
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
files = {'file': (result.name + '.torrent', result.content)}
self._request(method='post', data=data, files=files)
return self.response.json()['success']
api = DownloadStationAPI()
|
krkeegan/lib-py-insteon
|
insteon/rest_server.py
|
Python
|
gpl-2.0
| 1,792 | 0.001116 |
import threading
import pprint
import json
from bottle import route, run, Bottle
class Rest_Server(Bottle):
'''The REST front end'''
def __init__(self, core):
super(Rest_Server, self).__init__()
self._core = core
self.route('/plms', callback=self.list_plms)
def start(self):
threading.Thread(target=self.run, kwargs=dict(
host='localhost', port=8080, debug=True)).start()
def list_plms(self):
'''
        Returns an object containing all of the plms.
**Example request**:
.. sourcecode:: http
GET /plms HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"20F5F5": {
"dev_cat": 3,
"firmware": 155,
"port": "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A501LCKJ-if00-port0",
"port_active": false,
"sub_cat": 21
},
"3C4DB9": {
"dev_cat": 3,
"firmware": 158,
"port": "/dev/serial/by-i
|
d/usb-FTDI_FT232R_USB_UART_A403KDV3-if00-port0",
"port_active": true,
"sub_cat": 21
}
}
        :statuscode 200: no error
'''
plms = self._core.get_all_plms()
ret = {}
for plm in plms:
ret[plm.dev_addr_str] = {
'dev_cat': plm.dev_cat,
'sub_cat': plm.sub_cat,
'firmware': plm.firmware,
'port': plm.port,
'port_active': plm.port_active
}
return self.jsonify(ret)
def jsonify(self, data):
return json.dumps(data, indent=4, sort_keys=True)
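# Hypothetical wiring (assumes a `core` object exposing get_all_plms()):
#   server = Rest_Server(core)
#   server.start()  # serves GET /plms on localhost:8080 in a background thread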
|
codex-bot/github
|
github/data_types/commit.py
|
Python
|
mit
| 1,228 | 0.001629 |
from data_types.user import User
class Commit:
"""
Commit object
https://developer.github.com/v3/repos/commits/
Attributes:
url: Commit URL in repo
author: Commit author
committer: Commit sender
        message: Commit message
tree: Example {
"url": "https://api.github.com/repos/octocat/Hello-World/tree/6dcb09b5b57875f334f61aebed695e2e4193db5e",
"sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e"
},
comment_count: Number of comments
added: List of added files
removed: List of removed files
modified: List of modified files
"""
def __init__(self, data):
self.url = data.get('url', '')
self.author = None
if 'author' in data:
            self.author = User(data['author'])
self.committer = None
if 'committer' in data:
            self.committer = User(data['committer'])
self.message = data.get('message', '')
self.tree = data.get('tree', None)
self.comment_count = data.get('comment_count', 0)
self.added = data.get('added', [])
self.removed = data.get('removed', [])
self.modified = data.get('modified', [])
|
kennethreitz/pipenv
|
pipenv/vendor/tomlkit/container.py
|
Python
|
mit
| 24,835 | 0.000322 |
from __future__ import unicode_literals
import copy
from ._compat import decode
from ._utils import merge_dicts
from .exceptions import KeyAlreadyPresent
from .exceptions import NonExistentKey
from .exceptions import ParseError
from .exceptions import TOMLKitError
from .items import AoT
from .items import Comment
from .items import Item
from .items import Key
from .items import Null
from .items import Table
from .items import Whitespace
from .items import item as _item
_NOT_SET = object()
class Container(dict):
"""
A container for items within a TOMLDocument.
"""
def __init__(self, parsed=False): # type: (bool) -> None
self._map = {} # type: Dict[Key, int]
self._body = [] # type: List[Tuple[Optional[Key], Item]]
self._parsed = parsed
self._table_keys = []
@property
def body(self): # type: () -> List[Tuple[Optional[Key], Item]]
return self._body
@property
def value(self): # type: () -> Dict[Any, Any]
d = {}
for k, v in self._body:
if k is None:
continue
k = k.key
v = v.value
if isinstance(v, Container):
v = v.value
if k in d:
merge_dicts(d[k], v)
else:
d[k] = v
return d
def parsing(self, parsing): # type: (bool) -> None
self._parsed = parsing
for k, v in self._body:
if isinstance(v, Table):
v.value.parsing(parsing)
elif isinstance(v, AoT):
for t in v.body:
t.value.parsing(parsing)
def add(
self, key, item=None
): # type: (Union[Key, Item, str], Optional[Item]) -> Container
"""
Adds an item to the current Container.
"""
if item is None:
if not isinstance(key, (Comment, Whitespace)):
raise ValueError(
"Non comment/whitespace items must have an associated key"
)
key, item = None, key
return self.append(key, item)
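    # An illustrative use of ``add`` (keys and values are hypothetical):
    #   c = Container()
    #   c.add("title", "TOML Example")  # plain values are wrapped via _item()
    #   c.add(Whitespace("\n"))         # comments/whitespace need no key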
def append(self, key, item): # type: (Union[Key, str, None], Item) -> Container
if not isinstance(key, Key) and key is not None:
key = Key(key)
if not isinstance(item, Item):
item = _item(item)
if isinstance(item, (AoT, Table)) and item.name is None:
item.name = key.key
if (
isinstance(item, Table)
and self._body
and not self._parsed
and not item.trivia.indent
):
item.trivia.indent = "\n"
if isinstance(item, AoT) and self._body and not self._parsed:
if item and "\n" not in item[0].trivia.indent:
item[0].trivia.indent = "\n" + item[0].trivia.indent
else:
self.append(None, Whitespace("\n"))
if key is not None and key in self:
current_idx = self._map[key]
if isinstance(current_idx, tuple):
current_body_element = self._body[current_idx[-1]]
else:
current_body_element = self._body[current_idx]
current = current_body_element[1]
if isinstance(item, Table):
if not isinstance(current, (Table, AoT)):
raise KeyAlreadyPresent(key)
if item.is_aot_element():
# New AoT element found later on
# Adding it to the current AoT
if not isinstance(current, AoT):
current = AoT([current, item], parsed=self._parsed)
self._replace(key, key, current)
else:
current.append(item)
return self
elif current.is_aot():
if not item.is_aot_element():
# Tried to define a table after an AoT with the same name.
raise KeyAlreadyPresent(key)
current.append(item)
return self
elif current.is_super_table():
if item.is_super_table():
# We need to merge both super tables
if (
self._table_keys[-1] != current_body_element[0]
or key.is_dotted()
or current_body_element[0].is_dotted()
):
if not isinstance(current_idx, tuple):
current_idx = (current_idx,)
self._map[key] = current_idx + (len(self._body),)
self._body.append((key, item))
self._table_keys.append(key)
# Building a temporary proxy to check for errors
OutOfOrderTableProxy(self, self._map[key])
return self
for k, v in item.value.body:
current.append(k, v)
return self
elif current_body_element[0].is_dotted():
raise TOMLKitError("Redefinition of an existing table")
elif not item.is_super_table():
raise KeyAlreadyPresent(key)
elif isinstance(item, AoT):
if not isinstance(current, AoT):
# Tried to define an AoT after a table with the same name.
raise KeyAlreadyPresent(key)
for table in item.body:
current.append(table)
return self
else:
raise KeyAlreadyPresent(key)
is_table = isinstance(item, (Table, AoT))
if key is not None and self._body and not self._parsed:
# If there is already at least one table in the current container
# and the given item is not a table, we need to find the last
# item that is not a table and insert after it
# If no such item exists, insert at the top of the table
key_after = None
idx = 0
for k, v in self._body:
if isinstance(v, Null):
# This happens only after deletion
continue
if isinstance(v, Whitespace) and not v.is_fixed():
continue
if not is_table and isinstance(v, (Table, AoT)):
break
key_after = k or idx
idx += 1
if key_after is not None:
if isinstance(key_after, int):
if key_after + 1 < len(self._body) - 1:
return self._insert_at(key_after + 1, key, item)
else:
previous_item = self._body[-1][1]
if (
not isinstance(previous_item, Whitespace)
and not is_table
and "\n" not in previous_item.trivia.trail
):
previous_item.trivia.trail += "\n"
else:
return self._insert_after(key_after, key, item)
else:
return self._insert_at(0, key, item)
if key in self._map:
current_idx = self._map[key]
if isinstance(current_idx, tuple):
current_idx = current_idx[-1]
current = self._body[current_idx][1]
if key is not None and not isinstance(current, Table):
raise KeyAlreadyPresent(key)
# Adding sub tables to a currently existing table
if not isinstance(current_idx, tuple):
current_idx = (current_idx,)
self._map[key] = current_idx + (len(self._body),)
else:
self._map[key] = len(self._body)
self._body.append((key, item))
if item.is_table():
self._table_keys.append(key)
if k
|
diegocepedaw/oncall
|
src/oncall/user_sync/ldap_sync.py
|
Python
|
bsd-2-clause
| 17,821 | 0.002188 |
from gevent import monkey, sleep, spawn
monkey.patch_all() # NOQA
import sys
import time
import yaml
import logging
import ldap
from oncall import metrics
from ldap.controls import SimplePagedResultsControl
from datetime import datetime
from pytz import timezone
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from phonenumbers import format_number, parse, PhoneNumberFormat
from phonenumbers.phonenumberutil import NumberParseException
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(ch)
stats = {
'ldap_found': 0,
'sql_errors': 0,
'users_added': 0,
'users_failed_to_add': 0,
'users_failed_to_update': 0,
'users_purged': 0,
'user_contacts_updated': 0,
'user_names_updated': 0,
'user_photos_updated': 0,
'users_reactivated': 0,
'users_failed_to_reactivate': 0,
}
LDAP_SETTINGS = {}
def normalize_phone_number(num):
return format_number(parse(num, 'US'), PhoneNumberFormat.INTERNATIONAL)
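# Illustrative (the number is made up): normalize_phone_number('4155551234')
# returns '+1 415-555-1234' under phonenumbers' INTERNATIONAL formatting.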
def get_predefined_users(config):
users = {}
try:
config_users = config['sync_script']['preset_users']
except KeyError:
return {}
for user in config_users:
users[user['name']] = user
for key in ['sms', 'call']:
try:
users[user['name']][key] = normalize_phone_number(users[user['name']][key])
except (NumberParseException, KeyError, AttributeError):
users[user['name']][key] = None
return users
def timestamp_to_human_str(timestamp, tz):
dt = datetime.fromtimestamp(timestamp, timezone(tz))
return ' '.join([dt.strftime('%Y-%m-%d %H:%M:%S'), tz])
def prune_user(engine, username):
global stats
stats['users_purged'] += 1
try:
engine.execute('DELETE FROM `user` WHERE `name` = %s', username)
logger.info('Deleted inactive user %s', username)
# The user has messages or some other user data which should be preserved. Just mark as inactive.
except IntegrityError:
logger.info('Marking user %s inactive', username)
engine.execute('UPDATE `user` SET `active` = FALSE WHERE `name` = %s', username)
except SQLAlchemyError as e:
logger.error('Deleting user %s failed: %s', username, e)
stats['sql_errors'] += 1
try:
engine.execute('DELETE FROM `ical_key` WHERE `requester` = %s', username)
logger.info('Invalidated ical_key of inactive user %s', username)
except Exception as e:
logger.error('Invalidating ical_key of inactive user %s failed: %s', username, e)
stats['sql_errors'] += 1
def fetch_ldap():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
l = ldap.initialize(LDAP_SETTINGS['url'])
if 'cert_path' in LDAP_SETTINGS:
l.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_SETTINGS['cert_path'])
l.simple_bind_s(LDAP_SETTINGS['user'], LDAP_SETTINGS['password'])
req_ctrl = SimplePagedResultsControl(True, size=1000, cookie='')
known_ldap_resp_ctrls = {
SimplePagedResultsControl.controlType: SimplePagedResultsControl,
}
base = LDAP_SETTINGS['base']
attrs = ['distinguishedName'] + list(LDAP_SETTINGS['attrs'].values())
query = LDAP_SETTINGS['query']
users = {}
dn_map = {}
while True:
msgid = l.search_ext(base, ldap.SCOPE_SUBTREE, query, attrs, serverctrls=[req_ctrl])
rtype, rdata, rmsgid, serverctrls = l.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls)
logger.info('Loaded %d entries from ldap.' % len(rdata))
for dn, ldap_dict in rdata:
if LDAP_SETTINGS['attrs']['mail'] not in ldap_dict:
logger.error('ERROR: invalid ldap entry for dn: %s' % dn)
continue
try:
username_field = LDAP_SETTINGS['attrs']['username']
except KeyError:
username_field = "sAMAccountName"
username = ldap_dict[username_field][0]
if isinstance(username, bytes):
username = username.decode("utf-8")
name = ldap_dict.get(LDAP_SETTINGS['attrs']['full_name'])[0]
if isinstance(name, bytes):
name = name.decode("utf-8")
mobile = ldap_dict.get(LDAP_SETTINGS['attrs']['mobile'])
mail = ldap_dict.get(LDAP_SETTINGS['attrs']['mail'])
if mobile:
try:
mobile = mobile[0]
if isinstance(mobile, bytes):
mobile = mobile.decode("utf-8")
mobile = normalize_phone_number(mobile)
except NumberParseException:
mobile = None
except UnicodeEncodeError:
mobile = None
if mail:
mail = mail[0]
if isinstance(mail, bytes):
mail = mail.decode("utf-8")
slack = mail.split('@')[0]
else:
slack = None
contacts = {'call': mobile, 'sms': mobile, 'email': mail, 'slack': slack, 'name': name}
dn_map[dn] = username
users[username] = contacts
pctrls = [
c for c in serverctrls if c.controlType == SimplePagedResultsControl.controlType
]
cookie = pctrls[0].cookie
if not cookie:
break
req_ctrl.cookie = cookie
return users
def user_exists(username, engine):
return engine.execute('SELECT `id` FROM user WHERE name = %s', username)
def import_user(username, ldap_contacts, engine):
logger.debug('Inserting %s' % username)
full_name = ldap_contacts.pop('full_name')
user_add_sql = 'INSERT INTO `user` (`name`, `full_name`, `photo_url`) VALUES (%s, %s, %s)'
# get objects needed for insertion
modes = get_modes(engine)
try:
photo_url_tpl = LDAP_SETTINGS.get('image_url')
photo_url = photo_url_tpl % username if photo_url_tpl else None
engine.execute(user_add_sql, (username, full_name, photo_url))
engine.execute("SELECT `id` FROM user WHERE name = %s", username)
row = engine.fetchone()
user_id = row['id']
except SQLAlchemyError:
stats['users_failed_to_add'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to add user %s' % username)
return
stats['users_added'] += 1
for key, value in ldap_contacts.items():
if value and key in modes:
logger.debug('\t%s -> %s' % (key, value))
user_contact_add_sql = 'INSERT INTO `user_contact` (`user_id`, `mode_id`, `destination`) VALUES (%s, %s, %s)'
engine.execute(user_contact_add_sql, (user_id, modes[key], value))
def get_modes(engine):
engine.execute('SELECT `name`, `id` FROM `contact_mode`')
modes = {}
for row in engine.fetchall():
modes[row['name']] = row['id']
return modes
def update_user(username, ldap_contacts, engine):
oncall_user = get_oncall_user(username, engine)
db_contacts = oncall_user[username]
full_name = ldap_contacts.pop('full_name')
contact_update_sql = 'UPDATE user_contact SET destination = %s WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
contact_insert_sql = 'INSERT INTO user_contact (user_id, mode_id, destination) VALUES ((SELECT id FROM user WHERE name = %s), %s, %s)'
contact_delete_sql = 'DELETE FROM user_contact WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
name_update_sql = 'UPDATE user SET full_name = %s WHERE name = %s'
photo_update_sql = 'UPDATE user SET photo_url = %s WHERE name = %s'
modes = get_modes(engine)
try:
if full_name != db_contacts.get('full_name'):
engine.execute(name_update_sql, (full_name, username))
stats['user_names_updated'] += 1
if 'image_url' in LDAP_SETTINGS and not db_contacts.get('photo_
|
andela/codango
|
codango/resources/migrations/0001_initial.py
|
Python
|
mit
| 1,494 | 0.002677 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import cloudinary.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(null=True)),
('language_tags', models.CharField(default=b'Untagged', max_length=30, choices=[(b'PYTHON', b'Python'), (b'RUBY', b'Ruby'), (b'ANDROID', b'Android'), (b'MARKUP', b'HTML/CSS'), (b'JAVA', b'Java'), (b'PHP', b'PHP'), (b'IOS', b'IOS'), (b'JAVASCRIPT', b'Javascript'), (b'C', b'C')])),
('resource_file', cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name=b'resource_file', blank=True)),
                ('resource_file_name', models.CharField(max_length=100, null=True)),
('resource_file_size', models.IntegerField(default=0)),
('snippet_text', models.TextField(null=True, blank=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
nparley/mylatitude
|
lib/attr/_make.py
|
Python
|
mit
| 49,291 | 0.00002 |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
import warnings
from operator import itemgetter
from . import _config
from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata=None, type=None, converter=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
        If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
        If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
correct behavior according the Python spec. Setting this value to
anything else than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attributed is unconditionally initialized
with the specified default value or factory.
:param callable converter: :func:`callable` that is called by
``attrs``-generated ``__init__`` methods to converter attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
)
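# A minimal sketch of the decorator notation the docstring refers to (the
# Point class is hypothetical; attr.ib/attr.s are the public aliases):
#   import attr
#   @attr.s
#   class Point(object):
#       x = attr.ib(default=0)
#       y = attr.ib(default=0)
#   Point()  # -> Point(x=0, y=0)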
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited from super classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The implementation is gross but importing `typing` is slow and there are
    discussions to remove it from the stdlib altogether.
"""
return str(annot).startswith("typing.ClassVar")
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_lis
|
unioslo/cerebrum
|
testsuite/docker/test-config/cereconf_local.py
|
Python
|
gpl-2.0
| 1,350 | 0.000741 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
from Cerebrum.default_config import *
CEREBRUM_DATABASE_NAME = os.getenv('DB_NAME')
CEREBRUM_DATABASE_CONNECT_DATA['user'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['table_owner'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['host'] = os.getenv('DB_HOST')
CEREBRUM_DDL_DIR = '/src/design'
DB_AUTH_DIR = '/db-auth'
LOGGING_CONFIGFILE = os.path.join(os.getenv('TEST_CONFIG_DIR'),
'logging.ini')
|
marcoconstancio/yanta
|
plugins/viewers/html_editor/html_editor.py
|
Python
|
gpl-2.0
| 16,041 | 0.004239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import string
import webbrowser
import shutil
import json
import base64
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import QFile
from PyQt5.QtCore import QUrl
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QInputDialog, QFileDialog
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
# from bs4 import BeautifulSoup
from libs.python.pyquery import PyQuery as pq
from lxml import etree
import urllib
import time
from PyQt5.QtWidgets import QProgressDialog
from PyQt5.QtWidgets import QApplication
#from PIL import Image
import requests
from io import BytesIO
class html_editor(QWebView):
def __init__(self, parent=None, html=None, css_file=None):
#def __init__(self, html=None, style_filename=None):
super(html_editor, self).__init__(parent)
# http://stackoverflow.com/questions/21357157/is-there-any-solution-for-the-qtwebkit-memory-leak
# https://github.com/lycying/seeking
#self.page().setContentEditable(True)
#self.execute_js('document.designMode = "on"')
self.file_dialog_dir = '.'
# TO CHECK
# http://nullege.com/codes/show/src%40c%40a%40calibre-HEAD%40src%40calibre%40gui2%40viewer%40documentview.py/89/PyQt4.QtWebKit.QWebPage.setLinkDelegationPolicy/python
settings = self.settings()
# settings.setMaximumPagesInCache(0)
# settings.setObjectCacheCapacities(0, 0, 0)
# settings.setOfflineStorageDefaultQuota(0)
# settings.setOfflineWebApplicationCacheQuota(0)
# Security
settings.setAttribute(QWebSettings.JavaEnabled, False)
#settings.setAttribute(QWebSettings.PluginsEnabled, False)
#settings.setAttribute(QWebSettings.JavascriptCanOpenWindows, False)
#settings.setAttribute(QWebSettings.JavascriptCanAccessClipboard, False)
# Miscellaneous
settings.setAttribute(QWebSettings.LinksIncludedInFocusChain, True)
settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
# settings.setAttribute(QWebSettings.AutoLoadImages, False)
# Disable Hyperlinks following, open url on system browser
self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.page().linkClicked.connect(lambda url: webbrowser.open(str(url.toString())))
if html:
self.setHtml(html)
else:
self.set_readonly(True)
# config
config_file_path = os.path.join(os.path.dirname(__file__), 'config.json')
self.config = None
if os.path.isfile(config_file_path):
with open(config_file_path) as outfile:
self.config = json.load(outfile)
outfile.close()
self.context_menu_actions = []
# TO CHECK
# https://github.com/gen2brain/pyhtmleditor/blob/master/src/pyhtmleditor/htmleditor.py
# https://github.com/kovidgoyal/calibre/blob/master/src/calibre/gui2/comments_editor.py
#if css_file:
# self.apply_stylefile(css_file)
        ############# TO IMPLEMENT ##########
#self.note_editor.execute_js(self.functions.get_javascript_plugins())
#self.load_functions = []
#self.settings().setAttribute(QWebSettings.AutoLoadImages, False)
        #QWebSettings.globalSettings()->setAttribute(QWebSettings::DeveloperExtrasEnabled, true);
#QWebSettings.globalSettings().setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
def get_config(self):
return self.config
def set_context_menu_append_actions(self, context_menu_actions):
self.context_menu_actions = context_menu_actions
def contextMenuEvent(self, event):
menu = self.page().createStandardContextMenu()
if 'default_context_menu_replace' in self.config:
if self.config['default_context_menu_replace'] == 'True':
menu = QtWidgets.QMenu(self)
if 'context_menu_actions' in self.config:
for action in self.context_menu_actions:
menu.addAction(action)
menu.exec_(QtGui.QCursor.pos())
def set_readonly(self, param=True):
if param == True:
self.execute_js('document.body.contentEditable = "false"')
elif param == False:
self.execute_js('document.body.contentEditable = "true"')
def set_writeable(self):
self.set_readonly(False)
def set_html(self, html=None):
if html:
self.setHtml(html)
def get_html(self,relative_path=None):
html = self.page().mainFrame().toHtml()
pd_content = pq(html)
if pd_content('img').length > 0:
num_img = 0
max_num_img = 0
# Dertemines the number of image to download and process
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
max_num_img += 1
# There are image to download and process
if max_num_img > 0:
progress_dialog = QProgressDialog(self)
progress_dialog.setWindowTitle('Please Wait')
progress_dialog.setLabelText('Downloading and processing images. Please wait.')
progress_dialog.setRange(num_img, max_num_img)
progress_dialog.setValue(num_img)
progress_dialog.setCancelButton(None)
progress_dialog.show()
QApplication.processEvents()
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
if 'http' in img.attrib['src'].lower() or 'ftp' in img.attrib['src'].lower():
# Downloads images
response = requests.get(img.attrib['src'])
# Generates base64 of the image
base64_img = base64.b64encode(response.content).decode('ascii')
# Build uri
uri = "data:" + response.headers['Content-Type'] + ";" + "base64," + base64_img
                        # Reassigns the src attribute with the URI data
img.attrib['src'] = uri
# Updates progress bar
num_img = num_img + 1
progress_dialog.setValue(num_img)
QApplication.processEvents()
html = pd_content.html()
return html
def get_content(self):
return self.get_html()
def set_content(self, content):
if content:
self.set_html(content)
def open_file(self, file_path):
with open(file_path, encoding='UTF-8', errors="ignore") as fd:
base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path), ''))
self.setHtml(fd.read(), base_url)
fd.close()
# Generates uft8 bugs
# fd = QFile(file_path)
# if fd.open(QFile.ReadOnly):
# # Required for webkit to access local images
# base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path),''))
# self.setContent(fd.readAll(), "text/html", base_url)
# fd.close()
def toggle_bold(self, parm=None):
self.page().triggerAction(QWebPage.ToggleBold)
def toggle_italic(self, parm=None):
self.page().triggerAction(QWebPage.ToggleItalic)
def heading(self, param=None):
if param and param in ['heading_1', 'heading_2', 'heading_3', 'heading_4', 'heading_5', 'heading_6']:
cmd_str = str("document.execCommand('formatblock', false, '%s');" % str('h'+param[8]))
self.execute_js(cmd_str)
def orderedlist(self, param=None):
self.page().triggerAction(QWebPage.InsertOrderedList)
def unorderedlist(self, param=None):
self.page().triggerActi
|
braams/shtoom
|
shtoom/__init__.py
|
Python
|
lgpl-2.1
| 121 | 0 |
# Copyright (C) 2004 Anthony Baxter
# This file is necessary to make this directory a package
__version__ = '0.3alpha0'
|
Hexadorsimal/pynes
|
nes/processors/ppu/name_table.py
|
Python
|
mit
| 106 | 0 |
class NameTable:
def __init__(self, start, size):
        self.start = start
        self.size = size
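# A hypothetical instantiation -- on the NES, nametable 0 spans $2000-$23FF:
#   NameTable(start=0x2000, size=0x400)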
|
TomHeatwole/osf.io
|
admin/base/settings/defaults.py
|
Python
|
apache-2.0
| 5,822 | 0.001546 |
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
# import local # Build own local.py (used with postgres)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = [
'.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 5,
}
},
]
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'admin.common_auth',
'admin.base',
'admin.pre_reg',
'admin.spam',
'admin.metrics',
'admin.nodes',
'admin.users',
# 3rd party
'raven.contrib.django.raven_compat',
'webpack_loader',
'django_nose',
'ckeditor',
'password_reset',
)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'common_auth.MyUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'admin'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE_CLASSES = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.MongoConnectionMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.TokuTransactionMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
MESSAGE_TAGS = {
messages.SUCCESS: 'text-success',
messages.ERROR: 'text-danger',
messages.WARNING: 'text-warning',
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}
}]
# Database
# Postgres:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': local.POSTGRES_NAME,
# 'USER': local.POSTGRES_USER,
# 'PASSWORD': local.POSTGRES_PASSWORD,
# 'HOST': local.POSTGRES_HOST,
# 'PORT': '',
# }
# }
# Postgres settings in local.py
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'public/js/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Source'],
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList'],
['Link']
]
},
}
|
Hernanarce/pelisalacarta
|
python/version-mediaserver/platformcode/launcher.py
|
Python
|
gpl-3.0
| 24,841 | 0.017736 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Mediaserver Launcher
# ------------------------------------------------------------
import os
import sys
from core.item import Item
from core import logger
from core import config
from platformcode import platformtools
from core import channeltools
import channelselector
from core import servertools
from core import library
def start():
""" Primera funcion que se ejecuta al entrar en el plugin.
Dentro de esta funcion deberian ir todas las llamadas a las
funciones que deseamos que se ejecuten nada mas abrir el plugin.
"""
logger.info("pelisalacarta.platformcode.launcher start")
# Test if all the required directories are created
config.verify_directories_created()
import library_service
library_service.start()
def run(item):
itemlist = []
    # Show the item in the log:
PrintItems(item)
    # Parental control: check whether the channel is for adults or not
if item.action=="mainlist":
# Parental control
if channeltools.is_adult(item.channel) and config.get_setting("adult_pin")!="":
tecleado = platformtools.dialog_input("","PIN para canales de adultos",True)
if not tecleado==config.get_setting("adult_pin"):
return
    # Import the channel for the item; every item must have a channel, otherwise exit the function
if item.channel: channelmodule = ImportarCanal(item)
# If item has no action, stops here
if item.action == "":
logger.info("pelisalacarta.platformcode.launcher Item sin accion")
itemlist = None
    # Action play: show the menu with the playback options.
elif item.action=="play":
logger.info("pelisalacarta.platformcode.launcher play")
        # If the channel defines its own "play" action, it takes priority
if hasattr(channelmodule, 'play'):
logger.info("pelisalacarta.platformcode.launcher executing channel 'play' method")
itemlist = channelmodule.play(item)
b_favourite = item.isFavourite
if len(itemlist)>0 and isinstance(itemlist[0], Item):
item = itemlist[0]
if b_favourite:
item.isFavourite = True
play_menu(item)
elif len(itemlist)>0 and isinstance(itemlist[0], list):
item.video_urls = itemlist
play_menu(item)
else:
platformtools.dialog_ok("plugin", "No hay nada para reproducir")
else:
logger.info("pelisalacarta.platformcode.launcher no channel 'play' method, executing core method")
play_menu(item)
itemlist = None
    # Action "search": shows the keyboard and runs the search with the entered text.
elif item.action=="search":
logger.info("pelisalacar
|
ta.platformcode.launcher search")
tecleado = platformtools.dialog_input()
if not tecleado is None:
itemlist = channelmodule.search(item,tecleado)
else:
itemlist = []
elif item.channel == "channelselector":
import channelselector
if item.action =="mainlist":
itemlist = channelselector.getmainlist("bannermenu")
if config.get_setting("check_for_plugin_updates") == "true":
logger.info("channelselector.mainlist Verificar actualizaciones activado")
from core import updater
try:
version = updater.checkforupdates()
if version:
platformtools.dialog_ok("Versión "+version+" disponible","Ya puedes descargar la nueva versión del plugin\ndesde el listado principal")
itemlist.insert(0,Item(title="Actualizadr pelisalacarta a la versión "+version, version=version, channel="updater", action="update", thumbnail=os.path.join(config.get_runtime_path(),"resources","images","bannermenu","thumb_update.png")))
except:
platformtools.dialog_ok("No se puede conectar","No ha sido posible comprobar","si hay actualizaciones")
logger.info("channelselector.mainlist Fallo al verificar la actualización")
else:
logger.info("channelselector.mainlist Verificar actualizaciones desactivado")
if item.action =="getchanneltypes":
itemlist = channelselector.getchanneltypes("bannermenu")
if item.action =="filterchannels":
itemlist = channelselector.filterchannels(item.channel_type, "bannermenu")
    # All other actions are attempted in the following order:
    # 1. In the channel
    # 2. In the launcher
    # 3. If it exists in neither, an error is written to the log
else:
        # If the function exists in the channel, run it
        if hasattr(channelmodule, item.action):
            logger.info("Ejecutando accion: " + item.channel + "." + item.action + "(item)")
            exec "itemlist = channelmodule." + item.action + "(item)"
        # If the function exists in the launcher, run it
        elif hasattr(sys.modules[__name__], item.action):
            logger.info("Ejecutando accion: " + item.action + "(item)")
            exec "itemlist =" + item.action + "(item)"
        # If it is found nowhere, log an error
else:
logger.info("No se ha encontrado la accion ["+ item.action + "] en el canal ["+item.channel+"] ni en el launcher")
    # At this point itemlist must already hold the corresponding results.
    # Three different scenarios are possible:
    # 1. The function produced results and they are in itemlist
    # 2. The function produced no results, so itemlist contains 0 items, itemlist = []
    # 3. The function performed an action that creates no new items; then the result must be itemlist = None so the listing is left unchanged
    # By now the function has run in the right place; if we want to post-process the results, this is the spot.
    # Server filtering
if item.action== "findvideos" and config.get_setting('filter_servers') == 'true':
server_white_list, server_black_list = set_server_list()
itemlist = filtered_servers(itemlist, server_white_list, server_black_list)
    # If the action returned no results, add an item with the text "No hay elementos para mostrar"
if type(itemlist)==list:
if len(itemlist) ==0:
itemlist = [Item(title="No hay elementos para mostrar", thumbnail="http://media.tvalacarta.info/pelisalacarta/thumb_error.png")]
    # Log the result
    PrintItems(itemlist)
    # Render the results on screen
platformtools.render_items(itemlist, item)
def ImportarCanal(item):
channel = item.channel
channelmodule=""
if os.path.exists(os.path.join( config.get_runtime_path(), "channels",channel+".py")):
exec "from channels import "+channel+" as channelmodule"
elif os.path.exists(os.path.join( config.get_runtime_path(),"core",channel+".py")):
exec "from core impo
|
uwcirg/true_nth_usa_portal
|
portal/migrations/versions/424f18f4c1df_.py
|
Python
|
bsd-3-clause
| 1,316 | 0.006079 |
"""empty message
Revision ID: 424f18f4c1df
Revises: 106e3631fe9
Create Date: 2015-06-23 11:31:08.548661
"""
# revision identifiers, used by Alembic.
revision = '424f18f4c1df'
down_revision = '106e3631fe9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ENUM
providers_list = ENUM('facebook', 'twitter', 'truenth', name='providers',
                      create_type=False)
def upgrade():
### commands auto generated by Alembic - please adjust! ###
providers_list.create(op.get_bind(), checkfirst=False)
op.create_table('auth_providers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', providers_list, nullable=True),
sa.Column('provider_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('auth_providers')
providers_list.drop(op.get_bind(), checkfirst=False)
### end Alembic commands ###
|
ltucker/giblets
|
giblets/__init__.py
|
Python
|
bsd-3-clause
| 287 | 0.003484 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 Luke Tucker
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Luke Tucker <voxluci@gmail.com>
#
from giblets.core import *
|
meganbkratz/acq4
|
acq4/devices/DAQGeneric/InputChannelTemplate.py
|
Python
|
mit
| 3,321 | 0.001807 |
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file 'InputChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:16 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(427, 220)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = GroupBox(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setSpacing(0)
self.gridLayout.setContentsMargins(5, 0, 0, 0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.recordCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordCheck.setFont(font)
self.recordCheck.setChecked(True)
self.recordCheck.setObjectName(_fromUtf8("recordCheck"))
self.gridLayout.addWidget(self.recordCheck, 0, 0, 1, 1)
self.displayCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.displayCheck.setFont(font)
self.displayCheck.setChecked(True)
self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
self.gridLayout.addWidget(self.displayCheck, 0, 1, 1, 1)
self.recordInitCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordInitCheck.setFont(font)
self.recordInitCheck.setObjectName(_fromUtf8("recordInitCheck"))
self.gridLayout.addWidget(self.recordInitCheck, 1, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "GroupBox", None))
self.recordCheck.setText(_translate("Form", "Record Trace", None))
self.displayCheck.setText(_translate("Form", "Display", None))
self.recordInitCheck.setText(_translate("Form", "Record Initial State", None))
from acq4.pyqtgraph import GroupBox
|
Entropy512/libsigrokdecode
|
decoders/rgb_led_ws281x/pd.py
|
Python
|
gpl-3.0
| 4,320 | 0.003472 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2016 Vladimir Ermakov <vooon341@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from functools import reduce
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'rgb_led_ws281x'
name = 'RGB LED (WS281x)'
longname = 'RGB LED string decoder (WS281x)'
desc = 'RGB LED string protocol (WS281x).'
license = 'gplv3+'
inputs = ['logic']
outputs = []
tags = ['Display', 'IC']
channels = (
{'id': 'din', 'name': 'DIN', 'desc': 'DIN data line'},
)
annotations = (
('bit', 'Bit'),
('reset', 'RESET'),
('rgb', 'RGB'),
)
annotation_rows = (
('bit', 'Bits', (0, 1)),
('rgb', 'RGB', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.oldpin = None
self.ss_packet = None
self.ss = None
self.es = None
self.bits = []
self.inreset = False
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
        if key == srd.SRD_CONF_SAMPLERATE:
            self.samplerate = value
    def handle_bits(self, samplenum):
if len(self.bits) == 24:
grb = reduce(lambda a, b: (a << 1) | b, self.bits)
rgb = (grb & 0xff0000) >> 8 | (grb & 0x00ff00) << 8 | (grb & 0x0000ff)
self.put(self.ss_packet, samplenum, self.out_ann,
[2, ['#%06x' % rgb]])
self.bits = []
self.ss_packet = None
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
# TODO: Come up with more appropriate self.wait() conditions.
(pin,) = self.wait()
if self.oldpin is None:
self.oldpin = pin
continue
# Check RESET condition (manufacturer recommends 50 usec minimal,
# but real minimum is ~10 usec).
if not self.inreset and not pin and self.es is not None and \
(self.samplenum - self.es) / self.samplerate > 50e-6:
# Decode last bit value.
tH = (self.es - self.ss) / self.samplerate
bit_ = True if tH >= 625e-9 else False
self.bits.append(bit_)
self.handle_bits(self.es)
self.put(self.ss, self.es, self.out_ann, [0, ['%d' % bit_]])
self.put(self.es, self.samplenum, self.out_ann,
[1, ['RESET', 'RST', 'R']])
self.inreset = True
self.bits = []
self.ss_packet = None
self.ss = None
if not self.oldpin and pin:
# Rising edge.
if self.ss and self.es:
period = self.samplenum - self.ss
duty = self.es - self.ss
# Ideal duty for T0H: 33%, T1H: 66%.
bit_ = (duty / period) > 0.5
self.put(self.ss, self.samplenum, self.out_ann,
[0, ['%d' % bit_]])
self.bits.append(bit_)
self.handle_bits(self.samplenum)
if self.ss_packet is None:
self.ss_packet = self.samplenum
self.ss = self.samplenum
elif self.oldpin and not pin:
# Falling edge.
self.inreset = False
self.es = self.samplenum
self.oldpin = pin
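# Note (editor): worked example of the duty-cycle rule above, with made-up
# sample counts: a high phase of 7 samples in a 12-sample period gives
# duty / period = 7 / 12 ~= 0.58 > 0.5, so the bit decodes as 1; a high phase
# of 3 samples gives 3 / 12 = 0.25 < 0.5, so the bit decodes as 0.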
|
kcii/numpy-pyqt-multiproc-problem
|
fork.py
|
Python
|
mit
| 868 | 0.009217 |
#!/usr/bin/env python
import PyQt4.QtCore # <- this line causes the error
from multiprocessing import Process
class PTask(Process):
def __init__(self, func):
Process.__init__(self)
self._func = func
def run(self):
self._func()
def f():
try:
import numpy as np
        import numpy.linalg as npl
        for i in range(1000):
print "i: ", i
            n = npl.pinv(np.random.rand(100,100))
# Sometimes the segfault or malloc error doesn't occur
# on the first use of pinv.
print "pinv success"
except:
# This just means the random matrix was not invertible
# but that pinv executed correctly.
print "exception success"
if __name__ == '__main__':
p = PTask(f)
print "start"
p.start()
print "wait"
p.join()
print "end"
|
tellesnobrega/storm_plugin
|
sahara/service/trusts.py
|
Python
|
apache-2.0
| 1,897 | 0 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo.config import cfg
from sahara import conductor as c
from sahara import context
from sahara.utils.openstack import keystone
conductor = c.API
CONF = cfg.CONF
def create_trust(cluster):
client = keystone.client()
ctx = context.current()
trustee_id = keystone.client_for_admin().user_id
trust = client.trusts.create(trustor_user=client.user_id,
trustee_user=trustee_id,
impersonation=True,
role_names=ctx.roles,
project=client.tenant_id)
conductor.cluster_update(ctx,
cluster,
{'trust_id': trust.id})
def use_os_admin_auth_token(cluster):
    if cluster.trust_id:
ctx = context.current()
ctx.username = CONF.keystone_authtoken.admin_user
ctx.tenant_id = cluster.tenant_id
client = keystone.client_for_trusts(cluster.trust_id)
ctx.token = client.auth_token
ctx.service_catalog = json.dumps(
client.service_catalog.catalog['catalog'])
def delete_trust(cluster):
if cluster.trust_id:
        keystone_client = keystone.client_for_trusts(cluster.trust_id)
keystone_client.trusts.delete(cluster.trust_id)
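# Note (editor): illustrative sketch only, not part of the original module --
# it just ties the three helpers above into their expected lifecycle.
def trust_lifecycle_sketch(cluster):
    create_trust(cluster) # trustor delegates its roles to the admin user
    use_os_admin_auth_token(cluster) # switch the context to a trust-scoped token
    delete_trust(cluster) # revoke the delegation when done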
|
jieter/django-localflavor
|
localflavor/is_/forms.py
|
Python
|
bsd-3-clause
| 2,973 | 0.002018 |
"""Iceland specific form helpers."""
from __future__ import unicode_literals
from django.forms import ValidationError
from django.forms.fields import RegexField
from django.forms.widgets import Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin
from .is_postalcodes import IS_POSTALCODES
class ISIdNumberField(EmptyValueCompatMixin, RegexField):
"""
Icelandic identification number (kennitala).
This is a number every citizen of Iceland has.
"""
default_error_messages = {
'invalid': _('Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.'),
'checksum': _('The Icelandic identification number is not valid.'),
}
    def __init__(self, max_length=11, min_length=10, *args, **kwargs):
super(ISIdNumberField, self).__init__(r'^\d{6}(-| )?\d{4}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
value = super(ISIdNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
value = self._canonify(value)
if self._validate(value):
return self._format(value)
else:
raise ValidationError(self.error_messages['checksum'])
def _canonify(self, value):
"""Returns the value as only digits."""
return value.replace('-', '').replace(' ', '')
def _validate(self, value):
"""
Takes in the value in canonical form and checks the verifier digit.
The method is modulo 11.
"""
check = [3, 2, 7, 6, 5, 4, 3, 2, 1, 0]
return sum([int(value[i]) * check[i] for i in range(10)]) % 11 == 0
def _format(self, value):
"""Takes in the value in canonical form and returns it in the common display format."""
return force_text(value[:6] + '-' + value[6:])
class ISPhoneNumberField(EmptyValueCompatMixin, RegexField, DeprecatedPhoneNumberFormFieldMixin):
"""
Icelandic phone number.
Seven digits with an optional hyphen or space after the first three digits.
"""
def __init__(self, max_length=8, min_length=7, *args, **kwargs):
super(ISPhoneNumberField, self).__init__(r'^\d{3}(-| )?\d{4}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
value = super(ISPhoneNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
return value.replace('-', '').replace(' ', '')
class ISPostalCodeSelect(Select):
"""A Select widget that uses a list of Icelandic postal codes as its choices."""
def __init__(self, attrs=None):
super(ISPostalCodeSelect, self).__init__(attrs, choices=IS_POSTALCODES)
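# Note (editor): worked example of the modulo-11 check in
# ISIdNumberField._validate above; the kennitala used here is made up.
# For value = "1201304479" in canonical form, the weighted sum is
#   3*1 + 2*2 + 7*0 + 6*1 + 5*3 + 4*0 + 3*4 + 2*4 + 1*7 + 0*9 = 55
# and 55 % 11 == 0, so _validate returns True and _format renders the
# number as "120130-4479".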
|
pipermerriam/web3.py
|
web3/utils/six/__init__.py
|
Python
|
mit
| 245 | 0 |
import sys
if sys.version_info.major == 2:
from .six_py2 import (
urlparse,
urlunparse,
Generator,
)
else:
    from .six_py3 import ( # noqa: F401
urlparse,
urlunparse,
Generator,
)
|
PayloadSecurity/VxAPI
|
cli/wrappers/cli_caller.py
|
Python
|
gpl-3.0
| 6,741 | 0.003265 |
from api.callers.api_caller import ApiCaller
from exceptions import ResponseTextContentTypeError
from colors import Color
import os
from cli.arguments_builders.default_cli_arguments import DefaultCliArguments
import datetime
from cli.cli_file_writer import CliFileWriter
from cli.formatter.cli_json_formatter import CliJsonFormatter
from constants import CALLED_SCRIPT
class CliCaller:
api_object = None
action_name = None
help_description = ''
given_args = {}
result_msg_for_files = 'Response contains files. They were saved in the output folder ({}).'
result_msg_for_json = '{}'
cli_output_folder = ''
args_to_prevent_from_being_send = ['chosen_action', 'verbose', 'quiet']
def __init__(self, api_object: ApiCaller, action_name: str):
self.api_object = api_object
self.action_name = action_name
self.help_description = self.help_description.format(self.api_object.endpoint_url)
def init_verbose_mode(self):
self.result_msg_for_json = 'JSON:\n\n{}'
def build_argument_builder(self, child_parser):
return DefaultCliArguments(child_parser)
def add_parser_args(self, child_parser):
parser_argument_builder = self.build_argument_builder(child_parser)
parser_argument_builder.add_verbose_arg()
parser_argument_builder.add_help_opt()
parser_argument_builder.add_quiet_opt()
return parser_argument_builder
def attach_args(self, args):
self.given_args = args.copy()
args_to_send = args.copy()
for arg_to_remove in self.args_to_prevent_from_being_send:
if arg_to_remove in args_to_send:
del args_to_send[arg_to_remove]
if 'output' in args:
self.cli_output_folder = args['output']
del args_to_send['output']
args_to_send = {k: v for k, v in args_to_send.items() if v not in [None, '']} # Removing some 'empty' elements from dictionary
if 'file' in args:
del args_to_send['file'] # attaching file is handled by separated method
if self.api_object.request_method_name == ApiCaller.CONST_REQUEST_METHOD_GET:
self.api_object.attach_params(args_to_send)
else: # POST
self.api_object.attach_data(args_to_send)
def attach_file(self, file):
if isinstance(file, str):
file = open(file, 'rb')
self.api_object.attach_files({'file': file}) # it's already stored as file handler
def get_colored_response_status_code(self):
response_code = self.api_object.get_response_status_code()
return Color.success(response_code) if self.api_object.if_request_success() is True else Color.error(response_code)
def get_colored_prepared_response_msg(self):
response_msg = self.api_object.get_prepared_response_msg()
return Color.success(response_msg) if self.api_object.if_request_success() is True else Color.error(response_msg)
def get_result_msg(self):
if self.api_object.api_response.headers['Content-Type'] == 'text/html':
raise ResponseTextContentTypeError('Can\'t print result, since it\'s \'text/html\' instead of expected content type with \'{}\' on board.'.format(self.api_object.api_expected_data_type))
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_JSON:
return self.result_msg_for_json.format(CliJsonFormatter.format_to_pretty_string(self.api_object.get_response_json()))
elif self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE:
if self.api_object.if_request_success() is True:
return self.get_result_msg_for_files()
else:
error_msg = 'Error has occurred and your files were not saved.'
if self.given_args['verbose'] is False:
error_msg += ' To get more information, please run command in verbose mode. (add \'-v\')'
return error_msg
def get_processed_output_path(self):
output_path = self.cli_output_folder
        if output_path.startswith('/') is True: # Given path is absolute
final_output_path = output_path
else:
path_parts = os.path.dirname(os.path.realpath(__file__)).split('/')[:-2]
called_script_dir = os.path.dirname(CALLED_SCRIPT)
            # Handles the case where the user calls the script from a directory other than the root.
if called_script_dir != 'vxapi.py':
new_path_parts = []
                bad_parts = called_script_dir.split('/')
                for part in reversed(path_parts):
if part in bad_parts:
bad_parts.remove(part)
continue
new_path_parts.append(part)
new_path_parts.reverse()
path_parts = new_path_parts
prepared_file_path = path_parts + [self.cli_output_folder]
final_output_path = '/'.join(prepared_file_path)
if not final_output_path.startswith('/'):
final_output_path = '/' + final_output_path
return final_output_path
def get_result_msg_for_files(self):
return self.result_msg_for_files.format(self.get_processed_output_path())
def do_post_processing(self):
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE and self.api_object.if_request_success() is True:
self.save_files()
def get_date_string(self):
now = datetime.datetime.now()
return '{}_{}_{}_{}_{}_{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second)
def convert_file_hashes_to_array(self, args, file_arg='hash_list', key_of_array_arg='hashes'):
with args[file_arg] as file:
hashes = file.read().splitlines()
if not hashes:
raise Exception('Given file does not contain any data.')
for key, value in enumerate(hashes):
args['{}[{}]'.format(key_of_array_arg, key)] = value
del args[file_arg]
return args
def save_files(self):
api_response = self.api_object.api_response
identifier = None
if 'id' in self.given_args:
identifier = self.given_args['id']
elif 'sha256' in self.given_args:
identifier = self.given_args['sha256']
filename = '{}-{}-{}'.format(self.action_name, identifier, api_response.headers['Vx-Filename']) if identifier is not None else '{}-{}'.format(self.action_name, api_response.headers['Vx-Filename'])
return CliFileWriter.write(self.get_processed_output_path(), filename, api_response.content)
|
BirkbeckCTP/janeway
|
src/security/templatetags/securitytags.py
|
Python
|
agpl-3.0
| 2,227 | 0.001347 |
from django import template
from security import logic
register = template.Library()
# General role-based checks
@register.simple_tag(takes_context=True)
def is_author(context):
request = context['request']
return request.user.is_author(request)
@register.simple_tag(takes_context=True)
def is_editor(context):
request = context['request']
if request.user.is_anonymous():
return False
return request.user.is_editor(request)
@register.simple_tag(takes_context=True)
def is_section_editor(context):
    request = context['request']
    if request.user.is_anonymous():
return False
return request.user.is_section_editor(request)
@register.simple_tag(takes_context=True)
def is_production(context):
request = context['request']
return request.user.is_production(request)
@register.simple_tag(takes_context=True)
def is_reviewer(context):
request = context['request']
return request.user.is_reviewer(request)
@register.simple_tag(takes_context=True)
def is_proofreader(context):
request = context['request']
return request.user.is_proofreader(request)
# File-based checks
@register.simple_tag(takes_context=True)
def can_edit_file(context, file_object, article_object):
return logic.can_edit_file(context['request'], context['request'].user, file_object, article_object)
@register.simple_tag(takes_context=True)
def can_view_file_history(context, file_object, article_object):
return logic.can_view_file_history(context['request'], context['request'].user, file_object, article_object)
@register.simple_tag(takes_context=True)
def can_view_file(context, file_object):
return logic.can_view_file(context['request'], context['request'].user, file_object)
@register.simple_tag(takes_context=True)
def is_repository_manager(context):
request = context['request']
return request.user.is_repository_manager(request.repository)
@register.simple_tag(takes_context=True)
def is_preprint_editor(context):
request = context['request']
return request.user.is_preprint_editor(request)
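# Note (editor): illustrative template usage for the tags above (the template
# and variable names are assumptions):
#
#   {% load securitytags %}
#   {% is_editor as user_is_editor %}
#   {% if user_is_editor %} ... editor-only markup ... {% endif %}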
|
suokko/python-apt
|
tests/test_cache_invocation.py
|
Python
|
gpl-2.0
| 863 | 0.001159 |
#!/usr/bin/python
import unittest
import apt_pkg
import apt.progress.base
class TestCache(unittest.TestCase):
"""Test invocation of apt_pkg.Cache()"""
def setUp(self):
apt_pkg.init_config()
apt_pkg.init_system()
    def test_wrong_invocation(self):
"""cache_invocation: Test wrong invocation."""
apt_cache = apt_pkg.Cache(progress=None)
self.assertRaises(ValueError, apt_pkg.Cache, apt_cache)
self.assertRaises(ValueError, apt_pkg.Cache,
apt.progress.base.AcquireProgress())
self.assertRaises(ValueError, apt_pkg.Cache, 0)
def test_proper_invocation(self):
"""cache_invocation: Test correct invocation."""
apt_cache = apt_pkg.Cache(progress=None)
apt_depcache = apt_pkg.DepCache(apt_cache)
if __name__ == "__main__":
unittest.main()
|
DShokes/ArcREST
|
samples/update_largethumbnail.py
|
Python
|
apache-2.0
| 2,476 | 0.008078 |
"""
This sample shows how to update the
large thumbnail of an item
Python 2.x
ArcREST 3.0.1
"""
import arcrest
from arcresthelper import securityhandlerhelper
from arcresthelper import common
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def main():
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfo['username'] = ""#<UserName>
securityinfo['password'] = ""#<Password>
securityinfo['org_url'] = "http://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
itemId = "" #Item ID
pathToImage = r"" #Path to image
try:
        shh = securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
if shh.valid == False:
print shh.message
else:
admin = arcrest.manageorg.Administration(securityHandler=shh.securityhandler)
content = admin.content
            item = content.getItem(itemId)
itemParams = arcrest.manageorg.ItemParameter()
itemParams.largeThumbnail = pathToImage
print item.userItem.updateItem(itemParameters=itemParams)
except (common.ArcRestHelperError),e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
if __name__ == "__main__":
main()
|
pythonpopayan/bermoto
|
backend/handlers/transactional_messaging.py
|
Python
|
mit
| 3,778 | 0.002649 |
"""
handlers for transactional messaging service
"""
import json
# tornado imports
from tornado.queues import Queue
from tornado import websocket, gen, web
#local imports
from settings import DEBUG
#===============================================================================
# WEBSOCKETS SERVER
#===============================================================================
class messaging_server(web.Application):
"""listener application class"""
def __init__(self, q):
"""listener builder method"""
#define petition handlers to use
handlers = [
(r'/channel', channelHandler, dict(q=q)),
(r'/mirror', mirrorHandler),
]
web.Application.__init__(self, handlers)
#===============================================================================
# TESTING HANDLERS
#===============================================================================
class mirrorHandler(websocket.WebSocketHandler):
"""return to the sender the same message they sent"""
verbose = DEBUG
def open(self):
"""defines the websocket open method"""
pass
@gen.coroutine
def on_message(self, message):
"""mirror income data"""
yield self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
pass
class channelHandler(websocket.WebSocketHandler):
"""class that handles app websockets communication"""
verbose = DEBUG
def initialize(self, q):
"""initialize vigilante handler"""
self.q = q
self.service_functions = {
'create_user': self.create_user,
'login': self.login_user,
'logout': self.logout_user
}
def open(self):
"""defines the websocket open method"""
print('[channel]: started connection')
@gen.coroutine
def on_message(self, message):
"""defines the response to income messages"""
data = json.loads(message)
action = data.get('action')
if action:
print(message)
self.service_functions[action](message)
else:
print('[channelHandler]: must give an action')
self.write_message(
json.dumps({'error': [0, 'there is no action in request']})
)
self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
        pass
    def create_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
        # 1. Validate that the information is complete
        #    at minimum: name, password
        #    the email is requested as well (work on the user DB model)
        # 2. Validate that the user does not already exist
        #    query the database for the incoming user_name
        #    send an "already exists" message if it does
        # 3. Validate that the password is acceptable
        #    at least 8 characters, with letters and numbers
        #    send a "bad password" message otherwise
        # 4. Create the user object if every validation passes
        #    fill data that was not received with defaults
        # 5. Store the user information
        # 6. Return a response to the client
        # TODO: define the database model (christian)
        # TODO: choose an ORM (edwin)
        # TODO: check whether the user exists (edwin)
        # TODO: create the user record (edwin)
        # TODO: complete the JSON data for the insert (christian)
        # TODO: password validation function (christian)
        pass
    def login_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
        pass
    def logout_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
        pass
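# Note (editor): minimal sketch of the password rule described in step 3 of
# create_user (at least 8 characters, with letters and numbers); the helper
# name is an assumption and it is not wired into the handler above.
def password_is_valid_sketch(password):
    import re
    return (len(password) >= 8
            and re.search(r'[A-Za-z]', password) is not None
            and re.search(r'[0-9]', password) is not None)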
|
reingart/gui2py
|
gui/tools/propeditor.py
|
Python
|
lgpl-3.0
| 12,658 | 0.006241 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"Visual Property Editor (using wx PropertyGrid) of gui2py's components"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# some parts where inspired or borrowed from wxFormBuilders & wxPython examples
import sys, time, math, os, os.path
import wx
_ = wx.GetTranslation
import wx.propgrid as wxpg
from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec
from gui.font import Font
DEBUG = False
class PropertyEditorPanel(wx.Panel):
def __init__( self, parent, log ):
wx.Panel.__init__(self, parent, wx.ID_ANY)
self.log = log
self.callback = None
self.panel = panel = wx.Panel(self, wx.ID_ANY)
topsizer = wx.BoxSizer(wx.VERTICAL)
# Difference between using PropertyGridManager vs PropertyGrid is that
# the manager supports multiple pages and a description box.
self.pg = pg = wxpg.PropertyGrid(panel,
style=wxpg.PG_SPLITTER_AUTO_CENTER |
wxpg.PG_AUTO_SORT |
wxpg.PG_TOOLBAR)
# Show help as tooltips
pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS)
pg.Bind( wxpg.EVT_PG_CHANGED, self.OnPropGridChange )
pg.Bind( wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange )
pg.Bind( wxpg.EVT_PG_SELECTED, self.OnPropGridSelect )
pg.Bind( wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick )
##pg.AddPage( "Page 1 - Testing All" )
# store the property grid for future reference
self.pg = pg
# load empty object (just draws categories)
self.load_object(None)
# sizing stuff:
topsizer.Add(pg, 1, wx.EXPAND)
panel.SetSizer(topsizer)
topsizer.SetSizeHints(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
def load_object(self, obj, callback=None):
pg = self.pg # get the property grid reference
self.callback = callback # store the update method
# delete all properties
pg.Clear()
# clean references and aux structures
appended = set()
self.obj = obj
self.groups = {}
# loop on specs and append each property (categorized):
for i, cat, class_ in ((1, 'Init Specs', InitSpec),
(2, 'Dimension Specs', DimensionSpec),
(3, 'Style Specs', StyleSpec),
(5, 'Events', EventSpec),
(4, 'Basic Specs', Spec),
):
pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
if obj is None:
continue
specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
for name, spec in specs:
if DEBUG: print "setting prop", spec, class_, spec.type
if isinstance(spec, class_):
prop = {'string': wxpg.StringProperty,
'integer': wxpg.IntProperty,
'float': wxpg.FloatProperty,
'boolean': wxpg.BoolProperty,
'text': wxpg.LongStringProperty,
'code': wxpg.LongStringProperty,
'enum': wxpg.EnumProperty,
'edit_enum': wxpg.EditEnumProperty,
'expr': wxpg.StringProperty,
'array': wxpg.ArrayStringProperty,
'font': wxpg.FontProperty,
'image_file': wxpg.ImageFileProperty,
'colour': wxpg.ColourProperty}.get(spec.type)
if prop and name not in appended:
value = getattr(obj, name)
if DEBUG: print "name", name, value
if spec.type == "code" and value is None:
value = ""
if spec.type == "boolean" and value is None:
value = False
if spec.type == "integer" and value is None:
value = -1
if spec.type in ("string", "text") and value is None:
value = ""
if spec.type == "expr":
value = repr(value)
if spec.type == "font":
if value is None:
value = wx.NullFont
else:
value = value.get_wx_font()
if callable(value):
# event binded at runtime cannot be modified:
value = str(value)
readonly = True
else:
readonly = False
if spec.type == "enum":
prop = prop(name, name,
spec.mapping.keys(),
spec.mapping.values(),
value=spec.mapping.get(value, 0))
elif spec.type == "edit_enum":
prop = prop(name, name,
spec.mapping.keys(),
                                        range(len(spec.mapping.values())),
                                        value=spec.mapping[value])
else:
try:
prop = prop(name, value=value)
except Exception, e:
print "CANNOT LOAD PROPERTY", name, value, e
prop.SetPyClientData(spec)
                        appended.add(name)
                        if spec.group is None:
pg.Append(prop)
if readonly:
pg.SetPropertyReadOnly(prop)
else:
# create a group hierachy (wxpg uses dot notation)
group = ""
prop_parent = None
for grp in spec.group.split("."):
prev_group = group # ancestor
group += ("." if group else "") + grp # path
if group in self.groups:
prop_parent = self.groups[group]
else:
prop_group = wxpg.StringProperty(grp,
value="<composed>")
if not prop_parent:
pg.Append(prop_group)
else:
pg.AppendIn(prev_group, prop_group)
prop_parent = prop_group
self.groups[group] = prop_parent
pg.SetPropertyReadOnly(group)
pg.AppendIn(spec.group, prop)
pg.Collapse(spec.group)
name = spec.group + "." + name
if spec.type == "boolean":
pg.SetPropertyAttribute(name, "UseCheckbox", True)
doc = spec.__doc__
if doc:
pg.SetPropertyHelpString(name, doc)
def edit(self, name=""):
"Programatically selec
|
IncidentNormal/TestApps
|
ALE/HF_Sim_Book.py
|
Python
|
gpl-2.0
| 17,239 | 0.019375 |
'''
Created on Sep 15, 2010
@author: duncantait
'''
from SimPy.Simulation import *
import numpy as np
import random
import math
class G():
#Settings for HF Stations
num_channels = 18
num_stations = 10
class Network():
stations = []
class Medium():
def __init__(self):
self.channels = []
for i in range(G.num_channels):
S = Store(name=i,capacity=1)
self.channels.append(S)
class StationContainer():
def __init__(self,ID):
self.ID = ID
self.Operator = Operator(ID)
self.StationSettings = StationSettings(ID)
self.Scanning = Scanning(ID)
self.Tx = Tx(ID)
def initComponents(self):
        self.Operator.initComponents()
        self.StationSettings.initComponents()
        self.Scanning.initComponents()
        self.Tx.initComponents()
def activate(self):
activate(self.Operator,self.Operator.sendMessage(),at=0.0)
activate(self.StationSettings,self.StationSettings.sounding(),at=0.0)
activate(self.Scanning,self.Scanning.scan(),at=0.0)
activate(self.Tx,self.Tx.sending(),at=0.0)
class Operator(Process):
def __init__(self, ID):
Process.__init__(self)
self.ID = ID
def initComponents(self):
        self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
def sendMessage(self):
while True:
#every so often operator wants to send a message: adds to queue.
yield hold, self, random.uniform(0,1200)
#Create a Message of type 'CALL'
frameInfo = frameDetails(self.ID,self.decideDestination(),0,fType.CALL,False,-1,-1)
            frameInfo.channels = self.ChannelOrder(frameInfo.destination)
yield put,self,self.Tx.sendQ,[frameInfo]
yield hold, self, random.uniform(0,1200)
def decideDestination(self):
while True:
dest = random.randint(0,G.num_channels-1)
if dest != self.ID:
return dest
def ChannelOrder(self,channel=-1,station=-1):
#sorts best channels best-worst
if channel==-1:
ordered = self.StationSettings.channelBER[station,:].argsort()
return ordered[::-1] #reverse order of array
if station==-1:
ordered = self.StationSettings.channelBER[:,channel].argsort()
return ordered[::-1]
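# Note (editor): small worked example of the argsort-and-reverse trick used in
# ChannelOrder above (the BER values are made up):
#   np.array([0.2, 0.9, 0.5]).argsort() -> array([0, 2, 1]) (ascending)
#   np.array([0.2, 0.9, 0.5]).argsort()[::-1] -> array([1, 2, 0]) (largest first)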
class StationSettings(Process):
def __init__(self, ID):
Process.__init__(self)
self.ID = ID
self.state = sState.SCANNING #can be scanning, linking or linked.
self.sending = False
self.channelBER = np.zeros((G.num_channels,G.num_stations)) #LQA: Link Quality Analysis
self.timeout = 2 #current timeout counter for linking/linked mode, if this hits zero, go back to scanning
self.Td = 2 #dwell time per channel
self.Twce = 2 #wait for calling cycle to end
self.Twr = 2
self.minLQA = 0.2
self.bitrate = 392
self.hardwareTime = 20 #e.g. power up/down time, modulation/demodulation, encoding/decoding, crypto in ms.
#tune up/down time. Included in Twrt (wait for response and tune time)
    def sounding(self):
while True:
yield hold, self, random.uniform(0,120)
#Sound
yield hold, self, 1800
class Scanning(Process):
#Is HF ALWAYS scanning? No, either scanning, linking or linked
def __init__(self, ID):
self.ID = ID
Process.__init__(self)
self.currentChannel = 0
def initComponents(self):
self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
self.Tx = [N.Tx for N in Network.stations if N.ID==self.ID][0]
def scan(self):
while True:
#Different responses depending on mode.
#Rules: cannot receive while sending <-----------------
#Otherwise, packets will be interpreted as to the mode the station is in.
channel = Medium.channels[self.currentChannel]
yield (get,self,channel,1),(hold,self,self.StationSettings.timeout)
if self.acquired(channel):
signal = self.got
yield put, self , channel, signal
frameInfo = self.decode(signal) #This implies picking up the signal frame by frame from the channel
if (frameInfo.LQA > self.StationSettings.minLQA) and (frameInfo.destination==self.ID):
yield (put,self,channel,['PH:'+str(self.ID)]),(hold,self,self.StationSettings.Twce)
if self.stored(channel):
yield get,self,channel,1 #Yank sniffer packet back off channel.
if frameInfo.type== fType.CALL:
if self.StationSettings.state==sState.SCANNING:
yield put,self,self.Tx.sendQ,[frameInfo]
self.StationSettings.state=sState.LINKING
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.RESPONSE:
if self.StationSettings.state==sState.LINKING:
yield put,self,self.Tx.sendQ,[frameInfo]
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.ACK:
if self.StationSettings.state==sState.LINKING:
yield put,self,self.Tx.sendQ,[frameInfo]
self.StationSettings.state=sState.LINKED
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.QUICK_ID:
if (self.StationSettings.state==sState.SCANNING or sState.LINKED) and (frameInfo.terminate==False):
'I dont think you can have a QUICK ID out of the blue, and it doesnt need a reply...'
#yield put,self,self.Tx.sendQ,[frameInfo]
#yield waitevent,self,self.Tx.sE
elif frameInfo.terminate==True:
self.StationSettings.state=sState.SCANNING
if frameInfo.type== fType.MSG:
if self.StationSettings.state== sState.LINKED and frameInfo.terminate==False:
'again, why the reply? just keep channel open...'
elif frameInfo.terminate==True:
self.StationSettings.state=sState.SCANNING
#yield put,self,self.Tx.sendQ,[frameInfo]
#yield waitevent,self,self.Tx.sE
else:
print 'Invalid Packet'
self.StationSettings.state=sState.SCANNING
else:
print 'Timed out'
self.StationSettings.state=sState.SCANNING
else:
'Frame unsuitable: Continue Scan'
self.StationSettings.state=sState.SCANNING
else:
'Channel Empty: Continue Scan'
self.StationSettings.state=sState.SCANNING
if self.StationSettings.state==sState.SCANNING:
if self.currentChannel==G.num_channels-1:
self.currentChannel = 0
else:
self.currentChannel+=1
def decode(self,frameInfo):
#Return a packet useable to send straightaway. All data is known to achieve this.
returnInfo = self.convertReply(frameInfo)
returnInfo = self.responseSize(returnInfo)
returnInfo = self.calculate_LQA(returnInfo)
returnInfo.channels = self.currentChannel
#Messages and Acks/Responses always have to be on the same channel as before... which is all
#That is dealt with in 'Scanning'
returnInfo.terminate = False
|
Tijndagamer/bin
|
validate_ip.py
|
Python
|
mit
| 609 | 0.00821 |
#!/usr/bin/python3
# Small script to validate a given IP address
import socket
import sys
def validate_ip(ip):
try:
        socket.inet_pton(socket.AF_INET, ip)
return (True,"IPv4")
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, ip)
return(True,"IPv6")
except socket.error:
return(False,"")
if __name__ == "__ma
|
in__":
try:
ip = sys.argv[1]
state, version = validate_ip(ip)
if state:
print(ip + " is a valid " + version + " address")
except IndexError:
print("No IP given")
|
lsgunth/nvme-cli
|
tests/nvme_writeuncor_test.py
|
Python
|
gpl-2.0
| 2,752 | 0 |
# Copyright (c) 2015-2016 Western Digital Corporation or its affiliates.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Chaitanya Kulkarni <chaitanya.kulkarni@hgst.com>
#
"""
NVMe Write Uncorrectable Testcase:-
1. Read block of data successfully.
2. Issue write uncorrectable to block of data.
3. Attempt to read from same block; shall fail.
4. Issue a write command to first block of data.
5. Read from the same block; shall pass.
"""
from nose.tools import assert_equal, assert_not_equal
from nvme_test_io import TestNVMeIO
class TestNVMeUncor(TestNVMeIO):
"""
    Represents NVMe Write Uncorrectable testcase.
- Attributes:
- start_block : starting block of to perform IO.
- test_log_dir : directory for logs, temp files.
"""
def __init__(self):
""" Constructor TestNVMeUncor """
TestNVMeIO.__init__(self)
self.start_block = 1023
self.setup_log_dir(self.__class__.__name__)
self.write_file = self.test_log_dir + "/" + self.write_file
self.read_file = self.test_log_dir + "/" + self.read_file
self.create_data_file(self.write_file, self.data_size, "15")
open(self.read_file, 'a').close()
def __del__(self):
""" Post Section for TestNVMeUncor """
TestNVMeIO.__del__(self)
def write_uncor(self):
""" Wrapper for nvme write uncorrectable
- Args:
- None
- Returns:
- return code of nvme write uncorrectable command.
"""
write_uncor_cmd = "nvme write-uncor " + self.ns1 + \
" --start-block=" + str(self.start_block) + \
" --block-count=" + str(self.
|
block_count)
return self.exec_cmd(write_uncor_cmd)
def test_write_uncor(self):
""" Testcase ma
|
in """
assert_equal(self.nvme_read(), 0)
assert_equal(self.write_uncor(), 0)
assert_not_equal(self.nvme_read(), 0)
assert_equal(self.nvme_write(), 0)
assert_equal(self.nvme_read(), 0)
|
MapofLife/MOL
|
app/tile_handler.py
|
Python
|
bsd-3-clause
| 3,335 | 0.009595 |
"""This module contains a tile cache handler."""
__author__ = 'Aaron Steele'
# MOL imports
import cache
# Standard Python imports
import hashlib
import logging
import os
import urllib
import webapp2
# Google App Engine imports
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext.webapp.util import run_wsgi_app
if 'SERVER_SOFTWARE' in os.environ:
PROD = not os.environ['SERVER_SOFTWARE'].startswith('Development')
else:
PROD = True
app_id = os.environ['CURRENT_VERSION_ID'].split('.')[0]
if PROD:
host_prefix = 'http'
if os.environ['SERVER_PORT'] == 443:
host_prefix = 'https'
app_host = host_prefix + '://' + os.environ['SERVER_NAME']
else:
app_host = 'http://localhost:8080'
class TileHandler(webapp2.RequestHandler):
"""Request handler for cache requests."""
def get(self):
tile_url = self.request.url.replace(app_host, 'http://mol.cartodb.com')
tile_key = 'tile-%s' % hashlib.sha224(tile_url).hexdigest() # tc means Tile Cache
tile_png = memcache.get(tile_key) # Check memcache
if not tile_png:
tile_png = cache.get(tile_key, value_type='blob') # Check datastore cache
if not tile_png:
result = urlfetch.fetch(tile_url, deadline=60) # Check CartoDB
if result.status_code == 200 or result.status_code == 304:
tile_png = result.content
cache.add(tile_key, tile_png, value_type='blob')
memcache.add(tile_key, tile_png)
else:
memcache.add(tile_key, tile_png)
if not tile_png:
self.error(404)
else:
self.response.headers["Content-Type"] = "image/png"
self.response.headers["Cache-Control"] = "max-age=2629743" # Cache 1 month
self.response.out.write(tile_png)
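# Note (editor): the two handlers share the same read-through pattern
# (memcache -> datastore cache -> origin fetch, writing back on the way out).
# A generic sketch of that pattern; the helper name is an assumption:
def lookup_with_caches_sketch(key, fetch_origin):
    value = memcache.get(key) # 1. in-memory cache
    if value is None:
        value = cache.get(key) # 2. datastore cache
        if value is None:
            value = fetch_origin() # 3. origin, e.g. CartoDB; may return None
            if value is not None:
                cache.add(key, value)
        if value is not None:
            memcache.add(key, value)
    return value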
class GridHandler(webapp2.RequestHandler):
"""Request handler for cache requests."""
def get(self):
grid_url = self.request.url.replace(app_host, 'http://mol.cartodb.com')
        grid_key = 'utfgrid-%s' % hashlib.sha224(grid_url).hexdigest() # gc means Grid Cache
grid_json = memcache.get(grid_key)
if not grid_json:
grid_json = cache.get(grid_key)
if not grid_json:
result = urlfetch.fetch(grid_url, deadline=60)
                if result.status_code == 200 or result.status_code == 304:
                    grid_json = result.content
cache.add(grid_key, grid_json)
memcache.add(grid_key, grid_json)
else:
memcache.add(grid_key, grid_json)
if not grid_json:
self.error(404)
else:
self.response.headers["Content-Type"] = "application/json"
self.response.headers["Cache-Control"] = "max-age=2629743" # Cache 1 month
self.response.out.write(grid_json)
application = webapp2.WSGIApplication(
[('/tiles/[a-zA-Z0-9_-]+/[\d]+/[\d]+/[\d]+.png?.*', TileHandler),
('/tiles/[a-zA-Z0-9_-]+/[\d]+/[\d]+/[\d]+.grid.json?.*', GridHandler),],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
edisonlz/fruit
|
web_project/base/site-packages/django/contrib/formtools/tests/wizard/test_cookiestorage.py
|
Python
|
apache-2.0
| 1,813 | 0.003309 |
from django.test import TestCase
from django.core import signing
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.storage.cookie import CookieStorage
from django.contrib.formtools.tests.wizard.storage import get_request, TestStorage
@skipIfCustomUser
class TestCookieStorage(TestStorage, TestCase):
def get_storage(self):
return CookieStorage
def test_manipulated_cookie(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
cookie_signer = signing.get_cookie_signer(storage.prefix)
storage.request.COOKIES[storage.prefix] = cookie_signer.sign(
storage.encoder.encode({'key1': 'value1'}))
self.assertEqual(storage.load_data(), {'key1': 'value1'})
        storage.request.COOKIES[storage.prefix] = 'i_am_manipulated'
self.assertRaises(SuspiciousOperation, storage.load_data)
def test_reset_cookie(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
storage.data = {'key1': 'value1'}
response = HttpResponse()
storage.update_response(response)
cookie_signer = signing.get_cookie_signer(storage.prefix)
        signed_cookie_data = cookie_signer.sign(storage.encoder.encode(storage.data))
self.assertEqual(response.cookies[storage.prefix].value, signed_cookie_data)
storage.init_data()
storage.update_response(response)
unsigned_cookie_data = cookie_signer.unsign(response.cookies[storage.prefix].value)
self.assertJSONEqual(unsigned_cookie_data,
{"step_files": {}, "step": None, "extra_data": {}, "step_data": {}})
|
ain7/www.ain7.org
|
ain7/annuaire/__init__.py
|
Python
|
lgpl-2.1
| 55 | 0 |
default_app_config = 'ain7.annuaire.management.FillDb'
| |
dmlc/xgboost
|
python-package/xgboost/sklearn.py
|
Python
|
apache-2.0
| 71,896 | 0.002031 |
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, too-many-lines
"""Scikit-Learn Wrapper interface for XGBoost."""
import copy
import warnings
import json
import os
from typing import Union, Optional, List, Dict, Callable, Tuple, Any, TypeVar, Type, cast
from typing import Sequence
import numpy as np
from .core import Booster, DMatrix, XGBoostError
from .core import _deprecate_positional_args, _convert_ntree_limit
from .core import Metric
from .training import train
from .callback import TrainingCallback
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_array
# Do not use class names on scikit-learn directly. Re-define the classes on
# .compat to guarantee the behavior without scikit-learn
from .compat import (
SKLEARN_INSTALLED,
XGBModelBase,
XGBClassifierBase,
XGBRegressorBase,
XGBoostLabelEncoder,
)
array_like = Any
class XGBRankerMixIn: # pylint: disable=too-few-public-methods
"""MixIn for ranking, defines the _esti
|
mator_type usually defined in scikit-learn base
classes."""
_estimator_type = "ranker"
def _check_rf_callback(
early_stopping_rounds: Optional[int],
callbacks: Optional[Sequence[TrainingCallback]],
) -> None:
    if early_stopping_rounds is not None or callbacks is not None:
raise NotImplementedError(
"`early_stopping_rounds` and `callbacks` are not implemented for"
" random forest."
)
_SklObjective = Optional[
Union[
str, Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
]
]
def _objective_decorator(
func: Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
) -> Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]:
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func:
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func:
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds: np.ndarray, dmatrix: DMatrix) -> Tuple[np.ndarray, np.ndarray]:
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
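# Note (editor): illustrative use of _objective_decorator with a hand-written
# squared-error objective (gradient and hessian of 0.5 * (pred - label)**2);
# the helper name is an assumption, not part of the original file.
def _squared_error_sketch(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    grad = y_pred - y_true # first derivative w.r.t. the prediction
    hess = np.ones_like(y_pred) # second derivative is the constant 1
    return grad, hess
# train(params, dtrain, obj=_objective_decorator(_squared_error_sketch)) would
# hand it to xgboost.training.train in the (preds, DMatrix) form shown above.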
def _metric_decorator(func: Callable) -> Metric:
"""Decorate a metric function from sklearn.
    Converts a metric function that uses the typical sklearn metric signature so that it
is compatible with :py:func:`train`
"""
def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
y_true = dmatrix.get_label()
return func.__name__, func(y_true, y_score)
return inner
__estimator_doc = '''
n_estimators : int
Number of gradient boosted trees. Equivalent to number of boosting
rounds.
'''
__model_doc = f'''
max_depth : Optional[int]
Maximum tree depth for base learners.
max_leaves :
Maximum number of leaves; 0 indicates no limit.
max_bin :
If using histogram-based algorithm, maximum number of bins per feature
grow_policy :
Tree growing policy. 0: favor splitting at nodes closest to the node, i.e. grow
depth-wise. 1: favor splitting at nodes with highest loss change.
learning_rate : Optional[float]
Boosting learning rate (xgb's "eta")
verbosity : Optional[int]
The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
objective : {_SklObjective}
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
booster: Optional[str]
Specify which booster to use: gbtree, gblinear or dart.
tree_method: Optional[str]
Specify which tree method to use. Default to auto. If this parameter is set to
default, XGBoost will choose the most conservative option available. It's
recommended to study this option from the parameters document :doc:`tree method
</treemethod>`
n_jobs : Optional[int]
Number of parallel threads used to run xgboost. When used with other Scikit-Learn
algorithms like grid search, you may choose which algorithm to parallelize and
balance the threads. Creating thread contention will significantly slow down both
algorithms.
gamma : Optional[float]
(min_split_loss) Minimum loss reduction required to make a further partition on a
leaf node of the tree.
min_child_weight : Optional[float]
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : Optional[float]
Maximum delta step we allow each tree's weight estimation to be.
subsample : Optional[float]
Subsample ratio of the training instance.
sampling_method :
Sampling method. Used only by `gpu_hist` tree method.
- `uniform`: select random training instances uniformly.
- `gradient_based` select random training instances with higher probability when
the gradient and hessian are larger. (cf. CatBoost)
colsample_bytree : Optional[float]
Subsample ratio of columns when constructing each tree.
colsample_bylevel : Optional[float]
Subsample ratio of columns for each level.
colsample_bynode : Optional[float]
Subsample ratio of columns for each split.
reg_alpha : Optional[float]
L1 regularization term on weights (xgb's alpha).
reg_lambda : Optional[float]
L2 regularization term on weights (xgb's lambda).
scale_pos_weight : Optional[float]
Balancing of positive and negative weights.
base_score : Optional[float]
The initial prediction score of all instances, global bias.
random_state : Optional[Union[numpy.random.RandomState, int]]
Random number seed.
.. note::
Using gblinear booster with shotgun updater is nondeterministic as
it uses Hogwild algorithm.
missing : float, default np.nan
Value in the data which needs to be present as a missing value.
num_parallel_tree: Optional[int]
Used for boosting random forest.
monotone_constraints : Optional[Union[Dict[str, int], str]]
Constraint of variable monotonicity. See :doc:`tutorial </tutorials/monotonic>`
for more information.
interaction_constraints : Optional[Union[str, List[Tuple[str]]]]
Constraints for interaction representing permitted interactions. The
constraints must be specified in the form of a nested list, e.g. ``[[0, 1], [2,
3, 4]]``, where each inner list is a group of indices of features that are
allowed to interact with each other. See :doc:`tutorial
    </tutorials/feature_interaction_constraint>` for more information.
importance_type: Optional[str]
The feature importance type for the feature_importances\\_ property:
* For tree model, it's either "gain", "weight", "cover", "total_gain" or
"total_cover".
* For linear model, only "weight" is defined and it's the normalized coefficients
without bias.
gpu_id : Optional[int]
Device ordinal.
validate_parameters : Optional[bool]
    Give warnings for unknown parameters.
predictor : Optional[str]
Force XGBoost to use specific predictor, available choices are [cpu_predictor,
gpu_predictor].
enable_categorical : bool
.. versionadded:: 1.5.0
.. note:: This parameter is experimental
    Experimental support for categorical data. When enabled, cudf/pandas.DataFrame
    should be used to specify the categorical data type.
|
CianDuffy/nodecopter-security
|
python/pedestrian_detect.py
|
Python
|
mit
| 1,228 | 0.004886 |
import os
import cv2
import time
drone_output_path_string = "./images/intruder-detection/drone-output/drone-output.png"
detected_image_path_string = "./images/intruder-detection/detected/intruder-detected.png"
full_body_haar_cascade_path_string = "./node_modules/opencv/data/haarcascade_fullbody.xml"
def clear_directories():
if os.path.exists(drone_output_path_string):
os.remove(drone_output_path_string)
if os.path.exists(detected_image_path_string):
os.remove(detected_image_path_string)
def detect_intruders():
time.sleep(0.5)
drone_output_image = cv2.imread(drone_output_path_string)
    intruder_classifier = cv2.CascadeClassifier(full_body_haar_cascade_path_string)
intruders = intruder_classifier.detectMultiScale(drone_output_image)
if len(intruders) > 0:
for (x, y, w, h) in intruders:
cv2.rectangle(drone_output_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite(detected_image_path_string, drone_output_image)
os.remove(drone_output_path_string)
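# Note (hedged): detectMultiScale returns an array of (x, y, w, h) boxes.
# The accuracy/false-positive trade-off can be tuned via its optional
# parameters, e.g. detectMultiScale(img, scaleFactor=1.1, minNeighbors=3).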
def main():
clear_directories()
while True:
if os.path.exists(drone_output_path_string):
detect_intruders()
if __name__ == '__main__':
main()
|
sklam/numba
|
numba/cuda/cudadrv/nvvm.py
|
Python
|
bsd-2-clause
| 24,855 | 0.000523 |
"""
This is a direct translation of nvvm.h
"""
import logging
import re
import sys
from ctypes import (c_void_p, c_int, POINTER, c_char_p, c_size_t, byref,
c_char)
import threading
from llvmlite import ir
from .error import NvvmError, NvvmSupportError
from .libs import get_libdevice, open_libdevice, open_cudalib
from numba.core import config
logger = logging.getLogger(__name__)
ADDRSPACE_GENERIC = 0
ADDRSPACE_GLOBAL = 1
ADDRSPACE_SHARED = 3
ADDRSPACE_CONSTANT = 4
ADDRSPACE_LOCAL = 5
# Opaque handle for compilation unit
nvvm_program = c_void_p
# Result code
nvvm_result = c_int
RESULT_CODE_NAMES = '''
NVVM_SUCCESS
NVVM_ERROR_OUT_OF_MEMORY
NVVM_ERROR_PROGRAM_CREATION_FAILURE
NVVM_ERROR_IR_VERSION_MISMATCH
NVVM_ERROR_INVALID_INPUT
NVVM_ERROR_INVALID_PROGRAM
NVVM_ERROR_INVALID_IR
NVVM_ERROR_INVALID_OPTION
NVVM_ERROR_NO_MODULE_IN_PROGRAM
NVVM_ERROR_COMPILATION
'''.split()
for i, k in enumerate(RESULT_CODE_NAMES):
setattr(sys.modules[__name__], k, i)
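# The loop above exposes each result code as a module-level integer constant,
# e.g. NVVM_SUCCESS == 0 and NVVM_ERROR_COMPILATION == 9.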
def is_available():
"""
    Return whether libNVVM is available.
"""
try:
NVVM()
except NvvmSupportError:
return False
else:
return True
_nvvm_lock = threading.Lock()
class NVVM(object):
'''Process-wide singleton.
'''
_PROTOTYPES = {
# nvvmResult nvvmVersion(int *major, int *minor)
'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)),
# nvvmResult nvvmCreateProgram(nvvmProgram *cu)
'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmDestroyProgram(nvvmProgram *cu)
'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer,
# size_t size, const char *name)
'nvvmAddModuleToProgram': (
nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p),
# nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions,
# const char **options)
'nvvmCompileProgram': (
nvvm_result, nvvm_program, c_int, POINTER(c_char_p)),
# nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetCompiledResultSize': (
nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer)
'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p),
# nvvmResult nvvmGetProgramLogSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer)
'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p),
}
# Singleton reference
__INSTANCE = None
def __new__(cls):
with _nvvm_lock:
if cls.__INSTANCE is None:
cls.__INSTANCE = inst = object.__new__(cls)
try:
inst.driver = open_cudalib('nvvm')
except OSError as e:
cls.__INSTANCE = None
errmsg = ("libNVVM cannot be found. Do `conda install "
"cudatoolkit`:\n%s")
raise NvvmSupportError(errmsg % e)
# Find & populate functions
for name, proto in inst._PROTOTYPES.items():
func = getattr(inst.driver, name)
func.restype = proto[0]
func.argtypes = proto[1:]
setattr(inst, name, func)
return cls.__INSTANCE
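    # Note: NVVM() always returns the same process-wide instance (and loads
    # libNVVM at most once), so `NVVM() is NVVM()` holds.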
def get_version(self):
major = c_int()
minor = c_int()
err = self.nvvmVersion(byref(major), byref(minor))
self.check_error(err, 'Failed to get version.')
return major.value, minor.value
def check_error(self, error, msg, exit=False):
if error:
exc = NvvmError(msg, RESULT_CODE_NAMES[error])
if exit:
print(exc)
sys.exit(1)
else:
raise exc
class CompilationUnit(object):
def __init__(self):
self.driver = NVVM()
self._handle = nvvm_program()
err = self.driver.nvvmCreateProgram(byref(self._handle))
self.driver.check_error(err, 'Failed to create CU')
def __del__(self):
driver = NVVM()
err = driver.nvvmDestroyProgram(byref(self._handle))
driver.check_error(err, 'Failed to destroy CU', exit=True)
def add_module(self, buffer):
"""
Add a module level NVVM IR to a compilation unit.
- The buffer should contain an NVVM module IR either in the bitcode
representation (LLVM3.0) or in the text representation.
"""
err = self.driver.nvvmAddModuleToProgram(self._handle, buffer,
len(buffer), None)
self.driver.check_error(err, 'Failed to add module')
def compile(self, **options):
"""Perform Compilation
The valid compiler options are
* - -g (enable generation of debugging information)
* - -opt=
* - 0 (disable optimizations)
* - 3 (default, enable optimizations)
* - -arch=
* - compute_20 (default)
* - compute_30
* - compute_35
* - -ftz=
* - 0 (default, preserve denormal values, when performing
* single-precision floating-point operations)
* - 1 (flush denormal values to zero, when performing
* single-precision floating-point operations)
* - -prec-sqrt=
* - 0 (use a faster approximation for single-precision
* floating-point square root)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point square root)
* - -prec-div=
* - 0 (use a faster approximation for single-precision
* floating-point division and reciprocals)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point division and reciprocals)
* - -fma=
* - 0 (disable FMA contraction)
* - 1 (default, enable FMA contraction)
*
"""
# stringify options
opts = []
if 'debug' in options:
if options.pop('debug'):
opts.append('-g')
        if 'opt' in options:
            opts.append('-opt=%d' % options.pop('opt'))
if options.get('arch'):
opts.append('-arch=%s' % options.pop('arch'))
other_options = (
'ftz',
'prec_sqrt',
'prec_div',
'fma',
)
for k in other_options:
            if k in options:
                v = int(bool(options.pop(k)))
opts.append('-%s=%d' % (k.replace('_', '-'), v))
        # If there are any options left
if options:
optstr = ', '.join(map(repr, options.keys()))
raise NvvmError("unsupported option {0}".format(optstr))
# compile
c_opts = (c_char_p * len(opts))(*[c_char_p(x.encode('utf8'))
for x in opts])
err = self.driver.nvvmCompileProgram(self._handle, len(opts), c_opts)
self._try_error(err, 'Failed to compile\n')
# get result
reslen = c_size_t()
err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen))
self._try_error(err, 'Failed to get size of compiled result.')
ptxbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetCompiledResult(self._handle, ptxbuf)
self._try_error(err, 'Failed to get compiled result.')
# get log
self.log = self.get_log()
return ptxbuf[:]
def _try_error(self, err, msg):
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
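    # Usage sketch (hedged; assumes a CUDA toolkit with libNVVM is installed
    # and `llvm_ir` holds NVVM IR text or bitcode):
    #
    #     cu = CompilationUnit()
    #     cu.add_module(llvm_ir)
    #     ptx = cu.compile(opt=3, arch='compute_35')
    #     print(cu.log)  # compiler warnings, if any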
    def get_log(self):
        reslen = c_size_t()
        err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen))
        self.driver.check_error(err, 'Failed to get compilation log size.')
        if reslen.value > 1:
            logbuf = (c_char * reslen.value)()
            err = self.driver.nvvmGetProgramLog(self._handle, logbuf)
            self.driver.check_error(err, 'Failed to get compilation log.')
            return logbuf.value.decode('utf8')
        return ''
|
edisonlz/fruit
|
web_project/base/site-packages/django/conf/locale/mk/formats.py
|
Python
|
apache-2.0
| 1,758 | 0.001138 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M', # '25.10.06. 14:30'
    '%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
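# Illustrative check (hedged): the *_INPUT_FORMATS entries above are plain
# strptime patterns, so the first DATETIME_INPUT_FORMATS entry parses as:
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25.10.2006. 14:30:59', '%d.%m.%Y. %H:%M:%S')
#     datetime.datetime(2006, 10, 25, 14, 30, 59)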
|
parthea/datalab
|
tools/cli/commands/create.py
|
Python
|
apache-2.0
| 37,422 | 0 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for implementing the `datalab create` command."""
from __future__ import absolute_import
import json
import os
import subprocess
import sys
import tempfile
from . import connect, utils
try:
# If we are running in Python 2, builtins is available in 'future'.
from builtins import input as read_input
except Exception:
# We don't want to require the installation of future, so fallback
# to using raw_input from Py2.
read_input = raw_input # noqa: F821
description = ("""`{0} {1}` creates a new Datalab instance running in a Google
Compute Engine VM.
This command also creates the 'datalab-network' network if necessary.
By default, the command creates a persistent connection to the newly
created instance. You can disable that behavior by passing in the
'--no-connect' flag.""")
_DATALAB_NETWORK = 'datalab-network'
_DATALAB_NETWORK_DESCRIPTION = 'Network for Google Cloud Datalab instances'
_DATALAB_FIREWALL_RULE_TEMPLATE = '{0}-allow-ssh'
_DATALAB_FIREWALL_RULE_DESCRIPTION = 'Allow SSH access to Datalab instances'
_DATALAB_UNEXPECTED_FIREWALLS_WARNING_TEMPLATE = (
'The network `{0}` has firewall rules that were not created by the '
'`datalab` command line tool. Instances created in that network may '
'be open to traffic that they should not be exposed to.')
_DATALAB_DEFAULT_DISK_SIZE_GB = 200
_DATALAB_DISK_DESCRIPTION = (
'Persistent disk for a Google Cloud Datalab instance')
_DATALAB_NOTEBOOKS_REPOSITORY = 'datalab-notebooks'
_DATALAB_STARTUP_SCRIPT = """#!/bin/bash
# First, make sure the `datalab` and `logger` users exist with their
# home directories setup correctly.
useradd datalab -u 2000 || useradd datalab
useradd logger -u 2001 || useradd logger
# In case the instance has started before, the `/home/datalab` directory
# may already exist, but with the incorrect user ID (since `/etc/passwd`
# is saved in a tmpfs and changes after restarts). To account for that,
# we should force the file ownership under `/home/datalab` to match
# the current UID for the `datalab` user.
chown -R datalab /home/datalab
chown -R logger /home/logger
PERSISTENT_DISK_DEV="/dev/disk/by-id/google-datalab-pd"
MOUNT_DIR="/mnt/disks/datalab-pd"
MOUNT_CMD="mount -o discard,defaults ${{PERSISTENT_DISK_DEV}} ${{MOUNT_DIR}}"
download_docker_image() {{
# Since /root/.docker is not writable on the default image,
# we need to set HOME to be a writable directory. This same
# directory is used later on by the datalab.service.
export OLD_HOME=$HOME
export HOME=/home/datalab
echo "Getting Docker credentials"
docker-credential-gcr configure-docker
echo "Pulling latest image: {0}"
docker pull {0}
export HOME=$OLD_HOME
}}
clone_repo() {{
echo "Creating the datalab directory"
mkdir -p ${{MOUNT_DIR}}/content/datalab
echo "Cloning the repo {1}"
docker run --rm -v "${{MOUNT_DIR}}/content:/content" \
--entrypoint "/bin/bash" {0} \
gcloud source repos clone {1} /content/datalab/notebooks
}}
repo_is_populated() {{
cd ${{MOUNT_DIR}}/content/datalab/notebooks
git show-ref --quiet
}}
populate_repo() {{
echo "Populating datalab-notebooks repo"
docker run --rm -v "${{MOUNT_DIR}}/content:/content" \
--workdir=/content/datalab/notebooks \
--entrypoint "/bin/bash" {0} -c "\
echo '.ipynb_checkpoints' >> .gitignore; \
echo '*.pyc' >> .gitignore; \
echo '# Project Notebooks' >> README.md; \
git add .gitignore README.md; \
git -c user.email=nobody -c user.name=Datalab \
commit --message='Set up initial notebook repo.'; \
git push origin master; \
"
}}
format_disk() {{
echo "Formatting the persistent disk"
mkfs.ext4 -F \
-E lazy_itable_init=0,lazy_journal_init=0,discard \
${{PERSISTENT_DISK_DEV}}
${{MOUNT_CMD}}
clone_repo
if ! repo_is_populated; then
populate_repo
fi
}}
checked_format_disk() {{
echo "Checking if the persistent disk needs to be formatted"
if [ -z "$(blkid ${{PERSISTENT_DISK_DEV}})" ]; then
format_disk
else
echo "Disk already formatted, but mounting failed; rebooting..."
# The mount failed, but the disk seems to already
# be formatted. Reboot the machine to try again.
reboot now
fi
}}
mount_and_prepare_disk() {{
echo "Trying to mount the persistent disk"
mkdir -p "${{MOUNT_DIR}}"
${{MOUNT_CMD}} || checked_format_disk
if [ -z "$(mount | grep ${{MOUNT_DIR}})" ]; then
echo "Failed to mount the persistent disk; rebooting..."
reboot now
fi
chmod a+w "${{MOUNT_DIR}}"
mkdir -p "${{MOUNT_DIR}}/content"
old_dir="${{MOUNT_DIR}}/datalab"
new_dir="${{MOUNT_DIR}}/content/datalab"
if [ -d "${{old_dir}}" ] && [ ! -d "${{new_dir}}" ]; then
echo "Moving ${{old_dir}} to ${{new_dir}}"
mv "${{old_dir}}" "${{new_dir}}"
else
echo "Creating ${{new_dir}}"
mkdir -p "${{new_dir}}"
fi
}}
configure_swap() {{
if [ "{2}" == "false" ]; then
return
fi
mem_total_line=`cat /proc/meminfo | grep MemTotal`
mem_total_value=`echo "${{mem_total_line}}" | cut -d ':' -f 2`
memory_kb=`echo "${{mem_total_value}}" | cut -d 'k' -f 1 | tr -d '[:space:]'`
# Before proceeding, check if we have more disk than memory.
# Specifically, if the free space on disk is not N times the
# size of memory, then enabling swap makes no sense.
#
# Arbitrarily choosing a value of N=10
disk_kb_cutoff=`expr 10 "*" ${{memory_kb}}`
disk_kb_available=`df --output=avail ${{MOUNT_DIR}} | tail -n 1`
if [ "${{disk_kb_available}}" -lt "${{disk_kb_cutoff}}" ]; then
return
fi
swapfile="${{MOUNT_DIR}}/swapfile"
# Create the swapfile if it is either missing or not big enough
current_size="0"
if [ -e "${{swapfile}}" ]; then
current_size=`ls -s ${{swapfile}} | cut -d ' ' -f 1`
fi
if [ "${{memory_kb}}" -gt "${{current_size}}" ]; then
echo "Creating a ${{memory_kb}} kilobyte swapfile at ${{swapfile}}"
dd if=/dev/zero of="${{swapfile}}" bs=1024 count="${{memory_kb}}"
fi
chmod 0600 "${{swapfile}}"
mkswap "${{swapfile}}"
# Enable swap
sysctl vm.disk_based_swap=1
swapon "${{swapfile}}"
}}
cleanup_tmp() {{
tmpdir="${{MOUNT_DIR}}/tmp"
# First, make sure the temporary directory exists.
mkdir -p "${{tmpdir}}"
# Remove all files from it.
#
# We do not remove the directory itself, as that could lead to a broken
# volume mount (if the Docker container has already started).
#
# We also do not just use `rm -rf ${{tmpdir}}/*`, as that would leave
# behind any hidden files.
find "${{tmpdir}}/" -mindepth 1 -delete
}}
download_docker_image
mount_and_prepare_disk
configure_swap
cleanup_tmp
journalctl -u google-startup-scripts --no-pager > /var/log/startupscript.log
"""
_DATALAB_CLOUD_CONFIG = """
#cloud-config
users:
- name: datalab
uid: 2000
groups: docker
- name: logger
uid: 2001
groups: docker
write_files:
- path: /etc/systemd/system/wait-for-startup-script.service
permissions: 0755
owner: root
content: |
[Unit]
Description=Wait for the startup script to setup required directories
Requires=network-online.target gcr-online.target
After=network-online.target gcr-online.target
[Service]
User=root
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/bash -c 'while [ ! -e /mnt/disks/datalab-pd/tmp ]; do \
sleep 1; \
done'
- path: /etc/systemd/system/datalab.service
permissions: 0644
owner: root
content: |
[Unit]
Description=datalab docker container
Requires=network-online.target gcr-online.target \
        wait-for-startup-script.service
|
anthraxx/pwndbg
|
pwndbg/chain.py
|
Python
|
mit
| 4,555 | 0.003305 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gdb
import pwndbg.abi
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.color.theme as theme
import pwndbg.enhance
import pwndbg.memory
import pwndbg.symbol
import pwndbg.typeinfo
import pwndbg.vmmap
LIMIT = pwndbg.config.Parameter('dereference-limit', 5, 'max number of pointers to dereference in a chain')
def get(address, limit=LIMIT, offset=0, hard_stop=None, hard_end=0, include_start=True):
"""
    Recursively dereferences an address. For bare metal targets it stops as soon as the address is not in any vmmap page, to avoid redundant dereferences.
Arguments:
address(int): the first address to begin dereferencing
limit(int): number of valid pointers
offset(int): offset into the address to get the next pointer
hard_stop(int): address to stop at
hard_end: value to append when hard_stop is reached
include_start(bool): whether to include starting address or not
Returns:
        A list of the addresses visited while dereferencing (including
        ``address`` itself when ``include_start`` is True)
"""
limit = int(limit)
result = [address] if include_start else []
for i in range(limit):
# Don't follow cycles, except to stop at the second occurrence.
if result.count(address) >= 2:
break
if hard_stop is not None and address == hard_stop:
result.append(hard_end)
break
try:
address = address + offset
# Avoid redundant dereferences in bare metal mode by checking
# if address is in any of vmmap pages
if not pwndbg.abi.linux and not pwndbg.vmmap.find(address):
break
address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
address &= pwndbg.arch.ptrmask
result.append(address)
except gdb.MemoryError:
break
return result
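# Usage sketch (hedged, inside a pwndbg/gdb session; the address is
# illustrative):
#
#     >>> get(0x7fffffffe000, limit=3)
#     [0x7fffffffe000, ...]  # the start plus up to `limit` dereferences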
config_arrow_left = theme.Parameter('chain-arrow-left', '◂—', 'left arrow of chain formatting')
config_arrow_right = theme.Parameter('chain-arrow-right', '—▸', 'right arrow of chain formatting')
config_contiguous = theme.Parameter('chain-contiguous-marker', '...', 'contiguous marker of chain formatting')
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
"""
Recursively dereferences an address into string representation, or convert the list representation
of address dereferences into string representation.
Arguments:
value(int|list): Either the starting address to be sent to get, or the result of get (a list)
limit(int): Number of valid pointers
code(bool): Hint that indicates the value may be an instruction
offset(int): Offset into the address to get the next pointer
hard_stop(int): Value to stop on
hard_end: Value to append when hard_stop is reached: null, value of hard stop, a string.
Returns:
A string representing pointers of each address and reference
Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
"""
limit = int(limit)
# Allow results from get function to be passed to format
if isinstance(value, list):
chain = value
    else:
        chain = get(value, limit, offset, hard_stop, hard_end)
arrow_left = C.arrow(' %s ' % config_arrow_left)
    arrow_right = C.arrow(' %s ' % config_arrow_right)
# Colorize the chain
rest = []
for link in chain:
symbol = pwndbg.symbol.get(link) or None
if symbol:
symbol = '%#x (%s)' % (link, symbol)
rest.append(M.get(link, symbol))
# If the dereference limit is zero, skip any enhancements.
if limit == 0:
return rest[0]
# Otherwise replace last element with the enhanced information.
rest = rest[:-1]
# Enhance the last entry
# If there are no pointers (e.g. eax = 0x41414141), then enhance
# the only element there is.
if len(chain) == 1:
enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
# Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used, the
    # chain failed at that offset, so display that offset.
elif len(chain) < limit + 1:
enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
else:
enhanced = C.contiguous('%s' % config_contiguous)
if len(chain) == 1:
return enhanced
return arrow_right.join(rest) + arrow_left + enhanced
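# Usage sketch (hedged): `format` accepts either a starting address or a
# list already produced by `get`, so the two calls below print the same
# chain (addresses illustrative):
#
#     print(format(0x804a010))       # 0x804a010 —▸ 0x8061000 ◂— 0x41414141
#     print(format(get(0x804a010)))  # reuses the precomputed chain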
|
jmuhlich/indra
|
indra/reach/reach_reader.py
|
Python
|
bsd-2-clause
| 1,451 | 0.000689 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from indra.java_vm import autoclass, JavaException
logger = logging.getLogger('reach_reader')
class ReachReader(object):
"""The ReachReader wraps a singleton instance of the REACH reader.
This allows calling the reader many times without having to wait for it to
start up each time.
Attributes
    ----------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
def __init__(self):
self.api_ruler = None
def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException:
# This second autoclass is needed because of a jnius
# issue in which the first JavaException is not raised.
try:
autoclass('java.lang.String')
except JavaException as e:
logger.error(e)
pass
return None
return self.api_ruler
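# Usage sketch (hedged; requires a JVM with the REACH jars on the classpath):
#
#     reader = ReachReader()
#     api_ruler = reader.get_api_ruler()  # None if the REACH classes are missing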
|