text stringlengths 6–947k | repo_name stringlengths 5–100 | path stringlengths 4–231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6–947k | score float64 0–0.34 |
---|---|---|---|---|---|---|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
return box
def createTorus( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
return torus
def createPoints( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
points = geo.createNode( "scatter" )
facet.setInput( 0, box )
points.setInput( 0, facet )
return points
# creates a converter
def testCreateConverter( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniPointsConverter( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
return converter
# tests the converter factory
def testFactory( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
self.assertEqual( converter, None )
self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.PointsPrimitive ] )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# performs geometry conversion
def testDoConversion( self ) :
converter = self.testCreateConverter()
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
def testConvertFromHOMGeo( self ) :
geo = self.createPoints().geometry()
converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.failUnless( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.PointsPrimitive )
self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# convert a mesh
def testConvertMesh( self ) :
torus = self.createTorus()
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
bbox = result.bound()
self.assertEqual( bbox.min.x, -1.5 )
self.assertEqual( bbox.max.x, 1.5 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test prim/vertex attributes
def testConvertPrimVertAttributes( self ) :
torus = self.createTorus()
geo = torus.parent()
# add point normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attribute
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
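# note: the attribcreate parm codes used below are class 0 = detail, class 3 = vertex; type 0 = float, 1 = int, 2 = vector, 3 = string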
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_f3 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vert_v3f )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_i3 )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -1.5 )
self.assertEqual( bbox.max.x, 1.5 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cd" not in result )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ]
for a in attrs :
self.assert_( a not in result )
self.assert_( result.arePrimitiveVariablesValid() )
# test prim/vertex attributes on a single primitive (mesh)
def testConvertMeshPrimVertAttributes( self ) :
torus = self.createTorus()
torus.parm( "type" ).set( 1 )
geo = torus.parent()
# add point normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attribute
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_f3 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
vertString = geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True )
vertString.parm("name").set("vertString")
vertString.parm("class").set(3)
vertString.parm("type").set(3)
vertString.parm("string").set("string $VTX!")
vertString.setInput( 0, vert_v3f )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vertString )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_i3 )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -1.5 )
self.assertEqual( bbox.max.x, 1.5 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cs" in result )
self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData )
self.assertEqual( result["Cs"].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( result["Cs"].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) :
for j in range( 0, 3 ) :
self.assert_( result["Cs"].data[i][j] >= 0.0 )
self.assert_( result["Cs"].data[i][j] <= 1.0 )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f", "vertStringIndices" ]
for a in attrs :
self.assert_( a in result )
self.assertEqual( result[a].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result[a].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() )
self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() )
self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_f3"].data[i][j] >= 0.0 )
self.assert_( result["vert_f3"].data[i][j] < 400.1 )
self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() )
self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() )
self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_i3"].data[i][j] < 10 )
self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["vertString"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["vertStringIndices"].data.typeId(), IECore.TypeId.IntVectorData )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
self.assertEqual( result["vertString"].data[i], "string %d!" % i )
self.assertEqual( result["vertStringIndices"].data[i], i )
self.assert_( result.arePrimitiveVariablesValid() )
# convert some points
def testConvertPoints( self ) :
points = self.createPoints()
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
self.assertEqual( points.parm('npts').eval(), result.numPoints )
self.assert_( "P" in result.keys() )
self.assert_( "N" in result.keys() )
self.assert_( result.arePrimitiveVariablesValid() )
# simple attribute conversion
def testSetupAttributes( self ) :
points = self.createPoints()
geo = points.parent()
attr = geo.createNode( "attribcreate", exact_type_name=True )
attr.setInput( 0, points )
attr.parm("name").set( "test_attribute" )
attr.parm("type").set(0) # float
attr.parm("size").set(1) # 1 element
attr.parm("value1").set(123.456)
attr.parm("value2").set(654.321)
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" in result.keys() )
self.assertEqual( result["test_attribute"].data.size(), points.parm('npts').eval() )
self.assert_( result.arePrimitiveVariablesValid() )
return attr
# testing point attributes and types
def testPointAttributes( self ) :
attr = self.testSetupAttributes()
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData )
self.assert_( result["test_attribute"].data[0] > 123.0 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attribute"].data[0], 123 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(2) # 2 elements
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result["test_attribute"].data[0],IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).set( "string $PT!" )
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["test_attribute"].data[10], "string 10!" )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["test_attributeIndices"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attributeIndices"].data[10], 10 )
self.assertEqual( result["test_attributeIndices"].data.size(), 5000 )
self.assertEqual( result["test_attributeIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
# testing detail attributes and types
def testDetailAttributes( self ) :
attr = self.testSetupAttributes()
attr.parm("class").set(0) # detail attribute
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
attr.parm("value1").set(123.456)
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData )
self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData )
self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(2) # 2 elements
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).set( "string!" )
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData )
self.assertEqual( result["test_attribute"].data.value, "string!" )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
# testing that float[4] doesn't work!
def testFloat4attr( self ) : # we can't deal with float 4's right now
attr = self.testSetupAttributes()
attr.parm("name").set( "test_attribute" )
attr.parm("size").set(4) # 4 elements per point-attribute
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" not in result.keys() ) # invalid due to being float[4]
self.assert_( result.arePrimitiveVariablesValid() )
# testing conversion of animating geometry
def testAnimatingGeometry( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
mountain = geo.createNode( "mountain" )
mountain.parm("offset1").setExpression( "$FF" )
points = geo.createNode( "scatter" )
facet.setInput( 0, torus )
mountain.setInput( 0, facet )
points.setInput( 0, mountain )
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
hou.setFrame(1)
points_1 = converter.convert()
hou.setFrame(2)
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
points_2 = converter.convert()
self.assertNotEqual( points_1["P"].data, points_2["P"].data )
# testing we can handle an object being deleted
def testObjectWasDeleted( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
g1 = converter.convert()
torus.destroy()
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPointsConverter, torus ) )
# testing we can handle an object being deleted
def testObjectWasDeletedFactory( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torus )
g1 = converter.convert()
torus.destroy()
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torus ) )
# testing converting a Houdini particle primitive with detail and point attribs
def testParticlePrimitive( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
location = popnet.createNode( "location" )
detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm("name").set( "float3detail" )
detailAttr.parm("class").set( 0 ) # detail
detailAttr.parm("type").set( 0 ) # float
detailAttr.parm("size").set( 3 ) # 3 elements
detailAttr.parm("value1").set( 1 )
detailAttr.parm("value2").set( 2 )
detailAttr.parm("value3").set( 3 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm("name").set( "float3point" )
pointAttr.parm("class").set( 2 ) # point
pointAttr.parm("type").set( 0 ) # float
pointAttr.parm("size").set( 3 ) # 3 elements
pointAttr.parm("value1").set( 1 )
pointAttr.parm("value2").set( 2 )
pointAttr.parm("value3").set( 3 )
hou.setFrame( 5 )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( pointAttr )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 21 )
self.assertEqual( points["float3detail"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( type(points["float3detail"].data), IECore.V3fData )
self.assert_( points["float3detail"].data.value.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assertEqual( type(points["float3point"].data), IECore.V3fVectorData )
self.assertEqual( points["float3point"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
for p in points["float3point"].data :
self.assert_( p.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assert_( points.arePrimitiveVariablesValid() )
add = pointAttr.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
del points['generator']
del points['generatorIndices']
del points['born']
del points['source']
self.assertEqual( points2, points )
def testMultipleParticlePrimitives( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
fireworks = popnet.createNode( "fireworks" )
hou.setFrame( 15 )
converter = IECoreHoudini.FromHoudiniPointsConverter( popnet )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 24 )
self.assertEqual( points["accel"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( type(points["accel"].data), IECore.V3fVectorData )
self.assertEqual( points["accel"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertEqual( points["nextid"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( points["nextid"].data, IECore.IntData( 25 ) )
self.assertTrue( points.arePrimitiveVariablesValid() )
add = popnet.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
# showing that prim attribs don't get converted because the interpolation size doesn't match
self.assertEqual( points2, points )
def testName( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
name = particles.createOutputNode( "name" )
name.parm( "name1" ).set( "points" )
box = points.parent().createNode( "box" )
name2 = box.createOutputNode( "name" )
name2.parm( "name1" ).set( "box" )
merge = name.createOutputNode( "merge" )
merge.setInput( 1, name2 )
converter = IECoreHoudini.FromHoudiniPointsConverter( merge )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# both shapes were converted as one PointsPrimitive
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5008 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "points" )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5000 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box", IECore.TypeId.PointsPrimitive )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 8 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
def testAttributeFilter( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
# add point normals
facet = particles.createOutputNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
# add a primitive colour attribute
primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True )
detail.parm("name").set("detailAttr")
detail.parm("class").set(0)
detail.parm("type").set(1)
detail.parm("size").set(3)
detail.parm("value1").set(123)
detail.parm("value2").set(456.789) # can we catch it out with a float?
detail.parm("value3").set(789)
converter = IECoreHoudini.FromHoudiniPointsConverter( detail )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "varmap" ] )
converter.parameters()["attributeFilter"].setTypedValue( "P" )
self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr" ] )
# P must be converted
converter.parameters()["attributeFilter"].setTypedValue( "* ^P" )
self.assertTrue( "P" in converter.convert().keys() )
def testStandardAttributeConversion( self ) :
points = self.createPoints()
color = points.createOutputNode( "color" )
color.parm( "colortype" ).set( 2 )
rest = color.createOutputNode( "rest" )
scale = rest.createOutputNode( "attribcreate" )
scale.parm( "name1" ).set( "pscale" )
scale.parm( "value1v1" ).setExpression( "$PT" )
converter = IECoreHoudini.FromHoudiniPointsConverter( scale )
result = converter.convert()
self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
converter["convertStandardAttributes"].setTypedValue( False )
result = converter.convert()
self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest", "varmap" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
if __name__ == "__main__":
unittest.main()
| DoubleNegativeVisualEffects/cortex | test/IECoreHoudini/FromHoudiniPointsConverter.py | Python | bsd-3-clause | 39,834 | 0.052945 |
from openerp.osv import osv, fields
class attributes(osv.Model):
_name = "product.attribute"
def _get_float_max(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
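# fetch the per-attribute maximum over all numeric attribute lines in a single query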
cr.execute("""
SELECT attribute_id, MAX(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_float_min(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
cr.execute("""
SELECT attribute_id, MIN(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_min_max(self, cr, uid, ids, context=None):
result = {}
for value in self.pool.get('product.attribute.line').browse(cr, uid, ids, context=context):
if value.type == 'float':
result[value.attribute_id.id] = True
return result.keys()
_columns = {
'name': fields.char('Name', translate=True, required=True),
'type': fields.selection([('distinct', 'Textual Value'), ('float', 'Numeric Value')], "Type", required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values'),
'attr_product_ids': fields.one2many('product.attribute.line', 'attribute_id', 'Products'),
'float_max': fields.function(_get_float_max, type='float', string="Max", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'float_min': fields.function(_get_float_min, type='float', string="Min", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'visible': fields.boolean('Display Filter on Website'),
}
_defaults = {
'type': 'distinct',
'visible': True,
}
class attributes_value(osv.Model):
_name = "product.attribute.value"
_columns = {
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'atr_product_ids': fields.one2many('product.attribute.line', 'value_id', 'Products'),
}
class attributes_product(osv.Model):
_name = "product.attribute.line"
_order = 'attribute_id, value_id, value'
_columns = {
'value': fields.float('Numeric Value'),
'value_id': fields.many2one('product.attribute.value', 'Textual Value'),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'product_tmpl_id': fields.many2one('product.template', 'Product', required=True),
'type': fields.related('attribute_id', 'type', type='selection',
selection=[('distinct', 'Distinct'), ('float', 'Float')], string='Type'),
}
def onchange_attribute_id(self, cr, uid, ids, attribute_id, context=None):
attribute = self.pool.get('product.attribute').browse(cr, uid, attribute_id, context=context)
return {'value': {'type': attribute.type, 'value_id': False, 'value': ''}}
class product_template(osv.Model):
_inherit = "product.template"
_columns = {
'attribute_lines': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product attributes'),
}
| ovnicraft/openerp-restaurant | website_sale/models/product_characteristics.py | Python | agpl-3.0 | 3,642 | 0.006315 |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import collections
import sys
import unittest
from build_swift import shell
import six
from six import StringIO
from .. import utils
try:
# Python 3.4
from pathlib import Path
except ImportError:
pass
try:
# Python 3.3
from unittest import mock
from unittest.mock import patch, mock_open, MagicMock
except ImportError:
mock, mock_open = None, None
class MagicMock(object):
def __init__(self, *args, **kwargs):
pass
def _id(obj):
return obj
def patch(*args, **kwargs):
return _id
# -----------------------------------------------------------------------------
# Constants
_OPEN_NAME = '{}.open'.format(six.moves.builtins.__name__)
# -----------------------------------------------------------------------------
# Test Cases
class TestHelpers(unittest.TestCase):
"""Unit tests for the helper functions defined in the build_swift.shell
module.
"""
# -------------------------------------------------------------------------
# _flatmap
def test_flatmap(self):
def duplicate(x):
return [x, x]
result = shell._flatmap(duplicate, [1, 2, 3])
self.assertIsInstance(result, collections.Iterable)
self.assertEqual(list(result), [1, 1, 2, 2, 3, 3])
# -------------------------------------------------------------------------
# _convert_pathlib_path
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('build_swift.shell.Path', None)
def test_convert_pathlib_path_pathlib_not_imported(self):
path = Path('/path/to/file.txt')
self.assertEqual(shell._convert_pathlib_path(path), path)
@utils.requires_module('pathlib')
def test_convert_pathlib_path(self):
path = Path('/path/to/file.txt')
self.assertEqual(shell._convert_pathlib_path(''), '')
self.assertEqual(
shell._convert_pathlib_path(path),
six.text_type(path))
# -------------------------------------------------------------------------
# _get_stream_file
def test_get_stream_file(self):
self.assertEqual(shell._get_stream_file(shell.PIPE), sys.stdout)
self.assertEqual(shell._get_stream_file(shell.STDOUT), sys.stdout)
self.assertEqual(shell._get_stream_file(sys.stdout), sys.stdout)
self.assertEqual(shell._get_stream_file(sys.stderr), sys.stderr)
def test_get_stream_file_raises_devnull(self):
with self.assertRaises(ValueError):
shell._get_stream_file(shell.DEVNULL)
# -------------------------------------------------------------------------
# _echo_command
@utils.requires_module('unittest.mock')
def test_echo_command(self):
test_command = ['sudo', 'rm', '-rf', '/tmp/*']
mock_stream = MagicMock()
shell._echo_command(test_command, mock_stream)
mock_stream.write.assert_called_with(
'>>> {}\n'.format(shell.quote(test_command)))
assert(mock_stream.flush.called)
@utils.requires_module('unittest.mock')
def test_echo_command_custom_prefix(self):
mock_stream = MagicMock()
shell._echo_command('ls', mock_stream, prefix='$ ')
mock_stream.write.assert_called_with('$ ls\n')
assert(mock_stream.flush.called)
# -------------------------------------------------------------------------
# _normalize_args
def test_normalize_args_splits_basestring(self):
command = 'rm -rf /Applications/Xcode.app'
self.assertEqual(
shell._normalize_args(command),
['rm', '-rf', '/Applications/Xcode.app'])
def test_normalize_args_list_str(self):
command = ['rm', '-rf', '/Applications/Xcode.app']
self.assertEqual(shell._normalize_args(command), command)
def test_normalize_args_converts_wrappers(self):
sudo = shell.wraps('sudo')
rm = shell.wraps('rm')
command = [sudo, rm, '-rf', '/Applications/Xcode.app']
self.assertEqual(
shell._normalize_args(command),
['sudo', 'rm', '-rf', '/Applications/Xcode.app'])
def test_normalize_args_converts_complex_wrapper_commands(self):
sudo_rm_rf = shell.wraps('sudo rm -rf')
command = [sudo_rm_rf, '/Applications/Xcode.app']
self.assertEqual(
shell._normalize_args(command),
['sudo', 'rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_accepts_single_wrapper_arg(self):
rm_xcode = shell.wraps(['rm', '-rf', Path('/Applications/Xcode.app')])
self.assertEqual(
shell._normalize_args(rm_xcode),
['rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_converts_pathlib_path(self):
command = ['rm', '-rf', Path('/Applications/Xcode.app')]
self.assertEqual(
shell._normalize_args(command),
['rm', '-rf', '/Applications/Xcode.app'])
@utils.requires_module('pathlib')
def test_normalize_args_converts_pathlib_path_in_wrapper_commands(self):
rm_xcode = shell.wraps(['rm', '-rf', Path('/Applications/Xcode.app')])
self.assertEqual(
shell._normalize_args([rm_xcode]),
['rm', '-rf', '/Applications/Xcode.app'])
class TestDecorators(unittest.TestCase):
"""Unit tests for the decorators defined in the build_swift.shell module
used to backport or add functionality to the subprocess wrappers.
"""
# -------------------------------------------------------------------------
# _backport_devnull
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
@patch('build_swift.shell._PY_VERSION', (3, 2))
def test_backport_devnull_stdout_kwarg(self, mock_open):
mock_file = MagicMock()
mock_open.return_value.__enter__.return_value = mock_file
@shell._backport_devnull
def func(command, **kwargs):
self.assertEqual(kwargs['stdout'], mock_file)
func('', stdout=shell.DEVNULL)
assert(mock_open.return_value.__enter__.called)
assert(mock_open.return_value.__exit__.called)
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
@patch('build_swift.shell._PY_VERSION', (3, 2))
def test_backport_devnull_stderr_kwarg(self, mock_open):
mock_file = MagicMock()
mock_open.return_value.__enter__.return_value = mock_file
@shell._backport_devnull
def func(command, **kwargs):
self.assertEqual(kwargs['stderr'], mock_file)
func('', stderr=shell.DEVNULL)
assert(mock_open.return_value.__enter__.called)
assert(mock_open.return_value.__exit__.called)
@utils.requires_module('unittest.mock')
@patch(_OPEN_NAME, new_callable=mock_open)
def test_backport_devnull_does_not_open(self, mock_open):
@shell._backport_devnull
def func(command):
pass
func('')
mock_open.return_value.__enter__.assert_not_called()
mock_open.return_value.__exit__.assert_not_called()
@utils.requires_module('unittest.mock')
@patch('build_swift.shell._PY_VERSION', (3, 3))
def test_backport_devnull_noop_starting_with_python_3_3(self):
def func():
pass
self.assertEqual(shell._backport_devnull(func), func)
# -------------------------------------------------------------------------
# _normalize_command
def test_normalize_command_basestring_command_noop(self):
test_command = 'touch test.txt'
@shell._normalize_command
def func(command):
self.assertEqual(command, test_command)
func(test_command)
@utils.requires_module('unittest.mock')
@patch('build_swift.shell._normalize_args')
def test_normalize_command(self, mock_normalize_args):
test_command = ['rm', '-rf', '/tmp/*']
@shell._normalize_command
def func(command):
pass
func(test_command)
mock_normalize_args.assert_called_with(test_command)
# -------------------------------------------------------------------------
# _add_echo_kwarg
@utils.requires_module('unittest.mock')
@patch('build_swift.shell._echo_command')
def test_add_echo_kwarg_calls_echo_command(self, mock_echo_command):
test_command = ['rm', '-rf', '/tmp/*']
@shell._add_echo_kwarg
def func(command, **kwargs):
pass
mock_stream = mock.mock_open()
func(test_command, echo=True, stdout=mock_stream)
mock_echo_command.assert_called_with(test_command, mock_stream)
@utils.requires_module('unittest.mock')
@patch('build_swift.shell._echo_command')
def test_add_echo_kwarg_noop_echo_false(self, mock_echo_command):
test_command = ['rm', '-rf', '/tmp/*']
@shell._add_echo_kwarg
def func(command):
pass
func(test_command)
func(test_command, echo=False)
mock_echo_command.assert_not_called()
class TestPublicFunctions(unittest.TestCase):
"""Unit tests for the public functions defined in the build_swift.shell
module.
"""
# -------------------------------------------------------------------------
# quote
def test_quote_string(self):
self.assertEqual(
shell.quote('/Applications/App Store.app'),
"'/Applications/App Store.app'")
def test_quote_iterable(self):
self.assertEqual(
shell.quote(['rm', '-rf', '~/Documents/My Homework']),
"rm -rf '~/Documents/My Homework'")
# -------------------------------------------------------------------------
# rerun_as_root
def test_rerun_as_root(self):
pass
class TestSubprocessWrappers(unittest.TestCase):
"""Unit tests for the subprocess wrappers defined in the build_swift.shell
module.
"""
# -------------------------------------------------------------------------
# Popen
# NOTE: Testing the Popen class is harder than it might appear. We're not
# able to mock out the subprocess.Popen superclass as one might initially
# expect. Rather, the shell.Popen class object already exists and inherits
# from subprocess.Popen, thus mocking it out does not change the behavior.
# Ultimately this class is merely a wrapper that uses already tested
# decorators to add functionality so testing here might not provide any
# benefit.
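# A hypothetical sketch of the issue (illustration only, not an actual test
# case): the shell.Popen class object was created at import time with the
# real subprocess.Popen as its base, so patching the name afterwards does
# not change its behavior:
#
#     with mock.patch('subprocess.Popen') as mock_popen:
#         shell.Popen(['true']).wait()    # still runs the real Popen
#         mock_popen.assert_not_called()  # the patched name is never used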
# -------------------------------------------------------------------------
# call
@utils.requires_module('unittest.mock')
@patch('subprocess.call')
def test_call(self, mock_call):
shell.call('ls')
mock_call.assert_called_with('ls')
# -------------------------------------------------------------------------
# check_call
@utils.requires_module('unittest.mock')
@patch('subprocess.check_call')
def test_check_call(self, mock_check_call):
shell.check_call('ls')
mock_check_call.assert_called_with('ls')
# -------------------------------------------------------------------------
# check_output
@utils.requires_module('unittest.mock')
@patch('subprocess.check_output')
def test_check_output(self, mock_check_output):
# Before Python 3 the subprocess.check_output function returned bytes.
if six.PY3:
mock_check_output.return_value = ''
else:
mock_check_output.return_value = b''
output = shell.check_output('ls')
# We always expect str (utf-8) output
self.assertIsInstance(output, six.text_type)
if six.PY3:
mock_check_output.assert_called_with('ls', encoding='utf-8')
else:
mock_check_output.assert_called_with('ls')
class TestShellUtilities(unittest.TestCase):
"""Unit tests for the shell utility wrappers defined in the
build_swift.shell module.
"""
# -------------------------------------------------------------------------
# copy
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('shutil.copyfile', MagicMock())
@patch('build_swift.shell._convert_pathlib_path')
def test_copy_converts_pathlib_paths(self, mock_convert):
source = Path('/source/path')
dest = Path('/dest/path')
shell.copy(source, dest)
mock_convert.assert_has_calls([
mock.call(source),
mock.call(dest),
])
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('shutil.copyfile')
def test_copy_files(self, mock_copyfile):
source = '/source/path'
dest = '/dest/path'
shell.copy(source, dest)
mock_copyfile.assert_called_with(source, dest)
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=False))
@patch('os.path.isdir', MagicMock(return_value=True))
@patch('shutil.copytree')
def test_copy_directories(self, mock_copytree):
source = '/source/path'
dest = '/dest/path'
shell.copy(source, dest)
mock_copytree.assert_called_with(source, dest)
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('shutil.copyfile', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_copy_echos_fake_cp_file_command(self, mock_stdout):
source = '/source/path'
dest = '/dest/path'
shell.copy(source, dest, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> cp {} {}\n'.format(source, dest))
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=False))
@patch('os.path.isdir', MagicMock(return_value=True))
@patch('shutil.copytree', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_copy_echos_fake_cp_directory_command(self, mock_stdout):
source = '/source/path'
dest = '/dest/path'
shell.copy(source, dest, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> cp -R {} {}\n'.format(source, dest))
# -------------------------------------------------------------------------
# pushd
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('os.getcwd', MagicMock(return_value='/start/path'))
@patch('build_swift.shell._convert_pathlib_path')
def test_pushd_converts_pathlib_path(self, mock_convert):
path = Path('/other/path')
mock_convert.return_value = six.text_type(path)
shell.pushd(path)
mock_convert.assert_called_with(path)
@utils.requires_module('unittest.mock')
@patch('os.getcwd', MagicMock(return_value='/start/path'))
@patch('os.chdir')
def test_pushd_restores_cwd(self, mock_chdir):
with shell.pushd('/other/path'):
mock_chdir.assert_called_with('/other/path')
mock_chdir.assert_called_with('/start/path')
@utils.requires_module('unittest.mock')
@patch('os.getcwd', MagicMock(return_value='/start/path'))
@patch('os.chdir', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_pushd_echos_fake_pushd_popd_commands(self, mock_stdout):
with shell.pushd('/other/path', echo=True):
pass
self.assertEqual(mock_stdout.getvalue().splitlines(), [
'>>> pushd /other/path',
'>>> popd'
])
# -------------------------------------------------------------------------
# makedirs
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('os.path.exists', MagicMock(return_value=False))
@patch('os.makedirs', MagicMock())
@patch('build_swift.shell._convert_pathlib_path')
def test_makedirs_converts_pathlib_path(self, mock_convert):
path = Path('/some/directory')
shell.makedirs(path)
mock_convert.assert_called_with(path)
@utils.requires_module('unittest.mock')
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.makedirs')
def test_makedirs_noop_path_exists(self, mock_makedirs):
shell.makedirs('/some/directory')
mock_makedirs.assert_not_called()
@utils.requires_module('unittest.mock')
@patch('os.path.exists', MagicMock(return_value=False))
@patch('os.makedirs')
def test_makedirs_creates_path(self, mock_makedirs):
path = '/some/directory'
shell.makedirs(path)
mock_makedirs.assert_called_with(path)
@utils.requires_module('unittest.mock')
@patch('os.path.exists', MagicMock(return_value=False))
@patch('os.makedirs', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_makedirs_echos_fake_mkdir_command(self, mock_stdout):
path = '/some/directory'
shell.makedirs(path, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> mkdir -p {}\n'.format(path))
# -------------------------------------------------------------------------
# move
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('shutil.move', MagicMock())
@patch('build_swift.shell._convert_pathlib_path')
def test_move_converts_pathlib_paths(self, mock_convert):
source = Path('/source/path')
dest = Path('/dest/path')
shell.move(source, dest)
mock_convert.assert_has_calls([
mock.call(source),
mock.call(dest),
])
@utils.requires_module('unittest.mock')
@patch('shutil.move')
def test_move(self, mock_move):
source = '/source/path'
dest = '/dest/path'
shell.move(source, dest)
mock_move.assert_called_with(source, dest)
@utils.requires_module('unittest.mock')
@patch('shutil.move', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_move_echos_fake_mv_command(self, mock_stdout):
source = '/source/path'
dest = '/dest/path'
shell.move(source, dest, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> mv {} {}\n'.format(source, dest))
# -------------------------------------------------------------------------
# remove
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('os.remove', MagicMock())
@patch('build_swift.shell._convert_pathlib_path')
def test_remove_converts_pathlib_paths(self, mock_convert):
path = Path('/path/to/remove')
shell.remove(path)
mock_convert.assert_called_with(path)
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('os.remove')
def test_remove_files(self, mock_remove):
path = '/path/to/remove'
shell.remove(path)
mock_remove.assert_called_with(path)
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=False))
@patch('os.path.isdir', MagicMock(return_value=True))
@patch('shutil.rmtree')
def test_remove_directories(self, mock_rmtree):
path = '/path/to/remove'
shell.remove(path)
mock_rmtree.assert_called_with(path, ignore_errors=True)
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=True))
@patch('os.remove', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_remove_echos_fake_rm_file_command(self, mock_stdout):
path = '/path/to/remove'
shell.remove(path, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> rm {}\n'.format(path))
@utils.requires_module('unittest.mock')
@patch('os.path.isfile', MagicMock(return_value=False))
@patch('os.path.isdir', MagicMock(return_value=True))
@patch('shutil.rmtree', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_remove_echos_fake_rm_directory_command(self, mock_stdout):
path = '/path/to/remove'
shell.remove(path, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> rm -rf {}\n'.format(path))
# -------------------------------------------------------------------------
# symlink
@utils.requires_module('unittest.mock')
@utils.requires_module('pathlib')
@patch('os.symlink', MagicMock())
@patch('build_swift.shell._convert_pathlib_path')
def test_symlink_converts_pathlib_paths(self, mock_convert):
source = Path('/source/path')
dest = Path('/dest/path')
shell.symlink(source, dest)
mock_convert.assert_has_calls([
mock.call(source),
mock.call(dest),
])
@utils.requires_module('unittest.mock')
@patch('os.symlink')
def test_symlink(self, mock_symlink):
source = '/source/path'
dest = '/dest/path'
shell.symlink(source, dest)
mock_symlink.assert_called_with(source, dest)
@utils.requires_module('unittest.mock')
@patch('os.symlink', MagicMock())
@patch('sys.stdout', new_callable=StringIO)
def test_symlink_echos_fake_ln_command(self, mock_stdout):
source = '/source/path'
dest = '/dest/path'
shell.symlink(source, dest, echo=True)
self.assertEqual(
mock_stdout.getvalue(),
'>>> ln -s {} {}\n'.format(source, dest))
# -------------------------------------------------------------------------
# which
# NOTE: We currently have a polyfill for the shutil.which function. This
# will be swapped out for the real-deal as soon as we convert to Python 3,
# which should be in the near future. We could also use a backport package
# from pypi, but we rely on the shell module working in scripting that does
# not use a virtual environment at the moment. Until we either adopt
# Python 3 by default _or_ enforce virtual environments for all our scripts
# we are stuck with the polyfill.
def test_which(self):
pass
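# A minimal, illustrative sketch of the kind of shutil.which polyfill the NOTE
# above refers to (an assumption for illustration only; the project's actual
# helper lives in build_swift.shell and may differ):
def _example_which(command):
    import os
    for prefix in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(prefix, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None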
class TestAbstractWrapper(unittest.TestCase):
"""Unit tests for the AbstractWrapper class defined in the build_swift.shell
module.
"""
def test_cannot_be_instantiated(self):
with self.assertRaises(TypeError):
shell.AbstractWrapper()
class TestCommandWrapper(unittest.TestCase):
"""Unit tests for the CommandWrapper class defined in the build_swift.shell
module.
"""
# -------------------------------------------------------------------------
# wraps
def test_wraps(self):
sudo = shell.wraps('sudo')
self.assertIsInstance(sudo, shell.CommandWrapper)
self.assertEqual(sudo.command, ['sudo'])
# -------------------------------------------------------------------------
@utils.requires_module('pathlib')
def test_command_normalized(self):
wrapper = shell.CommandWrapper(['ls', '-al', Path('/tmp')])
self.assertEqual(wrapper._command, ['ls', '-al', '/tmp'])
def test_command_property(self):
git = shell.CommandWrapper('git')
self.assertEqual(git.command, ['git'])
@utils.requires_module('unittest.mock')
def test_callable(self):
ls = shell.CommandWrapper('ls')
with patch.object(ls, 'check_call') as mock_check_call:
ls('-al')
mock_check_call.assert_called_with('-al')
# -------------------------------------------------------------------------
# Subprocess Wrappers
@utils.requires_module('unittest.mock')
@patch('build_swift.shell.Popen')
def test_Popen(self, mock_popen):
ls = shell.CommandWrapper('ls')
ls.Popen('-al')
mock_popen.assert_called_with(['ls', '-al'])
@utils.requires_module('unittest.mock')
@patch('build_swift.shell.call')
def test_call(self, mock_call):
ls = shell.CommandWrapper('ls')
ls.call('-al')
mock_call.assert_called_with(['ls', '-al'])
@utils.requires_module('unittest.mock')
@patch('build_swift.shell.check_call')
def test_check_call(self, mock_check_call):
ls = shell.CommandWrapper('ls')
ls.check_call('-al')
mock_check_call.assert_called_with(['ls', '-al'])
@utils.requires_module('unittest.mock')
@patch('build_swift.shell.check_output')
def test_check_output(self, mock_check_output):
ls = shell.CommandWrapper('ls')
ls.check_output('-al')
mock_check_output.assert_called_with(['ls', '-al'])
class TestExecutableWrapper(unittest.TestCase):
"""Unit tests for the ExecutableWrapper class defined in the
build_swift.shell module.
"""
def test_raises_without_executable(self):
class MyWrapper(shell.ExecutableWrapper):
pass
with self.assertRaises(AttributeError):
MyWrapper()
def test_raises_complex_executable(self):
class MyWrapper(shell.ExecutableWrapper):
EXECUTABLE = ['xcrun', 'swiftc']
with self.assertRaises(AttributeError):
MyWrapper()
@utils.requires_module('pathlib')
def test_converts_pathlib_path(self):
class MyWrapper(shell.ExecutableWrapper):
EXECUTABLE = Path('/usr/local/bin/xbs')
wrapper = MyWrapper()
self.assertEqual(wrapper.EXECUTABLE, '/usr/local/bin/xbs')
def test_command_property(self):
class MyWrapper(shell.ExecutableWrapper):
EXECUTABLE = 'test'
wrapper = MyWrapper()
self.assertEqual(wrapper.command, ['test'])
@utils.requires_module('unittest.mock')
@patch('build_swift.shell.which')
def test_path_property(self, mock_which):
class MyWrapper(shell.ExecutableWrapper):
EXECUTABLE = 'test'
wrapper = MyWrapper()
wrapper.path
mock_which.assert_called_with('test')
| nathawes/swift | utils/build_swift/tests/build_swift/test_shell.py | Python | apache-2.0 | 26,680 | 0.000037 |
from test_support import *
prove_all(no_fail=True, steps=400)
| ptroja/spark2014 | testsuite/gnatprove/tests/intro/test.py | Python | gpl-3.0 | 65 | 0.030769 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
class SoftmaxTest(tf.test.TestCase):
def _npSoftmax(self, features, log=False):
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
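    # Subtract the per-row maximum before exponentiating: softmax is invariant
    # to this shift and it keeps the exponents non-positive, avoiding overflow.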
e = np.exp(features -
np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
softmax = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
if log:
return np.log(softmax)
else:
return softmax
def _testSoftmax(self, np_features, log=False, use_gpu=False):
np_softmax = self._npSoftmax(np_features, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = tf.nn.log_softmax(np_features)
else:
tf_softmax = tf.nn.softmax(np_features)
out = tf_softmax.eval()
self.assertAllClose(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in each
# batch element.
self.assertAllClose(np.ones(out.shape[0]),
np.sum(out, axis=1))
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5, atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5, atol=1.e-5)
def testShapeMismatch(self):
with self.assertRaises(ValueError):
tf.nn.softmax([0., 1., 2., 3.])
with self.assertRaises(ValueError):
tf.nn.log_softmax([0., 1., 2., 3.])
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32
else:
type = np.float64
max = np.finfo(type).max
features = np.array(
[[1., 1., 1., 1.],
[max, 1., 2., 3.]]).astype(type)
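    # The second row contains the dtype's max value, so a naive exp() would
    # overflow; a numerically stable log_softmax should return values close to
    # [0, -max, -max, -max] for that row.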
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = tf.nn.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5, atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testEmpty(self):
with self.test_session():
x = tf.constant([[]], shape=[0, 3])
self.assertEqual(0, tf.size(x).eval())
expected_y = np.array([]).reshape(0, 3)
np.testing.assert_array_equal(expected_y, tf.nn.softmax(x).eval())
if __name__ == "__main__":
tf.test.main()
| awni/tensorflow | tensorflow/python/kernel_tests/softmax_op_test.py | Python | apache-2.0 | 4,506 | 0.006214 |
from pippi import dsp
from hcj import snds, fx
hat = snds.load('mc303/hat2.wav')
def make(length, i):
#h = dsp.bln(length / 4, dsp.rand(6000, 8000), dsp.rand(9000, 16000))
#h = dsp.amp(h, dsp.rand(0.5, 1))
#h = dsp.env(h, 'phasor')
h = hat
h = dsp.fill(h, length, silence=True)
if dsp.rand() > 0.5:
h = fx.penv(h)
return h
| hecanjog/pattern.studies | orc/hat.py | Python | cc0-1.0 | 364 | 0.013736 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
def python3xBytesToStr():
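    # Decode the raw bytes with the 'gbk' codec into a unicode string, then
    # write the same text back out to a file encoded as UTF-8.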
f = open("BytesToStr.txt", "wb")
zhcnBytes = b'\x63\x68\x61\x72\x43\x6f\x64\x65\x41\x74'
zhcnUnicodeStr = zhcnBytes.decode('gbk')
print(zhcnUnicodeStr)
f.write(zhcnUnicodeStr.encode('utf-8'))
f.close()
if __name__ == "__main__":
python3xBytesToStr() | weahwww/python | bytes_to_str.py | Python | gpl-2.0 | 351 | 0.008547 |
# test_patternchain.py -- Tests for Pattern Chains
"""Tests for Pattern Chain objects"""
from morph import (
pattern,
patternchain
)
from morph.pattern import (
LiteralPattern,
NumericCounterPattern,
)
from morph.patternchain import (
generateFullReplaceChain,
PatternChain,
FilePatternChain,
)
from morph.errors import (
PatternModeError
)
from morph.tests import TestCase
class PatternChainTestCase(TestCase):
def testGenFullReplace(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
litpat = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
numcountpat = NumericCounterPattern(1, 3)
self.assertEqual(PatternChain([litpat, numcountpat]), chain)
def testStr(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
self.assertEqual("\tLiteral (replace, abc_)\n"
"\tNumericCounter (append, 1, 1, 3)\n",
str(chain))
def testAppendApply(self):
appendPat0 = LiteralPattern('abc')
appendPat1 = LiteralPattern('123')
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['fileabc123'],
chain.apply_to_strings(['file']))
self.assertEqual(['file0abc123', 'file1abc123', 'file2abc123'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
def testReplaceApply(self):
appendPat0 = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
appendPat1 = NumericCounterPattern(1, 2)
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['abc_01'],
chain.apply_to_strings(['file']))
chain.reset()
self.assertEqual(['abc_01', 'abc_02', 'abc_03'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
class FilePatternChainTestCase(TestCase):
def testApply(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual(
['file1.5', 'file2', 'file3', 'file4', 'file5'],
chain.apply_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testMap(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
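        # map_to_strings yields (source, destination) pairs: None on the left
        # marks an inserted file, None on the right marks a deleted one.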
self.assertEqual(
[(None, 'file1.5'),
('file2', 'file2'),
('file3', 'file3'),
('file4', 'file4'),
(None, 'file5'),
('file0', None),
('file1', None)],
chain.map_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testStr(self):
chain = FilePatternChain()
chain.insert_file('file5', 4)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual("\t('insert', 'file5', 4)\n"
"\t('insert', 'file1.5', 2)\n"
"\t('delete', 0)\n"
"\t('move', 0, 2)\n"
"\t('delete', 2)\n",
str(chain))
| milki/morph | morph/tests/test_patternchain.py | Python | bsd-2-clause | 3,569 | 0.001121 |
"""
Copyright (C) 2017 kanishka-linux kanishka.linux@gmail.com
This file is part of hlspy.
hlspy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
hlspy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with hlspy. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from PyQt5 import QtCore, QtNetwork
from PyQt5.QtNetwork import QNetworkAccessManager
from PyQt5.QtCore import pyqtSignal
class NetManager(QNetworkAccessManager):
netS = pyqtSignal(str)
def __init__(
self, parent=None, url=None, print_request=None, block_request=None,
default_block=None, select_request=None, get_link=None):
super(NetManager, self).__init__()
self.url = url
self.print_request = print_request
if block_request:
self.block_request = block_request.split(',')
else:
self.block_request = []
self.default_block = default_block
self.select_request = select_request
self.get_link = get_link
    def createRequest(self, op, request, device=None):
try:
urlLnk = (request.url().toString())
except UnicodeEncodeError:
urlLnk = (request.url().path())
if self.get_link:
if self.get_link in urlLnk:
self.netS.emit(urlLnk)
lower_case = urlLnk.lower()
lst = []
if self.default_block:
lst = [
"doubleclick.net", 'adnxs', r"||youtube-nocookie.com/gen_204?",
r"youtube.com###watch-branded-actions", "imagemapurl",
"b.scorecardresearch.com", "rightstuff.com", "scarywater.net",
"popup.js", "banner.htm", "_tribalfusion",
"||n4403ad.doubleclick.net^$third-party",
".googlesyndication.com", "graphics.js", "fonts.googleapis.com/css",
"s0.2mdn.net", "server.cpmstar.com", "||banzai/banner.$subdocument",
"@@||anime-source.com^$document", "/pagead2.", "frugal.gif",
"jriver_banner.png", "show_ads.js",
'##a[href^="http://billing.frugalusenet.com/"]',
"http://jriver.com/video.html", "||animenewsnetwork.com^*.aframe?",
"||contextweb.com^$third-party", ".gutter", ".iab", 'revcontent',
".ads", "ads.", ".bebi", "mgid"
]
if self.block_request:
lst = lst + self.block_request
block = False
for l in lst:
if lower_case.find(l) != -1:
block = True
break
if (self.select_request and self.select_request in urlLnk) or self.print_request:
print(urlLnk)
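        # Blocked URLs are answered with a GET request for an empty QUrl, which
        # effectively drops the resource instead of fetching it.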
if block:
return QNetworkAccessManager.createRequest(self, QNetworkAccessManager.GetOperation, QtNetwork.QNetworkRequest(QtCore.QUrl()))
else:
return QNetworkAccessManager.createRequest(self, op, request, device)
| kanishka-linux/kawaii-player | kawaii_player/hls_webkit/netmon_webkit.py | Python | gpl-3.0 | 3,417 | 0.009072 |
import io
from django.core.management import call_command
from django.test.testcases import TestCase
from 臺灣言語服務.models import 訓練過渡格式
class KIPsu試驗(TestCase):
@classmethod
def setUpClass(cls):
with io.StringIO() as tshogoo:
call_command('教育部臺灣閩南語字詞頻調查工作', stderr=tshogoo)
print(tshogoo.getvalue()[:1000])
return super().setUpClass()
def test數量(self):
self.assertGreater(訓練過渡格式.資料數量(), 50000)
| sih4sing5hong5/hue7jip8 | 試驗/test教育部臺灣閩南語字詞頻調查工作.py | Python | mit | 539 | 0 |
import time
import logging
from cnavbot import settings
logger = logging.getLogger()
class Driver(object):
def __init__(self, *args, **kwargs):
self.driver = kwargs.pop('driver', settings.BOT_DRIVER)
class Motors(Driver):
def __init__(self, speed=None, *args, **kwargs):
super(Motors, self).__init__(*args, **kwargs)
        self.speed = speed if speed is not None else settings.BOT_DEFAULT_SPEED
self.validate_speed(self.speed)
logger.info('Speed set to {}'.format(self.speed))
@staticmethod
def validate_speed(speed):
if not (1 <= speed <= 100):
raise Exception(
"Invalid speed value '{}', must be between 1 an 100".format(
speed
)
)
def forward(self, steps=None):
"""Sets both motors to go forward"""
logger.debug('Going forward')
self.driver.forward(self.speed)
if steps:
self.keep_running(steps)
def reverse(self, steps=None):
"""Sets both motors to reverse"""
logger.debug('Reversing')
self.driver.reverse(self.speed)
if steps:
self.keep_running(steps)
def left(self, steps=None):
"""Sets motors to turn opposite directions for left spin"""
logger.debug('Spinning left')
self.driver.spinLeft(self.speed)
if steps:
self.keep_running(steps)
def right(self, steps=None):
"""Sets motors to turn opposite directions for right spin"""
logger.debug('Spinning right')
self.driver.spinRight(self.speed)
if steps:
self.keep_running(steps)
def keep_running(self, steps):
logger.debug('Keeping running for {} steps'.format(steps))
time.sleep(0.1 * steps)
self.stop()
def stop(self):
logger.debug('Stopping')
self.driver.stop()
class Lights(Driver):
led_numbers = (1, 2, 3, 4)
def validate_led_number(self, led_number):
if not(led_number in self.led_numbers):
raise Exception(
"Invalid led number '{}', must be in {}".format(
led_number,
self.led_numbers
)
)
def set_led_rbg(self, led_number, red, blue, green):
"""Spins right specified number of steps"""
self.validate_led_number(led_number)
        logger.debug('Setting LED {} to red: {}, green: {}, blue: {}'.format(
led_number, red, green, blue
))
self.driver.setLED(led_number, red, green, blue)
def set_all_leds_rbg(self, red, blue, green):
"""Spins right specified number of steps"""
for led_number in self.led_numbers:
self.driver.setLED(led_number, red, green, blue)
class ObstacleSensor(Driver):
def __init__(self, *args, **kwargs):
super(ObstacleSensor, self).__init__(*args, **kwargs)
self.max_distance = kwargs.pop(
'max_distance', settings.BOT_DEFAULT_MAX_DISTANCE
)
logger.info('Max distance set to {}'.format(self.max_distance))
def left(self):
"""Returns true if there is an obstacle to the left"""
obstacle = self.driver.irLeft()
logger.debug('Left obstacle: {}'.format(obstacle))
return obstacle
def right(self):
"""Returns true if there is an obstacle to the right"""
obstacle = self.driver.irRight()
logger.debug('Right obstacle: {}'.format(obstacle))
return obstacle
def front(self):
"""Returns true if there is an obstacle in front"""
obstacle = self.driver.irCentre()
logger.debug('Front obstacle: {}'.format(obstacle))
return obstacle
def front_close(self):
front_close = self.distance() <= self.max_distance
logger.debug('Front obstacle close: {}'.format(front_close))
return front_close
def distance(self):
"""
Returns the distance in cm to the nearest reflecting object
in front of the bot
"""
distance = self.driver.getDistance()
logger.debug('Distance: {}'.format(distance))
return distance
def any(self):
"""Returns true if there is any obstacle"""
any_obstacle = self.driver.irAll()
logger.debug('Any obstacle: {}'.format(any_obstacle))
return any_obstacle
class LineSensor(Driver):
def left(self):
"""Returns True if left line sensor detected dark line"""
left = not self.driver.irLeftLine()
logger.debug('Left line detected: {}'.format(left))
return left
def right(self):
"""Returns True if right line sensor detected dark line"""
right = not self.driver.irRightLine()
logger.debug('Right line detected: {}'.format(right))
return right
| konradko/cnav-bot | cnavbot/services/pi2go.py | Python | mit | 4,857 | 0 |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import stat
import warnings
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers import remotefs as remotefs_drv
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('glusterfs_shares_config',
default='/etc/cinder/glusterfs_shares',
help='File with the list of available gluster shares'),
cfg.StrOpt('glusterfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for gluster shares.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
driver.ExtendVD):
"""Gluster based cinder driver.
Creates file on Gluster share for using it as block device on hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains
or the snapshot .info file simultaneously.
"""
driver_volume_type = 'glusterfs'
driver_prefix = 'glusterfs'
volume_backend_name = 'GlusterFS'
VERSION = '1.3.0'
def __init__(self, execute=processutils.execute, *args, **kwargs):
self._remotefsclient = None
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
self.base = getattr(self.configuration,
'glusterfs_mount_point_base',
CONF.glusterfs_mount_point_base)
self._remotefsclient = remotefs_brick.RemoteFsClient(
'glusterfs', root_helper, execute,
glusterfs_mount_point_base=self.base)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.GlusterfsException(
_('mount.glusterfs is not installed'))
else:
raise
self._refresh_mounts()
def _unmount_shares(self):
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
command = ['umount', mount_path]
try:
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
LOG.info(_LI("%s is already umounted"), share)
else:
LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
def _refresh_mounts(self):
try:
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
exc.stderr)
else:
raise
self._ensure_shares_mounted()
def _qemu_img_info(self, path, volume_name):
return super(GlusterfsDriver, self)._qemu_img_info_base(
path, volume_name, self.configuration.glusterfs_mount_point_base)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def _local_volume_dir(self, volume):
hashed = self._get_hash_str(volume['provider_location'])
path = '%s/%s' % (self.configuration.glusterfs_mount_point_base,
hashed)
return path
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(GlusterfsDriver, self)._update_volume_stats()
data = self._stats
global_capacity = data['total_capacity_gb']
global_free = data['free_capacity_gb']
thin_enabled = self.configuration.nas_volume_prov_type == 'thin'
if thin_enabled:
provisioned_capacity = self._get_provisioned_capacity()
else:
provisioned_capacity = round(global_capacity - global_free, 2)
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
data['thin_provisioning_support'] = thin_enabled
data['thick_provisioning_support'] = not thin_enabled
self._stats = data
@remotefs_drv.locked_volume_id_operation
def create_volume(self, volume):
"""Creates a volume."""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path,
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.nas_volume_prov_type == 'thin':
out_format = 'qcow2'
else:
out_format = 'raw'
image_utils.convert_image(path_to_snap_img,
path_to_new_vol,
out_format)
self._set_rw_permissions_for_all(path_to_new_vol)
@remotefs_drv.locked_volume_id_operation
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
self._execute('rm', '-f', mounted_path, run_as_root=True)
# If an exception (e.g. timeout) occurred during delete_snapshot, the
# base volume may linger around, so just delete it if it exists
base_volume_path = self._local_path_volume(volume)
fileutils.delete_if_exists(base_volume_path)
info_path = self._local_path_volume_info(volume)
fileutils.delete_if_exists(info_path)
def _get_matching_backing_file(self, backing_chain, snapshot_file):
return next(f for f in backing_chain
if f.get('backing-filename', '') == snapshot_file)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def validate_connector(self, connector):
pass
@remotefs_drv.locked_volume_id_operation
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
# Find active qcow2 file
active_file = self.get_active_image_from_info(volume)
path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base,
self._get_hash_str(volume['provider_location']),
active_file)
data = {'export': volume['provider_location'],
'name': active_file}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
# Test file for raw vs. qcow2 format
info = self._qemu_img_info(path, volume['name'])
data['format'] = info.file_format
if data['format'] not in ['raw', 'qcow2']:
msg = _('%s must be a valid raw or qcow2 image.') % path
raise exception.InvalidVolume(msg)
return {
'driver_volume_type': 'glusterfs',
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
@remotefs_drv.locked_volume_id_operation
def extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
info = self._qemu_img_info(volume_path, volume['name'])
backing_fmt = info.file_format
if backing_fmt not in ['raw', 'qcow2']:
msg = _('Unrecognized backing format: %s')
raise exception.InvalidVolume(msg % backing_fmt)
# qemu-img can resize both raw and qcow2 files
image_utils.resize_image(volume_path, size_gb)
def _do_create_volume(self, volume):
"""Create a volume on given glusterfs_share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("creating new volume at %s", volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if self.configuration.nas_volume_prov_type == 'thin':
self._create_qcow2_file(volume_path, volume_size)
else:
try:
self._fallocate(volume_path, volume_size)
except processutils.ProcessExecutionError as exc:
if 'Operation not supported' in exc.stderr:
warnings.warn('Fallocate not supported by current version '
'of glusterfs. So falling back to dd.')
self._create_regular_file(volume_path, volume_size)
else:
fileutils.delete_if_exists(volume_path)
raise
self._set_rw_permissions_for_all(volume_path)
def _ensure_shares_mounted(self):
"""Mount all configured GlusterFS shares."""
self._mounted_shares = []
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
LOG.debug('Available shares: %s', self._mounted_shares)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
:param glusterfs_share: string
"""
mount_path = self._get_mount_point_for_share(glusterfs_share)
self._mount_glusterfs(glusterfs_share)
# Ensure we can write to this share
group_id = os.getegid()
current_group_id = utils.get_file_gid(mount_path)
current_mode = utils.get_file_mode(mount_path)
if group_id != current_group_id:
cmd = ['chgrp', group_id, mount_path]
self._execute(*cmd, run_as_root=True)
if not (current_mode & stat.S_IWGRP):
cmd = ['chmod', 'g+w', mount_path]
self._execute(*cmd, run_as_root=True)
self._ensure_share_writable(mount_path)
def _find_share(self, volume_size_for):
"""Choose GlusterFS share among available ones for given volume size.
Current implementation looks for greatest capacity.
:param volume_size_for: int size in GB
"""
if not self._mounted_shares:
raise exception.GlusterfsNoSharesMounted()
greatest_size = 0
greatest_share = None
for glusterfs_share in self._mounted_shares:
capacity = self._get_available_capacity(glusterfs_share)[0]
if capacity > greatest_size:
greatest_share = glusterfs_share
greatest_size = capacity
if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share
def _mount_glusterfs(self, glusterfs_share):
"""Mount GlusterFS share to mount path."""
mnt_flags = []
if self.shares.get(glusterfs_share) is not None:
mnt_flags = self.shares[glusterfs_share].split()
try:
self._remotefsclient.mount(glusterfs_share, mnt_flags)
except processutils.ProcessExecutionError:
LOG.error(_LE("Mount failure for %(share)s."),
{'share': glusterfs_share})
raise
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume.
Allow a backup to occur only if no snapshots exist.
Check both Cinder and the file on-disk. The latter is only
a safety mechanism to prevent further damage if the snapshot
information is already inconsistent.
"""
snapshots = self.db.snapshot_get_all_for_volume(context,
backup['volume_id'])
snap_error_msg = _('Backup is not supported for GlusterFS '
'volumes with snapshots.')
if len(snapshots) > 0:
raise exception.InvalidVolume(snap_error_msg)
volume = self.db.volume_get(context, backup['volume_id'])
volume_dir = self._local_volume_dir(volume)
active_file_path = os.path.join(
volume_dir,
self.get_active_image_from_info(volume))
info = self._qemu_img_info(active_file_path, volume['name'])
if info.backing_file is not None:
LOG.error(_LE('No snapshots found in database, but %(path)s has '
'backing file %(backing_file)s!'),
{'path': active_file_path,
'backing_file': info.backing_file})
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':
msg = _('Backup is only supported for raw-formatted '
'GlusterFS volumes.')
raise exception.InvalidVolume(msg)
return super(GlusterfsDriver, self).backup_volume(
context, backup, backup_service)
| Paul-Ezell/cinder-1 | cinder/volume/drivers/glusterfs.py | Python | apache-2.0 | 17,430 | 0 |
# -*- coding: utf-8 -*-
import os, tempfile
PROJECT_ROOT = os.path.dirname(__file__)
TMP_DIR = tempfile.mkdtemp()
DOWNLOAD_DIR = os.path.join(TMP_DIR, "download")
LOG_DIR = os.path.join(TMP_DIR, "logs")
try:
os.makedirs(DOWNLOAD_DIR)
except OSError:
pass
try:
os.makedirs(LOG_DIR)
except OSError:
pass
TEST_DIR = os.path.join(PROJECT_ROOT, "test_data")
try:
os.makedirs(TEST_DIR)
except OSError:
pass
STORAGE_PATH = os.path.join(TMP_DIR, "download", "configuration.json")
START_PORT = 6841
END_PORT = 6851
SUPPORTED_MOVIE_EXTENSIONS = (
"mp4", "avi", "mkv", "ogv", "ogg", "mpeg", "flv", "wmv")
SUPPORTED_SUBTITLE_EXTENSIONS = ("txt", "srt")
DOWNLOAD_PIECE_SIZE = 1024 * 1024 * 5
# for 1MB piece length
PRIORITY_INTERVAL = 2 | rafallo/p2c | settings.py | Python | mit | 760 | 0.003947 |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('albums/', include('albums.urls')),
path('photos/', include('photos.urls')),
path('admin/', admin.site.urls),
path('login/', auth_views.login, name='login'),
path('logout/', auth_views.logout, {'template_name': 'account/logout.html'}, name='logout'),
path('api/login', views.api_login, name="api_login"),
path('', views.IndexView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| asommer70/photolandia | photolandia/urls.py | Python | mit | 678 | 0.00295 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.files.storage
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0011_auto_20151120_0137'),
]
operations = [
migrations.AlterField(
model_name='product',
name='media',
field=models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/Users/jmitch/Desktop/dm/static_cdn/protected'), null=True, upload_to=products.models.download_media_location, blank=True),
),
]
| codingforentrepreneurs/digital-marketplace | src/products/migrations/0012_auto_20151120_0434.py | Python | mit | 630 | 0.001587 |
import boto
import mock
import moto
import tempfile
import unittest
from click.testing import CliRunner
from rubberjackcli.click import rubberjack
class CLITests(unittest.TestCase):
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy(self, cav, ue):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promote(self, ue, de):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('sys.exit')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promoting_same_version(self, ue, de, se):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
self.assertTrue(se.called)
@moto.mock_s3_deprecated
def test_sigv4(self):
CliRunner().invoke(rubberjack, ['--sigv4-host', 'foo', 'deploy'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--environment', 'wibble', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_without_updating_the_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--no-update-environment', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 0, "update_environment was called, but it shouldn't")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_bucket(self, ue, cav):
bucket_name = 'rbbrjck-test'
s3 = boto.connect_s3()
s3.create_bucket(bucket_name)
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['--bucket', bucket_name, 'deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
_, cav_kwargs = cav.call_args
self.assertEqual(bucket_name, cav_kwargs['s3_bucket'])
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
def test_promote_to_custom_environment(self, de, ue):
CUSTOM_TO_ENVIRONMENT = "loremipsum"
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': CUSTOM_TO_ENVIRONMENT,
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
result = CliRunner().invoke(rubberjack, ['promote', '--to-environment', CUSTOM_TO_ENVIRONMENT], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
| laterpay/rubberjack-cli | tests/test_cli.py | Python | mit | 6,380 | 0.003135 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, random_string, cint
from frappe.utils.user import get_user_fullname
from frappe.utils.print_format import download_pdf
from frappe.desk.form.load import get_attachments
from frappe.core.doctype.communication.email import make
from erpnext.accounts.party import get_party_account_currency, get_party_details
from erpnext.stock.doctype.material_request.material_request import set_missing_values
from erpnext.controllers.buying_controller import BuyingController
STANDARD_USERS = ("Guest", "Administrator")
class RequestforQuotation(BuyingController):
def validate(self):
self.validate_duplicate_supplier()
self.validate_common()
self.update_email_id()
def validate_duplicate_supplier(self):
supplier_list = [d.supplier for d in self.suppliers]
if len(supplier_list) != len(set(supplier_list)):
frappe.throw(_("Same supplier has been entered multiple times"))
def validate_common(self):
pc = frappe.get_doc('Purchase Common')
pc.validate_for_items(self)
def update_email_id(self):
for rfq_supplier in self.suppliers:
if not rfq_supplier.email_id:
rfq_supplier.email_id = frappe.db.get_value("Contact", rfq_supplier.contact, "email_id")
def validate_email_id(self, args):
if not args.email_id:
frappe.throw(_("Row {0}: For supplier {0} email id is required to send email").format(args.idx, args.supplier))
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def send_to_supplier(self):
for rfq_supplier in self.suppliers:
if rfq_supplier.send_email:
self.validate_email_id(rfq_supplier)
# make new user if required
update_password_link = self.update_supplier_contact(rfq_supplier, self.get_link())
self.update_supplier_part_no(rfq_supplier)
self.supplier_rfq_mail(rfq_supplier, update_password_link, self.get_link())
def get_link(self):
# RFQ link for supplier portal
return get_url("/rfq/" + self.name)
def update_supplier_part_no(self, args):
self.vendor = args.supplier
for item in self.items:
item.supplier_part_no = frappe.db.get_value('Item Supplier',
{'parent': item.item_code, 'supplier': args.supplier}, 'supplier_part_no')
def update_supplier_contact(self, rfq_supplier, link):
'''Create a new user for the supplier if not set in contact'''
update_password_link = ''
if frappe.db.exists("User", rfq_supplier.email_id):
user = frappe.get_doc("User", rfq_supplier.email_id)
else:
user, update_password_link = self.create_user(rfq_supplier, link)
self.update_contact_of_supplier(rfq_supplier, user)
return update_password_link
def update_contact_of_supplier(self, rfq_supplier, user):
if rfq_supplier.contact:
contact = frappe.get_doc("Contact", rfq_supplier.contact)
else:
contact = frappe.new_doc("Contact")
contact.first_name = rfq_supplier.supplier_name or rfq_supplier.supplier
contact.supplier = rfq_supplier.supplier
if not contact.email_id and not contact.user:
contact.email_id = user.name
contact.user = user.name
contact.save(ignore_permissions=True)
def create_user(self, rfq_supplier, link):
user = frappe.get_doc({
'doctype': 'User',
'send_welcome_email': 0,
'email': rfq_supplier.email_id,
'first_name': rfq_supplier.supplier_name or rfq_supplier.supplier,
'user_type': 'Website User',
'redirect_url': link
})
user.save(ignore_permissions=True)
update_password_link = user.reset_password()
return user, update_password_link
def supplier_rfq_mail(self, data, update_password_link, rfq_link):
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'update_password_link': update_password_link,
'message': frappe.render_template(self.message_for_supplier, data.as_dict()),
'rfq_link': rfq_link,
'user_fullname': full_name
}
subject = _("Request for Quotation")
template = "templates/emails/request_for_quotation.html"
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
message = frappe.get_template(template).render(args)
attachments = self.get_attachments()
self.send_email(data, sender, subject, message, attachments)
def send_email(self, data, sender, subject, message, attachments):
make(subject = subject, content=message,recipients=data.email_id,
sender=sender,attachments = attachments, send_email=True,
doctype=self.doctype, name=self.name)["name"]
frappe.msgprint(_("Email sent to supplier {0}").format(data.supplier))
def get_attachments(self):
attachments = [d.name for d in get_attachments(self.doctype, self.name)]
attachments.append(frappe.attach_print(self.doctype, self.name, doc=self))
return attachments
@frappe.whitelist()
def send_supplier_emails(rfq_name):
check_portal_enabled('Request for Quotation')
rfq = frappe.get_doc("Request for Quotation", rfq_name)
if rfq.docstatus==1:
rfq.send_to_supplier()
def check_portal_enabled(reference_doctype):
if not frappe.db.get_value('Portal Menu Item',
{'reference_doctype': reference_doctype}, 'enabled'):
frappe.throw(_("Request for Quotation is disabled to access from portal, for more check portal settings."))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["show_sidebar"] = True
return list_context
# This method is used to make supplier quotation from material request form.
@frappe.whitelist()
def make_supplier_quotation(source_name, for_supplier, target_doc=None):
def postprocess(source, target_doc):
target_doc.supplier = for_supplier
args = get_party_details(for_supplier, party_type="Supplier", ignore_permissions=True)
target_doc.currency = args.currency or get_party_account_currency('Supplier', for_supplier, source.company)
target_doc.buying_price_list = args.buying_price_list or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Request for Quotation", source_name, {
"Request for Quotation": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Quotation Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "request_for_quotation_item",
"parent": "request_for_quotation"
},
}
}, target_doc, postprocess)
return doclist
# This method is used to make supplier quotation from supplier's portal.
@frappe.whitelist()
def create_supplier_quotation(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
try:
sq_doc = frappe.get_doc({
"doctype": "Supplier Quotation",
"supplier": doc.get('supplier'),
"terms": doc.get("terms"),
"company": doc.get("company"),
"currency": doc.get('currency') or get_party_account_currency('Supplier', doc.get('supplier'), doc.get('company')),
"buying_price_list": doc.get('buying_price_list') or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
})
add_items(sq_doc, doc.get('supplier'), doc.get('items'))
sq_doc.flags.ignore_permissions = True
sq_doc.run_method("set_missing_values")
sq_doc.save()
frappe.msgprint(_("Supplier Quotation {0} created").format(sq_doc.name))
return sq_doc.name
except Exception:
return None
def add_items(sq_doc, supplier, items):
for data in items:
if data.get("qty") > 0:
if isinstance(data, dict):
data = frappe._dict(data)
create_rfq_items(sq_doc, supplier, data)
def create_rfq_items(sq_doc, supplier, data):
sq_doc.append('items', {
"item_code": data.item_code,
"item_name": data.item_name,
"description": data.description,
"qty": data.qty,
"rate": data.rate,
"supplier_part_no": frappe.db.get_value("Item Supplier", {'parent': data.item_code, 'supplier': supplier}, "supplier_part_no"),
"warehouse": data.warehouse or '',
"request_for_quotation_item": data.name,
"request_for_quotation": data.parent
})
@frappe.whitelist()
def get_pdf(doctype, name, supplier_idx):
doc = get_rfq_doc(doctype, name, supplier_idx)
if doc:
download_pdf(doctype, name, doc=doc)
def get_rfq_doc(doctype, name, supplier_idx):
if cint(supplier_idx):
doc = frappe.get_doc(doctype, name)
args = doc.get('suppliers')[cint(supplier_idx) - 1]
doc.update_supplier_part_no(args)
return doc
| bhupennewalkar1337/erpnext | erpnext/buying/doctype/request_for_quotation/request_for_quotation.py | Python | gpl-3.0 | 8,686 | 0.023947 |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Read and store transient event triggers
"""
import warnings
from urllib.parse import urlparse
from astropy.table import vstack as vstack_tables
from lal.utils import CacheEntry
from ligo.lw import lsctables
from glue.lal import Cache
from gwpy.io.cache import cache_segments
from gwpy.table import (EventTable, filters as table_filters)
from gwpy.table.filter import parse_column_filters
from gwpy.table.io.pycbc import filter_empty_files as filter_pycbc_live_files
from gwpy.segments import (DataQualityFlag, SegmentList)
import gwtrigfind
from . import globalv
from .utils import (re_cchar, vprint, safe_eval)
from .config import GWSummConfigParser
from .channels import get_channel
# build list of default keyword arguments for reading ETGs
ETG_READ_KW = {
'cwb': {
'format': 'root',
'treename': 'waveburst',
},
'daily_ihope': {
'format': 'ligolw',
'tablename': 'sngl_inspiral',
'use_numpy_dtypes': True,
},
'daily_ahope': {
'format': 'ligolw',
'tablename': 'sngl_inspiral',
'use_numpy_dtypes': True,
},
'dmt_omega': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'dmt_wsearch': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'kleinewelle': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'kw': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'omega': {
'format': 'ascii',
},
'omegadq': {
'format': 'ascii',
},
'omicron': {
'format': 'ligolw',
'tablename': 'sngl_burst',
'use_numpy_dtypes': True,
},
'pycbc_live': {
'format': 'hdf5.pycbc_live',
'timecolumn': 'end_time',
'extended_metadata': False,
},
}
# set default for all LIGO_LW
for name in lsctables.TableByName:
ETG_READ_KW[name] = {
'format': 'ligolw',
'tablename': name,
'use_numpy_dtypes': True,
}
def get_etg_table(etg):
"""Find which table should be used for the given etg
Parameters
----------
etg : `str`
name of Event Trigger Generator for which to query
Returns
-------
table : `type`, subclass of `~ligo.lw.table.Table`
LIGO_LW table registered to the given ETG
Raises
------
KeyError
if the ETG is not registered
"""
try:
kw_ = get_etg_read_kwargs(etg)
form = kw_['format']
tablename = kw_['tablename']
except KeyError as e:
e.args = ('No LIGO_LW table registered to etg %r' % etg,)
raise
if form == 'ligolw':
return lsctables.TableByName[tablename]
raise KeyError("No LIGO_LW table registered to etg %r" % etg)
def get_triggers(channel, etg, segments, config=GWSummConfigParser(),
cache=None, columns=None, format=None, query=True,
nproc=1, ligolwtable=None, filter=None,
timecolumn=None, verbose=False, return_=True):
"""Read a table of transient event triggers for a given channel.
"""
key = '%s,%s' % (str(channel), etg.lower())
# convert input segments to a segmentlist (for convenience)
if isinstance(segments, DataQualityFlag):
segments = segments.active
segments = SegmentList(segments)
# get read keywords for this etg
read_kw = get_etg_read_kwargs(etg, config=config, exclude=[])
read_kw['verbose'] = verbose
# extract columns (using function keyword if given)
if columns:
read_kw['columns'] = columns
columns = read_kw.pop('columns', None)
# override with user options
if format:
read_kw['format'] = format
elif not read_kw.get('format', None):
read_kw['format'] = etg.lower()
if timecolumn:
read_kw['timecolumn'] = timecolumn
elif columns is not None and 'time' in columns:
read_kw['timecolumn'] = 'time'
# replace columns keyword
if read_kw['format'].startswith('ascii.'):
read_kw['include_names'] = columns
else:
read_kw['columns'] = columns
# parse filters
if filter:
read_kw['selection'].extend(parse_column_filters(filter))
# read segments from global memory
try:
havesegs = globalv.TRIGGERS[key].meta['segments']
except KeyError:
new = segments
else:
new = segments - havesegs
# read new triggers
if query and abs(new) != 0:
ntrigs = 0
vprint(" Grabbing %s triggers for %s" % (etg, str(channel)))
# -- setup ----------
# get find/read kwargs
trigfindkwargs = dict(
(k[9:], read_kw.pop(k)) for k in list(read_kw) if
k.startswith('trigfind-'))
trigfindetg = trigfindkwargs.pop('etg', etg)
# customise kwargs for this ETG
if etg.lower().replace('-', '_') in ['pycbc_live']:
read_kw['ifo'] = get_channel(channel).ifo
if etg.lower() in ['kw', 'kleinewelle']:
read_kw['selection'].append('channel == "%s"' % channel)
if etg.lower() in ['cwb'] and 'root' not in read_kw['format']:
read_kw.pop('treename')
# filter on segments
if 'timecolumn' in read_kw:
read_kw['selection'].append((
read_kw['timecolumn'], table_filters.in_segmentlist, new))
# -- read -----------
# if single file
if cache is not None and len(cache) == 1:
trigs = read_cache(cache, new, etg, nproc=nproc, **read_kw)
if trigs is not None:
add_triggers(trigs, key)
ntrigs += len(trigs)
# otherwise, loop over segments
else:
for segment in new:
# find trigger files
if cache is None and not etg.lower() == 'hacr':
try:
segcache = gwtrigfind.find_trigger_files(
str(channel), trigfindetg, segment[0], segment[1],
**trigfindkwargs)
except ValueError as e:
warnings.warn("Caught %s: %s"
% (type(e).__name__, str(e)))
continue
elif cache is not None:
segcache = cache
# read table
if etg.lower() == 'hacr':
from gwpy.table.io.hacr import get_hacr_triggers
trigs = get_hacr_triggers(channel, segment[0], segment[1],
columns=columns)
trigs.meta['segments'] = SegmentList([segment])
else:
trigs = read_cache(segcache, SegmentList([segment]), etg,
nproc=nproc, **read_kw)
# record triggers
if trigs is not None:
# add metadata
add_triggers(trigs, key)
ntrigs += len(trigs)
vprint(".")
vprint(" | %d events read\n" % ntrigs)
# if asked to read triggers, but didn't actually read any,
# create an empty table so that subsequent calls don't raise KeyErrors
if query and key not in globalv.TRIGGERS:
# find LIGO_LW table for this ETG
try:
if columns is not None: # don't need to map to LIGO_LW
raise KeyError
TableClass = get_etg_table(etg)
except KeyError: # build simple table
tab = EventTable(names=columns)
else: # map to LIGO_LW table with full column listing
tab = EventTable(lsctables.New(TableClass))
tab.meta['segments'] = SegmentList()
for metakey in ('timecolumn', 'tablename',):
if metakey in read_kw:
tab.meta[metakey] = read_kw[metakey]
add_triggers(tab, key)
# work out time function
if return_:
return keep_in_segments(globalv.TRIGGERS[key], segments, etg)
def add_triggers(table, key, segments=None):
"""Add a `EventTable` to the global memory cache
"""
if segments is not None:
table.meta['segments'] = segments
try:
old = globalv.TRIGGERS[key]
except KeyError:
new = globalv.TRIGGERS[key] = table
new.meta.setdefault('segments', SegmentList())
else:
new = globalv.TRIGGERS[key] = vstack_tables((old, table))
new.meta = old.meta
new.meta['segments'] |= table.meta.get('segments', SegmentList())
new.meta['segments'].coalesce()
return new
def keep_in_segments(table, segmentlist, etg=None):
"""Return a view of the table containing only those rows in the segmentlist
"""
times = get_times(table, etg)
keep = table_filters.in_segmentlist(times, segmentlist)
out = table[keep]
out.meta['segments'] = segmentlist & table.meta['segments']
return out
def get_times(table, etg):
"""Get the time data for this table
See Also
--------
get_time_column
"""
try:
return table[get_time_column(table, etg)]
except ValueError:
# match well-defined LIGO_LW table types
tablename = table.meta.get('tablename') or ''
for ttype, tcol in (
('_burst', 'peak'),
('_inspiral', 'end'),
('_ringdown', 'start'),
):
if tablename.endswith(ttype):
sec = '{0}_time'.format(tcol)
nanosec = '{0}_time_ns'.format(tcol)
return table[sec] + table[nanosec] * 1e-9
raise
def get_time_column(table, etg):
"""Get the time column name for this table
"""
# allow user to have selected the time column
if table.meta.get('timecolumn'):
return table.meta['timecolumn']
# otherwise search for it
try:
return table._get_time_column()
except ValueError:
# shortcut pycbc
        if etg == 'pycbc_live':
            return 'end_time'
# match well-defined LIGO_LW table types
tablename = table.meta.get('tablename') or ''
for ttype, tcol in (
('_burst', 'peak'),
('_inspiral', 'end'),
('_ringdown', 'start'),
):
if tablename.endswith(ttype) and tcol in table.columns:
table.meta['timecolumn'] = tcol
return tcol
raise
def get_etg_read_kwargs(etg, config=None, exclude=['columns']):
"""Read keyword arguments to pass to the trigger reader for a given etg
"""
# use global defaults
kwargs = {
'format': None,
'selection': [],
}
# get kwargs from config
if config is not None and config.has_section(etg):
config_kw = dict(config.nditems(etg))
elif config is not None and config.has_section(etg.lower()):
config_kw = dict(config.nditems(etg.lower()))
else:
config_kw = {}
usrfmt = config_kw.get('format', None)
# get ETG defaults : only if user didn't specify the read format,
# or the format they did specify matches our default
etgl = re_cchar.sub('_', etg).lower()
if etgl in ETG_READ_KW and usrfmt in (None, ETG_READ_KW[etgl]['format']):
kwargs.update(ETG_READ_KW.get(etgl, {}))
# now add the config kwargs (so they override our defaults)
kwargs.update(config_kw)
# format kwargs
for key in list(kwargs):
# remove unwanted keys
if key in exclude:
kwargs.pop(key)
continue
# eval string to object (safely)
kwargs[key] = safe_eval(kwargs[key])
if key.endswith('columns') and isinstance(kwargs[key], str):
kwargs[key] = kwargs[key].replace(' ', '').split(',')
if 'selection' in kwargs:
kwargs['selection'] = parse_column_filters(kwargs['selection'])
return kwargs
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
"""Read a table of events from a cache
This function is mainly meant for use from the `get_triggers` method
Parameters
----------
cache : :class:`glue.lal.Cache`
the formatted list of files to read
segments : `~gwpy.segments.SegmentList`
the list of segments to read
etg : `str`
the name of the trigger generator that created the files
nproc : `int`, optional
the number of parallel processes to use when reading
**kwargs
other keyword arguments are passed to the `EventTable.read` or
`{tableclass}.read` methods
Returns
-------
table : `~gwpy.table.EventTable`, `None`
a table of events, or `None` if the cache has no overlap with
the segments
"""
if isinstance(cache, Cache):
cache = cache.sieve(segmentlist=segments)
cache = cache.checkfilesexist()[0]
cache.sort(key=lambda x: x.segment[0])
cache = cache.pfnlist() # some readers only like filenames
else:
cache = [urlparse(url).path for url in cache]
if etg == 'pycbc_live': # remove empty HDF5 files
cache = filter_pycbc_live_files(cache, ifo=kwargs['ifo'])
if len(cache) == 0:
return
# read triggers
table = EventTable.read(cache, **kwargs)
# store read keywords in the meta table
if timecolumn:
table.meta['timecolumn'] = timecolumn
# get back from cache entry
if isinstance(cache, CacheEntry):
cache = Cache([cache])
# append new events to existing table
try:
csegs = cache_segments(cache) & segments
except (AttributeError, TypeError, ValueError):
csegs = SegmentList()
table.meta['segments'] = csegs
if timecolumn: # already filtered on-the-fly
return table
# filter now
return keep_in_segments(table, segments, etg)
| gwpy/gwsumm | gwsumm/triggers.py | Python | gpl-3.0 | 14,721 | 0 |
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
| JWDebelius/scikit-bio | skbio/parse/sequences/tests/__init__.py | Python | bsd-3-clause | 378 | 0 |
#! /usr/bin/env python
import sys
import math
import collections
import dendropy
def count_set_size_difference(v1, v2):
c1 = collections.Counter(v1)
c2 = collections.Counter(v2)
counted_matched = c1 & c2
matched = sorted(list(counted_matched.elements()))
counted_diffs = (c1 - c2) + (c2 - c1)
unmatched = sorted(list(counted_diffs.elements()))
diff = len(unmatched)
return diff
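# Example (illustrative only): the Counter arithmetic above computes a multiset
# symmetric difference, so duplicates are counted individually:
#
#     >>> count_set_size_difference([1, 2, 2, 3], [2, 3, 3, 4])
#     4
#
# the unmatched elements being 1 and 2 from the first list and 3 and 4 from
# the second.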
def count_subtree_leaf_set_sizes(tree):
internal_nodes = tree.internal_nodes()
subtree_leaf_set_sizes = {}
for nd in internal_nodes:
leaf_count = 0
for leaf in nd.leaf_iter():
leaf_count += 1
if nd.taxon is not None:
label = nd.taxon.label
else:
label = nd.label
subtree_leaf_set_sizes[label] = leaf_count
return sorted(subtree_leaf_set_sizes.values())
def main():
trees = dendropy.TreeList.get_from_path(sys.argv[1], sys.argv[2])
for tree in trees:
tree.subtree_leaf_set_sizes = count_subtree_leaf_set_sizes(tree)
for tidx1 in range(len(trees)):
for tidx2 in range(len(trees)):
sys.stdout.write("{}\t{}\t{}\n".format(
tidx1,
tidx2,
count_set_size_difference(
trees[tidx1].subtree_leaf_set_sizes,
trees[tidx2].subtree_leaf_set_sizes)))
if __name__ == "__main__":
main()
| jeetsukumaran/pstrudel | test/scripts/calc-tree-unlabeled-symmetric-difference.py | Python | gpl-2.0 | 1,420 | 0.003521 |
# PyUI
# Copyright (C) 2001-2002 Sean C. Riley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Pyui Themes.
Themes are a method of customizing the drawing of widgets in a pyui GUI.
This module keeps NO state for the drawable objects - it just draws them on demand
from the widgets themselves, which hold all the state.
The constants for the theme objects live in pyui/locals.py
Themes have a default font that is used for any widgets that don't specify a font.
"""
import locals
import pyui
from pyui.desktop import getRenderer, getDesktop
class ThemeBase:
"""Theme objects (like buttons) are drawn through methods for each type of widget.
    Each method returns the rect that the drawn object fit into.
The rect passed in should always be in window coordinates.
"""
def __init__(self, renderer, fontFace="times", fontSize=12, fontFlags=0):
self.renderer = renderer
self.desktop = getDesktop()
self.fgColor = renderer.packColor(255,255,255)
self.bgColor = renderer.packColor(0,0,0)
# setup default font
self.defaultFontFace = fontFace
self.defaultFontSize = fontSize
self.defaultFontFlags = fontFlags
self.defaultFont = renderer.createFont(fontFace, fontSize, fontFlags)
#(self.defaultTextWidth, self.defaultTextHeight) = renderer.getTextSize("M", self.defaultFont)
self.defaultTextHeight = fontSize*2
# setup widget offsets
self.frameBorderLeft = 4
self.frameBorderRight = 4
self.frameBorderTop = int(fontSize *2.2)
self.frameBorderBottom = 4
self.tabsHeight = int(fontSize * 1.3)
self.scrollerSize = 10
### Information about the theme..
def getFrameBorderTop(self):
return self.frameBorderTop
def getFrameBorderLeft(self):
return self.frameBorderLeft
def getFrameBorderRight(self):
return self.frameBorderRight
def getFrameBorderBottom(self):
return self.frameBorderBottom
def getTabsHeight(self):
return self.tabsHeight
def getScrollerSize(self):
return self.scrollerSize
def getFgColor(self):
return self.fgColor
def getBgColor(self):
return self.bgColor
### mouse cursor functions
def setArrowCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_POINTER)
def setResizeCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_RESIZE)
def setButtonCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_HAND)
def setWaitCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_WAIT)
def setMovingCursor(self):
apply(self.renderer.setMouseCursor, pyui.locals.CURSOR_DRAG)
#####################################################################
###
### Utility drawing functions not specific to any widgets
###
#####################################################################
def draw3DRect(self, rect, color, reverse, thick=1):
"""Draw a 3D rectangle
"""
(r,g,b,a) = self.renderer.unpackColor(color)
a=255
colorLo = self.renderer.packColor(0,0,0,255)
colorHi = self.renderer.packColor(255- r/4, 255-g/4, 255-b/4, a)
if reverse:
(colorLo, colorHi) = (colorHi, colorLo)
(x,y,w,h) = rect
if w < 2 or h < 2:
return
self.renderer.drawRect( colorHi, (x, y, w-thick, thick) )
self.renderer.drawRect( colorHi, (x, y+thick, thick, h-thick) )
if w > 2 and h > 2:
self.renderer.drawRect( color, (x+thick, y+thick, w-thick*2, h-thick*2) )
self.renderer.drawRect( colorLo, (x+thick, y+h-thick, w-thick, thick) )
self.renderer.drawRect( colorLo, (x+w-thick, y+thick, thick, h-thick*2) )
def drawOutlineRect(self, rect, color, thick=1):
(x,y,w,h) = rect
self.renderer.drawRect(color, (x,y,w,thick))
self.renderer.drawRect(color, (x,y+thick,thick,h-2*thick))
self.renderer.drawRect(color, (x+w-thick,y+thick,thick,h-2*thick))
self.renderer.drawRect(color, (x,y+h-thick,w,thick))
def drawGradient(self, rect, vertical, c1, c2):
if vertical:
self.renderer.drawGradient(rect, c1, c2, c1, c2)
else:
self.renderer.drawGradient(rect, c1, c1, c2, c2)
#####################################################################
###
### Widgets specific drawing functions.
### These are the methods for actual themes to implement.
###
#####################################################################
def drawButton(self, rect, title, hasFocus, status, enabled, font=None, shadow=0,fgColor=0, bgColor=0,roColor=0):
return rect
def drawImageButton(self, rect, filename, title, hasFocus, status):
return rect
def drawLabel(self, rect, title, color = None, font = None, shadow=0, align=0 ):
return rect
def drawCheckBox(self, rect, text, checkState):
return rect
def drawSliderBar(self, rect, range, position, BARWIDTH=8):
return rect
def drawEdit(self, rect, text, hasFocus, caretPos, selectPos):
return rect
def drawSplitter(self, rect):
return rect
def drawToolTip(self, text, rect):
return rect
# scroll bar methods
def drawScrollBack(self, rect):
return rect
def drawScrollButtonUp(self, rect):
return rect
def drawScrollButtonDown(self, rect):
return rect
def drawScrollBar(self, rect):
return rect
# tabbed panel methods
def drawTabItem(self, rect, title, active):
return rect
def drawTabHeader(self, rect):
return rect
# menu methods
def drawMenuBar(self, rect):
return rect
def drawMenuBarItem(self, rect, title, selected):
return rect
def drawMenu(self, rect):
return rect
def drawMenuItem(self, rect, title, selected, icon = None):
return rect
# list box methods
def drawListBox(self, rect):
return rect
def drawListBoxItem(self, rect, title, selected, color):
return rect
# frame methods
def drawFrame(self, rect, title):
return rect
| burito/PyUI | pyui/themeBase.py | Python | lgpl-2.1 | 7,045 | 0.013343 |
import session
import namespace
import platform
from .exceptions import ObjectNotFound, APIError, ConnectionError, IsilonLibraryError
class API(object):
'''Implements higher level functionality to interface with an Isilon cluster'''
def __init__(self, *args, **kwargs):
self.session = session.Session(*args, **kwargs)
self.namespace = namespace.Namespace(self.session)
self.platform = platform.Platform(self.session)
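# Minimal usage sketch (hypothetical host and credentials; the constructor
# arguments are simply forwarded to isilon.session.Session, so the exact
# signature depends on that class):
#
#     api = API('cluster.example.com', 'root', 'secret')
#     api.namespace   # file-system (namespace) access, per the attribute above
#     api.platform    # cluster management (platform) API, per the attribute above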
| sile16/python-isilon-api | isilon/__init__.py | Python | mit | 500 | 0.018 |
# Copyright 2013 Regionbibliotek Halland
#
# This file is part of Digitala sagor.
#
# Digitala sagor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Digitala sagor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Digitala sagor. If not, see <http://www.gnu.org/licenses/>.
import Tkinter as tki
import ttk
from language import lang
import language as lng
import dialog
import thread
class _ProgressDialog(tki.Frame):
"""A Frame intended to be placed in a Toplevel object.
This class will load the images of a datamodel, visualize the progress and
then close the Toplevel window. It will not be possible to abort the
load operation by closing the Toplevel.
"""
def __init__(self, toplevel, datamodel):
"""Initiate and make a pending call to _load()
Arguments
toplevel -- Toplevel object in which this Frame will be placed
datamodel -- datamodel in which to load the images
"""
tki.Frame.__init__(self, toplevel)
self._parent = toplevel
self._datamodel = datamodel
self._progresstext = tki.StringVar()
self.grid()
self.rowconfigure(1, weight = 1)
l = tki.Label(self, textvariable = self._progresstext)
l.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tki.W)
pbar = ttk.Progressbar(self, orient = tki.HORIZONTAL, length = 400, mode = 'determinate', maximum = 1.0)
self._pbar = pbar
pbar.grid(row = 1, column = 0, columnspan = 2, padx = 5, pady = 5, sticky = tki.W + tki.E)
toplevel.after_idle(self._load)
toplevel.protocol("WM_DELETE_WINDOW", self._dontquit)
def _updateBar(self, progress):
"""Callback function to Datamodel.loadImageData
Argument
progress -- current load progress; 0.0 <= progress <= 1.0
"""
self._pbar.config(value = progress)
self._progresstext.set(lang[lng.txtLoadImageProgress].format(progress))
def _load(self):
"""Start the load operation by launching a thread"""
thread.start_new(self._thrLoadImages, (self, None))
def _thrLoadImages(self, dummy, d2):
"""Perform the load operation and make a pending call to _quit
Arguments
dummy -- unused
d2 -- unused
"""
self._datamodel.loadImageData(self._updateBar)
self._pbar.config(value = 1)
self._parent.after_idle(self._quit)
def _dontquit(self):
"""Event handler for WM_DELETE_WINDOW that does nothing"""
pass
def _quit(self):
"""Close the Toplevel object"""
self._parent.destroy()
class DataModelLoader:
"""Display a progress bar while loading a datamodel"""
def __init__(self, root, datamodel):
"""Initiate
Arguments
root -- Tk object
datamodel -- datamodel in which to load the images
"""
self._root = root
self._datamodel = datamodel
def load(self):
"""Load the images in the datamodel while displaying a progress dialog"""
if(self._datamodel.isEmpty()):
return
dlg = dialog.getDlg(self._root, lang[lng.dlgLoadImages])
pd = _ProgressDialog(dlg, self._datamodel)
dialog.showDlg(dlg)
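# Usage sketch (assumed calling code, not part of the original module): the
# loader is typically driven from the application's startup sequence, e.g.
#
#     root = tki.Tk()
#     loader = DataModelLoader(root, datamodel)
#     loader.load()   # shows the progress dialog while images are loaded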
| regionbibliotekhalland/digitalasagor | progressdlg.py | Python | gpl-3.0 | 4,102 | 0.015115 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
# import frappe
import unittest
class TestLogSettings(unittest.TestCase):
pass
| mhbu50/frappe | frappe/core/doctype/log_settings/test_log_settings.py | Python | mit | 182 | 0.010989 |
#!/usr/bin/env python
#encoding: utf-8
import numpy as np
from pylab import *
dt=0.01 # msec
tau=40.0 # msec
tmax=1000 # msec
V_spk=-20
V_thres=-50.0
V_reset=-70.0
E_leak=V_reset
R_m=10.0 # MΩ
tt=np.arange(0, tmax, dt) #0:dt:tmax
Nt=len(tt) #length(tt)
V=np.zeros((Nt,))
V2=np.zeros((Nt,))
S=np.zeros((Nt,))
S2=np.zeros((Nt,))
#I0=np.zeros((Nt,))
# Plot characteristics
Vlim=E_leak-10,V_spk+10
# tlim=0,1000 #msec
tlim=200,800 #msec
nrows=4
LW=2
colors=[]
cmap = cm.hsv
# Solved Dayan & Abbott (2001) Ch.5 Eq. 5.12 for I_e using r_isi = 7 Hz:
theta_freq = 7
def I_e(f):
tau_isi = 1000.0/f
return -(1/R_m) * (E_leak + (V_reset - V_thres*exp(tau_isi/tau))/(exp(tau_isi/tau) - 1))
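# Derivation note (added for clarity): I_e(f) follows from the LIF solution
# V(t) = V_inf + (V_reset - V_inf)*exp(-t/tau) with V_inf = E_leak + R_m*I_e.
# Requiring V(tau_isi) = V_thres for tau_isi = 1000/f and solving for I_e gives
# the expression above; with the constants defined here and f = 7 Hz this
# evaluates to roughly 2.06 nA, matching the value noted in the comment below.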
I_const=I_e(theta_freq) # 2.0578580 # 2.1 # constant current
print 'I_const = %.4f nA'%I_const
Dt=25 # msec: STDP half window
n=int(Dt/dt)
hPlus=1.0*I_const # max height
hMinus=2.0*hPlus
dI=np.r_[np.linspace(0,hPlus,n),0,np.linspace(-hMinus,0,n)]
## first simulation
V[0]=V_reset
for i in xrange(1, Nt): #=2:Nt
V[i]=((tau-dt)/tau)*V[i-1]+(dt/tau)*(E_leak+R_m*I_const)
if V[i]>=V_thres:
V[i]=V_reset
S[i]=1
k=np.nonzero(S>0)[0]
Nspk=len(k)
ioff()
figure(1, figsize=(10.0, 14.7625))
clf()
subplot(nrows,1,1)
plot(tt,V,'k-',lw=LW)
# hold(True)
# plot([[k*dt,k*dt]*Nspk,[V_reset,V_spk],'b-',lw=LW)
title('control')
xlim(tlim)
ylim(Vlim)
## second simulation
T=(k[2]-k[1])*dt # period
Nsuper=5 # number of super-cycle for testing different timing
timeList=np.linspace((-T/2), T/2,Nsuper)
phaseList=np.zeros((Nsuper,))
plot_spikes =True
for i_super in xrange(Nsuper): #=1:Nsuper
k0=k[2]+int(timeList[i_super]/dt)
I=np.zeros((Nt,))
I[k0-n:k0+n+1]=dI
V2[0]=V_reset
S2=np.zeros((Nt,))
for i in xrange(1, Nt): #=2:Nt
V2[i]=((tau-dt)/tau)*V2[i-1]+(dt/tau)*(E_leak+R_m*(I_const+I[i]))
if V2[i]>=V_thres:
V2[i]=V_reset
S2[i]=1
k2=np.nonzero(S2>0)[0]
Nspk2=len(k2)
subplot(nrows,1,2)
color = cmap(i_super/float(Nsuper))
colors.append(color)
plot(tt,V2,'-',zorder=-Nsuper+i_super,lw=LW,c=color)
if plot_spikes:
hold(True)
plot([k2*dt]*2, [V_reset,V_spk], '-',zorder=-Nsuper+i_super,c=color,lw=LW)
title('Adding input')
subplot(nrows,1,3)
plot(tt,I,c=color,lw=LW,zorder=-Nsuper+i_super)
draw()
# Wrap new phase around half-cycles
newphase=(k2[4]-k[4])*2*dt/T
if newphase<-1:
newphase+=2
elif newphase >=1:
newphase-=2
phaseList[i_super]=newphase
subplot(nrows,1,2)
plot([k*dt]*2, [V_reset,V_spk], 'k-',lw=LW,zorder=-50)
xlim(tlim)
ylim(Vlim)
ylabel('V')
subplot(nrows,1,3)
xlim(tlim)
ylim(-25, 25)
ylabel(r'$I_e$ (pA)')
# plot(timeList/T, phaseList,'o-')
# xlabel('Pulse timing (Period)')
# ylabel('Phase reset (degree)')
# grid(True)
subplot(nrows,2,7)
X=2*timeList/T
Y=phaseList+0.0
# Unwrap phases
jump_ix = np.argmax(np.abs(np.diff(Y)))+1
X = r_[X[jump_ix:]-2, X[:jump_ix]]
Y = r_[Y[jump_ix:], Y[:jump_ix]]
colors = colors[jump_ix:] + colors[:jump_ix]
midX = X[int(Nsuper/2)+1]
for i_super in xrange(Nsuper):
plot(X[i_super],Y[i_super],'o',mec='k',
mfc=colors[i_super],ms=6,mew=1,zorder=i_super)
print X[i_super],Y[i_super]
# p=np.polyfit(x,y,1)
# yp=np.polyval(p,x)
# plot(x,yp,'r-',zorder=0)
# plot(X,Y,'b-',lw=1,zorder=0)
ylabel(r'Phase Reset ($\pi$)')
ax = gca()
ax.set_xticks(linspace(-1, 1, 5))
ax.set_yticks(linspace(-1, 1, 5))
axis('equal')
axis('image')
xlim(midX-1.2, midX+1.2)
ylim(-1.2, 1.2)
ion()
show()
| jdmonaco/vmo-feedback-model | src/spike_reset.py | Python | mit | 3,535 | 0.040181 |
# 142. Linked List Cycle II
# Given a linked list, return the node where the cycle begins.
# If there is no cycle, return null.
#
# Note: Do not modify the linked list.
#
# Follow up:
# Can you solve it without using extra space?
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None or head.next is None:
return None
slow = head.next
fast = head.next.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
break
if fast is None or fast.next is None:
return None
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return slow
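# Example usage (illustrative): build the list 1 -> 2 -> 3 -> 4 with the tail
# pointing back to the node holding 2, and check that detectCycle finds it:
#
#     nodes = [ListNode(i) for i in (1, 2, 3, 4)]
#     for a, b in zip(nodes, nodes[1:]):
#         a.next = b
#     nodes[-1].next = nodes[1]   # close the cycle at node 2
#     assert Solution().detectCycle(nodes[0]) is nodes[1]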
| gengwg/leetcode | 142_linked_list_cycle_ii.py | Python | apache-2.0 | 976 | 0 |
#!/usr/bin/python
# Copyright 2016 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub_info
version_added: "2.3"
short_description: List Topics/Subscriptions and Messages from Google PubSub.
description:
- List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
topic/subscription management.
See U(https://cloud.google.com/pubsub/docs) for an overview.
- This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- list state enables user to list topics or subscriptions in the project. See examples for details.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: False
view:
description:
- Choices are 'topics' or 'subscriptions'
required: True
state:
description:
- list is the only valid option.
required: False
'''
EXAMPLES = '''
## List all Topics in a project
- gcpubsub_info:
view: topics
state: list
## List all Subscriptions in a project
- gcpubsub_info:
view: subscriptions
state: list
## List all Subscriptions for a Topic in a project
- gcpubsub_info:
view: subscriptions
topic: my-topic
state: list
'''
RETURN = '''
subscriptions:
description: List of subscriptions.
returned: When view is set to subscriptions.
type: list
sample: ["mysubscription", "mysubscription2"]
topic:
description: Name of topic. Used to filter subscriptions.
returned: Always
type: str
sample: "mytopic"
topics:
description: List of topics.
returned: When view is set to topics.
type: list
sample: ["mytopic", "mytopic2"]
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
def list_func(data, member='name'):
"""Used for state=list."""
return [getattr(x, member) for x in data]
def main():
module = AnsibleModule(argument_spec=dict(
view=dict(choices=['topics', 'subscriptions'], default='topics'),
topic=dict(required=False),
state=dict(choices=['list'], default='list'),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if module._name == 'gcpubsub_facts':
module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'", version='2.13')
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
CLIENT_MINIMUM_VERSION = '0.22.0'
if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
mod_params = {}
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['view'] = module.params.get('view')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = 'ansible-pubsub-0.1'
json_output = {}
if mod_params['view'] == 'topics':
json_output['topics'] = list_func(pubsub_client.list_topics())
elif mod_params['view'] == 'subscriptions':
if mod_params['topic']:
t = pubsub_client.topic(mod_params['topic'])
json_output['subscriptions'] = list_func(t.list_subscriptions())
else:
json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
json_output['changed'] = False
json_output.update(mod_params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| anryko/ansible | lib/ansible/modules/cloud/google/gcpubsub_info.py | Python | gpl-3.0 | 4,597 | 0.00261 |
"""
This module defines the BasePackageManager Class which provides an
implementation of the packaging system API providing methods to fetch,
upload and remove packages. Site specific extensions to any of these methods
should inherit this class.
"""
import fcntl
import logging
import os
import re
import shutil
from autotest.client import os_dep
from autotest.client.shared import error, utils
from autotest.client.shared.settings import settings, SettingsError
# the name of the checksum file that stores the packages' checksums
CHECKSUM_FILE = "packages.checksum"
def has_pbzip2():
'''
Check if parallel bzip2 is available on this system.
:return: True if pbzip2 is available, False otherwise
'''
try:
os_dep.command('pbzip2')
except ValueError:
return False
return True
# is parallel bzip2 available for use?
_PBZIP2_AVAILABLE = has_pbzip2()
def parse_ssh_path(repo):
'''
Parse an SSH url
:type repo: string
:param repo: a repo uri like ssh://xx@xx/path/to/
:return: tuple with (host, remote_path)
'''
match = re.search('^ssh://([^/]+)(/.*)$', repo)
if match:
return match.groups()
else:
raise error.PackageUploadError(
"Incorrect SSH path in settings: %s" % repo)
def repo_run_command(repo, cmd, ignore_status=False, cd=True):
"""
Run a command relative to the repo path
This is basically a utils.run() wrapper that sets itself in a repo
directory if it is appropriate, so parameters such as cmd and ignore_status
are passed along to it.
:type repo: string
:param repo: a repository url
:type cmd: string
:param cmd: the command to be executed. This is passed along to utils.run()
:type ignore_status: boolean
:param ignore_status: do not raise an exception, no matter what the exit
code of the command is.
:type cd: boolean
    :param cd: whether to change the working directory to the repo directory
before running the specified command.
:return: a CmdResult object or None
:raise CmdError: the exit code of the command execution was not 0
"""
os_dep.command("ssh")
repo = repo.strip()
run_cmd = None
cd_str = ''
if repo.startswith('ssh://'):
username = None
hostline, remote_path = parse_ssh_path(repo)
if cd:
cd_str = 'cd %s && ' % remote_path
if '@' in hostline:
username, host = hostline.split('@')
run_cmd = 'ssh %s@%s "%s%s"' % (username, host, cd_str, cmd)
else:
run_cmd = 'ssh %s "%s%s"' % (hostline, cd_str, cmd)
else:
if cd:
cd_str = 'cd %s && ' % repo
run_cmd = "%s%s" % (cd_str, cmd)
if run_cmd:
return utils.run(run_cmd, ignore_status=ignore_status)
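# Illustrative example: for repo='ssh://user@pkghost/packages' and cmd='ls',
# repo_run_command() builds and runs
#
#     ssh user@pkghost "cd /packages && ls"
#
# whereas a plain local repo path just runs "cd <repo> && ls" via utils.run().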
def create_directory(repo):
'''
Create a directory over at the remote repository
:type repo: string
:param repo: the repo URL containing the remote directory path
:return: a CmdResult object or None
'''
remote_path = repo
if repo.startswith('ssh://'):
_, remote_path = parse_ssh_path(repo)
repo_run_command(repo, 'mkdir -p %s' % remote_path, cd=False)
def check_diskspace(repo, min_free=None):
'''
Check if the remote directory over at the pkg repo has available diskspace
If the amount of free space is not supplied, it is taken from the global
    configuration file, section [PACKAGES], key 'minimum_free_space'. The units
    used are SI, that is, 1 GB = 10**9 bytes.
:type repo: string
:param repo: a remote package repo URL
:type min_free: int
    :param min_free: minimum amount of free space, in GB (10**9 bytes)
:raise error.RepoUnknownError: general repository error condition
:raise error.RepoDiskFullError: repository does not have at least the
requested amount of free disk space.
'''
if min_free is None:
min_free = settings.get_value('PACKAGES', 'minimum_free_space',
type=int, default=1)
try:
df = repo_run_command(repo,
'df -PB %d . | tail -1' % 10 ** 9).stdout.split()
free_space_gb = int(df[3])
except Exception, e:
raise error.RepoUnknownError('Unknown Repo Error: %s' % e)
if free_space_gb < min_free:
raise error.RepoDiskFullError('Not enough disk space available '
'%sg < %sg' % (free_space_gb, min_free))
def check_write(repo):
'''
Checks that the remote repository directory is writable
:type repo: string
:param repo: a remote package repo URL
:raise error.RepoWriteError: repository write error
'''
try:
repo_testfile = '.repo_test_file'
repo_run_command(repo, 'touch %s' % repo_testfile).stdout.strip()
repo_run_command(repo, 'rm ' + repo_testfile)
except error.CmdError:
raise error.RepoWriteError('Unable to write to ' + repo)
def trim_custom_directories(repo, older_than_days=None):
'''
Remove old files from the remote repo directory
The age of the files, if not provided by the older_than_days parameter is
taken from the global configuration file, at section [PACKAGES],
configuration item 'custom_max_age'.
:type repo: string
:param repo: a remote package repo URL
'''
if not repo:
return
if older_than_days is None:
older_than_days = settings.get_value('PACKAGES', 'custom_max_age',
type=int, default=40)
cmd = 'find . -type f -atime +%s -exec rm -f {} \;' % older_than_days
repo_run_command(repo, cmd, ignore_status=True)
class RepositoryFetcher(object):
'''
Base class with common functionality for repository fetchers
'''
url = None
def __init__(self, package_manager, repository_url):
"""
Initializes a new Repository Fetcher
:type package_manager: BasePackageManager instance
:param package_manager: and instance of BasePackageManager class
:type repository_url: string
:param repository_url: The base URL of the repository
"""
self.run_command = package_manager._run_command
self.url = repository_url
self.pkgmgr = package_manager
def install_pkg_setup(self, name, fetch_dir, install):
"""
Install setup for a package based on fetcher type.
:type name: string
:param name: The filename to be munged
:type fetch_dir: string
:param fetch_dir: The destination path to be munged
:type install: boolean
        :param install: Whether this is being called from the install path or not
:return: tuple with (name, fetch_dir)
"""
if install:
fetch_dir = os.path.join(fetch_dir, re.sub("/", "_", name))
return (name, fetch_dir)
def fetch_pkg_file(self, filename, dest_path):
"""
Fetch a package file from a package repository.
:type filename: string
:param filename: The filename of the package file to fetch.
:type dest_path: string
:param dest_path: Destination path to download the file to.
:raise PackageFetchError: if the fetch failed
"""
raise NotImplementedError()
def install_pkg_post(self, filename, fetch_dir,
install_dir, preserve_install_dir=False):
"""
Fetcher specific post install
:type filename: string
:param filename: The filename of the package to install
:type fetch_dir: string
:param fetch_dir: The fetched path of the package
:type install_dir: string
:param install_dir: The path to install the package to
:type preserve_install_dir: boolean
        :param preserve_install_dir: Preserve the install directory
"""
# check to see if the install_dir exists and if it does
# then check to see if the .checksum file is the latest
install_dir_exists = False
try:
self.pkgmgr._run_command("ls %s" % install_dir)
install_dir_exists = True
except (error.CmdError, error.AutoservRunError):
pass
fetch_path = os.path.join(fetch_dir, re.sub("/", "_", filename))
if (install_dir_exists and
not self.pkgmgr.untar_required(fetch_path, install_dir)):
return
# untar the package into install_dir and
# update the checksum in that directory
if not preserve_install_dir:
# Make sure we clean up the install_dir
self.pkgmgr._run_command('rm -rf %s' % install_dir)
self.pkgmgr._run_command('mkdir -p %s' % install_dir)
self.pkgmgr.untar_pkg(fetch_path, install_dir)
class HttpFetcher(RepositoryFetcher):
'''
Repository Fetcher using HTTP
'''
#
# parameters: url, destination file path
#
wget_cmd_pattern = 'wget --connect-timeout=15 -nv %s -O %s'
def _quick_http_test(self):
"""
Runs a wget command with a 30s timeout
This checks that the repository is reachable, and avoids the need to
wait for a full 10min timeout.
"""
# just make a temp file to write a test fetch into
mktemp = 'mktemp -u /tmp/tmp.XXXXXX'
dest_file_path = self.run_command(mktemp).stdout.strip()
try:
# build up a wget command
http_cmd = self.wget_cmd_pattern % (self.url, dest_file_path)
try:
self.run_command(http_cmd, _run_command_dargs={'timeout': 30})
except Exception, e:
msg = 'HTTP test failed, unable to contact %s: %s'
raise error.PackageFetchError(msg % (self.url, e))
finally:
self.run_command('rm -rf %s' % dest_file_path)
def fetch_pkg_file(self, filename, dest_path):
"""
Fetch a package file from a package repository.
:type filename: string
:param filename: The filename of the package file to fetch.
:type dest_path: string
:param dest_path: Destination path to download the file to.
:raise PackageFetchError: if the fetch failed
"""
logging.info('Fetching %s from %s to %s', filename, self.url,
dest_path)
# do a quick test to verify the repo is reachable
self._quick_http_test()
# try to retrieve the package via http
package_url = os.path.join(self.url, filename)
try:
cmd = self.wget_cmd_pattern % (package_url, dest_path)
result = self.run_command(cmd)
file_exists = self.run_command(
'ls %s' % dest_path,
_run_command_dargs={'ignore_status': True}).exit_status == 0
if not file_exists:
logging.error('wget failed: %s', result)
raise error.CmdError(cmd, result)
logging.debug('Successfully fetched %s from %s', filename,
package_url)
except error.CmdError:
# remove whatever junk was retrieved when the get failed
self.run_command('rm -f %s' % dest_path)
raise error.PackageFetchError('%s not found in %s' % (filename,
package_url))
class GitFetcher(RepositoryFetcher):
"""
A git based repository fetcher
"""
#
# parameters: url, destination file path, <branch>:<file name>
#
git_archive_cmd_pattern = 'git archive --remote=%s -o %s %s'
def __init__(self, package_manager, repository_url):
"""
Initializes a new GitFetcher
:type package_manager: BasePackageManager class
:param package_manager: and instance of BasePackageManager class
:type repository_url: string
:param repository_url: The base URL of the git repository
"""
super(GitFetcher, self).__init__(package_manager, repository_url)
self._set_repo_url_branch(repository_url)
logging.debug('GitFetcher initialized with repo=%s and branch=%s',
self.url, self.branch)
def _set_repo_url_branch(self, repository_url):
'''
Parse the url, look for a branch and set it accordingly
:type repository_url: string
:param repository_url: The base URL of the git repository
'''
        # do we have branch info in the repository_url?
branch = "master"
match = repository_url.split(":")
if len(match) > 2:
# we have a branch
branch = match[2]
repository_url = re.sub(":" + branch, "", repository_url)
self.branch = branch
def fetch_pkg_file(self, filename, dest_path):
"""
Fetch a package file and save it to the given destination path
        git is an SCM, so the test can be downloaded directly; there is no need
        to fetch a bz2'd tarball file. However, 'filename' is <type>-<name>.tar.bz2,
        so break this up and only fetch <name>.
:type filename: string
:param filename: The filename of the package file to fetch.
:type dest_path: string
:param dest_path: Destination path to download the file to.
"""
logging.info('Fetching %s from %s to %s', filename, self.url,
dest_path)
name, _ = self.pkgmgr.parse_tarball_name(filename)
package_path = self.branch + " " + name
try:
cmd = self.git_archive_cmd_pattern % (self.url, dest_path, package_path)
result = self.run_command(cmd)
file_exists = self.run_command(
'ls %s' % dest_path,
_run_command_dargs={'ignore_status': True}).exit_status == 0
if not file_exists:
logging.error('git archive failed: %s', result)
raise error.CmdError(cmd, result)
logging.debug('Successfully fetched %s from %s', package_path,
self.url)
except error.CmdError:
raise error.PackageFetchError('%s not found in %s' % (name,
package_path))
def install_pkg_post(self, filename, fetch_dir, install_dir,
preserve_install_dir=False):
os_dep.command("tar")
filename, _ = self.pkgmgr.parse_tarball_name(filename)
install_path = re.sub(filename, "", install_dir)
for suffix in ['', '.tar', '.tar.bz2']:
pkg_name = "%s%s" % (suffix, re.sub("/", "_", filename))
fetch_path = os.path.join(fetch_dir, pkg_name)
if os.path.exists(fetch_path):
self.pkgmgr._run_command('tar -xf %s -C %s' % (fetch_path,
install_path))
class LocalFilesystemFetcher(RepositoryFetcher):
def fetch_pkg_file(self, filename, dest_path):
logging.info('Fetching %s from %s to %s', filename, self.url,
dest_path)
local_path = os.path.join(self.url, filename)
try:
self.run_command('cp %s %s' % (local_path, dest_path))
logging.debug('Successfully fetched %s from %s', filename,
local_path)
except error.CmdError, e:
raise error.PackageFetchError(
'Package %s could not be fetched from %s'
% (filename, self.url), e)
class BasePackageManager(object):
def __init__(self, pkgmgr_dir, hostname=None, repo_urls=None,
upload_paths=None, do_locking=True, run_function=utils.run,
run_function_args=[], run_function_dargs={}):
'''
Initializes a new BasePackageManager instance
        One of the most used interfaces of this class is _run_command(), which
is controlled by the run_function parameter. It defaults to utils.run()
but a custom method (if provided) should be of the same schema as
utils.run. It should return a CmdResult object and throw a CmdError
exception. The reason for using a separate function to run the commands
is that the same code can be run to fetch a package on the local
machine or on a remote machine (in which case ssh_host's run function
is passed in for run_function).
:type pkgmgr_dir: string
:param pkgmgr_dir: A directory that can be used by the package manager
to dump stuff (like checksum files of the repositories etc)
:type hostname: string
:param hostname: hostname from where to fetch a list of package repos
:type repo_urls: list of strings
        :param repo_urls: The list of the repository urls which are consulted
whilst fetching the package
:type upload_paths: list of strings
        :param upload_paths: The list of upload paths (repositories) to which
            the package is uploaded
:type do_locking: boolean
:param do_locking: Enable locking when the packages are installed.
:type run_function: function
:param run_function: function used to execute commands.
:type run_function_args: tuple
:param run_function_args: positional (tuple-like) arguments to
run_function
:param run_function_dargs: dictionary
:param run_function_dargs: named (dictionary-like) arguments to
run_function
'''
# In memory dictionary that stores the checksum's of packages
self._checksum_dict = {}
self.pkgmgr_dir = pkgmgr_dir
self.do_locking = do_locking
self.hostname = hostname
self.repositories = []
# Create an internal function that is a simple wrapper of
# run_function and takes in the args and dargs as arguments
def _run_command(command, _run_command_args=run_function_args,
_run_command_dargs={}):
'''
Special internal function that takes in a command as
argument and passes it on to run_function (if specified).
The _run_command_dargs are merged into run_function_dargs
with the former having more precedence than the latter.
'''
new_dargs = dict(run_function_dargs)
new_dargs.update(_run_command_dargs)
# avoid polluting logs with extremely verbose packaging output
new_dargs.update({'stdout_tee': None})
return run_function(command, *_run_command_args,
**new_dargs)
self._run_command = _run_command
# Process the repository URLs
if not repo_urls:
repo_urls = []
elif hostname:
repo_urls = self.get_mirror_list(repo_urls)
for url in repo_urls:
self.add_repository(url)
# Process the upload URLs
if not upload_paths:
self.upload_paths = []
else:
self.upload_paths = list(upload_paths)
def add_repository(self, repo):
if isinstance(repo, basestring):
self.repositories.append(self.get_fetcher(repo))
elif isinstance(repo, RepositoryFetcher):
self.repositories.append(repo)
else:
raise TypeError("repo must be RepositoryFetcher or url string")
def get_fetcher(self, url):
if url.startswith('http://'):
return HttpFetcher(self, url)
elif url.startswith('git://'):
return GitFetcher(self, url)
else:
return LocalFilesystemFetcher(self, url)
def repo_check(self, repo):
'''
Check to make sure the repo is in a sane state:
ensure we have at least XX amount of free space
Make sure we can write to the repo
'''
if not repo.startswith('/') and not repo.startswith('ssh:'):
return
try:
create_directory(repo)
check_diskspace(repo)
check_write(repo)
except (error.RepoWriteError, error.RepoUnknownError,
error.RepoDiskFullError), e:
raise error.RepoError("ERROR: Repo %s: %s" % (repo, e))
def upkeep(self, custom_repos=None):
'''
Clean up custom upload/download areas
'''
from autotest.server import subcommand
if not custom_repos:
# Not all package types necessarily require or allow custom repos
try:
custom_repos = settings.get_value('PACKAGES',
'custom_upload_location').split(',')
except SettingsError:
custom_repos = []
try:
custom_download = settings.get_value('PACKAGES',
'custom_download_location')
custom_repos += [custom_download]
except SettingsError:
pass
if not custom_repos:
return
subcommand.parallel_simple(trim_custom_directories, custom_repos,
log=False)
def install_pkg(self, name, pkg_type, fetch_dir, install_dir,
preserve_install_dir=False, repo_url=None):
'''
Remove install_dir if it already exists and then recreate it unless
preserve_install_dir is specified as True.
Fetch the package into the pkg_dir. Untar the package into install_dir
The assumption is that packages are of the form :
<pkg_type>.<pkg_name>.tar.bz2
name : name of the package
type : type of the package
fetch_dir : The directory into which the package tarball will be
fetched to.
install_dir : the directory where the package files will be untarred to
repo_url : the url of the repository to fetch the package from.
'''
# do_locking flag is on by default unless you disable it (typically
# in the cases where packages are directly installed from the server
        # onto the client, in which case fcntl locking won't work as the code
        # will run on the server in that case)
if self.do_locking:
lockfile_name = '.%s-%s-lock' % (re.sub("/", "_", name), pkg_type)
lockfile = open(os.path.join(self.pkgmgr_dir, lockfile_name), 'w')
try:
if self.do_locking:
fcntl.flock(lockfile, fcntl.LOCK_EX)
self._run_command('mkdir -p %s' % fetch_dir)
pkg_name = self.get_tarball_name(name, pkg_type)
try:
# Fetch the package into fetch_dir
fetcher = self.fetch_pkg(pkg_name, fetch_dir, use_checksum=True,
repo_url=repo_url, install=True)
fetcher.install_pkg_post(pkg_name, fetch_dir, install_dir, preserve_install_dir)
except error.PackageFetchError, why:
raise error.PackageInstallError(
'Installation of %s(type:%s) failed : %s'
% (name, pkg_type, why))
finally:
if self.do_locking:
fcntl.flock(lockfile, fcntl.LOCK_UN)
lockfile.close()
def fetch_pkg(self, pkg_name, dest_path, repo_url=None, use_checksum=False, install=False):
'''
Fetch the package into dest_dir from repo_url. By default repo_url
is None and the package is looked in all the repositories specified.
Otherwise it fetches it from the specific repo_url.
pkg_name : name of the package (ex: test-sleeptest.tar.bz2,
dep-gcc.tar.bz2, kernel.1-1.rpm)
repo_url : the URL of the repository where the package is located.
dest_path : complete path of where the package will be fetched to.
use_checksum : This is set to False to fetch the packages.checksum file
so that the checksum comparison is bypassed for the
checksum file itself. This is used internally by the
                       packaging system. It should be ignored by external
                       callers of this method who use it to fetch custom packages.
        install : the install path has unique name and destination requirements
                  that vary based on the fetcher that is used, so they are
                  handled here as opposed to in install_pkg.
'''
try:
self._run_command("ls %s" % os.path.dirname(dest_path))
except (error.CmdError, error.AutoservRunError):
raise error.PackageFetchError("Please provide a valid "
"destination: %s " % dest_path)
# See if the package was already fetched earlier, if so
# the checksums need to be compared and the package is now
# fetched only if they differ.
pkg_exists = False
try:
self._run_command("ls %s" % dest_path)
pkg_exists = True
except (error.CmdError, error.AutoservRunError):
pass
# if a repository location is explicitly provided, fetch the package
# from there and return
if repo_url:
repositories = [self.get_fetcher(repo_url)]
elif self.repositories:
repositories = self.repositories
else:
raise error.PackageFetchError("No repository urls specified")
# install the package from the package repos, try the repos in
# reverse order, assuming that the 'newest' repos are most desirable
for fetcher in reversed(repositories):
try:
if isinstance(fetcher, GitFetcher):
use_checksum = False
# different fetchers have different install requirements
dest = fetcher.install_pkg_setup(pkg_name, dest_path, install)[1]
# Fetch the package if it is not there, the checksum does
# not match, or checksums are disabled entirely
need_to_fetch = (
not use_checksum or not pkg_exists
or not self.compare_checksum(dest, fetcher.url))
if need_to_fetch:
fetcher.fetch_pkg_file(pkg_name, dest)
# update checksum so we won't refetch next time.
if use_checksum:
self.update_checksum(dest)
return fetcher
except (error.PackageFetchError, error.AutoservRunError):
# The package could not be found in this repo, continue looking
logging.debug('%s could not be fetched from %s', pkg_name,
fetcher.url)
repo_url_list = [repo.url for repo in repositories]
message = ('%s could not be fetched from any of the repos %s' %
(pkg_name, repo_url_list))
logging.error(message)
# if we got here then that means the package is not found
# in any of the repositories.
raise error.PackageFetchError(message)
def upload_pkg(self, pkg_path, upload_path=None, update_checksum=False,
timeout=300):
from autotest.server import subcommand
if upload_path:
upload_path_list = [upload_path]
self.upkeep(upload_path_list)
elif len(self.upload_paths) > 0:
self.upkeep()
upload_path_list = self.upload_paths
else:
raise error.PackageUploadError("Invalid Upload Path specified")
if update_checksum:
# get the packages' checksum file and update it with the current
# package's checksum
self.update_checksum(pkg_path)
commands = []
for path in upload_path_list:
commands.append(subcommand.subcommand(self.upload_pkg_parallel,
(pkg_path, path,
update_checksum)))
results = subcommand.parallel(commands, timeout, return_results=True)
for result in results:
if result:
print str(result)
# TODO(aganti): Fix the bug with the current checksum logic where
# packages' checksums that are not present consistently in all the
# repositories are not handled properly. This is a corner case though
# but the ideal solution is to make the checksum file repository specific
# and then maintain it.
def upload_pkg_parallel(self, pkg_path, upload_path, update_checksum=False):
'''
Uploads to a specified upload_path or to all the repos.
Also uploads the checksum file to all the repos.
pkg_path : The complete path to the package file
upload_path : the absolute path where the files are copied to.
if set to 'None' assumes 'all' repos
update_checksum : If set to False, the checksum file is not
going to be updated which happens by default.
This is necessary for custom
packages (like custom kernels and custom tests)
that get uploaded which do not need to be part of
the checksum file and bloat it.
'''
self.repo_check(upload_path)
# upload the package
if os.path.isdir(pkg_path):
self.upload_pkg_dir(pkg_path, upload_path)
else:
self.upload_pkg_file(pkg_path, upload_path)
if update_checksum:
self.upload_pkg_file(self._get_checksum_file_path(),
upload_path)
def upload_pkg_file(self, file_path, upload_path):
'''
Upload a single file. Depending on the upload path, the appropriate
method for that protocol is called. Currently this simply copies the
file to the target directory (but can be extended for other protocols)
This assumes that the web server is running on the same machine where
the method is being called from. The upload_path's files are
basically served by that web server.
'''
try:
if upload_path.startswith('ssh://'):
# parse ssh://user@host[autotest_top_path]/packages
hostline, remote_path = parse_ssh_path(upload_path)
try:
utils.run('scp %s %s:%s' % (file_path, hostline,
remote_path))
r_path = os.path.join(remote_path,
os.path.basename(file_path))
utils.run("ssh %s 'chmod 644 %s'" % (hostline, r_path))
except error.CmdError:
logging.error("Error uploading to repository %s",
upload_path)
else:
shutil.copy(file_path, upload_path)
os.chmod(os.path.join(upload_path,
os.path.basename(file_path)), 0644)
except (IOError, os.error), why:
logging.error("Upload of %s to %s failed: %s", file_path,
upload_path, why)
def upload_pkg_dir(self, dir_path, upload_path):
'''
Upload a full directory. Depending on the upload path, the appropriate
method for that protocol is called. Currently this copies the whole
tmp package directory to the target directory.
This assumes that the web server is running on the same machine where
the method is being called from. The upload_path's files are
basically served by that web server.
'''
local_path = os.path.join(dir_path, "*")
try:
if upload_path.startswith('ssh://'):
hostline, remote_path = parse_ssh_path(upload_path)
try:
utils.run('scp %s %s:%s' % (local_path, hostline,
remote_path))
ssh_path = os.path.join(remote_path, "*")
utils.run("ssh %s 'chmod 644 %s'" % (hostline, ssh_path))
except error.CmdError:
logging.error("Error uploading to repository: %s",
upload_path)
else:
utils.run("cp %s %s " % (local_path, upload_path))
up_path = os.path.join(upload_path, "*")
utils.run("chmod 644 %s" % up_path)
except (IOError, os.error), why:
raise error.PackageUploadError("Upload of %s to %s failed: %s"
% (dir_path, upload_path, why))
def remove_pkg(self, pkg_name, remove_path=None, remove_checksum=False):
'''
Remove the package from the specified remove_path
pkg_name : name of the package (ex: test-sleeptest.tar.bz2,
dep-gcc.tar.bz2)
remove_path : the location to remove the package from.
'''
if remove_path:
remove_path_list = [remove_path]
elif len(self.upload_paths) > 0:
remove_path_list = self.upload_paths
else:
raise error.PackageRemoveError(
"Invalid path to remove the pkg from")
checksum_path = self._get_checksum_file_path()
if remove_checksum:
self.remove_checksum(pkg_name)
# remove the package and upload the checksum file to the repos
for path in remove_path_list:
self.remove_pkg_file(pkg_name, path)
self.upload_pkg_file(checksum_path, path)
def remove_pkg_file(self, filename, pkg_dir):
'''
Remove the file named filename from pkg_dir
'''
try:
# Remove the file
if pkg_dir.startswith('ssh://'):
hostline, remote_path = parse_ssh_path(pkg_dir)
path = os.path.join(remote_path, filename)
utils.run("ssh %s 'rm -rf %s/%s'" % (hostline, remote_path,
path))
else:
os.remove(os.path.join(pkg_dir, filename))
except (IOError, os.error), why:
raise error.PackageRemoveError("Could not remove %s from %s: %s "
% (filename, pkg_dir, why))
def get_mirror_list(self, repo_urls):
'''
Stub function for site specific mirrors.
Returns:
Priority ordered list
'''
return repo_urls
def _get_checksum_file_path(self):
'''
Return the complete path of the checksum file (assumed to be stored
        in self.pkgmgr_dir)
'''
return os.path.join(self.pkgmgr_dir, CHECKSUM_FILE)
def _get_checksum_dict(self):
'''
Fetch the checksum file if not already fetched. If the checksum file
cannot be fetched from the repos then a new file is created with
the current package's (specified in pkg_path) checksum value in it.
Populate the local checksum dictionary with the values read from
the checksum file.
The checksum file is assumed to be present in self.pkgmgr_dir
'''
checksum_path = self._get_checksum_file_path()
if not self._checksum_dict:
# Fetch the checksum file
try:
try:
self._run_command("ls %s" % checksum_path)
except (error.CmdError, error.AutoservRunError):
# The packages checksum file does not exist locally.
# See if it is present in the repositories.
self.fetch_pkg(CHECKSUM_FILE, checksum_path)
except error.PackageFetchError:
# This should not happen whilst fetching a package..if a
# package is present in the repository, the corresponding
# checksum file should also be automatically present. This
# case happens only when a package
# is being uploaded and if it is the first package to be
# uploaded to the repos (hence no checksum file created yet)
# Return an empty dictionary in that case
return {}
# Read the checksum file into memory
checksum_file_contents = self._run_command('cat '
+ checksum_path).stdout
# Return {} if we have an empty checksum file present
if not checksum_file_contents.strip():
return {}
# Parse the checksum file contents into self._checksum_dict
for line in checksum_file_contents.splitlines():
checksum, package_name = line.split(None, 1)
self._checksum_dict[package_name] = checksum
return self._checksum_dict
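    # Note on the file format (added for clarity): the checksum file parsed
    # above is plain text with one "<md5sum> <package name>" pair per line,
    # for example (values illustrative):
    #     d41d8cd98f00b204e9800998ecf8427e  test-sleeptest.tar.bz2
    #     900150983cd24fb0d6963f7d28e17f72  dep-gcc.tar.bz2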
def _save_checksum_dict(self, checksum_dict):
'''
Save the checksum dictionary onto the checksum file. Update the
local _checksum_dict variable with this new set of values.
checksum_dict : New checksum dictionary
checksum_dir : The directory in which to store the checksum file to.
'''
checksum_path = self._get_checksum_file_path()
self._checksum_dict = checksum_dict.copy()
checksum_contents = '\n'.join(checksum + ' ' + pkg_name
for pkg_name, checksum in
checksum_dict.iteritems())
# Write the checksum file back to disk
self._run_command('echo "%s" > %s' % (checksum_contents,
checksum_path),
_run_command_dargs={'verbose': False})
def compute_checksum(self, pkg_path):
'''
Compute the MD5 checksum for the package file and return it.
pkg_path : The complete path for the package file
'''
os_dep.command("md5sum")
md5sum_output = self._run_command("md5sum %s " % pkg_path).stdout
return md5sum_output.split()[0]
def update_checksum(self, pkg_path):
'''
Update the checksum of the package in the packages' checksum
file. This method is called whenever a package is fetched just
to be sure that the checksums in the local file are the latest.
pkg_path : The complete path to the package file.
'''
# Compute the new checksum
new_checksum = self.compute_checksum(pkg_path)
checksum_dict = self._get_checksum_dict()
checksum_dict[os.path.basename(pkg_path)] = new_checksum
self._save_checksum_dict(checksum_dict)
def remove_checksum(self, pkg_name):
'''
Remove the checksum of the package from the packages checksum file.
This method is called whenever a package is removed from the
        repositories in order to clean up its corresponding checksum.
pkg_name : The name of the package to be removed
'''
checksum_dict = self._get_checksum_dict()
if pkg_name in checksum_dict:
del checksum_dict[pkg_name]
self._save_checksum_dict(checksum_dict)
def compare_checksum(self, pkg_path, repo_url):
'''
Calculate the checksum of the file specified in pkg_path and
compare it with the checksum in the checksum file
Return True if both match else return False.
:param pkg_path: The full path to the package file for which the
checksum is being compared
:param repo_url: The URL to fetch the checksum from
'''
checksum_dict = self._get_checksum_dict()
package_name = os.path.basename(pkg_path)
if not checksum_dict or package_name not in checksum_dict:
return False
repository_checksum = checksum_dict[package_name]
local_checksum = self.compute_checksum(pkg_path)
return (local_checksum == repository_checksum)
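    # Sketch of the checksum file round-trip assumed by _get_checksum_dict()
    # and _save_checksum_dict(): one "<md5sum> <package name>" pair per line
    # (values below are illustrative only):
    #
    #   contents = "d41d8cd98f00b204e9800998ecf8427e test-sleeptest.tar.bz2"
    #   checksums = {}
    #   for line in contents.splitlines():
    #       checksum, pkg_name = line.split(None, 1)
    #       checksums[pkg_name] = checksum
    #   serialized = '\n'.join('%s %s' % (c, n) for n, c in checksums.iteritems())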
def tar_package(self, pkg_name, src_dir, dest_dir, include_string=None,
exclude_string=None):
'''
Create a tar.bz2 file with the name 'pkg_name' say test-blah.tar.bz2.
Includes the files specified in include_string, and excludes the files
        specified in exclude_string, while tarring the source. Returns the
destination tarball path.
:param pkg_name: Package name.
:param src_dir: Directory that contains the data to be packaged.
:param dest_dir: Directory that will hold the destination tarball.
:param include_string: Pattern that represents the files that will be
added to the tar package.
:param exclude_string: Pattern that represents the files that should be
excluded from the tar package. It could be either a string or
a list.
'''
tarball_path = os.path.join(dest_dir, pkg_name)
temp_path = tarball_path + '.tmp'
cmd_list = ['tar', '-cf', temp_path, '-C', src_dir]
if _PBZIP2_AVAILABLE:
cmd_list.append('--use-compress-prog=pbzip2')
else:
cmd_list.append('-j')
if include_string is not None:
cmd_list.append(include_string)
if exclude_string is not None:
if isinstance(exclude_string, list):
for exc_str in exclude_string:
cmd_list.append('--exclude %s' % exc_str)
else:
if "--exclude" not in exclude_string:
cmd_list.append('--exclude')
cmd_list.append(exclude_string)
try:
utils.system(' '.join(cmd_list))
except Exception:
os.unlink(temp_path)
raise
os.rename(temp_path, tarball_path)
return tarball_path
def untar_required(self, tarball_path, dest_dir):
'''
Compare the checksum of the tarball_path with the .checksum file
in the dest_dir and return False if it matches. The untar
of the package happens only if the checksums do not match.
'''
checksum_path = os.path.join(dest_dir, '.checksum')
try:
existing_checksum = self._run_command('cat ' + checksum_path).stdout
except (error.CmdError, error.AutoservRunError):
# If the .checksum file is not present (generally, this should
# not be the case) then return True so that the untar happens
return True
new_checksum = self.compute_checksum(tarball_path)
return (new_checksum.strip() != existing_checksum.strip())
def untar_pkg(self, tarball_path, dest_dir):
'''
Untar the package present in the tarball_path and put a
".checksum" file in the dest_dir containing the checksum
of the tarball. This method
assumes that the package to be untarred is of the form
<name>.tar.bz2
'''
os_dep.command("tar")
self._run_command('tar xjf %s -C %s' % (tarball_path, dest_dir))
# Put the .checksum file in the install_dir to note
# where the package came from
pkg_checksum = self.compute_checksum(tarball_path)
pkg_checksum_path = os.path.join(dest_dir,
'.checksum')
self._run_command('echo "%s" > %s '
% (pkg_checksum, pkg_checksum_path))
@staticmethod
def get_tarball_name(name, pkg_type):
"""
Converts a package name and type into a tarball name.
:param name: The name of the package
:param pkg_type: The type of the package
:return: A tarball filename for that specific type of package
"""
assert '-' not in pkg_type
return '%s-%s.tar.bz2' % (pkg_type, name)
@staticmethod
def parse_tarball_name(tarball_name):
"""Coverts a package tarball name into a package name and type.
:param tarball_name: The filename of the tarball
:return: (name, pkg_type) where name is the package name and pkg_type
is the package type.
"""
match = re.search(r'^([^-]*)-(.*)\.tar\.bz2$', tarball_name)
pkg_type, name = match.groups()
return name, pkg_type
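    # Example round-trip for the two helpers above (illustrative values):
    #
    #   get_tarball_name('sleeptest', 'test')        -> 'test-sleeptest.tar.bz2'
    #   parse_tarball_name('test-sleeptest.tar.bz2') -> ('sleeptest', 'test')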
def get_package_name(self, url, pkg_type):
'''
Extract the group and test name for the url. This method is currently
used only for tests.
'''
if pkg_type == 'test':
regex = '[^:]+://(.*)/([^/]*)$'
return self._get_package_name(url, regex)
else:
return ('', url)
def _get_package_name(self, url, regex):
if not utils.is_url(url):
if url.endswith('.tar.bz2'):
testname = url.replace('.tar.bz2', '')
testname = re.sub(r'(\d*)\.', '', testname)
return (testname, testname)
else:
return ('', url)
match = re.match(regex, url)
if not match:
return ('', url)
group, filename = match.groups()
# Generate the group prefix.
group = re.sub(r'\W', '_', group)
# Drop the extension to get the raw test name.
testname = re.sub(r'\.tar\.bz2', '', filename)
# Drop any random numbers at the end of the test name if any
testname = re.sub(r'\.(\d*)', '', testname)
return (group, testname)
| lmr/autotest | client/shared/base_packages.py | Python | gpl-2.0 | 45,844 | 0.000284 |
from __future__ import division, absolute_import, print_function
import math
import textwrap
from numpy import array
from numpy.testing import run_module_suite, assert_, assert_equal, dec
import util
class TestF77Callback(util.F2PyTest):
code = """
subroutine t(fun,a)
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine func(a)
cf2py intent(in,out) a
integer a
a = a + 11
end
subroutine func0(a)
cf2py intent(out) a
integer a
a = 11
end
subroutine t2(a)
cf2py intent(callback) fun
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine string_callback(callback, a)
external callback
double precision callback
double precision a
character*1 r
cf2py intent(out) a
r = 'r'
a = callback(r)
end
"""
@dec.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
@dec.slow
def test_docstring(self):
expected = """
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
"""
assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip())
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert_(r == 4, repr(r))
r = t(lambda a: 5, fun_extra_args=(6,))
assert_(r == 5, repr(r))
r = t(lambda a: a, fun_extra_args=(6,))
assert_(r == 6, repr(r))
r = t(lambda a: 5 + a, fun_extra_args=(7,))
assert_(r == 12, repr(r))
r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(math.degrees, fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(self.module.func, fun_extra_args=(6,))
assert_(r == 17, repr(r))
r = t(self.module.func0)
assert_(r == 11, repr(r))
r = t(self.module.func0._cpointer)
assert_(r == 11, repr(r))
class A(object):
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert_(r == 7, repr(r))
r = t(a.mth)
assert_(r == 9, repr(r))
def test_string_callback(self):
def callback(code):
if code == 'r':
return 0
else:
return 1
f = getattr(self.module, 'string_callback')
r = f(callback)
assert_(r == 0, repr(r))
if __name__ == "__main__":
run_module_suite()
| LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/f2py/tests/test_callback.py | Python | bsd-2-clause | 3,040 | 0 |
from gdsfactory.port import csv2port
def test_csv2port(data_regression):
import gdsfactory as gf
name = "straight"
csvpath = gf.CONFIG["gdsdir"] / f"{name}.ports"
ports = csv2port(csvpath)
data_regression.check(ports)
| gdsfactory/gdsfactory | gdsfactory/tests/test_port_from_csv.py | Python | mit | 241 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy.learn.mlpca import mlpca
from hyperspy.signals import Signal1D
@pytest.mark.parametrize("tol", [1e-9, 1e-6])
@pytest.mark.parametrize("max_iter", [100, 500])
def test_mlpca(tol, max_iter):
# Define shape etc.
m = 100 # Dimensionality
n = 101 # Number of samples
r = 3
rng = np.random.RandomState(101)
U = rng.uniform(0, 1, size=(m, r))
V = rng.uniform(0, 10, size=(n, r))
varX = U @ V.T
X = rng.poisson(varX)
rank = r
    # Error tolerance for the reconstruction check (the parametrized `tol`
    # above is the convergence tolerance passed to mlpca)
    err_tol = 300
    U, S, V, Sobj = mlpca(X, varX, output_dimension=rank, tol=tol, max_iter=max_iter)
    Xhat = U @ np.diag(S) @ V.T
    # Check the low-rank reconstruction error
    normX = np.linalg.norm(Xhat - X)
    assert normX < err_tol
# Check singular values
S_norm = S / np.sum(S)
np.testing.assert_allclose(S_norm[:rank].sum(), 1.0)
def test_signal():
# Define shape etc.
m = 100 # Dimensionality
n = 101 # Number of samples
r = 3
rng = np.random.RandomState(101)
U = rng.uniform(0, 1, size=(m, r))
V = rng.uniform(0, 10, size=(n, r))
varX = U @ V.T
X = rng.poisson(varX).astype(float)
# Test tolerance
tol = 300
x = X.copy().reshape(10, 10, 101)
s = Signal1D(x)
s.decomposition(algorithm="mlpca", output_dimension=r)
# Check singular values
v = s.get_explained_variance_ratio().data
np.testing.assert_allclose(v[:r].sum(), 1.0)
# Check the low-rank component MSE
Y = s.get_decomposition_model(r).data
normX = np.linalg.norm(Y.reshape(m, n) - X)
assert normX < tol
| dnjohnstone/hyperspy | hyperspy/tests/learn/test_mlpca.py | Python | gpl-3.0 | 2,332 | 0.000429 |
from .lircpy import LircPy
from .exceptions import InvalidResponseError, LircError
__all__ = ['LircPy', 'InvalidResponseError', 'LircError']
| NoMoKeTo/lircpy | lircpy/__init__.py | Python | apache-2.0 | 142 | 0 |
from itertools import chain
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.db.models import Sum
from django.template import Template, Context
class AbstractOrder(models.Model):
u"""An order"""
number = models.CharField(_("Order number"), max_length=128, db_index=True)
# We track the site that each order is placed within
site = models.ForeignKey('sites.Site')
basket = models.ForeignKey('basket.Basket', null=True, blank=True)
# Orders can be anonymous so we don't always have a customer ID
user = models.ForeignKey(User, related_name='orders', null=True, blank=True)
# Billing address is not always required (eg paying by gift card)
billing_address = models.ForeignKey('order.BillingAddress', null=True, blank=True)
# Total price looks like it could be calculated by adding up the
# prices of the associated lines, but in some circumstances extra
# order-level charges are added and so we need to store it separately
total_incl_tax = models.DecimalField(_("Order total (inc. tax)"), decimal_places=2, max_digits=12)
total_excl_tax = models.DecimalField(_("Order total (excl. tax)"), decimal_places=2, max_digits=12)
# Shipping charges
shipping_incl_tax = models.DecimalField(_("Shipping charge (inc. tax)"), decimal_places=2, max_digits=12, default=0)
shipping_excl_tax = models.DecimalField(_("Shipping charge (excl. tax)"), decimal_places=2, max_digits=12, default=0)
# Not all lines are actually shipped (such as downloads), hence shipping address
# is not mandatory.
shipping_address = models.ForeignKey('order.ShippingAddress', null=True, blank=True)
shipping_method = models.CharField(_("Shipping method"), max_length=128, null=True, blank=True)
# Use this field to indicate that an order is on hold / awaiting payment
status = models.CharField(_("Status"), max_length=100, null=True, blank=True)
# Index added to this field for reporting
date_placed = models.DateTimeField(auto_now_add=True, db_index=True)
@property
def basket_total_incl_tax(self):
u"""Return basket total including tax"""
return self.total_incl_tax - self.shipping_incl_tax
@property
def basket_total_excl_tax(self):
u"""Return basket total excluding tax"""
return self.total_excl_tax - self.shipping_excl_tax
@property
def num_lines(self):
return self.lines.count()
@property
def num_items(self):
u"""
Returns the number of items in this order.
"""
num_items = 0
for line in self.lines.all():
num_items += line.quantity
return num_items
@property
def shipping_status(self):
events = self.shipping_events.all()
if not len(events):
return ''
# Collect all events by event-type
map = {}
for event in events:
event_name = event.event_type.name
if event_name not in map:
map[event_name] = []
map[event_name] = list(chain(map[event_name], event.line_quantities.all()))
# Determine last complete event
status = _("In progress")
for event_name, event_line_quantities in map.items():
if self._is_event_complete(event_line_quantities):
status = event_name
return status
def _is_event_complete(self, event_quantites):
# Form map of line to quantity
map = {}
for event_quantity in event_quantites:
line_id = event_quantity.line_id
map.setdefault(line_id, 0)
map[line_id] += event_quantity.quantity
for line in self.lines.all():
if map[line.id] != line.quantity:
return False
return True
class Meta:
abstract = True
ordering = ['-date_placed',]
permissions = (
("can_view", "Can view orders (eg for reporting)"),
)
def __unicode__(self):
return u"#%s" % (self.number,)
class AbstractOrderNote(models.Model):
u"""A note against an order."""
order = models.ForeignKey('order.Order', related_name="notes")
user = models.ForeignKey('auth.User')
message = models.TextField()
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
def __unicode__(self):
return u"'%s' (%s)" % (self.message[0:50], self.user)
class AbstractCommunicationEvent(models.Model):
u"""
An order-level event involving a communication to the customer, such
    as a confirmation email being sent."""
order = models.ForeignKey('order.Order', related_name="communication_events")
type = models.ForeignKey('order.CommunicationEventType')
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
def __unicode__(self):
return u"'%s' event for order #%s" % (self.type.name, self.order.number)
class AbstractCommunicationEventType(models.Model):
u"""Communication events are things like 'OrderConfirmationEmailSent'"""
# Code is used in forms
code = models.SlugField(max_length=128)
# Name is the friendly description of an event
name = models.CharField(max_length=255)
# Template content for emails
email_subject_template = models.CharField(max_length=255, blank=True)
email_body_template = models.TextField(blank=True, null=True)
# Template content for SMS messages
sms_template = models.CharField(max_length=170, blank=True)
def save(self, *args, **kwargs):
if not self.code:
self.code = slugify(self.name)
super(AbstractCommunicationEventType, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = _("Communication event types")
def __unicode__(self):
return self.name
def has_email_templates(self):
return self.email_subject_template and self.email_body_template
def get_email_subject_for_order(self, order, **kwargs):
return self._merge_template_with_context(self.email_subject_template, order, **kwargs)
def get_email_body_for_order(self, order, **kwargs):
return self._merge_template_with_context(self.email_body_template, order, **kwargs)
def _merge_template_with_context(self, template, order, **kwargs):
ctx = {'order': order}
ctx.update(**kwargs)
return Template(template).render(Context(ctx))
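    # Illustrative sketch (not part of the original model): with
    # email_subject_template = "Your order {{ order.number }} has shipped",
    # get_email_subject_for_order(order) renders that string as a Django
    # template with {'order': order} in the context, producing e.g.
    # "Your order 10001 has shipped" for an order whose number is "10001".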
class AbstractLine(models.Model):
u"""
    An order line (basically a product and a quantity).
    Not using a separate line-item model as it's difficult to capture
    payment information when it is split across a line.
"""
order = models.ForeignKey('order.Order', related_name='lines')
# We store the partner, their SKU and the title for cases where the product has been
# deleted from the catalogue. We also store the partner name in case the partner
# gets deleted at a later date.
partner = models.ForeignKey('partner.Partner', related_name='order_lines', blank=True, null=True, on_delete=models.SET_NULL)
partner_name = models.CharField(_("Partner name"), max_length=128)
partner_sku = models.CharField(_("Partner SKU"), max_length=128)
title = models.CharField(_("Title"), max_length=255)
# We don't want any hard links between orders and the products table
product = models.ForeignKey('product.Item', on_delete=models.SET_NULL, null=True)
quantity = models.PositiveIntegerField(default=1)
# Price information (these fields are actually redundant as the information
# can be calculated from the LinePrice models
line_price_incl_tax = models.DecimalField(decimal_places=2, max_digits=12)
line_price_excl_tax = models.DecimalField(decimal_places=2, max_digits=12)
# Price information before discounts are applied
line_price_before_discounts_incl_tax = models.DecimalField(decimal_places=2, max_digits=12)
line_price_before_discounts_excl_tax = models.DecimalField(decimal_places=2, max_digits=12)
# REPORTING FIELDS
# Cost price (the price charged by the fulfilment partner for this product).
unit_cost_price = models.DecimalField(decimal_places=2, max_digits=12, blank=True, null=True)
# Normal site price for item (without discounts)
unit_site_price = models.DecimalField(decimal_places=2, max_digits=12, blank=True, null=True)
# Retail price at time of purchase
unit_retail_price = models.DecimalField(decimal_places=2, max_digits=12, blank=True, null=True)
# Partner information
partner_line_reference = models.CharField(_("Partner reference"), max_length=128, blank=True, null=True,
help_text=_("This is the item number that the partner uses within their system"))
partner_line_notes = models.TextField(blank=True, null=True)
# Estimated dispatch date - should be set at order time
est_dispatch_date = models.DateField(blank=True, null=True)
@property
def description(self):
u"""
Returns a description of this line including details of any
line attributes.
"""
d = str(self.product)
ops = []
for attribute in self.attributes.all():
ops.append("%s = '%s'" % (attribute.type, attribute.value))
if ops:
d = "%s (%s)" % (d, ", ".join(ops))
return d
@property
def shipping_status(self):
u"""Returns a string summary of the shipping status of this line"""
status_map = self._shipping_event_history()
if not status_map:
return ''
events = []
last_complete_event_name = None
for event_dict in status_map:
if event_dict['quantity'] == self.quantity:
events.append(event_dict['name'])
last_complete_event_name = event_dict['name']
else:
events.append("%s (%d/%d items)" % (event_dict['name'],
event_dict['quantity'], self.quantity))
if last_complete_event_name == status_map[-1]['name']:
return last_complete_event_name
return ', '.join(events)
def has_shipping_event_occurred(self, event_type):
u"""Checks whether this line has passed a given shipping event"""
for event_dict in self._shipping_event_history():
if event_dict['name'] == event_type.name and event_dict['quantity'] == self.quantity:
return True
return False
@property
def is_product_deleted(self):
return self.product == None
def _shipping_event_history(self):
u"""
Returns a list of shipping events"""
status_map = {}
for event in self.shippingevent_set.all():
event_name = event.event_type.name
event_quantity = event.line_quantities.get(line=self).quantity
if event_name in status_map:
status_map[event_name]['quantity'] += event_quantity
else:
status_map[event_name] = {'name': event_name, 'quantity': event_quantity}
return list(status_map.values())
class Meta:
abstract = True
verbose_name_plural = _("Order lines")
def __unicode__(self):
return u"Product '%s', quantity '%s'" % (self.product, self.quantity)
class AbstractLineAttribute(models.Model):
u"""An attribute of a line."""
line = models.ForeignKey('order.Line', related_name='attributes')
option = models.ForeignKey('product.Option', null=True, on_delete=models.SET_NULL, related_name="line_attributes")
type = models.CharField(_("Type"), max_length=128)
value = models.CharField(_("Value"), max_length=255)
class Meta:
abstract = True
def __unicode__(self):
return "%s = %s" % (self.type, self.value)
class AbstractLinePrice(models.Model):
u"""
For tracking the prices paid for each unit within a line.
This is necessary as offers can lead to units within a line
having different prices. For example, one product may be sold at
50% off as it's part of an offer while the remainder are full price.
"""
order = models.ForeignKey('order.Order', related_name='line_prices')
line = models.ForeignKey('order.Line', related_name='prices')
quantity = models.PositiveIntegerField(default=1)
price_incl_tax = models.DecimalField(decimal_places=2, max_digits=12)
price_excl_tax = models.DecimalField(decimal_places=2, max_digits=12)
shipping_incl_tax = models.DecimalField(decimal_places=2, max_digits=12, default=0)
shipping_excl_tax = models.DecimalField(decimal_places=2, max_digits=12, default=0)
class Meta:
abstract = True
def __unicode__(self):
return u"Line '%s' (quantity %d) price %s" % (self.line, self.quantity, self.price_incl_tax)
# PAYMENT EVENTS
class AbstractPaymentEventType(models.Model):
"""
Payment events are things like 'Paid', 'Failed', 'Refunded'
"""
name = models.CharField(max_length=128)
code = models.SlugField(max_length=128)
sequence_number = models.PositiveIntegerField(default=0)
def save(self, *args, **kwargs):
if not self.code:
self.code = slugify(self.name)
super(AbstractPaymentEventType, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = _("Payment event types")
ordering = ('sequence_number',)
def __unicode__(self):
return self.name
class AbstractPaymentEvent(models.Model):
u"""
An event is something which happens to a line such as
payment being taken for 2 items, or 1 item being dispatched.
"""
order = models.ForeignKey('order.Order', related_name='payment_events')
lines = models.ManyToManyField('order.Line', through='PaymentEventQuantity')
event_type = models.ForeignKey('order.PaymentEventType')
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
verbose_name_plural = _("Payment events")
    def __unicode__(self):
        return u"Payment event for order #%s, type %s" % (
            self.order.number, self.event_type)
class PaymentEventQuantity(models.Model):
u"""A "through" model linking lines to payment events"""
event = models.ForeignKey('order.PaymentEvent', related_name='line_quantities')
line = models.ForeignKey('order.Line')
quantity = models.PositiveIntegerField()
class AbstractShippingEvent(models.Model):
u"""
An event is something which happens to a group of lines such as
1 item being dispatched.
"""
order = models.ForeignKey('order.Order', related_name='shipping_events')
lines = models.ManyToManyField('order.Line', through='ShippingEventQuantity')
event_type = models.ForeignKey('order.ShippingEventType')
notes = models.TextField(_("Event notes"), blank=True, null=True,
help_text="This could be the dispatch reference, or a tracking number")
date = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
verbose_name_plural = _("Shipping events")
ordering = ['-date']
def __unicode__(self):
return u"Order #%s, type %s" % (
self.order.number, self.event_type)
def num_affected_lines(self):
return self.lines.count()
class ShippingEventQuantity(models.Model):
u"""A "through" model linking lines to shipping events"""
event = models.ForeignKey('order.ShippingEvent', related_name='line_quantities')
line = models.ForeignKey('order.Line')
quantity = models.PositiveIntegerField()
def _check_previous_events_are_complete(self):
u"""Checks whether previous shipping events have passed"""
previous_events = ShippingEventQuantity._default_manager.filter(line=self.line,
event__event_type__sequence_number__lt=self.event.event_type.sequence_number)
self.quantity = int(self.quantity)
for event_quantities in previous_events:
if event_quantities.quantity < self.quantity:
raise ValueError("Invalid quantity (%d) for event type (a previous event has not been fully passed)" % self.quantity)
def _check_new_quantity(self):
quantity_row = ShippingEventQuantity._default_manager.filter(line=self.line,
event__event_type=self.event.event_type).aggregate(Sum('quantity'))
previous_quantity = quantity_row['quantity__sum']
if previous_quantity == None:
previous_quantity = 0
if previous_quantity + self.quantity > self.line.quantity:
raise ValueError("Invalid quantity (%d) for event type (total exceeds line total)" % self.quantity)
def save(self, *args, **kwargs):
# Default quantity to full quantity of line
if not self.quantity:
self.quantity = self.line.quantity
self._check_previous_events_are_complete()
self._check_new_quantity()
super(ShippingEventQuantity, self).save(*args, **kwargs)
def __unicode__(self):
return "%s - quantity %d" % (self.line.product, self.quantity)
class AbstractShippingEventType(models.Model):
u"""Shipping events are things like 'OrderPlaced', 'Acknowledged', 'Dispatched', 'Refunded'"""
    # Code is used in forms
    code = models.SlugField(max_length=128)
    # Name is the friendly description of an event
    name = models.CharField(max_length=255)
is_required = models.BooleanField(default=True, help_text="This event must be passed before the next shipping event can take place")
# The normal order in which these shipping events take place
sequence_number = models.PositiveIntegerField(default=0)
def save(self, *args, **kwargs):
if not self.code:
self.code = slugify(self.name)
super(AbstractShippingEventType, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = _("Shipping event types")
ordering = ('sequence_number',)
def __unicode__(self):
return self.name
class AbstractOrderDiscount(models.Model):
order = models.ForeignKey('order.Order', related_name="discounts")
offer = models.ForeignKey('offer.ConditionalOffer', null=True, on_delete=models.SET_NULL)
voucher = models.ForeignKey('offer.Voucher', related_name="discount_vouchers", null=True, on_delete=models.SET_NULL)
voucher_code = models.CharField(_("Code"), max_length=128, db_index=True)
amount = models.DecimalField(decimal_places=2, max_digits=12, default=0)
class Meta:
abstract = True
def description(self):
if self.voucher_code:
return self.voucher_code
return self.offer.name
| aykut/django-oscar | oscar/apps/order/abstract_models.py | Python | bsd-3-clause | 19,620 | 0.007594 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
from . import transform
from ..util.yaml import load_yaml
logger = logging.getLogger(__name__)
class TestTask(transform.TransformTask):
"""
A task implementing a Gecko test.
"""
@classmethod
def get_inputs(cls, kind, path, config, params, loaded_tasks):
# the kind on which this one depends
if len(config.get('kind-dependencies', [])) != 1:
raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
dep_kind = config['kind-dependencies'][0]
# get build tasks, keyed by build platform
builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
# get the test platforms for those build tasks
test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
# expand the test sets for each of those platforms
test_sets_cfg = load_yaml(path, 'test-sets.yml')
test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
# load the test descriptions
test_descriptions = load_yaml(path, 'tests.yml')
# generate all tests for all test platforms
for test_platform_name, test_platform in test_platforms.iteritems():
for test_name in test_platform['test-names']:
test = copy.deepcopy(test_descriptions[test_name])
test['build-platform'] = test_platform['build-platform']
test['test-platform'] = test_platform_name
test['build-label'] = test_platform['build-label']
test['test-name'] = test_name
if test_platform['nightly']:
test.setdefault('attributes', {})['nightly'] = True
logger.debug("Generating tasks for test {} on platform {}".format(
test_name, test['test-platform']))
yield test
@classmethod
def get_builds_by_platform(cls, dep_kind, loaded_tasks):
"""Find the build tasks on which tests will depend, keyed by
platform/type. Returns a dictionary mapping build platform to task."""
builds_by_platform = {}
for task in loaded_tasks:
if task.kind != dep_kind:
continue
build_platform = task.attributes.get('build_platform')
build_type = task.attributes.get('build_type')
if not build_platform or not build_type:
continue
platform = "{}/{}".format(build_platform, build_type)
if platform in builds_by_platform:
raise Exception("multiple build jobs for " + platform)
builds_by_platform[platform] = task
return builds_by_platform
@classmethod
def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
"""Get the test platforms for which test tasks should be generated,
based on the available build platforms. Returns a dictionary mapping
test platform to {test-set, build-platform, build-label}."""
test_platforms = {}
for test_platform, cfg in test_platforms_cfg.iteritems():
build_platform = cfg['build-platform']
if build_platform not in builds_by_platform:
logger.warning(
"No build task with platform {}; ignoring test platform {}".format(
build_platform, test_platform))
continue
test_platforms[test_platform] = {
'nightly': builds_by_platform[build_platform].attributes.get('nightly', False),
'build-platform': build_platform,
'build-label': builds_by_platform[build_platform].label,
}
test_platforms[test_platform].update(cfg)
return test_platforms
@classmethod
def expand_tests(cls, test_sets_cfg, test_platforms):
"""Expand the test sets in `test_platforms` out to sets of test names.
Returns a dictionary like `get_test_platforms`, with an additional
`test-names` key for each test platform, containing a set of test
names."""
rv = {}
for test_platform, cfg in test_platforms.iteritems():
test_sets = cfg['test-sets']
if not set(test_sets) < set(test_sets_cfg):
raise Exception(
"Test sets {} for test platform {} are not defined".format(
', '.join(test_sets), test_platform))
test_names = set()
for test_set in test_sets:
test_names.update(test_sets_cfg[test_set])
rv[test_platform] = cfg.copy()
rv[test_platform]['test-names'] = test_names
return rv
| Yukarumya/Yukarum-Redfoxes | taskcluster/taskgraph/task/test.py | Python | mpl-2.0 | 5,066 | 0.000987 |
#!/usr/bin/env python
import os
from os import path
import logging
import shutil
from sqlalchemy import create_engine
from . import config
from .config import TestBase
import taxtastic
from taxtastic.taxonomy import Taxonomy, TaxonIntegrityError
import taxtastic.ncbi
import taxtastic.utils
log = logging
datadir = config.datadir
echo = False
dbname = config.ncbi_master_db
class TestTaxonomyBase(TestBase):
def setUp(self):
self.engine = create_engine('sqlite:///' + self.dbname, echo=echo)
self.tax = Taxonomy(self.engine)
def tearDown(self):
self.engine.dispose()
class TestAddNode(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddNode, self).setUp()
def tearDown(self):
pass
def test01(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[{'tax_name': 'foo'}],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test02(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.tax.add_node(
tax_id=new_taxid,
parent_id='1279',
rank='species_group',
names=[{'tax_name': new_taxname}],
children=children,
source_name='foo'
)
lineage = self.tax.lineage(new_taxid)
self.assertTrue(lineage['tax_id'] == new_taxid)
self.assertTrue(lineage['tax_name'] == new_taxname)
for taxid in children:
lineage = self.tax.lineage(taxid)
self.assertTrue(lineage['parent_id'] == new_taxid)
def test03(self):
new_taxid = '1279_1'
new_taxname = 'between genus and species'
children = ['1280', '1281']
self.assertRaises(
TaxonIntegrityError,
self.tax.add_node,
tax_id=new_taxid,
parent_id='1279',
rank='genus',
names=[{'tax_name': new_taxname}],
children=children,
source_name='ncbi')
def test04(self):
# existing node
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280',
parent_id='1279',
rank='species',
names=[{'tax_name': 'I already exist'}],
source_name='ncbi'
)
def test05(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},
],
source_name='ncbi'
)
lineage = self.tax.lineage('1280_1')
self.assertEqual(lineage['tax_id'], '1280_1')
self.assertEqual(lineage['tax_name'], 'foo')
def test06(self):
# multiple names, none primary
self.assertRaises(
ValueError,
self.tax.add_node,
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo'},
{'tax_name': 'bar'},
],
source_name='ncbi')
def test07(self):
self.tax.add_node(
tax_id='1280_1',
parent_id='1280',
rank='subspecies',
names=[
{'tax_name': 'foo', 'is_primary': True},
{'tax_name': 'bar'},
],
source_name='ncbi',
execute=False
)
self.assertRaises(ValueError, self.tax.lineage, '1280_1')
def test08(self):
# test has_node()
self.assertTrue(self.tax.has_node('1280'))
self.assertFalse(self.tax.has_node('foo'))
class TestAddName(TestTaxonomyBase):
"""
test tax.add_node
"""
def count_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ?', (tax_id,))
return result.fetchone()[0]
def count_primary_names(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select count(*) from names where tax_id = ? and is_primary',
(tax_id,))
return result.fetchone()[0]
def primary_name(self, tax_id):
with self.tax.engine.connect() as con:
result = con.execute(
'select tax_name from names where tax_id = ? and is_primary',
(tax_id,))
val = result.fetchone()
return val[0] if val else None
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddName, self).setUp()
def test_name01(self):
names_before = self.count_names('1280')
self.tax.add_name(tax_id='1280', tax_name='SA', source_name='ncbi')
self.assertEqual(names_before + 1, self.count_names('1280'))
def test_name02(self):
# number of primary names should remain 1
names_before = self.count_names('1280')
self.assertEqual(self.count_primary_names('1280'), 1)
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.tax.add_name(tax_id='1280', tax_name='SA2', is_primary=True,
source_name='ncbi')
self.assertEqual(names_before + 2, self.count_names('1280'))
self.assertEqual(self.count_primary_names('1280'), 1)
def test_name03(self):
# insertion of duplicate row fails
self.tax.add_name(tax_id='1280', tax_name='SA', is_primary=True,
source_name='ncbi')
self.assertRaises(
ValueError, self.tax.add_name, tax_id='1280', tax_name='SA',
is_primary=True, source_name='ncbi')
self.assertEqual(self.primary_name('1280'), 'SA')
class TestGetSource(TestTaxonomyBase):
def setUp(self):
self.dbname = dbname
super(TestGetSource, self).setUp()
def test01(self):
self.assertRaises(ValueError, self.tax.get_source)
def test02(self):
self.assertRaises(ValueError, self.tax.get_source, 1, 'ncbi')
def test03(self):
result = self.tax.get_source(source_id=1)
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test04(self):
result = self.tax.get_source(source_name='ncbi')
self.assertDictEqual(result, {
'description': 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip',
'id': 1, 'name': 'ncbi'})
def test05(self):
self.assertRaises(ValueError, self.tax.get_source, source_id=2)
class TestAddSource(TestTaxonomyBase):
def setUp(self):
self.dbname = path.join(self.mkoutdir(), 'taxonomy.db')
log.info(self.dbname)
shutil.copyfile(dbname, self.dbname)
super(TestAddSource, self).setUp()
def tearDown(self):
pass
def sources(self):
with self.tax.engine.connect() as con:
result = con.execute('select * from source')
return result.fetchall()
def test01(self):
self.tax.add_source('foo')
self.assertEqual(self.sources()[1], (2, 'foo', None))
def test02(self):
self.tax.add_source('ncbi')
self.assertEqual(
self.sources(),
[(1, 'ncbi', 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdmp.zip')])
def test__node():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax._node(None) is None
assert tax._node('91061') == ('1239', 'class')
def test_sibling_of():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.sibling_of(None) is None
assert tax.sibling_of('91061') == '186801'
assert tax.sibling_of('1696') is None
def test_child_of():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.child_of(None) is None
assert tax.child_of('1239') == '91061'
assert tax.children_of('1239', 2) == ['91061', '186801']
def test_is_ancestor_of():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.is_ancestor_of('1280', '1239')
assert tax.is_ancestor_of(None, '1239') is False
assert tax.is_ancestor_of('1239', None) is False
def test_rank_and_parent():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.rank(None) is None
assert tax.rank('1239') == 'phylum'
assert tax.rank('1280') == 'species'
assert tax.parent_id(None) is None
assert tax.parent_id('1239') == '2'
def test_species_below():
engine = create_engine('sqlite:///../testfiles/taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
t = tax.species_below('1239')
parent_id, rank = tax._node(t)
for t in [None, '1239', '186801', '1117']:
s = tax.species_below(t)
assert t is None or s is None or tax.is_ancestor_of(s, t)
assert s is None or tax.rank(s) == 'species'
def test_is_below():
assert Taxonomy.is_below('species', 'family')
assert Taxonomy.is_below('family', 'kingdom')
assert not Taxonomy.is_below('kingdom', 'family')
assert Taxonomy.ranks_below('species') == []
assert Taxonomy.ranks_below('family') == ['species', 'genus']
def test_nary_subtree():
engine = create_engine(
'sqlite:///../testfiles/small_taxonomy.db', echo=False)
tax = Taxonomy(engine, taxtastic.ncbi.RANKS)
assert tax.nary_subtree(None) is None
t = tax.nary_subtree('1239')
assert t == ['1280', '372074', '1579', '1580',
'37734', '420335', '166485', '166486']
| fhcrc/taxtastic | tests/test_taxonomy.py | Python | gpl-3.0 | 10,512 | 0 |
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
    return BinNode(toks[0]) # create a leaf node for the attribute
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
        # supported operators => (OR, AND, <, >, <=, >=, ==)
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
def labelDuplicates(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
if tree == None: return 0
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
return (False, None)
return
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
| lferr/charm | charm/toolbox/policytree.py | Python | lgpl-3.0 | 6,070 | 0.014333 |
# tcpserv
#
# Copyright (c) 2015 Christian Sengstock, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
"""
Simple python socket helper library to implement
stateless tcp-servers.
Usage:
# Interface
>>> from tcpserv import listen, request
# Define server logic by a handler function:
# Gets a request string and returns a response string
>>> def my_handler(request): return "".join(reversed(request))
# Start the server
>>> listen("localhost", 55555, my_handler)
# Make requests
>>> for i in xrange(100):
>>> print request("localhost", 55555, "request %d" % i)
"""
import thread
import socket
import struct
DATA_SIZE_TYPE = "!I" # unsigned 4-byte int, network byte-order
# num of bytes; should always be 4;
# don't know if struct ensures this.
DATA_SIZE_LEN = len(struct.pack(DATA_SIZE_TYPE, 0))
if DATA_SIZE_LEN != 4:
raise ValueError(
"To work on different machines struct <!I> type should have " + \
"4 bytes. This is an implementation error!")
MAX_DATA = 2**(DATA_SIZE_LEN*8) - 1  # largest payload length the 4-byte prefix can encode
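# Wire format sketch (illustrative): every message is a 4-byte big-endian
# unsigned length prefix followed by that many payload bytes, e.g.
#
#   payload = "hello"
#   frame = struct.pack(DATA_SIZE_TYPE, len(payload)) + payload
#   # frame == '\x00\x00\x00\x05hello'
#
# Both request() and _server() read the prefix first and then call _recvn()
# for exactly that many bytes.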
def listen(host, port, handler):
"""
Listens on "host:port" for requests
and forwards traffic to the handler.
The handler return value is then send
to the client socket. A simple
echo server handler:
    >>> def my_handler(request_string): return request_string
The function blocks forever. Surround
with an appropriate signal handler
to quit the call (e.g., wait for
a KeyboardInterrupt event):
>>> try:
>>> listen("localhost", 55555, my_handler)
>>> except KeyboardInterrupt, e:
>>> pass
Args:
host<str>: Listening host
port<int>: Listening port
handler<function>:
Function 'f(request_string)->response_string'
processing the request.
"""
# Taken from
# http://code.activestate.com/recipes/578247-basic-threaded-python-tcp-server/
# Starts a new handler-thread for each request.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(1)
while 1:
clientsock, addr = sock.accept()
thread.start_new_thread(_server, (clientsock, handler))
def request(host, port, data):
"""
Sends data to server listening on "host:port" and returns
the response.
Args:
host<str>: Server host
port<int>: Server port
data<str>: Request data
Returns<str>:
The response data
"""
if type(data) != str:
raise ValueError("data must be of type <str>")
if len(data) > MAX_DATA:
raise ValueError("request data must have len <= %d", MAX_DATA)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
b4 = struct.pack(DATA_SIZE_TYPE, len(data))
sock.sendall(b4)
sock.sendall(data)
b4 = _recvn(sock, DATA_SIZE_LEN)
n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
data = _recvn(sock, n)
sock.close()
return data
def _recvn(sock, n):
"""
Reads exactly n bytes from the socket.
"""
buf = []
m = 0
    while m < n:
        pack = sock.recv(n-m)
        if not pack:
            raise IOError("socket closed after receiving %d of %d bytes" % (m, n))
        m += len(pack)
        buf.append(pack)
return "".join(buf)
def _server(clientsock, handler):
"""
Reads the request from the client socket
and calls the handler callback to process the data.
Sends back the response (return value of the
handler callback) to the client socket.
"""
b4 = _recvn(clientsock, DATA_SIZE_LEN)
n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
req = _recvn(clientsock, n)
resp = handler(req)
if type(resp) != str:
raise ValueError("handler return value must be of type <str>")
if len(resp) > MAX_DATA:
raise ValueError("handler return value must have len <= %d", MAX_DATA)
b4 = struct.pack(DATA_SIZE_TYPE, len(resp))
clientsock.sendall(b4)
clientsock.sendall(resp)
def _test():
import time
def echo_handler(data):
return data
thread.start_new_thread(listen, ("localhost", 55555, echo_handler))
# listen("localhost", 55555, echo_handler)
time.sleep(1)
print "generating data..."
data = "1"*(2**28)
print "starting communication..."
for i in xrange(1000):
print "request", i
resp = request("localhost", 55555, data)
print "received %.02f KB" % (len(resp)/1000.0)
print "validation..."
assert len(resp) == len(data)
#for j,c in enumerate(data):
# assert(resp[j] == c)
if __name__ == "__main__":
_test() | csengstock/tcpserv | tcpserv.py | Python | lgpl-3.0 | 5,176 | 0.000773 |
"""Main entry for mass apk when invoked as python module.
>>> python -m massapk
"""
from mass_apk import cli
if __name__ == "__main__":
cli.main()
| binary-signal/mass-apk-installer | mass_apk/__main__.py | Python | bsd-3-clause | 154 | 0 |
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_bootstrap_external = None
_thread = None # Brython
import _weakref # Brython
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
# Dictionary protected by the global import lock
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
self._lock = _get_module_lock(self._name)
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Acquire/release internally the global import lock to protect
_module_locks."""
_imp.acquire_lock()
try:
try:
lock = _module_locks[name]()
except KeyError:
lock = None
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(ref, name=name):
_imp.acquire_lock()
try:
# bpo-31070: Check if another thread created a new lock
# after the previous lock was destroyed
# but before the weakref callback was called.
if _module_locks.get(name) is ref:
del _module_locks[name]
finally:
_imp.release_lock()
_module_locks[name] = _weakref.ref(lock, cb)
finally:
_imp.release_lock()
return lock
def _lock_unlock_module(name):
"""Acquires then releases the module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
"""
lock = _get_module_lock(name)
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
# Typically used by loader classes as a method replacement.
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
spec = spec_from_loader(fullname, self)
if fullname in sys.modules:
module = sys.modules[fullname]
_exec(spec, module)
return sys.modules[fullname]
else:
return _load(spec)
# Module specifications #######################################################
def _module_repr(module):
# The implementation of ModuleType.__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
        # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
        # drop their implementations for module_repr(), we can add a
        # deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _module_repr_from_spec(spec)
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
    When this is True, the `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
@property
def cached(self):
if self._cached is None:
if self.origin is not None and self._set_fileattr:
if _bootstrap_external is None:
raise NotImplementedError
self._cached = _bootstrap_external._get_cached(self.origin)
return self._cached
@cached.setter
def cached(self, cached):
self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if _bootstrap_external is None:
raise NotImplementedError
spec_from_file_location = _bootstrap_external.spec_from_file_location
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
def _spec_from_module(module, loader=None, origin=None):
# This function is meant for use in _setup().
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return spec
name = module.__name__
if loader is None:
try:
loader = module.__loader__
except AttributeError:
# loader will stay None.
pass
try:
location = module.__file__
except AttributeError:
location = None
if origin is None:
if location is None:
try:
origin = loader._ORIGIN
except AttributeError:
origin = None
else:
origin = location
try:
cached = module.__cached__
except AttributeError:
cached = None
try:
submodule_search_locations = list(module.__path__)
except AttributeError:
submodule_search_locations = None
spec = ModuleSpec(name, loader, origin=origin)
spec._set_fileattr = False if location is None else True
spec.cached = cached
spec.submodule_search_locations = submodule_search_locations
return spec
def _init_module_attrs(spec, module, *, override=False):
    # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (override or getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if override or getattr(module, '__loader__', None) is None:
loader = spec.loader
if loader is None:
# A backward compatibility hack.
if spec.submodule_search_locations is not None:
if _bootstrap_external is None:
raise NotImplementedError
_NamespaceLoader = _bootstrap_external._NamespaceLoader
loader = _NamespaceLoader.__new__(_NamespaceLoader)
loader._path = spec.submodule_search_locations
spec.loader = loader
# While the docs say that module.__file__ is not set for
# built-in modules, and the code below will avoid setting it if
# spec.has_location is false, this is incorrect for namespace
# packages. Namespace packages have no location, but their
# __spec__.origin is None, and thus their module.__file__
# should also be None for consistency. While a bit of a hack,
# this is the best place to ensure this consistency.
#
            # See https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module
# and bpo-32305
module.__file__ = None
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
# __file__/__cached__
if spec.has_location:
if override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
if override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
return module
def module_from_spec(spec):
"""Create a module based on the provided spec."""
# Typically loaders will not implement create_module().
module = None
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` then it means default
# module creation should be used.
module = spec.loader.create_module(spec)
elif hasattr(spec.loader, 'exec_module'):
raise ImportError('loaders that define exec_module() '
'must also define create_module()')
if module is None:
module = _new_module(spec.name)
_init_module_attrs(spec, module)
return module
def _module_repr_from_spec(spec):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
# Used by importlib.reload() and _load_module_shim().
def _exec(spec, module):
"""Execute the spec's specified module in an existing module's namespace."""
name = spec.name
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
if spec.loader is None:
if spec.submodule_search_locations is None:
raise ImportError('missing loader', name=spec.name)
# namespace package
_init_module_attrs(spec, module, override=True)
return module
_init_module_attrs(spec, module, override=True)
if not hasattr(spec.loader, 'exec_module'):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec.loader.load_module(name)
else:
spec.loader.exec_module(module)
return sys.modules[name]
def _load_backward_compatible(spec):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
# Since module.__path__ may not line up with
            # spec.submodule_search_locations, we can't necessarily rely
# on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(spec):
# A helper for direct use by the import system.
if spec.loader is not None:
# not a namespace package
if not hasattr(spec.loader, 'exec_module'):
return _load_backward_compatible(spec)
module = module_from_spec(spec)
with _installed_safely(module):
if spec.loader is None:
if spec.submodule_search_locations is None:
raise ImportError('missing loader', name=spec.name)
# A namespace package so do nothing.
else:
spec.loader.exec_module(module)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
return sys.modules[spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def _load(spec):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
with _ModuleLockManager(spec.name):
return _load_unlocked(spec)
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (built-in)>'.format(module.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
return spec_from_loader(fullname, cls, origin='built-in')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
return spec.loader if spec is not None else None
@classmethod
    def create_module(cls, spec):
"""Create a built-in module"""
if spec.name not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(spec.name),
name=spec.name)
return _call_with_frames_removed(_imp.create_builtin, spec)
@classmethod
    def exec_module(cls, module):
"""Exec a built-in module"""
_call_with_frames_removed(_imp.exec_builtin, module)
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
load_module = classmethod(_load_module_shim)
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(m):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (frozen)>'.format(m.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if _imp.is_frozen(fullname):
return spec_from_loader(fullname, cls, origin='frozen')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module.
This method is deprecated. Use find_spec() instead.
"""
return cls if _imp.is_frozen(fullname) else None
@classmethod
def create_module(cls, spec):
"""Use default semantics for module creation."""
@staticmethod
def exec_module(module):
name = module.__spec__.name
if not _imp.is_frozen(name):
raise ImportError('{!r} is not a frozen module'.format(name),
name=name)
code = _call_with_frames_removed(_imp.get_frozen_object, name)
exec(code, module.__dict__)
@classmethod
def load_module(cls, fullname):
"""Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
return _load_module_shim(cls, fullname)
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
def _find_spec_legacy(finder, name, path):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
loader = finder.find_module(name, path)
if loader is None:
return None
return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
"""Find a module's spec."""
meta_path = sys.meta_path
if meta_path is None:
# PyImport_Cleanup() is running or has been called.
raise ImportError("sys.meta_path is None, Python is likely "
"shutting down")
if not meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
# target will usually indicate a reload there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if level > 0:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif not package:
raise ImportError('attempted relative import with no known parent '
'package')
if not name and level == 0:
raise ValueError('Empty module name')
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
spec = _find_spec(name, path)
if spec is None:
raise ModuleNotFoundError(_ERR_MSG.format(name), name=name)
else:
module = _load_unlocked(spec)
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
return module
_NEEDS_LOADING = object()
def _find_and_load(name, import_):
"""Find and load the module."""
with _ModuleLockManager(name):
module = sys.modules.get(name, _NEEDS_LOADING)
if module is _NEEDS_LOADING:
return _find_and_load_unlocked(name, import_)
if module is None:
message = ('import of {} halted; '
'None in sys.modules'.format(name))
raise ModuleNotFoundError(message, name=name)
_lock_unlock_module(name)
return module
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
return _find_and_load(name, _gcd_import)
def _handle_fromlist(module, fromlist, import_, *, recursive=False):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
for x in fromlist:
if not isinstance(x, str):
if recursive:
where = module.__name__ + '.__all__'
else:
where = "``from list''"
raise TypeError(f"Item in {where} must be str, "
f"not {type(x).__name__}")
elif x == '*':
if not recursive and hasattr(module, '__all__'):
_handle_fromlist(module, module.__all__, import_,
recursive=True)
elif not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ModuleNotFoundError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if (exc.name == from_name and
sys.modules.get(from_name, _NEEDS_LOADING) is not None):
continue
raise
return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
spec = globals.get('__spec__')
if package is not None:
if spec is not None and package != spec.parent:
_warnings.warn("__package__ != __spec__.parent "
f"({package!r} != {spec.parent!r})",
ImportWarning, stacklevel=3)
return package
elif spec is not None:
return spec.parent
else:
_warnings.warn("can't resolve package from __spec__ or __package__, "
"falling back on __name__ and __path__",
ImportWarning, stacklevel=3)
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
def _builtin_from_name(name):
spec = BuiltinImporter.find_spec(name)
if spec is None:
raise ImportError('no built-in module named ' + name)
return _load_unlocked(spec)
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys
_imp = _imp_module
sys = sys_module
# Set up the spec for existing builtin/frozen modules.
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if name in sys.builtin_module_names:
loader = BuiltinImporter
elif _imp.is_frozen(name):
loader = FrozenImporter
else:
continue
spec = _spec_from_module(module, loader)
_init_module_attrs(spec, module)
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
#for builtin_name in ('_thread', '_warnings', '_weakref'):
# Brython : _thread and _weakref are not built-in
for builtin_name in ('_warnings',):
if builtin_name not in sys.modules:
builtin_module = _builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
def _install(sys_module, _imp_module):
"""Install importers for builtin and frozen modules"""
_setup(sys_module, _imp_module)
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
def _install_external_importers():
"""Install importers that require external filesystem access"""
global _bootstrap_external
import _frozen_importlib_external
_bootstrap_external = _frozen_importlib_external
_frozen_importlib_external._install(sys.modules[__name__])
| kikocorreoso/brython | www/src/Lib/importlib/_bootstrap.py | Python | bsd-3-clause | 39,424 | 0.00038 |
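# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the file above): the private
# spec/loader machinery in _bootstrap is exposed publicly through
# importlib.util. The in-memory loader below is hypothetical and exists only
# to show the create_module()/exec_module() contract that module_from_spec()
# and _load_unlocked() rely on.
# ---------------------------------------------------------------------------
import importlib.abc
import importlib.util

class _StringSourceLoader(importlib.abc.Loader):
    """Loads a module from a source string held in memory."""
    def __init__(self, source):
        self.source = source
    def create_module(self, spec):
        return None  # None -> use default module creation, as module_from_spec() allows
    def exec_module(self, module):
        exec(self.source, module.__dict__)

_spec = importlib.util.spec_from_loader("demo_module", _StringSourceLoader("ANSWER = 42"))
_module = importlib.util.module_from_spec(_spec)  # mirrors module_from_spec() above
_spec.loader.exec_module(_module)                 # mirrors the exec step in _load_unlocked()
assert _module.ANSWER == 42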
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ResponseBase(Model):
"""ResponseBase.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Identifiable
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
"""
_validation = {
'_type': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
}
_subtype_map = {
'_type': {'Identifiable': 'Identifiable'}
}
def __init__(self, **kwargs) -> None:
super(ResponseBase, self).__init__(**kwargs)
self._type = None
class Identifiable(ResponseBase):
"""Defines the identity of a resource.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Response
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
_subtype_map = {
'_type': {'Response': 'Response'}
}
def __init__(self, **kwargs) -> None:
super(Identifiable, self).__init__(**kwargs)
self.id = None
self._type = 'Identifiable'
class Response(Identifiable):
"""Defines a response. All schemas that could be returned at the root of a
response should inherit from this.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchResponse, ErrorResponse, Answer, Thing
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
}
_subtype_map = {
'_type': {'SearchResponse': 'SearchResponse', 'ErrorResponse': 'ErrorResponse', 'Answer': 'Answer', 'Thing': 'Thing'}
}
def __init__(self, **kwargs) -> None:
super(Response, self).__init__(**kwargs)
self.web_search_url = None
self._type = 'Response'
class Answer(Response):
"""Answer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchResultsAnswer
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.customsearch.models.Query]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
}
_subtype_map = {
'_type': {'SearchResultsAnswer': 'SearchResultsAnswer'}
}
def __init__(self, **kwargs) -> None:
super(Answer, self).__init__(**kwargs)
self.follow_up_queries = None
self._type = 'Answer'
class Thing(Response):
"""Thing.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CreativeWork
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
}
_subtype_map = {
'_type': {'CreativeWork': 'CreativeWork'}
}
def __init__(self, **kwargs) -> None:
super(Thing, self).__init__(**kwargs)
self.name = None
self.url = None
self.description = None
self.bing_id = None
self._type = 'Thing'
class CreativeWork(Thing):
"""CreativeWork.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: WebPage
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider:
list[~azure.cognitiveservices.search.customsearch.models.Thing]
:ivar text:
:vartype text: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
}
_subtype_map = {
'_type': {'WebPage': 'WebPage'}
}
def __init__(self, **kwargs) -> None:
super(CreativeWork, self).__init__(**kwargs)
self.thumbnail_url = None
self.provider = None
self.text = None
self._type = 'CreativeWork'
class Error(Model):
"""Defines the error that occurred.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code that identifies the category of
error. Possible values include: 'None', 'ServerError', 'InvalidRequest',
'RateLimitExceeded', 'InvalidAuthorization', 'InsufficientAuthorization'.
Default value: "None" .
:type code: str or
~azure.cognitiveservices.search.customsearch.models.ErrorCode
:ivar sub_code: The error code that further helps to identify the error.
Possible values include: 'UnexpectedError', 'ResourceError',
'NotImplemented', 'ParameterMissing', 'ParameterInvalidValue',
'HttpNotAllowed', 'Blocked', 'AuthorizationMissing',
'AuthorizationRedundancy', 'AuthorizationDisabled', 'AuthorizationExpired'
:vartype sub_code: str or
~azure.cognitiveservices.search.customsearch.models.ErrorSubCode
:param message: Required. A description of the error.
:type message: str
:ivar more_details: A description that provides additional information
about the error.
:vartype more_details: str
:ivar parameter: The parameter in the request that caused the error.
:vartype parameter: str
:ivar value: The parameter's value in the request that was not valid.
:vartype value: str
"""
_validation = {
'code': {'required': True},
'sub_code': {'readonly': True},
'message': {'required': True},
'more_details': {'readonly': True},
'parameter': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'sub_code': {'key': 'subCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'more_details': {'key': 'moreDetails', 'type': 'str'},
'parameter': {'key': 'parameter', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, message: str, code="None", **kwargs) -> None:
super(Error, self).__init__(**kwargs)
self.code = code
self.sub_code = None
self.message = message
self.more_details = None
self.parameter = None
self.value = None
class ErrorResponse(Response):
"""The top-level response that represents a failed request.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:param errors: Required. A list of errors that describe the reasons why
the request failed.
:type errors:
list[~azure.cognitiveservices.search.customsearch.models.Error]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'errors': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[Error]'},
}
def __init__(self, *, errors, **kwargs) -> None:
super(ErrorResponse, self).__init__(**kwargs)
self.errors = errors
self._type = 'ErrorResponse'
class ErrorResponseException(HttpOperationError):
"""Server responded with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class Query(Model):
"""Defines a search query.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param text: Required. The query string. Use this string as the query term
in a new search request.
:type text: str
:ivar display_text: The display version of the query term. This version of
the query term may contain special characters that highlight the search
term found in the query string. The string contains the highlighting
characters only if the query enabled hit highlighting
:vartype display_text: str
:ivar web_search_url: The URL that takes the user to the Bing search
    results page for the query. Only related search results include this field.
:vartype web_search_url: str
:ivar search_link:
:vartype search_link: str
"""
_validation = {
'text': {'required': True},
'display_text': {'readonly': True},
'web_search_url': {'readonly': True},
'search_link': {'readonly': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'display_text': {'key': 'displayText', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'search_link': {'key': 'searchLink', 'type': 'str'},
}
def __init__(self, *, text: str, **kwargs) -> None:
super(Query, self).__init__(**kwargs)
self.text = text
self.display_text = None
self.web_search_url = None
self.search_link = None
class QueryContext(Model):
"""Defines the query context that Bing used for the request.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param original_query: Required. The query string as specified in the
request.
:type original_query: str
:ivar altered_query: The query string used by Bing to perform the query.
Bing uses the altered query string if the original query string contained
spelling mistakes. For example, if the query string is "saling downwind",
the altered query string will be "sailing downwind". This field is
included only if the original query string contains a spelling mistake.
:vartype altered_query: str
:ivar alteration_override_query: The query string to use to force Bing to
use the original string. For example, if the query string is "saling
downwind", the override query string will be "+saling downwind". Remember
to encode the query string which results in "%2Bsaling+downwind". This
field is included only if the original query string contains a spelling
mistake.
:vartype alteration_override_query: str
:ivar adult_intent: A Boolean value that indicates whether the specified
query has adult intent. The value is true if the query has adult intent;
otherwise, false.
:vartype adult_intent: bool
"""
_validation = {
'original_query': {'required': True},
'altered_query': {'readonly': True},
'alteration_override_query': {'readonly': True},
'adult_intent': {'readonly': True},
}
_attribute_map = {
'original_query': {'key': 'originalQuery', 'type': 'str'},
'altered_query': {'key': 'alteredQuery', 'type': 'str'},
'alteration_override_query': {'key': 'alterationOverrideQuery', 'type': 'str'},
'adult_intent': {'key': 'adultIntent', 'type': 'bool'},
}
def __init__(self, *, original_query: str, **kwargs) -> None:
super(QueryContext, self).__init__(**kwargs)
self.original_query = original_query
self.altered_query = None
self.alteration_override_query = None
self.adult_intent = None
class SearchResponse(Response):
"""Defines the top-level object that the response includes when the request
succeeds.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar query_context: An object that contains the query string that Bing
used for the request. This object contains the query string as entered by
the user. It may also contain an altered query string that Bing used for
the query if the query string contained a spelling mistake.
:vartype query_context:
~azure.cognitiveservices.search.customsearch.models.QueryContext
:ivar web_pages: A list of webpages that are relevant to the search query.
:vartype web_pages:
~azure.cognitiveservices.search.customsearch.models.WebWebAnswer
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'query_context': {'readonly': True},
'web_pages': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'web_pages': {'key': 'webPages', 'type': 'WebWebAnswer'},
}
def __init__(self, **kwargs) -> None:
super(SearchResponse, self).__init__(**kwargs)
self.query_context = None
self.web_pages = None
self._type = 'SearchResponse'
class SearchResultsAnswer(Answer):
"""SearchResultsAnswer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: WebWebAnswer
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.customsearch.models.Query]
:ivar query_context:
:vartype query_context:
~azure.cognitiveservices.search.customsearch.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are
relevant to the query. Use this number along with the count and offset
query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
}
_subtype_map = {
'_type': {'Web/WebAnswer': 'WebWebAnswer'}
}
def __init__(self, **kwargs) -> None:
super(SearchResultsAnswer, self).__init__(**kwargs)
self.query_context = None
self.total_estimated_matches = None
self.is_family_friendly = None
self._type = 'SearchResultsAnswer'
class WebMetaTag(Model):
"""Defines a webpage's metadata.
Variables are only populated by the server, and will be ignored when
sending a request.
    :ivar name: The name of the metadata.
    :vartype name: str
    :ivar content: The metadata value.
    :vartype content: str
"""
_validation = {
'name': {'readonly': True},
'content': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'content': {'key': 'content', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(WebMetaTag, self).__init__(**kwargs)
self.name = None
self.content = None
class WebPage(CreativeWork):
"""Defines a webpage that is relevant to the query.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider:
list[~azure.cognitiveservices.search.customsearch.models.Thing]
:ivar text:
:vartype text: str
:ivar display_url: The display URL of the webpage. The URL is meant for
display purposes only and is not well formed.
:vartype display_url: str
:ivar snippet: A snippet of text from the webpage that describes its
contents.
:vartype snippet: str
:ivar deep_links: A list of links to related content that Bing found in
the website that contains this webpage. The Webpage object in this context
includes only the name, url, urlPingSuffix, and snippet fields.
:vartype deep_links:
list[~azure.cognitiveservices.search.customsearch.models.WebPage]
:ivar date_last_crawled: The last time that Bing crawled the webpage. The
date is in the form, YYYY-MM-DDTHH:MM:SS. For example,
2015-04-13T05:23:39.
:vartype date_last_crawled: str
:ivar search_tags: A list of search tags that the webpage owner specified
on the webpage. The API returns only indexed search tags. The name field
of the MetaTag object contains the indexed search tag. Search tags begin
with search.* (for example, search.assetId). The content field contains
the tag's value.
:vartype search_tags:
list[~azure.cognitiveservices.search.customsearch.models.WebMetaTag]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'display_url': {'readonly': True},
'snippet': {'readonly': True},
'deep_links': {'readonly': True},
'date_last_crawled': {'readonly': True},
'search_tags': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'display_url': {'key': 'displayUrl', 'type': 'str'},
'snippet': {'key': 'snippet', 'type': 'str'},
'deep_links': {'key': 'deepLinks', 'type': '[WebPage]'},
'date_last_crawled': {'key': 'dateLastCrawled', 'type': 'str'},
'search_tags': {'key': 'searchTags', 'type': '[WebMetaTag]'},
}
def __init__(self, **kwargs) -> None:
super(WebPage, self).__init__(**kwargs)
self.display_url = None
self.snippet = None
self.deep_links = None
self.date_last_crawled = None
self.search_tags = None
self._type = 'WebPage'
class WebWebAnswer(SearchResultsAnswer):
"""Defines a list of relevant webpage links.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.customsearch.models.Query]
:ivar query_context:
:vartype query_context:
~azure.cognitiveservices.search.customsearch.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are
relevant to the query. Use this number along with the count and offset
query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of webpages that are relevant to the query.
:type value:
list[~azure.cognitiveservices.search.customsearch.models.WebPage]
:ivar some_results_removed: A Boolean value that indicates whether the
response excluded some results from the answer. If Bing excluded some
results, the value is true.
:vartype some_results_removed: bool
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
'some_results_removed': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[WebPage]'},
'some_results_removed': {'key': 'someResultsRemoved', 'type': 'bool'},
}
def __init__(self, *, value, **kwargs) -> None:
super(WebWebAnswer, self).__init__(**kwargs)
self.value = value
self.some_results_removed = None
self._type = 'Web/WebAnswer'
| Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-search-customsearch/azure/cognitiveservices/search/customsearch/models/_models_py3.py | Python | mit | 29,187 | 0.000171 |
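# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated file above): only the
# request-side fields of these models are writable; every field marked readonly
# is populated by the service during deserialization. The import path is taken
# from the docstrings above and is assumed to be resolvable at runtime.
# ---------------------------------------------------------------------------
from azure.cognitiveservices.search.customsearch.models import (
    Error, ErrorResponse, Query)

error = Error(code="InvalidRequest", message="Parameter 'q' is missing.")
failure = ErrorResponse(errors=[error])
query = Query(text="sailing downwind")
print(failure.errors[0].message, query.text)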
class Drawable(object):
def draw(self, display_screen, dT):
        pass
| bpeck/tumblr-display | src/Drawable.py | Python | apache-2.0 | 76 | 0.013158
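# ---------------------------------------------------------------------------
# Editor's illustrative sketch: Drawable is an interface-style base class; a
# hypothetical subclass overrides draw() and receives the display surface plus
# the elapsed time dT from the caller's render loop.
# ---------------------------------------------------------------------------
class BouncingLabel(Drawable):
    def __init__(self, text):
        self.text = text
        self.elapsed = 0.0
    def draw(self, display_screen, dT):
        # Accumulate elapsed time; a real implementation would render
        # self.text onto display_screen here.
        self.elapsed += dT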
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
from elopic.data.elopicdb import EloPicDB, EloPicDBError
from elopic.logic.elo import INITIAL_ELO_SCORE
from tests.utils import copy_all_files, delete_files_matching_pattern
class TestDatabase(unittest.TestCase):
"""Test cases for the data package"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.picdir = 'testdata/pics1'
copy_all_files(self.picdir, self.tempdir)
self._initDB()
def tearDown(self):
self.db.close()
shutil.rmtree(self.tempdir)
def _initDB(self):
self.db = EloPicDB()
self.db.load_from_disk(self.tempdir)
def _assert_db_matches_dir(self, dir):
expected = self._get_imagepaths_in_dir(dir)
result = self.db.to_list()
        self.assertEqual(len(result), len(expected), 'Number of pictures does not match.')
self.assertListEqual(
[r[0] for r in result],
expected,
'Paths do not match'
)
for r in result:
self.assertEqual(r[1], 0)
self.assertEqual(r[2], INITIAL_ELO_SCORE)
self.assertEqual(r[3], 0)
def _get_imagepaths_in_dir(self, dir):
return [os.path.join(self.tempdir, e) for e in os.listdir(dir) if e.endswith('.jpg')]
def test_load_from_disk_new_folder(self):
self._assert_db_matches_dir(self.tempdir)
def test_load_additional_files(self):
self.db.close()
# delete_files_matching_pattern(self.tempdir, r'^\d+\.jpg$')
copy_all_files('testdata/pics2', self.tempdir)
self._initDB()
self._assert_db_matches_dir(self.tempdir)
@unittest.skip('Support for deleted files is not in yet')
def test_load_deleted_files(self):
self.db.close()
delete_files_matching_pattern(self.tempdir, r'^\d+\.jpg$')
copy_all_files('testdata/pics2', self.tempdir)
self._initDB()
self._assert_db_matches_dir(self.tempdir)
def test_rating(self):
images = self._get_imagepaths_in_dir(self.tempdir)
for path in images:
self.assertEqual(INITIAL_ELO_SCORE, self.db.get_rating(path))
for idx, path in enumerate(images):
self.db.update_rating(path, idx)
for idx, path in enumerate(images):
self.assertEqual(idx, self.db.get_rating(path))
self.assertListEqual(images[:-4:-1], self.db.get_top_x_filepaths_by_rating(3))
def test_headers(self):
expected = [
'ignore',
'path',
'rating',
'seen_count',
]
result = self.db.get_headers()
result.sort()
self.assertEqual(expected, result)
def test_ignore(self):
images = self._get_imagepaths_in_dir(self.tempdir)
self.db.ignore_pictures(images[:3])
self.db.ignore_pictures(images[-1:])
self.maxDiff = None
self.assertListEqual(images[3:-1], [i['path'] for i in self.db.get_all()])
| mmanhertz/elopic | tests/test_db.py | Python | bsd-2-clause | 3,055 | 0.001309 |
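# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test module above): the
# EloPicDB calls exercised by the tests, used outside of unittest. The
# directory path is hypothetical; it only needs to contain .jpg files.
# ---------------------------------------------------------------------------
from elopic.data.elopicdb import EloPicDB

db = EloPicDB()
db.load_from_disk('/tmp/my_pictures')           # indexes the .jpg files it finds
paths = [row[0] for row in db.to_list()]        # each row starts with the file path
if paths:
    db.update_rating(paths[0], 1600)
    print(db.get_rating(paths[0]))
    print(db.get_top_x_filepaths_by_rating(3))  # best-rated pictures first
db.close()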
import os
import pwd
import grp
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
if os.getuid() != 0:
        # We're not running as root, so there is nothing to drop
return
# Get the uid/gid from the name
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
pwnam = pwd.getpwnam(sudo_user)
running_uid = pwnam.pw_uid
running_gid = pwnam.pw_gid
else:
running_uid = pwd.getpwnam(uid_name).pw_uid
running_gid = grp.getgrnam(gid_name).gr_gid
# Remove group privileges
os.setgroups([])
# Try setting the new uid/gid
os.setgid(running_gid)
os.setuid(running_uid)
# Ensure a very conservative umask
os.umask(0o22)
| the-invoice/nab | nwaddrbook/icmp/util.py | Python | gpl-3.0 | 701 | 0 |
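# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the module above): typical use in a
# script that must start as root (e.g. to open a raw ICMP socket) and then drop
# to the invoking SUDO_USER, or to nobody/nogroup when there is none. The
# import path follows the repository layout and is an assumption.
# ---------------------------------------------------------------------------
import socket

from nwaddrbook.icmp.util import drop_privileges

# Privileged step first: raw sockets normally require root (or CAP_NET_RAW).
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
drop_privileges()
print("now running unprivileged; the raw socket stays usable")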
#!/bin/python
import os
import roomai.common
import copy
#
#
# Rank indices map to card labels (see str_to_rank below):
#   index : 0  1  2  3  4  5  6  7  8  9  10 11 12 13 14
#   card  : 3  4  5  6  7  8  9  T  J  Q  K  A  2  r  R
#
class DouDiZhuActionElement:
str_to_rank = {'3':0, '4':1, '5':2, '6':3, '7':4, '8':5, '9':6, 'T':7, 'J':8, 'Q':9, 'K':10, 'A':11, '2':12, 'r':13, 'R':14, 'x':15, 'b':16}
# x means check, b means bid
rank_to_str = {0: '3', 1: '4', 2: '5', 3: '6', 4: '7', 5: '8', 6: '9', 7: 'T', 8: 'J', 9: 'Q', 10: 'K', 11: 'A', 12: '2', 13: 'r', 14: 'R', 15: 'x', 16: 'b'}
total_normal_cards = 15
class DouDiZhuPokerAction(roomai.common.AbstractAction):
"""
"""
def __init__(self):
"""
"""
pass
def __init__(self, masterCards, slaveCards):
self.__masterCards__ = [c for c in masterCards]
self.__slaveCards__ = [c for c in slaveCards]
self.__masterPoints2Count__ = None
self.__slavePoints2Count__ = None
self.__isMasterStraight__ = None
self.__maxMasterPoint__ = None
self.__minMasterPoint__ = None
self.__pattern__ = None
self.__action2pattern__()
self.__key__ = DouDiZhuPokerAction.__master_slave_cards_to_key__(masterCards, slaveCards)
def __get_key__(self): return self.__key__
key = property(__get_key__, doc="The key of DouDiZhu Action")
def __get_masterCards__(self): return self.__masterCards__
masterCards = property(__get_masterCards__, doc="The cards act as the master cards")
def __get_slaveCards__(self): return self.__slaveCards__
slaveCards = property(__get_slaveCards__, doc="The cards act as the slave cards")
def __get_masterPoints2Count__(self): return self.__masterPoints2Count__
masterPoints2Count = property(__get_masterPoints2Count__, doc="The count of different points in the masterCards")
def __get_slavePoints2Count__(self): return self.__slavePoints2Count__
slavePoints2Count = property(__get_slavePoints2Count__, doc="The count of different points in the slaveCards")
def __get_isMasterStraight__(self): return self.__isMasterStraight__
isMasterStraight = property(__get_isMasterStraight__, doc="The master cards are straight")
def __get_maxMasterPoint__(self): return self.__maxMasterPoint__
maxMasterPoint = property(__get_maxMasterPoint__, doc="The max point in the master cards")
def __get_minMasterPoint__(self): return self.__minMasterPoint__
minMasterPoint = property(__get_minMasterPoint__, doc="The min point in the master cards")
def __get_pattern__(self): return self.__pattern__
pattern = property(__get_pattern__, doc="The pattern of the action")
@classmethod
def lookup(cls, key):
return AllActions["".join(sorted(key))]
@classmethod
def __master_slave_cards_to_key__(cls, masterCards, slaveCards):
key_int = (masterCards + slaveCards)
key_str = []
for key in key_int:
key_str.append(DouDiZhuActionElement.rank_to_str[key])
key_str.sort()
return "".join(key_str)
def __action2pattern__(self):
self.__masterPoints2Count__ = dict()
for c in self.__masterCards__:
if c in self.__masterPoints2Count__:
self.__masterPoints2Count__[c] += 1
else:
self.__masterPoints2Count__[c] = 1
self.__slavePoints2Count__ = dict()
for c in self.__slaveCards__:
if c in self.__slavePoints2Count__:
self.__slavePoints2Count__[c] += 1
else:
self.__slavePoints2Count__[c] = 1
self.__isMasterStraight__ = 0
num = 0
for v in self.__masterPoints2Count__:
if (v + 1) in self.__masterPoints2Count__ and (v + 1) < DouDiZhuActionElement.str_to_rank["2"]:
num += 1
if num == len(self.__masterPoints2Count__) - 1 and len(self.__masterPoints2Count__) != 1:
self.__isMasterStraight__ = 1
self.__maxMasterPoint__ = -1
self.__minMasterPoint__ = 100
for c in self.__masterPoints2Count__:
if self.__maxMasterPoint__ < c:
self.__maxMasterPoint__ = c
if self.__minMasterPoint__ > c:
self.__minMasterPoint__ = c
########################
## action 2 pattern ####
########################
# is cheat?
if len(self.__masterCards__) == 1 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["x"]:
self.__pattern__ = AllPatterns["i_cheat"]
# is roblord
elif len(self.__masterCards__) == 1 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] == DouDiZhuActionElement.str_to_rank["b"]:
self.__pattern__ = AllPatterns["i_bid"]
# is twoKings
elif len(self.__masterCards__) == 2 \
and len(self.__masterPoints2Count__) == 2 \
and len(self.__slaveCards__) == 0 \
and self.__masterCards__[0] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]] \
and self.__masterCards__[1] in [DouDiZhuActionElement.str_to_rank["r"], DouDiZhuActionElement.str_to_rank["R"]]:
self.__pattern__ = AllPatterns["x_rocket"]
else:
## process masterCards
masterPoints = self.__masterPoints2Count__
if len(masterPoints) > 0:
count = masterPoints[self.__masterCards__[0]]
for c in masterPoints:
if masterPoints[c] != count:
self.__pattern__ = AllPatterns["i_invalid"]
            if self.__pattern__ is None:
pattern = "p_%d_%d_%d_%d_%d" % (len(self.__masterCards__), len(masterPoints), \
self.__isMasterStraight__, \
len(self.__slaveCards__), 0)
if pattern in AllPatterns:
self.__pattern__= AllPatterns[pattern]
else:
self.__pattern__ = AllPatterns["i_invalid"]
def __deepcopy__(self, memodict={}, newinstance = None):
return self.lookup(self.key)
############## read data ################
AllPatterns = dict()
AllActions = dict()
from roomai.doudizhu import doudizhu_action_data
from roomai.doudizhu import doudizhu_pattern_data
for line in doudizhu_pattern_data:
line = line.replace(" ", "").strip()
line = line.split("#")[0]
if len(line) == 0 or len(line[0].strip()) == 0:
continue
lines = line.split(",")
for i in range(1, len(lines)):
lines[i] = int(lines[i])
AllPatterns[lines[0]] = lines
for line in doudizhu_action_data:
line = line.replace(" ", "").strip()
lines = line.split("\t")
if lines[3] not in AllPatterns:
continue
m = [int(str1) for str1 in lines[1].split(",")]
s = []
if len(lines[2]) > 0:
s = [int(str1) for str1 in lines[2].split(",")]
action = DouDiZhuPokerAction(m, s)
if "b" in line:
b = 0
if action.key != lines[0] or action.pattern[0] != lines[3]:
raise ValueError("%s is wrong. The generated action has key(%s) and pattern(%s)"%(line, action.key,action.pattern[0]))
AllActions[action.key] = action
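# Illustrative sketch (not part of the original module): once the tables above
# are populated, an action can be retrieved from its sorted key string, e.g.
#   pair_of_threes = DouDiZhuPokerAction.lookup("33")
#   pair_of_threes.pattern, pair_of_threes.masterCards
# The key "33" is an assumption about the bundled action data, shown only to
# illustrate the key format (sorted rank characters).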
| DMRookie/RoomAI | roomai/doudizhu/DouDiZhuPokerAction.py | Python | mit | 7,614 | 0.010901 |
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from functools import wraps
import yaml
import os
import requests
import time
from pyvcloud import vcloudair
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import taskType
from cloudify import ctx
from cloudify import context
from cloudify import exceptions as cfy_exc
TASK_RECHECK_TIMEOUT = 3
RELOGIN_TIMEOUT = 3
LOGIN_RETRY_NUM = 5
TASK_STATUS_SUCCESS = 'success'
TASK_STATUS_ERROR = 'error'
STATUS_COULD_NOT_BE_CREATED = -1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_DEPLOYED = 2
STATUS_SUSPENDED = 3
STATUS_POWERED_ON = 4
STATUS_POWERED_OFF = 8
STATUS_WAITING_FOR_USER_INPUT = 5
STATUS_UNKNOWN_STATE = 6
STATUS_UNRECOGNIZED_STATE = 7
STATUS_INCONSISTENT_STATE = 9
VCLOUD_STATUS_MAP = {
-1: "Could not be created",
0: "Unresolved",
1: "Resolved",
2: "Deployed",
3: "Suspended",
4: "Powered on",
5: "Waiting for user input",
6: "Unknown state",
7: "Unrecognized state",
8: "Powered off",
9: "Inconsistent state",
10: "Children do not all have the same status",
11: "Upload initiated, OVF descriptor pending",
12: "Upload initiated, copying contents",
13: "Upload initiated , disk contents pending",
14: "Upload has been quarantined",
15: "Upload quarantine period has expired"
}
SUBSCRIPTION_SERVICE_TYPE = 'subscription'
ONDEMAND_SERVICE_TYPE = 'ondemand'
PRIVATE_SERVICE_TYPE = 'vcd'
SESSION_TOKEN = 'session_token'
ORG_URL = 'org_url'
VCLOUD_CONFIG = 'vcloud_config'
def transform_resource_name(res, ctx):
"""
return name as prefix from bootstrap context + resource name
"""
if isinstance(res, basestring):
res = {'name': res}
if not isinstance(res, dict):
raise ValueError("transform_resource_name() expects either string or "
"dict as the first parameter")
pfx = ctx.bootstrap_context.resources_prefix
if not pfx:
return get_mandatory(res, 'name')
name = get_mandatory(res, 'name')
res['name'] = pfx + name
if name.startswith(pfx):
ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it "
"already has this prefix".format(name, pfx))
else:
ctx.logger.info("Transformed resource name '{0}' to '{1}'".format(
name, res['name']))
return res['name']
class Config(object):
"""
load global config
"""
VCLOUD_CONFIG_PATH_ENV_VAR = 'VCLOUD_CONFIG_PATH'
VCLOUD_CONFIG_PATH_DEFAULT = '~/vcloud_config.yaml'
def get(self):
"""
return settings from ~/vcloud_config.yaml
"""
cfg = {}
env_name = self.VCLOUD_CONFIG_PATH_ENV_VAR
default_location_tpl = self.VCLOUD_CONFIG_PATH_DEFAULT
default_location = os.path.expanduser(default_location_tpl)
config_path = os.getenv(env_name, default_location)
try:
with open(config_path) as f:
cfg = yaml.load(f.read())
if not cfg:
cfg = {}
except IOError:
pass
return cfg
class VcloudAirClient(object):
config = Config
def get(self, config=None, *args, **kw):
"""
return new vca client
"""
static_config = self.__class__.config().get()
cfg = {}
cfg.update(static_config)
if config:
cfg.update(config)
return self.connect(cfg)
def connect(self, cfg):
"""
login to instance described in settings
"""
url = cfg.get('url')
username = cfg.get('username')
password = cfg.get('password')
token = cfg.get('token')
service = cfg.get('service')
org_name = cfg.get('org')
service_type = cfg.get('service_type', SUBSCRIPTION_SERVICE_TYPE)
instance = cfg.get('instance')
org_url = cfg.get(ORG_URL, None)
api_version = cfg.get('api_version', '5.6')
session_token = cfg.get(SESSION_TOKEN)
org_url = cfg.get(ORG_URL)
if not (all([url, token]) or all([url, username, password]) or session_token):
raise cfy_exc.NonRecoverableError(
"Login credentials must be specified.")
if (service_type == SUBSCRIPTION_SERVICE_TYPE and not (
service and org_name
)):
raise cfy_exc.NonRecoverableError(
"vCloud service and vDC must be specified")
if service_type == SUBSCRIPTION_SERVICE_TYPE:
vcloud_air = self._subscription_login(
url, username, password, token, service, org_name,
session_token, org_url)
elif service_type == ONDEMAND_SERVICE_TYPE:
vcloud_air = self._ondemand_login(
url, username, password, token, instance,
session_token, org_url)
# The actual service type for private is 'vcd', but we should accept
# 'private' as well, for user friendliness of inputs
elif service_type in (PRIVATE_SERVICE_TYPE, 'private'):
vcloud_air = self._private_login(
url, username, password, token, org_name, org_url, api_version)
else:
raise cfy_exc.NonRecoverableError(
"Unrecognized service type: {0}".format(service_type))
return vcloud_air
def _subscription_login(self, url, username, password, token, service,
org_name, session_token=None, org_url=None):
"""
login to subscription service
"""
version = '5.6'
logined = False
vdc_logined = False
vca = vcloudair.VCA(
url, username, service_type=SUBSCRIPTION_SERVICE_TYPE,
version=version)
if session_token:
if session_login(vca, org_url, session_token, version):
return vca
else:
raise cfy_exc.NonRecoverableError("Invalid session credentials")
# login with token
if token:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(token=token)
if logined is False:
ctx.logger.info("Login using token failed.")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using token successful.")
break
# outdated token, try login by password
if logined is False and password:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(password)
if logined is False:
ctx.logger.info("Login using password failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using password successful.")
break
# can't login to system at all
if logined is False:
raise cfy_exc.NonRecoverableError("Invalid login credentials")
for _ in range(LOGIN_RETRY_NUM):
vdc_logined = vca.login_to_org(service, org_name)
if vdc_logined is False:
ctx.logger.info("Login to VDC failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login to VDC successful.")
break
        # We can log in to the system but are having trouble logging in to
        # the organization, so let the workflow retry later.
if vdc_logined is False:
raise cfy_exc.RecoverableError(message="Could not login to VDC",
retry_after=RELOGIN_TIMEOUT)
atexit.register(vca.logout)
return vca
def _ondemand_login(self, url, username, password, token, instance_id,
session_token=None, org_url=None):
"""
login to ondemand service
"""
def get_instance(vca, instance_id):
instances = vca.get_instances() or []
for instance in instances:
if instance['id'] == instance_id:
return instance
version = '5.7'
if instance_id is None:
raise cfy_exc.NonRecoverableError(
"Instance ID should be specified for OnDemand login")
logined = False
instance_logined = False
vca = vcloudair.VCA(
url, username, service_type=ONDEMAND_SERVICE_TYPE, version=version)
if session_token:
if session_login(vca, org_url, session_token, version):
return vca
else:
raise cfy_exc.NonRecoverableError("Invalid session credentials")
# login with token
if token:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(token=token)
if logined is False:
ctx.logger.info("Login using token failed.")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using token successful.")
break
# outdated token, try login by password
if logined is False and password:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(password)
if logined is False:
ctx.logger.info("Login using password failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using password successful.")
break
# can't login to system at all
if logined is False:
raise cfy_exc.NonRecoverableError("Invalid login credentials")
instance = get_instance(vca, instance_id)
if instance is None:
raise cfy_exc.NonRecoverableError(
"Instance {0} could not be found.".format(instance_id))
for _ in range(LOGIN_RETRY_NUM):
instance_logined = vca.login_to_instance(
instance_id, password, token, None)
if instance_logined is False:
ctx.logger.info("Login to instance failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login to instance successful.")
break
for _ in range(LOGIN_RETRY_NUM):
instance_logined = vca.login_to_instance(
instance_id,
None,
vca.vcloud_session.token,
vca.vcloud_session.org_url)
if instance_logined is False:
ctx.logger.info("Login to instance failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login to instance successful.")
break
        # We can log in to the system but are having trouble logging in to
        # the instance, so let the workflow retry later.
if instance_logined is False:
raise cfy_exc.RecoverableError(
message="Could not login to instance",
retry_after=RELOGIN_TIMEOUT)
atexit.register(vca.logout)
return vca
def _private_login(self, url, username, password, token, org_name,
org_url=None, api_version='5.6'):
"""
login to private instance
"""
logined = False
vca = vcloudair.VCA(
host=url,
username=username,
service_type=PRIVATE_SERVICE_TYPE,
version=api_version)
if logined is False and password:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(password, org=org_name)
if logined is False:
ctx.logger.info("Login using password failed. Retrying...")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
token = vca.token
# Set org_url based on the session, no matter what was
# passed in to the application, as this is guaranteed to
# be correct
org_url = vca.vcloud_session.org_url
ctx.logger.info("Login using password successful.")
break
# Private mode requires being logged in with a token otherwise you
# don't seem to be able to retrieve any VDCs
if token:
for _ in range(LOGIN_RETRY_NUM):
logined = vca.login(token=token, org_url=org_url)
if logined is False:
ctx.logger.info("Login using token failed.")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
ctx.logger.info("Login using token successful.")
break
if logined is False:
raise cfy_exc.NonRecoverableError("Invalid login credentials")
atexit.register(vca.logout)
return vca
def with_vca_client(f):
"""
add vca client to function params
"""
@wraps(f)
def wrapper(*args, **kw):
config = None
prop = None
if ctx.type == context.NODE_INSTANCE:
config = ctx.node.properties.get(VCLOUD_CONFIG)
prop = ctx.instance.runtime_properties
elif ctx.type == context.RELATIONSHIP_INSTANCE:
config = ctx.source.node.properties.get(VCLOUD_CONFIG)
prop = ctx.source.instance.runtime_properties
else:
raise cfy_exc.NonRecoverableError("Unsupported context")
if config and prop:
config[SESSION_TOKEN] = prop.get(SESSION_TOKEN)
config[ORG_URL] = prop.get(ORG_URL)
client = VcloudAirClient().get(config=config)
kw['vca_client'] = client
return f(*args, **kw)
return wrapper
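# Illustrative usage sketch (not part of the original plugin): an operation
# decorated with @with_vca_client receives the logged-in client as the
# ``vca_client`` keyword argument, e.g.
#   @with_vca_client
#   def create_volume(vca_client, **kwargs):
#       vdc = vca_client.get_vdc(get_vcloud_config()['vdc'])
# The operation name and the get_vdc() call are assumptions for illustration.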
def wait_for_task(vca_client, task):
"""
check status of current task and make request for recheck
task status in case when we have not well defined state
(not error and not success or by timeout)
"""
WAIT_TIME_MAX_MINUTES = 30
MAX_ATTEMPTS = WAIT_TIME_MAX_MINUTES * 60 / TASK_RECHECK_TIMEOUT
    ctx.logger.debug('Maximum task wait time {0} minutes.'
.format(WAIT_TIME_MAX_MINUTES))
ctx.logger.debug('Task recheck after {0} seconds.'
.format(TASK_RECHECK_TIMEOUT))
status = task.get_status()
for attempt in range(MAX_ATTEMPTS):
ctx.logger.debug('Attempt: {0}/{1}.'.format(attempt + 1, MAX_ATTEMPTS))
if status == TASK_STATUS_SUCCESS:
ctx.logger.debug('Task completed in {0} seconds'
.format(attempt * TASK_RECHECK_TIMEOUT))
return
if status == TASK_STATUS_ERROR:
error = task.get_Error()
raise cfy_exc.NonRecoverableError(
"Error during task execution: {0}".format(error.get_message()))
time.sleep(TASK_RECHECK_TIMEOUT)
response = requests.get(
task.get_href(),
headers=vca_client.vcloud_session.get_vcloud_headers())
task = taskType.parseString(response.content, True)
status = task.get_status()
raise cfy_exc.NonRecoverableError("Wait for task timeout.")
def get_vcloud_config():
"""
get vcloud config from node properties
"""
config = None
if ctx.type == context.NODE_INSTANCE:
config = ctx.node.properties.get(VCLOUD_CONFIG)
elif ctx.type == context.RELATIONSHIP_INSTANCE:
config = ctx.source.node.properties.get(VCLOUD_CONFIG)
else:
raise cfy_exc.NonRecoverableError("Unsupported context")
static_config = Config().get()
if config:
static_config.update(config)
return static_config
def get_mandatory(obj, parameter):
"""
return value for field or raise exception if field does not exist
"""
value = obj.get(parameter)
if value:
return value
else:
raise cfy_exc.NonRecoverableError(
"Mandatory parameter {0} is absent".format(parameter))
def is_subscription(service_type):
"""
check service type is subscription or empty
"""
return not service_type or service_type == SUBSCRIPTION_SERVICE_TYPE
def is_ondemand(service_type):
"""
check service type is ondemand
"""
return service_type == ONDEMAND_SERVICE_TYPE
def error_response(obj):
"""
return description of response error
"""
try:
return obj.response.content
except AttributeError:
return ''
def session_login(vca, org_url, session_token, version):
vcs = vcloudair.VCS(org_url, None, None, None, org_url, org_url, version)
for _ in range(LOGIN_RETRY_NUM):
if not vcs.login(token=session_token):
ctx.logger.info("Login using session token failed.")
time.sleep(RELOGIN_TIMEOUT)
continue
else:
vca.vcloud_session = vcs
ctx.logger.info("Login using session token successful.")
return True
return False
| denismakogon/tosca-vcloud-plugin | vcloud_plugin_common/__init__.py | Python | apache-2.0 | 17,925 | 0.000167 |
import numpy as np
import fnmatch, os
import h5py
class Hdf5Loader():
def loadDirectory(self, dirname):
"""
Loads all hdf5 files in the directory dirname
@param dirname: The directory which contains the files to load
@returns: list of h5py File objects
"""
cachelist=os.listdir(dirname)
testlist=fnmatch.filter(cachelist,'*.hdf5')
for file_ in testlist:
print("Using {0}".format(file_))
files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]
return files
def getDatasets(self, dirname, dataset_list):
"""
Loads all hdf5 files in a given directory. It extracts all datasets
which are specified in :dataset_list and merges the datasets from
all files.
Finally it returns a numpy array for each dataset in the :dataset_list
@param dirname: The directory containing the hdf5 files
@param dataset_list: List of datasets to load
@returns: A list of numpy arrays loaded from the dataset files
"""
files = self.loadDirectory(dirname)
result = []
for dataset_name in dataset_list:
arr = np.concatenate([f[dataset_name] for f in files])
result.append(arr)
return result
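# Illustrative usage sketch (not part of the original module); the directory
# and dataset names below are assumptions:
#   loader = Hdf5Loader()
#   features, power = loader.getDatasets('/data/traces', ['features', 'power'])
#   # -> one numpy array per dataset, concatenated over all *.hdf5 files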
class LoadData():
"""
    This class extracts feature data and the corresponding power values from CSV files and returns them as arrays
"""
def __init__(self, sep=";", groundtruth_elements=2, skiprows=1, skipcols=1):
self.sep = sep
self.num_groundtruth_elements = groundtruth_elements
self.skiprows=1
self.skipcols = skipcols
def getFeatureCount(self, file_):
fd = open(file_, 'r')
fd.readline()
count = len(fd.readline().split(self.sep))
return count - self.num_groundtruth_elements
def getFeaturesData(self,csvname):
cols = range(self.skipcols, self.getFeatureCount(csvname))
print cols
log = np.loadtxt(csvname,delimiter=self.sep,skiprows=self.skiprows,usecols=cols)
return log
def getPowerData(self,csvname):
cols = [self.getFeatureCount(csvname)]
power = np.loadtxt(csvname,delimiter=self.sep,skiprows=self.skiprows,usecols=cols)
return power
def load_dir(self, dirname):
"""
Loads all files of a directory to a single feature and power data set
"""
cachelist=os.listdir(dirname)
testlist=fnmatch.filter(cachelist,'*.csv')
testFeatureDataLst = []
testPowerDataLst = []
"""Testdaten laden"""
for file_ in testlist:
testFeatureDataLst.append(self.getFeaturesData(os.path.join(dirname,file_)))
testPowerDataLst.append(self.getPowerData(os.path.join(dirname,file_)))
testFeatureData = np.concatenate(testFeatureDataLst)
testPowerData = np.concatenate(testPowerDataLst)
return testPowerData, testFeatureData | nglrt/virtual_energy_sensor | virtual_energy_sensor/loadtrain.py | Python | mit | 3,264 | 0.016544 |
from jinja2 import Environment, FileSystemLoader
data = {
"extensionInfoList": [
{"ext": "apk", "mimeTypes": ["application/vnd.android.package-archive"]}
, {"ext": "zip", "mimeTypes": []}
, {"ext": "tgz", "mimeTypes": []}
, {"ext": "gz", "mimeTypes": []}
, {"ext": "pdf", "mimeTypes": ["application/pdf"]}
]
}
xmlTemplates = [
{
"template": "template.AndroidManifest.xml",
"output": "../app/src/main/AndroidManifest.xml"
},
{
"template": "template.strings_ext.xml",
"output": "../app/src/main/res/values/strings_ext.xml"
},
{
"template": "template.pref_general.xml",
"output": "../app/src/main/res/xml/pref_general.xml"
},
]
javaTemplates = [
{
"template": "template.ConfirmActivity.java",
"output": "../app/src/main/java/com/nagopy/android/downloadconfirm/extension/{}ConfirmActivity.java"
},
{
"template": "template.HookTest.java",
"output": "../app/src/androidTest/java/com/nagopy/android/downloadconfirm/extension/{}HookTest.java"
},
]
env = Environment(loader=FileSystemLoader('.'))
for xmlTemplate in xmlTemplates:
template = env.get_template(xmlTemplate['template'])
rendered = template.render(data)
with open(xmlTemplate['output'], 'w') as f:
f.write(rendered)
f.close()
for javaTemplate in javaTemplates:
for extInfo in data['extensionInfoList']:
template = env.get_template(javaTemplate['template'])
rendered = template.render({'extInfo': extInfo})
with open(javaTemplate['output'].format(extInfo['ext'].capitalize()), 'w') as f:
f.write(rendered)
f.close()
| 75py/Download-Confirm | work/generate_extentions_res.py | Python | apache-2.0 | 1,723 | 0.004643 |
""" Parse travis.yml file, partly
"""
import sys
if sys.version_info[0] > 2:
basestring = str
class TravisError(Exception):
pass
def get_yaml_entry(yaml_dict, name):
""" Get entry `name` from dict `yaml_dict`
Parameters
----------
yaml_dict : dict
dict or subdict from parsing .travis.yml file
name : str
key to analyze and return
Returns
-------
entry : None or list
If `name` not in `yaml_dict` return None. If key value is a string
return a single entry list. Otherwise return the key value.
"""
entry = yaml_dict.get(name)
if entry is None:
return None
if isinstance(entry, basestring):
return [entry]
return entry
def get_envs(yaml_dict):
""" Get first env combination from travis yaml dict
Parameters
----------
yaml_dict : dict
dict or subdict from parsing .travis.yml file
Returns
-------
bash_str : str
bash scripting lines as string
"""
env = get_yaml_entry(yaml_dict, 'env')
if env is None:
return ''
# Bare string
if isinstance(env, basestring):
return env + '\n'
# Simple list defining matrix
if isinstance(env, (list, tuple)):
return env[0] + '\n'
    # More complex dict-like structures
globals, matrix = [get_yaml_entry(env, name)
for name in ('global', 'matrix')]
if hasattr(matrix, 'keys'):
raise TravisError('Oops, envs too complicated')
lines = []
    if globals is not None:
if matrix is None:
raise TravisError('global section needs matrix section')
lines += globals
    if matrix is not None:
lines.append(matrix[0])
return '\n'.join(lines) + '\n'
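# Illustrative sketch (not part of the original module): for a parsed
# .travis.yml fragment such as
#   {'env': {'global': ['A=1'], 'matrix': ['B=2', 'B=3']}}
# get_envs() returns the global entries plus only the *first* matrix entry,
# i.e. "A=1\nB=2\n", ready to be sourced by a bash build script.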
| weberwang/WeRoBot | travis/terryfy/travisparse.py | Python | mit | 1,763 | 0.001134 |
pet = ['dog', 'cat', 'parrot', 'squirrel', 'goldfish']
for animal in pet:
print(animal, len(animal))
| zamonia500/PythonTeacherMythenmetz | 300문제/96.py | Python | gpl-3.0 | 106 | 0 |
"""Plugins that are not OS-specific"""
# pylint: disable=unused-import
from rekall.plugins.common import address_resolver
from rekall.plugins.common import api
from rekall.plugins.common import bovine
from rekall.plugins.common import efilter_plugins
from rekall.plugins.common import inspection
from rekall.plugins.common import memmap
from rekall.plugins.common import profile_index
from rekall.plugins.common import scanners
from rekall.plugins.common import sigscan
| dsweet04/rekall | rekall-core/rekall/plugins/common/__init__.py | Python | gpl-2.0 | 471 | 0 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
#
# Deployment for Cloudera Director using a RHEL 6 image.
#
##########################################################################
import director_base
from gce import *
GCE.setDefaults(
project='curious-lemmings-42',
zone='us-central1-a',
)
resources = director_base.DirectorServer(
sourceImage='rhel-6-latest',
startupScript='../scripts/rhel-6/init.gen.sh')
| mbrukman/cloud-launcher | apps/cloudera/director/py/rhel6.py | Python | apache-2.0 | 1,057 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Alexandre Bulté <alexandre[at]bulte[dot]net>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from flask import Flask, render_template
from flask_fanstatic import Fanstatic
# configuration
DEBUG = True
FANSTATIC_OPTIONS = {'bottom': True, 'minified': True}
app = Flask(__name__)
app.config.from_object(__name__)
fanstatic = Fanstatic(app)
# define your own ressources this way
fanstatic.resource('js/app.js', name='app_js', bottom=True)
@app.route('/')
def index():
return render_template('index.html') | abulte/Flask-Bootstrap-Fanstatic | application/__init__.py | Python | mpl-2.0 | 730 | 0.005487 |
import os
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
from matplotlib.ticker import FuncFormatter
def thousands(x, pos):
if x>=1e9:
return '%.1fB' % (x*1e-9)
elif x>=1e6:
return '%.1fM' % (x*1e-6)
elif x>=1e3:
return '%.1fK' % (x*1e-3)
else:
return x
formatter = FuncFormatter(thousands)
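# Illustrative note (not part of the original script): thousands() renders,
# e.g., 1500 -> '1.5K', 2500000 -> '2.5M', 3200000000 -> '3.2B'; the formatter
# is attached to an axis below via ax.yaxis.set_major_formatter(formatter).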
def ecdf_for_plot(sample):
#x = np.linspace(min(sample), max(sample))
print "sample: ",type(sample)
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
# print ecdf
print "ecdf: ",type(ecdf)
y = ecdf(x)
#print y
print "y: ", type(y)
return (x,y)
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
df.columns = ['respDom_id' if x=='resp_domain' else x for x in df.columns]
query = 'SELECT * FROM Domain_DomainTwoPart'
df_domdom2 = pd.read_sql_query(query,conn)
df=df.merge(df_domdom2,left_on='site_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['site_id2' if x=='domainTwoPart_id' else x for x in df.columns]
df=df.merge(df_domdom2,left_on='respDom_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['respDom_id2' if x=='domainTwoPart_id' else x for x in df.columns]
query = 'SELECT * FROM DomainsTwoPart'
df_dom2 = pd.read_sql_query(query,conn)
df=df.merge(df_dom2, left_on = 'site_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['site_domain2' if x=='domainTwoPart' else x for x in df.columns]
df=df.merge(df_dom2, left_on = 'respDom_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['respDom_domain2' if x=='domainTwoPart' else x for x in df.columns]
query = 'SELECT * FROM Domain2Company'
df_dom2com = pd.read_sql_query(query,conn)
df=df.merge(df_dom2com,left_on='respDom_id2',right_on='domainTwoPart_id',how='left')
df.drop('domainTwoPart_id',axis=1,inplace=True)
query = 'SELECT * FROM Companies'
df_com = pd.read_sql_query(query,conn)
df=df.merge(df_com,left_on='company_id',right_on='id',how='left')
df.drop('id',axis=1,inplace=True)
#conn.close()
df1=df.loc[df['site_id2']==df['respDom_id2']]
df2=df.loc[df['site_id2']!=df['respDom_id2']]
df2.shape[0]/float(df.shape[0]) #0.6757349672921374
# how many sites and links have third-party images
sites = []
links = 0
for site_id in range(1,10001):
if site_id % 100 == 0: print site_id
df3=df2.loc[df2['site_id']==site_id]
df3_size = df3['link_id'].unique().shape[0]
links += df3_size
if df3_size: sites.append(site_id)
len(sites) #8343
# 8343/8965 = 0.9306190741773563
links #912363
912363/964315.
# distinct response domains
df['respDom_id2'].unique().size #29009
df1['respDom_id2'].unique().size #7863
df2['respDom_id2'].unique().size #23235
domains2 = df2[['respDom_id2','respDom_domain2']].groupby(['respDom_id2','respDom_domain2']).size().sort_values(ascending = False).reset_index()
domains2.to_csv('/home/nsarafij/project/OpenWPM/analysis/results/third-domains2_owners',index=False,encoding='utf-8')
# companies
############## considering third-party domains only
# all images: counts per each response domain
domains = df2['respDom_domain2'].value_counts()
total = df2.shape[0]
domains_cum = domains.cumsum()
dom_perc = domains/float(total)
dom_perc_cum = dom_perc.cumsum()
# all images: counts per each company
com = df2['company'].value_counts()
com_cum = com.cumsum()
com_perc = com/df2.shape[0]
com_perc_cum = com_perc.cumsum()
# all images - response domains
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
# cdf of number of third-party images per third-party domains
(x,y) = ecdf_for_plot(domains)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('no of zero images per domain')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'third-domains2_cdf.png'))
plt.show()
# counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('count of images')
plt.xlim([1,domains.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.xlim([1,domains.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('domain rank')
plt.ylabel('count of all images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_perc_cum.png',format='png')
# top 30 domains - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,domains[0:n],align='center')
plt.xlabel('domains')
plt.ylabel('count of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count_top30.png',format='png')
# top 30 domains - percentages
fig = plt.figure()
plt.bar(x,dom_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of total number of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc_top30.png',format='png')
domcom = df2[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(df2.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_perc_top30.txt'),'w+')
### table domains - companies
for i in range(0,n):
dom = domcom.iloc[i,0]
comp = domcom.iloc[i,1]
perc = domcom.iloc[i,2]
s = str(i+1) + ' & ' + dom + ' & ' + comp + ' & ' + '%.2f' % perc + '\\\\ \\hline'
print s
s = s.encode('UTF-8')
print s
fhand.write(s + '\n')
fhand.close()
### companies
# counts
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
plt.xlim([1,com.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-company_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.xlim([1,com.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_perc_cum.png',format='png')
# top 30 companies - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,com[0:n],align='center')
plt.xlabel('company')
plt.ylabel('count of third-party images')
labels = list(com.index[0:n])
plt.xticks(x, labels, rotation=90)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_count_top30.png',format='png')
# top 30 companies - percentages
fig = plt.figure()
plt.bar(x,com_perc[0:n]*100,align='center')
plt.xlabel('company')
plt.ylabel('percentage of third-party images')
labels = list(com.index[0:n])
plt.xticks(x, labels, rotation=90)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_perc_top30.png',format='png')
############################## 1-pixel images
df3all=df.ix[df['pixels']==1]
df3all.shape[0] #9906784
df3all.shape[0]/float(df.shape[0]) #0.31093023806156583
df3=df2.ix[df2['pixels']==1]
df3.shape[0] #9662147
df3.shape[0]/float(df3all.shape[0]) #0.9753061134672968
# 1-pixel images: counts per each response domain
dom_pix1 = df3['respDom_domain2'].value_counts()
dom_pix1_cum = dom_pix1.cumsum()
dom_pix1_perc = dom_pix1/float(df3.shape[0])
dom_pix1_perc_ = dom_pix1/float(dom_pix1_cum[dom_pix1_cum.size-1:dom_pix1_cum.size])
dom_pix1_perc_cum = dom_pix1_perc_.cumsum()
#dom_pix1_=pd.merge(pd.DataFrame(dom_pix1), df_dom, left_index=True, right_on='id')
# 1-pixel images: counts per each company
com_pix1 = df3['company'].value_counts()
com_pix1_cum = com_pix1.cumsum()
com_pix1_perc = com_pix1/float(df3.shape[0])
com_pix1_perc_ = com_pix1/float(com_pix1_cum[com_pix1_cum.size-1:com_pix1_cum.size])
com_pix1_perc_cum = com_pix1_perc_.cumsum()
### figures
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
# cdf of no of
(x,y) = ecdf_for_plot(dom_pix1)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('no of 1-pixel third-party images per domain')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'third-domains2_cdf.png'))
# counts
fig, ax = plt.subplots()
plt.plot(dom_pix1,marker='.')
plt.xscale('symlog')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel('domain rank')
plt.ylabel('count of images')
plt.title('1-pixel Images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_perc*100,marker='.')
plt.xscale('symlog')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.title('1-pixel Images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_cum,marker='.')
ax.yaxis.set_major_formatter(formatter)
plt.xscale('log')
plt.title('Cumulative Counts for 1-pixel Images')
plt.xlabel('domain rank')
plt.ylabel('count')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_perc_cum*100,marker='.')
plt.xscale('log')
plt.title('Cumulative Percentage Counts for 1-pixel Images')
plt.xlabel('domain rank')
plt.ylabel('percentage of 1-pixel images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_perc_cum.png',format='png')
# top 30 domains - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,dom_pix1[0:n],align='center')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel('domains')
plt.ylabel('count of images')
labels = list(dom_pix1.index[0:n])
plt.xticks(x, labels, rotation=80)
plt.title('1-pixel Images')
plt.grid(True)
fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_pix1_count_top30.png',format='png')
# top 20 domains - percentages
fig = plt.figure()
plt.bar(x,dom_pix1_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of 1-pixel images')
labels = list(dom_pix1.index[0:n])
plt.xticks(x, labels, rotation=80)
plt.title('1-pixel Images')
plt.grid(True)
fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_pix1_perc_top30.png',format='png')
plt.show()
### table domains - companies
domcom = df3[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(df3.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_pix1_perc_top30.txt'),'w+')
for i in range(0,n):
dom = domcom.iloc[i,0]
com = domcom.iloc[i,1]
perc = domcom.iloc[i,2]
s = str(i+1) + ' & ' + dom + ' & ' + com + ' & ' + '%.2f' % perc + '\\\\ \\hline'
print s
s = s.encode('UTF-8')
print s
fhand.write(s + '\n')
fhand.close()
### companies
# counts
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
fig, ax = plt.subplots()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
plt.xlim([1,com_pix1.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.xlim([1,com_pix1.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_pix1_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_pix1_perc_cum.png',format='png')
# top 30 companies - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,com_pix1[0:n],align='center')
plt.xlabel('company')
plt.ylabel('count of third-party images')
labels = list(com_pix1.index[0:n])
plt.xticks(x, labels, rotation=90)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_count_top30.png',format='png')
# top 30 companies - percentages
fig = plt.figure()
plt.bar(x,com_pix1_perc[0:n]*100,align='center')
plt.xlabel('company')
plt.ylabel('percentage of third-party images')
labels = list(com_pix1.index[0:n])
plt.xticks(x, labels, rotation=90)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_perc_top30.png',format='png')
plt.show()
### table companies
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-company_pix1_perc_top30.txt'),'w+')
for i in range(0,n):
com = com_pix1_perc.index[i]
perc = com_pix1_perc[i]*100
s = str(i+1) + ' & ' + com + ' & ' + '%.3f' % perc + '\\\\ \\hline'
print s
s = s.encode('UTF-8')
print s
fhand.write(s + '\n')
fhand.close()
conn.close()
'''
fig1 = plt.figure(1)
plt.hist(df['size'], bins=100, color='lightblue',label ='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig2 = plt.figure(2)
plt.hist(df['size'], bins=100, range=(0,50000), color='lightblue',label ='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig3 = plt.figure(3)
plt.hist(df['size'], bins=100, range=(0,100), color='lightblue',label='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig = plt.figure(1)
plt.hist(l, bins=100, color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist4.eps',format='eps')
plt.legend()
fig = plt.figure(2)
plt.hist(l, bins=100, range=(0,50000), color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist5.eps',format='eps')
plt.legend()
fig = plt.figure(3)
plt.hist(l, bins=100, range=(0,100), color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist6.eps',format='eps')
plt.legend()
#plt.show()
fig1.savefig('figs/img_sizes_hist1.eps',format='eps')
fig2.savefig('figs/img_sizes_hist2.eps',format='eps')
fig3.savefig('figs/img_sizes_hist3.eps',format='eps')
fig1 = plt.figure(1)
plt.plot(dom_perc,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
fig2 = plt.figure(2)
plt.plot(dom_perc,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,100])
fig3 = plt.figure(3)
plt.plot(domains,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,10])
#plt.ylim([10,30000])
#plt.xlim([0,10])
fig5 = plt.figure(5)
plt.plot(dom_perc_cum,marker='o')
plt.title('Image Cumulative Percentage Counts for Response Domains \n no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,100])
fig6 = plt.figure(6)
plt.plot(dom_perc_cum,marker='o')
plt.title('Image Cumulative Percentage Counts for Response Domains \n no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,10])
#plt.show()
for i in range(1,7):
fig_file = 'figs/img_domains_' + str(i) +'.eps'
s = "fig{}.savefig('".format(i) + fig_file + "',format='eps')"
print s
exec s
'''
| natasasdj/OpenWPM | analysis/12_images_third-domains2.py | Python | gpl-3.0 | 19,012 | 0.016148 |
# coding: utf-8
"""
This module defines a containers that hold transcripts excluded from further consideration.
It is invoked when all transcripts in a locus have a score of 0 and the "purge"
option has been enabled.
"""
from .abstractlocus import Abstractlocus
from ..transcripts import Transcript
class Excluded(Abstractlocus):
"""This is a container of discarded transcripts. It is used only for completeness purposes -
i.e. printing out the discarded transcripts to a separate file.
"""
__name__ = "excluded_transcripts"
def __init__(self, monosublocus_instance=None, configuration=None, logger=None):
"""
Constructor method
:param monosublocus_instance:
:type monosublocus_instance: Mikado.loci_objects.monosublocus.Monosublocus
:param configuration: configuration file
:type configuration: (MikadoConfiguration|DaijinConfiguration)
:param logger: logger instance
:type logger: logging.Logger | None
"""
Abstractlocus.__init__(self, configuration=configuration)
self.splitted = False
self.metrics_calculated = False
self.logger = logger
if isinstance(monosublocus_instance, Transcript):
Abstractlocus.__init__(self, transcript_instance=monosublocus_instance)
elif isinstance(monosublocus_instance, Abstractlocus):
# Add the transcript to the Locus
self.add_monosublocus(monosublocus_instance)
def add_transcript_to_locus(self, transcript, **kwargs):
"""Override of the sublocus method, and reversal to the original
method in the Abstractlocus class.
:param transcript: a transcript to add
:type transcript: Mikado.loci_objects.transcript.Transcript
:param kwargs: optional arguments are completely ignored by this method.
"""
# Notice that check_in_locus is always set to False.
_ = kwargs
Abstractlocus.add_transcript_to_locus(self, transcript, check_in_locus=False)
def add_monosublocus(self, monosublocus_instance):
"""Wrapper to extract the transcript from the monosubloci and pass it
to the constructor.
:param monosublocus_instance
:type monosublocus_instance: Mikado.loci_objects.monosublocus.Monosublocus
"""
assert len(monosublocus_instance.transcripts) == 1
for tid in monosublocus_instance.transcripts:
self.add_transcript_to_locus(monosublocus_instance.transcripts[tid])
def __str__(self):
"""This special method is explicitly *not* implemented;
this Locus object is not meant for printing, only for computation!"""
message = """This is a container used for computational purposes only,
it should not be printed out directly!"""
raise NotImplementedError(message)
def filter_and_calculate_scores(self, check_requirements=True):
"""
Suppress the method from the base class
"""
raise NotImplementedError("Scores are not calculated by this class!")
def define_monosubloci(self):
"""
Suppress the method from the base class
"""
raise NotImplementedError("Monosubloci are not calculated by this class!!")
@classmethod
def is_intersecting(cls):
"""Present to fulfill the contract with Abstractlocus, but it only raises a NotImplementedError"""
raise NotImplementedError()
| lucventurini/mikado | Mikado/loci/excluded.py | Python | lgpl-3.0 | 3,468 | 0.003172 |
from unittest import TestCase
from datetime import timedelta
from safepickle.types.timedelta import TimedeltaType
from safepickle.encoding import encode, decode
class TestTimedelta(TestCase):
def test_timedelta(self):
""" Asserts timedelta type is handled as expected
"""
obj = timedelta(days=1, seconds=2, microseconds=3)
type_ = TimedeltaType()
encoding = type_.encode(obj, encode)
decoding = decode(encoding)
self.assertEqual(obj, decoding)
| nioinnovation/safepickle | safepickle/types/tests/test_timedelta.py | Python | apache-2.0 | 509 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Edge3D.
"""
import unittest
from catplot.grid_components.nodes import Node2D, Node3D
from catplot.grid_components.edges import Edge2D, Edge3D
class Edge3DTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct Grid2DNode correctly.
"""
node1 = Node3D([1.0, 1.0, 1.0], color="#595959", width=1)
node2 = Node3D([0.5, 0.5, 0.5], color="#595959", width=1)
edge = Edge3D(node1, node2, n=10)
ref_x = [1.0,
0.9545454545454546,
0.9090909090909091,
0.8636363636363636,
0.8181818181818181,
0.7727272727272727,
0.7272727272727273,
0.6818181818181819,
0.6363636363636364,
0.5909090909090908,
0.5454545454545454,
0.5]
self.assertListEqual(edge.x.tolist(), ref_x)
self.assertListEqual(edge.y.tolist(), ref_x)
self.assertListEqual(edge.z.tolist(), ref_x)
def test_construction_from2d(self):
""" Make sure we can construct 3D edge from a 2D edge.
"""
node1 = Node2D([1.0, 1.0])
node2 = Node2D([1.0, 2.0])
edge2d = Edge2D(node1, node2)
edge3d = Edge3D.from2d(edge2d)
self.assertTrue(isinstance(edge3d, Edge3D))
def test_move(self):
""" Test the edge can be moved correctly.
"""
node1 = Node3D([1.0, 1.0, 1.0], color="#595959", width=1)
node2 = Node3D([0.5, 0.5, 0.5], color="#595959", width=1)
edge = Edge3D(node1, node2, n=10)
edge.move([0.5, 0.5, 0.5])
ref_x = [1.5,
1.4545454545454546,
1.4090909090909092,
1.3636363636363638,
1.3181818181818181,
1.2727272727272727,
1.2272727272727273,
1.1818181818181819,
1.1363636363636362,
1.0909090909090908,
1.0454545454545454,
1.0]
self.assertListEqual(edge.x.tolist(), ref_x)
self.assertListEqual(edge.y.tolist(), ref_x)
self.assertListEqual(edge.z.tolist(), ref_x)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Edge3DTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| PytLab/catplot | tests/edge_3d_test.py | Python | mit | 2,499 | 0.001601 |
from __future__ import absolute_import
import abc
class MinHashIndexBackendTestMixin(object):
__meta__ = abc.ABCMeta
@abc.abstractproperty
def index(self):
pass
def test_basic(self):
self.index.record("example", "1", [("index", "hello world")])
self.index.record("example", "2", [("index", "hello world")])
self.index.record("example", "3", [("index", "jello world")])
self.index.record("example", "4", [("index", "yellow world"), ("index", "mellow world")])
self.index.record("example", "5", [("index", "pizza world")])
# comparison, without thresholding
results = self.index.compare("example", "1", [("index", 0)])
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
assert results[4][0] == "5"
# comparison, low threshold
results = self.index.compare("example", "1", [("index", 6)])
assert len(results) == 4
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
# comparison, high threshold (exact match)
results = self.index.compare("example", "1", [("index", self.index.bands)])
assert len(results) == 2
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
# comparison, candidate limit (with lexicographical collision sort)
results = self.index.compare("example", "1", [("index", 0)], limit=1)
assert len(results) == 1
assert results[0] == ("1", [1.0])
# classification, without thresholding
results = self.index.classify("example", [("index", 0, "hello world")])
assert results[0:2] == [("1", [1.0]), ("2", [1.0])]
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
assert results[4][0] == "5"
# classification, low threshold
results = self.index.classify("example", [("index", 6, "hello world")])
assert len(results) == 4
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
assert results[2][0] in ("3", "4") # equidistant pairs, order doesn't really matter
assert results[3][0] in ("3", "4")
# classification, high threshold (exact match)
results = self.index.classify("example", [("index", self.index.bands, "hello world")])
assert len(results) == 2
assert results[0] == ("1", [1.0])
assert results[1] == ("2", [1.0]) # identical contents
# classification, candidate limit (with lexicographical collision sort)
results = self.index.classify("example", [("index", 0, "hello world")], limit=1)
assert len(results) == 1
assert results[0] == ("1", [1.0])
self.index.delete("example", [("index", "3")])
assert [key for key, _ in self.index.compare("example", "1", [("index", 0)])] == [
"1",
"2",
"4",
"5",
]
def test_multiple_index(self):
self.index.record("example", "1", [("index:a", "hello world"), ("index:b", "hello world")])
self.index.record("example", "2", [("index:a", "hello world"), ("index:b", "hello world")])
self.index.record("example", "3", [("index:a", "hello world"), ("index:b", "pizza world")])
self.index.record("example", "4", [("index:a", "hello world")])
self.index.record("example", "5", [("index:b", "hello world")])
# comparison, without thresholding
results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)])
assert len(results) == 5
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
assert results[4] == ("5", [0.0, 1.0])
# comparison, candidate limit (with lexicographical collision sort)
results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)], limit=4)
assert len(results) == 4
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
# classification, without thresholding
results = self.index.classify(
"example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")]
)
assert len(results) == 5
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
assert results[4] == ("5", [0.0, 1.0])
# classification, with thresholding (low)
results = self.index.classify(
"example",
[
("index:a", self.index.bands, "pizza world"), # no direct hits
("index:b", 8, "pizza world"), # one direct hit
],
)
assert len(results) == 1
assert results[0][0] == "3"
# this should have a value since it's similar even thought it was not
# considered as a candidate for this index
assert results[0][1][0] > 0
assert results[0][1][1] == 1.0
# classification, with thresholding (high)
results = self.index.classify(
"example",
[
("index:a", self.index.bands, "pizza world"), # no direct hits
("index:b", self.index.bands, "hello world"), # 3 direct hits
],
)
assert len(results) == 3
assert results[0][0] == "1" # tie btw first 2 items is broken by lex sort
assert results[0][1][0] > 0
assert results[0][1][1] == 1.0
assert results[1][0] == "2"
assert results[1][1][0] > 0
assert results[1][1][1] == 1.0
assert results[2] == ("5", [0.0, 1.0])
# classification, candidate limit (with lexicographical collision sort)
results = self.index.classify(
"example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")], limit=4
)
assert len(results) == 4
assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
assert results[2][0] == "3"
assert results[2][1][0] == 1.0
assert results[3] == ("4", [1.0, 0.0])
# empty query
assert (
self.index.classify("example", [("index:a", 0, "hello world"), ("index:b", 0, "")])
== self.index.compare("example", "4", [("index:a", 0), ("index:b", 0)])
== [("4", [1.0, None]), ("1", [1.0, 0.0]), ("2", [1.0, 0.0]), ("3", [1.0, 0.0])]
)
def test_merge(self):
self.index.record("example", "1", [("index", ["foo", "bar"])])
self.index.record("example", "2", [("index", ["baz"])])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
self.index.merge("example", "1", [("index", "2")])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [0.5])]
# merge into an empty key should act as a move
self.index.merge("example", "2", [("index", "1")])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("2", [0.5])]
def test_flush_scoped(self):
self.index.record("example", "1", [("index", ["foo", "bar"])])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
self.index.flush("example", ["index"])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []
def test_flush_unscoped(self):
self.index.record("example", "1", [("index", ["foo", "bar"])])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
self.index.flush("*", ["index"])
assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []
@abc.abstractmethod
def test_export_import(self):
pass
| mvaled/sentry | tests/sentry/similarity/backends/base.py | Python | bsd-3-clause | 8,478 | 0.003185 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles backports of the standard library's `fractions.py`.
The fractions module in 2.6 does not handle being instantiated using a
float and then calculating an approximate fraction based on that.
This functionality is required by the FITS unit format generator,
since the FITS unit format handles only rational, not decimal point,
powers.
"""
from __future__ import absolute_import
import sys
if sys.version_info[:2] == (2, 6):
from ._fractions_py2 import *
else:
from fractions import *
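# A minimal illustrative sketch (not part of the original module): the point of
# the backport is that ``Fraction`` can be built from a float and then reduced
# to an approximate rational, which the FITS unit format needs for rational
# powers.
if __name__ == "__main__":
    # 0.5 is exactly representable as a float, so the conversion is exact.
    assert Fraction(0.5) == Fraction(1, 2)
    # 1/3 is not; limit_denominator() recovers the intended rational.
    assert Fraction(1.0 / 3.0).limit_denominator(10) == Fraction(1, 3)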
| piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/utils/compat/fractions.py | Python | mit | 568 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 The Spyder development team
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
import optparse
def get_options():
"""
    Parse the command line options passed to Spyder.
    Return the parsed options and the remaining file arguments.
"""
parser = optparse.OptionParser(usage="spyder [options] files")
parser.add_option('-l', '--light', action='store_true', default=False,
help="Light version (all add-ons are disabled)")
parser.add_option('--new-instance', action='store_true', default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)")
parser.add_option('--session', dest="startup_session", default='',
help="Startup session")
parser.add_option('--defaults', dest="reset_to_defaults",
action='store_true', default=False,
help="Reset configuration settings to defaults")
parser.add_option('--reset', dest="reset_session",
action='store_true', default=False,
help="Remove all configuration files!")
parser.add_option('--optimize', action='store_true', default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)")
parser.add_option('-w', '--workdir', dest="working_directory", default=None,
help="Default working directory")
parser.add_option('--show-console', action='store_true', default=False,
help="Do not hide parent console window (Windows)")
parser.add_option('--multithread', dest="multithreaded",
action='store_true', default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)")
parser.add_option('--profile', action='store_true', default=False,
help="Profile mode (internal test, "
"not related with Python profiling)")
options, args = parser.parse_args()
return options, args
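# Illustrative usage sketch (assumed, not part of the original module): parse
# whatever arguments this file is invoked with and echo a couple of them.
if __name__ == "__main__":
    opts, files = get_options()
    print("light mode: %s, files: %s" % (opts.light, files))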
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/cli_options.py | Python | gpl-3.0 | 2,217 | 0.000903 |
"""
Convenience methods for working with datetime objects
"""
from datetime import timedelta
from django.utils.translation import ugettext as _
def get_default_time_display(dt, show_timezone=True):
"""
Converts a datetime to a string representation. This is the default
representation used in Studio and LMS.
It is of the form "Apr 09, 2013 at 16:00" or "Apr 09, 2013 at 16:00 UTC",
depending on the value of show_timezone.
If None is passed in for dt, an empty string will be returned.
The default value of show_timezone is True.
"""
if dt is None:
return u""
timezone = u""
if show_timezone:
if dt.tzinfo is not None:
try:
timezone = u" " + dt.tzinfo.tzname(dt)
except NotImplementedError:
timezone = dt.strftime('%z')
else:
timezone = u" UTC"
return unicode(dt.strftime(u"%b %d, %Y {at} %H:%M{tz}")).format(
at=_(u"at"), tz=timezone).strip()
def almost_same_datetime(dt1, dt2, allowed_delta=timedelta(minutes=1)):
"""
    Returns True if the two datetimes are within allowed_delta of each other
    (one minute by default), in case the seconds saved to the db or the
    timezones aren't the same.
:param dt1:
:param dt2:
"""
return abs(dt1 - dt2) < allowed_delta
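# A minimal usage sketch (assumed, not part of the original module). With a
# naive datetime, get_default_time_display() falls back to the " UTC" suffix,
# e.g. get_default_time_display(datetime(2013, 4, 9, 16, 0)) gives
# u"Apr 09, 2013 at 16:00 UTC".
if __name__ == "__main__":
    from datetime import datetime
    # A 30 second gap counts as "almost the same" under the default one-minute delta.
    assert almost_same_datetime(datetime(2013, 4, 9, 16, 0, 10),
                                datetime(2013, 4, 9, 16, 0, 40))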
| praveen-pal/edx-platform | common/lib/xmodule/xmodule/util/date_utils.py | Python | agpl-3.0 | 1,275 | 0.000784 |
# coding=utf-8
# Authors:
# Pedro Jose Pereira Vieito <pvieito@gmail.com> (Twitter: @pvieito)
#
# URL: https://github.com/mr-orange/Sick-Beard
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
# Uses the Synology Download Station API: http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf
import sickbeard
from sickbeard.clients.generic import GenericClient
class DownloadStationAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(DownloadStationAPI, self).__init__('DownloadStation', host, username, password)
self.url = self.host + 'webapi/DownloadStation/task.cgi'
def _get_auth(self):
auth_url = self.host + 'webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=' + self.username + '&passwd=' + self.password + '&session=DownloadStation&format=sid'
try:
self.response = self.session.get(auth_url, verify=False)
self.auth = self.response.json()['data']['sid']
except Exception:
return None
return self.auth
def _add_torrent_uri(self, result):
data = {
'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
'session': 'DownloadStation',
'_sid': self.auth,
'uri': result.url
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
self._request(method='post', data=data)
return self.response.json()['success']
def _add_torrent_file(self, result):
data = {
'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
'session': 'DownloadStation',
'_sid': self.auth
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
files = {'file': (result.name + '.torrent', result.content)}
self._request(method='post', data=data, files=files)
return self.response.json()['success']
api = DownloadStationAPI()
| srluge/SickRage | sickbeard/clients/download_station_client.py | Python | gpl-3.0 | 2,731 | 0.001465 |
import threading
import pprint
import json
from bottle import route, run, Bottle
class Rest_Server(Bottle):
'''The REST front end'''
def __init__(self, core):
super(Rest_Server, self).__init__()
self._core = core
self.route('/plms', callback=self.list_plms)
def start(self):
threading.Thread(target=self.run, kwargs=dict(
host='localhost', port=8080, debug=True)).start()
def list_plms(self):
'''
        Returns an object containing all of the PLMs.
**Example request**:
.. sourcecode:: http
GET /plms HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"20F5F5": {
"dev_cat": 3,
"firmware": 155,
"port": "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A501LCKJ-if00-port0",
"port_active": false,
"sub_cat": 21
},
"3C4DB9": {
"dev_cat": 3,
"firmware": 158,
"port": "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A403KDV3-if00-port0",
"port_active": true,
"sub_cat": 21
}
}
:statuscode 200: no error
'''
plms = self._core.get_all_plms()
ret = {}
for plm in plms:
ret[plm.dev_addr_str] = {
'dev_cat': plm.dev_cat,
'sub_cat': plm.sub_cat,
'firmware': plm.firmware,
'port': plm.port,
'port_active': plm.port_active
}
return self.jsonify(ret)
def jsonify(self, data):
return json.dumps(data, indent=4, sort_keys=True)
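# Illustrative client-side sketch (assumed, not part of the original module):
# once Rest_Server.start() is running, the endpoint documented above can be
# queried with any HTTP client, e.g.
#
#     import json, urllib2
#     plms = json.loads(urllib2.urlopen('http://localhost:8080/plms').read())
#     for address, info in plms.items():
#         print(address, info['port_active'])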
| krkeegan/lib-py-insteon | insteon/rest_server.py | Python | gpl-2.0 | 1,792 | 0.001116 |
from data_types.user import User
class Commit:
"""
Commit object
https://developer.github.com/v3/repos/commits/
Attributes:
url: Commit URL in repo
author: Commit author
        committer: User who committed the change
        message: Commit message
tree: Example {
"url": "https://api.github.com/repos/octocat/Hello-World/tree/6dcb09b5b57875f334f61aebed695e2e4193db5e",
"sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e"
},
comment_count: Number of comments
added: List of added files
removed: List of removed files
modified: List of modified files
"""
def __init__(self, data):
self.url = data.get('url', '')
self.author = None
if 'author' in data:
self.author = User(data['author'])
self.committer = None
if 'committer' in data:
self.committer = User(data['committer'])
self.message = data.get('message', '')
self.tree = data.get('tree', None)
self.comment_count = data.get('comment_count', 0)
self.added = data.get('added', [])
self.removed = data.get('removed', [])
self.modified = data.get('modified', [])
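if __name__ == "__main__":
    # Minimal illustrative construction (assumed, not part of the original
    # module). Only scalar fields are supplied, so no User objects are built.
    sample = Commit({
        "url": "https://api.github.com/repos/octocat/Hello-World/commits/6dcb09b",
        "message": "Fix all the bugs",
        "added": ["README.md"],
    })
    print(sample.message, sample.added)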
| codex-bot/github | github/data_types/commit.py | Python | mit | 1,228 | 0.001629 |
from __future__ import unicode_literals
import copy
from ._compat import decode
from ._utils import merge_dicts
from .exceptions import KeyAlreadyPresent
from .exceptions import NonExistentKey
from .exceptions import ParseError
from .exceptions import TOMLKitError
from .items import AoT
from .items import Comment
from .items import Item
from .items import Key
from .items import Null
from .items import Table
from .items import Whitespace
from .items import item as _item
_NOT_SET = object()
class Container(dict):
"""
A container for items within a TOMLDocument.
"""
def __init__(self, parsed=False): # type: (bool) -> None
self._map = {} # type: Dict[Key, int]
self._body = [] # type: List[Tuple[Optional[Key], Item]]
self._parsed = parsed
self._table_keys = []
@property
def body(self): # type: () -> List[Tuple[Optional[Key], Item]]
return self._body
@property
def value(self): # type: () -> Dict[Any, Any]
d = {}
for k, v in self._body:
if k is None:
continue
k = k.key
v = v.value
if isinstance(v, Container):
v = v.value
if k in d:
merge_dicts(d[k], v)
else:
d[k] = v
return d
def parsing(self, parsing): # type: (bool) -> None
self._parsed = parsing
for k, v in self._body:
if isinstance(v, Table):
v.value.parsing(parsing)
elif isinstance(v, AoT):
for t in v.body:
t.value.parsing(parsing)
def add(
self, key, item=None
): # type: (Union[Key, Item, str], Optional[Item]) -> Container
"""
Adds an item to the current Container.
"""
if item is None:
if not isinstance(key, (Comment, Whitespace)):
raise ValueError(
"Non comment/whitespace items must have an associated key"
)
key, item = None, key
return self.append(key, item)
def append(self, key, item): # type: (Union[Key, str, None], Item) -> Container
if not isinstance(key, Key) and key is not None:
key = Key(key)
if not isinstance(item, Item):
item = _item(item)
if isinstance(item, (AoT, Table)) and item.name is None:
item.name = key.key
if (
isinstance(item, Table)
and self._body
and not self._parsed
and not item.trivia.indent
):
item.trivia.indent = "\n"
if isinstance(item, AoT) and self._body and not self._parsed:
if item and "\n" not in item[0].trivia.indent:
item[0].trivia.indent = "\n" + item[0].trivia.indent
else:
self.append(None, Whitespace("\n"))
if key is not None and key in self:
current_idx = self._map[key]
if isinstance(current_idx, tuple):
current_body_element = self._body[current_idx[-1]]
else:
current_body_element = self._body[current_idx]
current = current_body_element[1]
if isinstance(item, Table):
if not isinstance(current, (Table, AoT)):
raise KeyAlreadyPresent(key)
if item.is_aot_element():
# New AoT element found later on
# Adding it to the current AoT
if not isinstance(current, AoT):
current = AoT([current, item], parsed=self._parsed)
self._replace(key, key, current)
else:
current.append(item)
return self
elif current.is_aot():
if not item.is_aot_element():
# Tried to define a table after an AoT with the same name.
raise KeyAlreadyPresent(key)
current.append(item)
return self
elif current.is_super_table():
if item.is_super_table():
# We need to merge both super tables
if (
self._table_keys[-1] != current_body_element[0]
or key.is_dotted()
or current_body_element[0].is_dotted()
):
if not isinstance(current_idx, tuple):
current_idx = (current_idx,)
self._map[key] = current_idx + (len(self._body),)
self._body.append((key, item))
self._table_keys.append(key)
# Building a temporary proxy to check for errors
OutOfOrderTableProxy(self, self._map[key])
return self
for k, v in item.value.body:
current.append(k, v)
return self
elif current_body_element[0].is_dotted():
raise TOMLKitError("Redefinition of an existing table")
elif not item.is_super_table():
raise KeyAlreadyPresent(key)
elif isinstance(item, AoT):
if not isinstance(current, AoT):
# Tried to define an AoT after a table with the same name.
raise KeyAlreadyPresent(key)
for table in item.body:
current.append(table)
return self
else:
raise KeyAlreadyPresent(key)
is_table = isinstance(item, (Table, AoT))
if key is not None and self._body and not self._parsed:
# If there is already at least one table in the current container
# and the given item is not a table, we need to find the last
# item that is not a table and insert after it
# If no such item exists, insert at the top of the table
key_after = None
idx = 0
for k, v in self._body:
if isinstance(v, Null):
# This happens only after deletion
continue
if isinstance(v, Whitespace) and not v.is_fixed():
continue
if not is_table and isinstance(v, (Table, AoT)):
break
key_after = k or idx
idx += 1
if key_after is not None:
if isinstance(key_after, int):
if key_after + 1 < len(self._body) - 1:
return self._insert_at(key_after + 1, key, item)
else:
previous_item = self._body[-1][1]
if (
not isinstance(previous_item, Whitespace)
and not is_table
and "\n" not in previous_item.trivia.trail
):
previous_item.trivia.trail += "\n"
else:
return self._insert_after(key_after, key, item)
else:
return self._insert_at(0, key, item)
if key in self._map:
current_idx = self._map[key]
if isinstance(current_idx, tuple):
current_idx = current_idx[-1]
current = self._body[current_idx][1]
if key is not None and not isinstance(current, Table):
raise KeyAlreadyPresent(key)
# Adding sub tables to a currently existing table
if not isinstance(current_idx, tuple):
current_idx = (current_idx,)
self._map[key] = current_idx + (len(self._body),)
else:
self._map[key] = len(self._body)
self._body.append((key, item))
if item.is_table():
self._table_keys.append(key)
if key is not None:
super(Container, self).__setitem__(key.key, item.value)
return self
def remove(self, key): # type: (Union[Key, str]) -> Container
if not isinstance(key, Key):
key = Key(key)
idx = self._map.pop(key, None)
if idx is None:
raise NonExistentKey(key)
if isinstance(idx, tuple):
for i in idx:
self._body[i] = (None, Null())
else:
self._body[idx] = (None, Null())
super(Container, self).__delitem__(key.key)
return self
def _insert_after(
self, key, other_key, item
): # type: (Union[str, Key], Union[str, Key], Union[Item, Any]) -> Container
if key is None:
raise ValueError("Key cannot be null in insert_after()")
if key not in self:
raise NonExistentKey(key)
if not isinstance(key, Key):
key = Key(key)
if not isinstance(other_key, Key):
other_key = Key(other_key)
item = _item(item)
idx = self._map[key]
# Insert after the max index if there are many.
if isinstance(idx, tuple):
idx = max(idx)
current_item = self._body[idx][1]
if "\n" not in current_item.trivia.trail:
current_item.trivia.trail += "\n"
# Increment indices after the current index
for k, v in self._map.items():
if isinstance(v, tuple):
new_indices = []
for v_ in v:
if v_ > idx:
v_ = v_ + 1
new_indices.append(v_)
self._map[k] = tuple(new_indices)
elif v > idx:
self._map[k] = v + 1
self._map[other_key] = idx + 1
self._body.insert(idx + 1, (other_key, item))
if key is not None:
super(Container, self).__setitem__(other_key.key, item.value)
return self
def _insert_at(
self, idx, key, item
): # type: (int, Union[str, Key], Union[Item, Any]) -> Container
if idx > len(self._body) - 1:
raise ValueError("Unable to insert at position {}".format(idx))
if not isinstance(key, Key):
key = Key(key)
item = _item(item)
if idx > 0:
previous_item = self._body[idx - 1][1]
if (
not isinstance(previous_item, Whitespace)
and not isinstance(item, (AoT, Table))
and "\n" not in previous_item.trivia.trail
):
previous_item.trivia.trail += "\n"
# Increment indices after the current index
for k, v in self._map.items():
if isinstance(v, tuple):
new_indices = []
for v_ in v:
if v_ >= idx:
v_ = v_ + 1
new_indices.append(v_)
self._map[k] = tuple(new_indices)
elif v >= idx:
self._map[k] = v + 1
self._map[key] = idx
self._body.insert(idx, (key, item))
if key is not None:
super(Container, self).__setitem__(key.key, item.value)
return self
def item(self, key): # type: (Union[Key, str]) -> Item
if not isinstance(key, Key):
key = Key(key)
idx = self._map.get(key, None)
if idx is None:
raise NonExistentKey(key)
if isinstance(idx, tuple):
# The item we are getting is an out of order table
# so we need a proxy to retrieve the proper objects
# from the parent container
return OutOfOrderTableProxy(self, idx)
return self._body[idx][1]
def last_item(self): # type: () -> Optional[Item]
if self._body:
return self._body[-1][1]
def as_string(self): # type: () -> str
s = ""
for k, v in self._body:
if k is not None:
if isinstance(v, Table):
s += self._render_table(k, v)
elif isinstance(v, AoT):
s += self._render_aot(k, v)
else:
s += self._render_simple_item(k, v)
else:
s += self._render_simple_item(k, v)
return s
def _render_table(
self, key, table, prefix=None
): # (Key, Table, Optional[str]) -> str
cur = ""
if table.display_name is not None:
_key = table.display_name
else:
_key = key.as_string()
if prefix is not None:
_key = prefix + "." + _key
if not table.is_super_table() or (
any(
not isinstance(v, (Table, AoT, Whitespace)) for _, v in table.value.body
)
and not key.is_dotted()
):
open_, close = "[", "]"
if table.is_aot_element():
open_, close = "[[", "]]"
cur += "{}{}{}{}{}{}{}{}".format(
table.trivia.indent,
open_,
decode(_key),
close,
table.trivia.comment_ws,
decode(table.trivia.comment),
table.trivia.trail,
"\n" if "\n" not in table.trivia.trail and len(table.value) > 0 else "",
)
for k, v in table.value.body:
if isinstance(v, Table):
if v.is_super_table():
if k.is_dotted() and not key.is_dotted():
# Dotted key inside table
cur += self._render_table(k, v)
else:
cur += self._render_table(k, v, prefix=_key)
else:
cur += self._render_table(k, v, prefix=_key)
elif isinstance(v, AoT):
cur += self._render_aot(k, v, prefix=_key)
else:
cur += self._render_simple_item(
k, v, prefix=_key if key.is_dotted() else None
)
return cur
def _render_aot(self, key, aot, prefix=None):
_key = key.as_string()
if prefix is not None:
_key = prefix + "." + _key
cur = ""
_key = decode(_key)
for table in aot.body:
cur += self._render_aot_table(table, prefix=_key)
return cur
def _render_aot_table(self, table, prefix=None): # (Table, Optional[str]) -> str
cur = ""
_key = prefix or ""
if not table.is_super_table():
open_, close = "[[", "]]"
cur += "{}{}{}{}{}{}{}".format(
table.trivia.indent,
open_,
decode(_key),
close,
table.trivia.comment_ws,
decode(table.trivia.comment),
table.trivia.trail,
)
for k, v in table.value.body:
if isinstance(v, Table):
if v.is_super_table():
if k.is_dotted():
# Dotted key inside table
cur += self._render_table(k, v)
else:
cur += self._render_table(k, v, prefix=_key)
else:
cur += self._render_table(k, v, prefix=_key)
elif isinstance(v, AoT):
cur += self._render_aot(k, v, prefix=_key)
else:
cur += self._render_simple_item(k, v)
return cur
def _render_simple_item(self, key, item, prefix=None):
if key is None:
return item.as_string()
_key = key.as_string()
if prefix is not None:
_key = prefix + "." + _key
return "{}{}{}{}{}{}{}".format(
item.trivia.indent,
decode(_key),
key.sep,
decode(item.as_string()),
item.trivia.comment_ws,
decode(item.trivia.comment),
item.trivia.trail,
)
# Dictionary methods
def keys(self): # type: () -> Generator[str]
return super(Container, self).keys()
def values(self): # type: () -> Generator[Item]
for k in self.keys():
yield self[k]
def items(self): # type: () -> Generator[Item]
for k, v in self.value.items():
if k is None:
continue
yield k, v
def update(self, other): # type: (Dict) -> None
for k, v in other.items():
self[k] = v
def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any
if not isinstance(key, Key):
key = Key(key)
if key not in self:
return default
return self[key]
def pop(self, key, default=_NOT_SET):
try:
value = self[key]
except KeyError:
if default is _NOT_SET:
raise
return default
del self[key]
return value
def setdefault(
self, key, default=None
): # type: (Union[Key, str], Any) -> Union[Item, Container]
if key not in self:
self[key] = default
return self[key]
def __contains__(self, key): # type: (Union[Key, str]) -> bool
if not isinstance(key, Key):
key = Key(key)
return key in self._map
def __getitem__(self, key): # type: (Union[Key, str]) -> Union[Item, Container]
if not isinstance(key, Key):
key = Key(key)
idx = self._map.get(key, None)
if idx is None:
raise NonExistentKey(key)
if isinstance(idx, tuple):
# The item we are getting is an out of order table
# so we need a proxy to retrieve the proper objects
# from the parent container
return OutOfOrderTableProxy(self, idx)
item = self._body[idx][1]
if item.is_boolean():
return item.value
return item
def __setitem__(self, key, value): # type: (Union[Key, str], Any) -> None
if key is not None and key in self:
self._replace(key, key, value)
else:
self.append(key, value)
def __delitem__(self, key): # type: (Union[Key, str]) -> None
self.remove(key)
def _replace(
self, key, new_key, value
): # type: (Union[Key, str], Union[Key, str], Item) -> None
if not isinstance(key, Key):
key = Key(key)
if not isinstance(new_key, Key):
new_key = Key(new_key)
idx = self._map.get(key, None)
if idx is None:
raise NonExistentKey(key)
self._replace_at(idx, new_key, value)
def _replace_at(
self, idx, new_key, value
): # type: (Union[int, Tuple[int]], Union[Key, str], Item) -> None
if not isinstance(new_key, Key):
new_key = Key(new_key)
if isinstance(idx, tuple):
for i in idx[1:]:
self._body[i] = (None, Null())
idx = idx[0]
k, v = self._body[idx]
self._map[new_key] = self._map.pop(k)
if new_key != k:
super(Container, self).__delitem__(k)
if isinstance(self._map[new_key], tuple):
self._map[new_key] = self._map[new_key][0]
value = _item(value)
# Copying trivia
if not isinstance(value, (Whitespace, AoT)):
value.trivia.indent = v.trivia.indent
value.trivia.comment_ws = v.trivia.comment_ws
value.trivia.comment = v.trivia.comment
value.trivia.trail = v.trivia.trail
if isinstance(value, Table):
# Insert a cosmetic new line for tables
value.append(None, Whitespace("\n"))
self._body[idx] = (new_key, value)
super(Container, self).__setitem__(new_key.key, value.value)
def __str__(self): # type: () -> str
return str(self.value)
def __repr__(self): # type: () -> str
return super(Container, self).__repr__()
def __eq__(self, other): # type: (Dict) -> bool
if not isinstance(other, dict):
return NotImplemented
return self.value == other
def _getstate(self, protocol):
return (self._parsed,)
def __reduce__(self):
return self.__reduce_ex__(2)
def __reduce_ex__(self, protocol):
return (
self.__class__,
self._getstate(protocol),
(self._map, self._body, self._parsed),
)
def __setstate__(self, state):
self._map = state[0]
self._body = state[1]
self._parsed = state[2]
def copy(self): # type: () -> Container
return copy.copy(self)
def __copy__(self): # type: () -> Container
c = self.__class__(self._parsed)
for k, v in super(Container, self).copy().items():
super(Container, c).__setitem__(k, v)
c._body += self.body
c._map.update(self._map)
return c
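# Illustrative sketch of how a Container behaves (assumed, not part of the
# original module): it acts like a dict but also stores the formatting of each
# item, so a document can be rendered back to TOML text unchanged.
#
#     doc = Container()
#     doc.append("title", "TOML Example")   # plain values are wrapped via _item()
#     doc["title"]                          # -> "TOML Example"
#     doc.as_string()                       # -> 'title = "TOML Example"\n'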
class OutOfOrderTableProxy(dict):
def __init__(self, container, indices): # type: (Container, Tuple) -> None
self._container = container
self._internal_container = Container(self._container.parsing)
self._tables = []
self._tables_map = {}
self._map = {}
for i in indices:
key, item = self._container._body[i]
if isinstance(item, Table):
self._tables.append(item)
table_idx = len(self._tables) - 1
for k, v in item.value.body:
self._internal_container.append(k, v)
self._tables_map[k] = table_idx
if k is not None:
super(OutOfOrderTableProxy, self).__setitem__(k.key, v)
else:
self._internal_container.append(key, item)
self._map[key] = i
if key is not None:
super(OutOfOrderTableProxy, self).__setitem__(key.key, item)
@property
def value(self):
return self._internal_container.value
def __getitem__(self, key): # type: (Union[Key, str]) -> Any
if key not in self._internal_container:
raise NonExistentKey(key)
return self._internal_container[key]
def __setitem__(self, key, item): # type: (Union[Key, str], Any) -> None
if key in self._map:
idx = self._map[key]
self._container._replace_at(idx, key, item)
elif key in self._tables_map:
table = self._tables[self._tables_map[key]]
table[key] = item
elif self._tables:
table = self._tables[0]
table[key] = item
else:
self._container[key] = item
if key is not None:
super(OutOfOrderTableProxy, self).__setitem__(key, item)
def __delitem__(self, key): # type: (Union[Key, str]) -> None
if key in self._map:
idx = self._map[key]
del self._container[key]
del self._map[key]
elif key in self._tables_map:
table = self._tables[self._tables_map[key]]
del table[key]
del self._tables_map[key]
else:
raise NonExistentKey(key)
del self._internal_container[key]
def keys(self):
return self._internal_container.keys()
def values(self):
return self._internal_container.values()
def items(self): # type: () -> Generator[Item]
return self._internal_container.items()
def update(self, other): # type: (Dict) -> None
self._internal_container.update(other)
def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any
return self._internal_container.get(key, default=default)
def pop(self, key, default=_NOT_SET):
return self._internal_container.pop(key, default=default)
def setdefault(
self, key, default=None
): # type: (Union[Key, str], Any) -> Union[Item, Container]
return self._internal_container.setdefault(key, default=default)
def __contains__(self, key):
return key in self._internal_container
def __str__(self):
return str(self._internal_container)
def __repr__(self):
return repr(self._internal_container)
def __eq__(self, other): # type: (Dict) -> bool
if not isinstance(other, dict):
return NotImplemented
return self._internal_container == other
def __getattr__(self, attribute):
return getattr(self._internal_container, attribute)
| kennethreitz/pipenv | pipenv/vendor/tomlkit/container.py | Python | mit | 24,835 | 0.000322 |
from gevent import monkey, sleep, spawn
monkey.patch_all() # NOQA
import sys
import time
import yaml
import logging
import ldap
from oncall import metrics
from ldap.controls import SimplePagedResultsControl
from datetime import datetime
from pytz import timezone
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from phonenumbers import format_number, parse, PhoneNumberFormat
from phonenumbers.phonenumberutil import NumberParseException
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(ch)
stats = {
'ldap_found': 0,
'sql_errors': 0,
'users_added': 0,
'users_failed_to_add': 0,
'users_failed_to_update': 0,
'users_purged': 0,
'user_contacts_updated': 0,
'user_names_updated': 0,
'user_photos_updated': 0,
'users_reactivated': 0,
'users_failed_to_reactivate': 0,
}
LDAP_SETTINGS = {}
def normalize_phone_number(num):
return format_number(parse(num, 'US'), PhoneNumberFormat.INTERNATIONAL)
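# Illustrative example (assumed, not part of the original module):
# normalize_phone_number("415-555-2671") returns "+1 415-555-2671".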
def get_predefined_users(config):
users = {}
try:
config_users = config['sync_script']['preset_users']
except KeyError:
return {}
for user in config_users:
users[user['name']] = user
for key in ['sms', 'call']:
try:
users[user['name']][key] = normalize_phone_number(users[user['name']][key])
except (NumberParseException, KeyError, AttributeError):
users[user['name']][key] = None
return users
def timestamp_to_human_str(timestamp, tz):
dt = datetime.fromtimestamp(timestamp, timezone(tz))
return ' '.join([dt.strftime('%Y-%m-%d %H:%M:%S'), tz])
def prune_user(engine, username):
global stats
stats['users_purged'] += 1
try:
engine.execute('DELETE FROM `user` WHERE `name` = %s', username)
logger.info('Deleted inactive user %s', username)
# The user has messages or some other user data which should be preserved. Just mark as inactive.
except IntegrityError:
logger.info('Marking user %s inactive', username)
engine.execute('UPDATE `user` SET `active` = FALSE WHERE `name` = %s', username)
except SQLAlchemyError as e:
logger.error('Deleting user %s failed: %s', username, e)
stats['sql_errors'] += 1
try:
engine.execute('DELETE FROM `ical_key` WHERE `requester` = %s', username)
logger.info('Invalidated ical_key of inactive user %s', username)
except Exception as e:
logger.error('Invalidating ical_key of inactive user %s failed: %s', username, e)
stats['sql_errors'] += 1
def fetch_ldap():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
l = ldap.initialize(LDAP_SETTINGS['url'])
if 'cert_path' in LDAP_SETTINGS:
l.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_SETTINGS['cert_path'])
l.simple_bind_s(LDAP_SETTINGS['user'], LDAP_SETTINGS['password'])
req_ctrl = SimplePagedResultsControl(True, size=1000, cookie='')
known_ldap_resp_ctrls = {
SimplePagedResultsControl.controlType: SimplePagedResultsControl,
}
base = LDAP_SETTINGS['base']
attrs = ['distinguishedName'] + list(LDAP_SETTINGS['attrs'].values())
query = LDAP_SETTINGS['query']
users = {}
dn_map = {}
while True:
msgid = l.search_ext(base, ldap.SCOPE_SUBTREE, query, attrs, serverctrls=[req_ctrl])
rtype, rdata, rmsgid, serverctrls = l.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls)
logger.info('Loaded %d entries from ldap.' % len(rdata))
for dn, ldap_dict in rdata:
if LDAP_SETTINGS['attrs']['mail'] not in ldap_dict:
logger.error('ERROR: invalid ldap entry for dn: %s' % dn)
continue
try:
username_field = LDAP_SETTINGS['attrs']['username']
except KeyError:
username_field = "sAMAccountName"
username = ldap_dict[username_field][0]
if isinstance(username, bytes):
username = username.decode("utf-8")
name = ldap_dict.get(LDAP_SETTINGS['attrs']['full_name'])[0]
if isinstance(name, bytes):
name = name.decode("utf-8")
mobile = ldap_dict.get(LDAP_SETTINGS['attrs']['mobile'])
mail = ldap_dict.get(LDAP_SETTINGS['attrs']['mail'])
if mobile:
try:
mobile = mobile[0]
if isinstance(mobile, bytes):
mobile = mobile.decode("utf-8")
mobile = normalize_phone_number(mobile)
except NumberParseException:
mobile = None
except UnicodeEncodeError:
mobile = None
if mail:
mail = mail[0]
if isinstance(mail, bytes):
mail = mail.decode("utf-8")
slack = mail.split('@')[0]
else:
slack = None
contacts = {'call': mobile, 'sms': mobile, 'email': mail, 'slack': slack, 'name': name}
dn_map[dn] = username
users[username] = contacts
pctrls = [
c for c in serverctrls if c.controlType == SimplePagedResultsControl.controlType
]
cookie = pctrls[0].cookie
if not cookie:
break
req_ctrl.cookie = cookie
return users
def user_exists(username, engine):
return engine.execute('SELECT `id` FROM user WHERE name = %s', username)
def import_user(username, ldap_contacts, engine):
logger.debug('Inserting %s' % username)
full_name = ldap_contacts.pop('full_name')
user_add_sql = 'INSERT INTO `user` (`name`, `full_name`, `photo_url`) VALUES (%s, %s, %s)'
# get objects needed for insertion
modes = get_modes(engine)
try:
photo_url_tpl = LDAP_SETTINGS.get('image_url')
photo_url = photo_url_tpl % username if photo_url_tpl else None
engine.execute(user_add_sql, (username, full_name, photo_url))
engine.execute("SELECT `id` FROM user WHERE name = %s", username)
row = engine.fetchone()
user_id = row['id']
except SQLAlchemyError:
stats['users_failed_to_add'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to add user %s' % username)
return
stats['users_added'] += 1
for key, value in ldap_contacts.items():
if value and key in modes:
logger.debug('\t%s -> %s' % (key, value))
user_contact_add_sql = 'INSERT INTO `user_contact` (`user_id`, `mode_id`, `destination`) VALUES (%s, %s, %s)'
engine.execute(user_contact_add_sql, (user_id, modes[key], value))
def get_modes(engine):
engine.execute('SELECT `name`, `id` FROM `contact_mode`')
modes = {}
for row in engine.fetchall():
modes[row['name']] = row['id']
return modes
def update_user(username, ldap_contacts, engine):
oncall_user = get_oncall_user(username, engine)
db_contacts = oncall_user[username]
full_name = ldap_contacts.pop('full_name')
contact_update_sql = 'UPDATE user_contact SET destination = %s WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
contact_insert_sql = 'INSERT INTO user_contact (user_id, mode_id, destination) VALUES ((SELECT id FROM user WHERE name = %s), %s, %s)'
contact_delete_sql = 'DELETE FROM user_contact WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
name_update_sql = 'UPDATE user SET full_name = %s WHERE name = %s'
photo_update_sql = 'UPDATE user SET photo_url = %s WHERE name = %s'
modes = get_modes(engine)
try:
if full_name != db_contacts.get('full_name'):
engine.execute(name_update_sql, (full_name, username))
stats['user_names_updated'] += 1
if 'image_url' in LDAP_SETTINGS and not db_contacts.get('photo_url'):
photo_url_tpl = LDAP_SETTINGS.get('image_url')
photo_url = photo_url_tpl % username if photo_url_tpl else None
engine.execute(photo_update_sql, (photo_url, username))
stats['user_photos_updated'] += 1
for mode in modes:
if mode in ldap_contacts and ldap_contacts[mode]:
if mode in db_contacts:
if ldap_contacts[mode] != db_contacts[mode]:
logger.debug('\tupdating %s (%s -> %s)' % (mode, db_contacts[mode], ldap_contacts[mode]))
engine.execute(contact_update_sql, (ldap_contacts[mode], username, modes[mode]))
stats['user_contacts_updated'] += 1
else:
logger.debug('\tadding %s', mode)
engine.execute(contact_insert_sql, (username, modes[mode], ldap_contacts[mode]))
stats['user_contacts_updated'] += 1
elif mode in db_contacts:
logger.debug('\tdeleting %s', mode)
engine.execute(contact_delete_sql, (username, modes[mode]))
stats['user_contacts_updated'] += 1
else:
logger.debug('\tmissing %s', mode)
except SQLAlchemyError:
stats['users_failed_to_update'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to update user %s' % username)
def get_oncall_user(username, engine):
oncall_user = {}
user_query = '''SELECT `user`.`name` as `name`, `contact_mode`.`name` as `mode`, `user_contact`.`destination`,
`user`.`full_name`, `user`.`photo_url`
FROM `user`
LEFT OUTER JOIN `user_contact` ON `user`.`id` = `user_contact`.`user_id`
LEFT OUTER JOIN `contact_mode` ON `user_contact`.`mode_id` = `contact_mode`.`id`
WHERE `user`.`name` = %s
ORDER BY `user`.`name`'''
engine.execute(user_query, username)
for row in engine.fetchall():
contacts = oncall_user.setdefault(row['name'], {})
if row['mode'] is None or row['destination'] is None:
continue
contacts[row['mode']] = row['destination']
contacts['full_name'] = row['full_name']
contacts['photo_url'] = row['photo_url']
return oncall_user
def sync(config, engine):
Session = sessionmaker(bind=engine)
session = Session()
oncall_users = {}
users_query = '''SELECT `user`.`name` as `name`, `contact_mode`.`name` as `mode`, `user_contact`.`destination`,
`user`.`full_name`, `user`.`photo_url`
FROM `user`
LEFT OUTER JOIN `user_contact` ON `user`.`id` = `user_contact`.`user_id`
LEFT OUTER JOIN `contact_mode` ON `user_contact`.`mode_id` = `contact_mode`.`id`
ORDER BY `user`.`name`'''
for row in engine.execute(users_query):
contacts = oncall_users.setdefault(row.name, {})
if row.mode is None or row.destination is None:
continue
contacts[row.mode] = row.destination
contacts['full_name'] = row.full_name
contacts['photo_url'] = row.photo_url
oncall_usernames = set(oncall_users)
# users from ldap and config file
ldap_users = fetch_ldap()
stats['ldap_found'] += len(ldap_users)
ldap_users.update(get_predefined_users(config))
ldap_usernames = set(ldap_users)
# set of ldap users not in oncall
users_to_insert = ldap_usernames - oncall_usernames
# set of existing oncall users that are in ldap
users_to_update = oncall_usernames & ldap_usernames
# set of users in oncall but not ldap, assumed to be inactive
inactive_users = oncall_usernames - ldap_usernames
# users who need to be deactivated
if inactive_users:
rows = engine.execute('SELECT name FROM user WHERE active = TRUE AND name IN %s', inactive_users)
users_to_purge = (user.name for user in rows)
else:
users_to_purge = []
# set of inactive oncall users who appear in ldap
rows = engine.execute('SELECT name FROM user WHERE active = FALSE AND name IN %s', ldap_usernames)
users_to_reactivate = (user.name for user in rows)
# get objects needed for insertion
modes = dict(list(session.execute('SELECT `name`, `id` FROM `contact_mode`')))
user_add_sql = 'INSERT INTO `user` (`name`, `full_name`, `photo_url`) VALUES (%s, %s, %s)'
# insert users that need to be
logger.debug('Users to insert:')
for username in users_to_insert:
logger.debug('Inserting %s' % username)
full_name = ldap_users[username].pop('name')
try:
photo_url_tpl = LDAP_SETTINGS.get('image_url')
photo_url = photo_url_tpl % username if photo_url_tpl else None
user_id = engine.execute(user_add_sql, (username, full_name, photo_url)).lastrowid
except SQLAlchemyError:
stats['users_failed_to_add'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to add user %s' % username)
continue
stats['users_added'] += 1
for key, value in ldap_users[username].items():
if value and key in modes:
logger.debug('\t%s -> %s' % (key, value))
user_contact_add_sql = 'INSERT INTO `user_contact` (`user_id`, `mode_id`, `destination`) VALUES (%s, %s, %s)'
engine.execute(user_contact_add_sql, (user_id, modes[key], value))
# update users that need to be
contact_update_sql = 'UPDATE user_contact SET destination = %s WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
contact_insert_sql = 'INSERT INTO user_contact (user_id, mode_id, destination) VALUES ((SELECT id FROM user WHERE name = %s), %s, %s)'
contact_delete_sql = 'DELETE FROM user_contact WHERE user_id = (SELECT id FROM user WHERE name = %s) AND mode_id = %s'
name_update_sql = 'UPDATE user SET full_name = %s WHERE name = %s'
photo_update_sql = 'UPDATE user SET photo_url = %s WHERE name = %s'
logger.debug('Users to update:')
for username in users_to_update:
logger.debug(username)
try:
db_contacts = oncall_users[username]
ldap_contacts = ldap_users[username]
full_name = ldap_contacts.pop('name')
if full_name != db_contacts.get('full_name'):
engine.execute(name_update_sql, (full_name, username))
stats['user_names_updated'] += 1
if 'image_url' in LDAP_SETTINGS and not db_contacts.get('photo_url'):
photo_url_tpl = LDAP_SETTINGS.get('image_url')
photo_url = photo_url_tpl % username if photo_url_tpl else None
engine.execute(photo_update_sql, (photo_url, username))
stats['user_photos_updated'] += 1
for mode in modes:
if mode in ldap_contacts and ldap_contacts[mode]:
if mode in db_contacts:
if ldap_contacts[mode] != db_contacts[mode]:
logger.debug('\tupdating %s', mode)
engine.execute(contact_update_sql, (ldap_contacts[mode], username, modes[mode]))
stats['user_contacts_updated'] += 1
else:
logger.debug('\tadding %s', mode)
engine.execute(contact_insert_sql, (username, modes[mode], ldap_contacts[mode]))
stats['user_contacts_updated'] += 1
elif mode in db_contacts:
logger.debug('\tdeleting %s', mode)
engine.execute(contact_delete_sql, (username, modes[mode]))
stats['user_contacts_updated'] += 1
else:
logger.debug('\tmissing %s', mode)
except SQLAlchemyError:
stats['users_failed_to_update'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to update user %s' % username)
continue
logger.debug('Users to mark as inactive:')
for username in users_to_purge:
prune_user(engine, username)
logger.debug('Users to reactivate:')
for username in users_to_reactivate:
logger.debug(username)
try:
engine.execute('UPDATE user SET active = TRUE WHERE name = %s', username)
stats['users_reactivated'] += 1
except SQLAlchemyError:
stats['users_failed_to_reactivate'] += 1
stats['sql_errors'] += 1
logger.exception('Failed to reactivate user %s', username)
session.commit()
session.close()
def metrics_sender():
while True:
metrics.emit_metrics()
sleep(60)
def main(config):
global LDAP_SETTINGS
LDAP_SETTINGS = config['ldap_sync']
metrics.init(config, 'oncall-ldap-user-sync', stats)
spawn(metrics_sender)
# Default sleep one hour
sleep_time = config.get('user_sync_sleep_time', 3600)
engine = create_engine(config['db']['conn']['str'] % config['db']['conn']['kwargs'],
**config['db']['kwargs'])
while 1:
logger.info('Starting user sync loop at %s' % time.time())
sync(config, engine)
logger.info('Sleeping for %s seconds' % sleep_time)
sleep(sleep_time)
if __name__ == '__main__':
config_path = sys.argv[1]
with open(config_path, 'r', encoding='utf-8') as config_file:
config = yaml.safe_load(config_file)
main(config)
| diegocepedaw/oncall | src/oncall/user_sync/ldap_sync.py | Python | bsd-2-clause | 17,821 | 0.002188 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import cloudinary.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(null=True)),
('language_tags', models.CharField(default=b'Untagged', max_length=30, choices=[(b'PYTHON', b'Python'), (b'RUBY', b'Ruby'), (b'ANDROID', b'Android'), (b'MARKUP', b'HTML/CSS'), (b'JAVA', b'Java'), (b'PHP', b'PHP'), (b'IOS', b'IOS'), (b'JAVASCRIPT', b'Javascript'), (b'C', b'C')])),
('resource_file', cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name=b'resource_file', blank=True)),
('resource_file_name', models.CharField(max_length=100, null=True)),
('resource_file_size', models.IntegerField(default=0)),
('snippet_text', models.TextField(null=True, blank=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| andela/codango | codango/resources/migrations/0001_initial.py | Python | mit | 1,494 | 0.002677 |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
import warnings
from operator import itemgetter
from . import _config
from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata=None, type=None, converter=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
        anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attributed is unconditionally initialized
with the specified default value or factory.
:param callable converter: :func:`callable` that is called by
        ``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
)
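# Illustrative usage sketch (assumed, not part of the original module): attrib()
# is normally consumed through the ``attr.ib`` / ``attr.s`` aliases, e.g.
#
#     import attr
#
#     @attr.s
#     class Point(object):
#         x = attr.ib(default=0)
#         y = attr.ib(default=0, validator=attr.validators.instance_of(int))
#
#     Point(1, 2)              # -> Point(x=1, y=2)
#     Point() == Point(0, 0)   # -> True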
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited from super classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The implementation is gross but importing `typing` is slow and there are
    discussions to remove it from the stdlib altogether.
"""
return str(annot).startswith("typing.ClassVar")
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = sorted((
(name, ca)
for name, ca
in iteritems(these)
), key=lambda e: e[1].counter)
elif auto_attribs is True:
ca_names = {
name
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: " +
", ".join(sorted(
unannotated,
key=lambda n: cd.get(n).counter
)) + "."
)
else:
ca_list = sorted((
(name, attr)
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
), key=lambda e: e[1].counter)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name),
)
for attr_name, ca
in ca_list
]
super_attrs = []
taken_attr_names = {a.name: a for a in own_attrs}
# Traverse the MRO and collect attributes.
for super_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(super_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
for a in sub_attrs:
prev_a = taken_attr_names.get(a.name)
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if prev_a is None:
super_attrs.append(a)
taken_attr_names[a.name] = a
attr_names = [a.name for a in super_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
attrs = AttrsClass(
super_attrs + [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name)
)
for attr_name, ca
in ca_list
]
)
had_default = False
for a in attrs:
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
return _Attributes((attrs, super_attrs))
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots",
"_frozen", "_has_post_init",
)
def __init__(self, cls, these, slots, frozen, auto_attribs):
attrs, super_attrs = _transform_attrs(cls, these, auto_attribs)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._super_names = set(a.name for a in super_attrs)
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_superclass(cls)
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used anymore after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
super_names = self._super_names
# Clean class of attribute definitions (`attr.ib()`s).
for name in self._attr_names:
if name not in super_names and \
getattr(cls, name, None) is not None:
delattr(cls, name)
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
super_names = self._super_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__",)
}
# We only add the names of attributes that aren't inherited.
        # Setting __slots__ to inherited attributes wastes memory.
cd["__slots__"] = tuple(
name
for name in self._attr_names
if name not in super_names
)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
attr_names = tuple(self._attr_names)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in attr_names)
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(attr_names, state):
__bound_setattr(name, value)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(
self._cls.__name__,
self._cls.__bases__,
cd,
)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(self._attrs)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._attrs,
self._has_post_init,
self._frozen,
)
)
return self
def add_cmp(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \
cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_cmp(self._attrs)
)
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__,)
)
except AttributeError:
pass
return method
def attrs(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False, auto_attribs=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
    :param bool repr: Create a ``__repr__`` method with a human-readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
        a tuple of its ``attrs`` attributes. The attributes are compared *only*
        if the types of both objects are *identical*!
    :param hash: If ``None`` (default), the ``__hash__`` method is generated
        according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
           ``object``, this means it will fall back to id-based hashing).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
           on your class, so you can't implement one of your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
           <how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3/reference/datamodel.html#slots
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an :func:`attr.ib` but lacks a type
annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
        If you assign a value to those attributes (e.g. ``x: int = 42``), that
        value becomes the default value as if it were passed using
``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
works as expected.
Attributes annotated as :data:`typing.ClassVar` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
.. versionadded:: 17.3.0 *auto_attribs*
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if cmp is True:
builder.add_cmp()
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
builder.add_hash()
else:
builder.make_unhashable()
if init is True:
builder.add_init()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
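# Illustrative usage sketch of the decorator above (not part of the original
# module; it assumes the public ``attr`` package, where ``attr.s`` and
# ``attr.ib`` are the exported aliases of ``attrs`` and ``attrib``):
#
#     import attr
#
#     @attr.s
#     class Point(object):
#         x = attr.ib(default=0)
#         y = attr.ib(default=0)
#
#     p = Point(1, 2)                  # generated __init__
#     repr(p)                          # 'Point(x=1, y=2)' via generated __repr__
#     Point(1, 2) == Point(1, 2)       # True via generated __eq__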
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs):
attrs = tuple(
a
for a in attrs
if a.hash is True or (a.hash is None and a.cmp is True)
)
# We cache the generated hash methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
type_hash = hash(unique_filename)
lines = [
"def __hash__(self):",
" return hash((",
" %d," % (type_hash,),
]
for a in attrs:
lines.append(" self.%s," % (a.name))
lines.append(" ))")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
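# For reference, the source generated above for a class with hashable
# attributes ``x`` and ``y`` looks roughly like this (the integer literal is
# the per-signature ``type_hash`` computed from ``unique_filename``):
#
#     def __hash__(self):
#         return hash((
#             1234567890,
#             self.x,
#             self.y,
#         ))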
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(attrs)
return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def _make_cmp(attrs):
attrs = [a for a in attrs if a.cmp]
# We cache the generated eq methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
if attrs:
lines.append(" return (")
others = [
" ) == (",
]
for a in attrs:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
eq = locs["__eq__"]
ne = __ne__
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
_make_cmp(attrs)
return cls
def _make_repr(attrs, ns):
"""
    Make a repr method for *attrs*, adding *ns* to the full name.
"""
attr_names = tuple(
a.name
for a in attrs
if a.repr
)
def __repr__(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(
name + "=" + repr(getattr(self, name, NOTHING))
for name in attr_names
)
)
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls
def _make_init(attrs, post_init, frozen):
attrs = [
a
for a in attrs
if a.init or a.default is not NOTHING
]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_init_script(
attrs,
frozen,
post_init,
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__init__"]
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
cls.__init__ = _make_init(
cls.__attrs_attrs__,
getattr(cls, "__attrs_post_init__", False),
frozen,
)
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
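# Illustrative usage sketch (not part of the original module; assumes the
# public ``attr`` package, which re-exports this function as ``attr.fields``):
#
#     import attr
#
#     @attr.s
#     class C(object):
#         x = attr.ib()
#         y = attr.ib()
#
#     attr.fields(C)         # (Attribute(name='x', ...), Attribute(name='y', ...))
#     attr.fields(C).x.name  # 'x' -- the returned tuple also exposes name accessors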
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions propagate through unchanged.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
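# Illustrative usage sketch (not part of the original module; assumes the
# public ``attr`` package and its bundled ``instance_of`` validator):
#
#     import attr
#     from attr.validators import instance_of
#
#     @attr.s
#     class C(object):
#         x = attr.ib(validator=instance_of(int))
#
#     c = C(1)
#     c.x = "not an int"   # plain assignment does not re-run the validator...
#     attr.validate(c)     # ...but this does, and raises TypeError here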
def _attrs_to_init_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(" " + fmt_setter_with_converter(
attr_name, arg_name
))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
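# For reference, for a simple non-frozen class such as
#
#     @attr.s
#     class C(object):
#         x = attr.ib()
#         y = attr.ib(default=42)
#
# the script returned above looks roughly like
#
#     def __init__(self, x, y=attr_dict['y'].default):
#         self.x = x
#         self.y = y
#
# with ``attr_dict`` (and any converters, factories and validators) supplied
# through the accompanying globals dictionary.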
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
For the version history of the fields, see :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"metadata", "type", "converter",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None, type=None, converter=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`."
" It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr("metadata", (
metadata_proxy(metadata) if metadata
else _empty_metadata_singleton
))
bound_setattr("type", type)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@property
def convert(self):
warnings.warn(
"The `convert` attribute is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2,
)
return self.converter
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default", "type", "convert",
) # exclude methods and deprecated alias
}
return cls(
name=name, validator=ca._validator, default=ca._default, type=type,
**inst_dict
)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__
if name != "convert" # XXX: remove once `convert` is gone
]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
    likely the result of a bug, e.g. a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "converter", "type")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, converter,
metadata, type):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.converter = converter
self.metadata = metadata
self.type = type
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
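# Illustrative usage sketch (not part of the original module; assumes the
# public ``attr`` package, which exposes this class as ``attr.Factory``):
#
#     import attr
#
#     @attr.s
#     class C(object):
#         members = attr.ib(default=attr.Factory(list))
#
#     C().members is C().members   # False -- every instance gets a fresh list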
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init}
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__",
)
except (AttributeError, ValueError):
pass
return _attrs(these=cls_dict, **attributes_arguments)(type_)
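# Illustrative usage sketch (not part of the original module; assumes the
# public ``attr`` package, which exposes this function as ``attr.make_class``):
#
#     import attr
#
#     Point = attr.make_class("Point", ["x", "y"])
#     Point(1, 2)                                   # Point(x=1, y=2)
#
#     # attr.ib() definitions and decorator arguments can be passed as well:
#     Frozen = attr.make_class("Frozen", {"x": attr.ib(default=0)}, frozen=True)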
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
    Compose many validators into a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
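# Illustrative usage sketch (not part of the original module; assumes the
# public ``attr`` package, where this helper is exposed as
# ``attr.validators.and_``):
#
#     import attr
#     from attr.validators import and_, instance_of
#
#     def positive(inst, attribute, value):
#         if value <= 0:
#             raise ValueError("%s must be positive" % attribute.name)
#
#     @attr.s
#     class C(object):
#         x = attr.ib(validator=and_(instance_of(int), positive))
#
#     # Passing a list of validators to attr.ib() wraps them with and_() too.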
| nparley/mylatitude | lib/attr/_make.py | Python | mit | 49,291 | 0.00002 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
from Cerebrum.default_config import *
CEREBRUM_DATABASE_NAME = os.getenv('DB_NAME')
CEREBRUM_DATABASE_CONNECT_DATA['user'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['table_owner'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['host'] = os.getenv('DB_HOST')
CEREBRUM_DDL_DIR = '/src/design'
DB_AUTH_DIR = '/db-auth'
LOGGING_CONFIGFILE = os.path.join(os.getenv('TEST_CONFIG_DIR'),
'logging.ini') | unioslo/cerebrum | testsuite/docker/test-config/cereconf_local.py | Python | gpl-2.0 | 1,350 | 0.000741 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import string
import webbrowser
import shutil
import json
import base64
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import QFile
from PyQt5.QtCore import QUrl
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QInputDialog, QFileDialog
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
# from bs4 import BeautifulSoup
from libs.python.pyquery import PyQuery as pq
from lxml import etree
import urllib
import time
from PyQt5.QtWidgets import QProgressDialog
from PyQt5.QtWidgets import QApplication
#from PIL import Image
import requests
from io import BytesIO
class html_editor(QWebView):
def __init__(self, parent=None, html=None, css_file=None):
#def __init__(self, html=None, style_filename=None):
super(html_editor, self).__init__(parent)
# http://stackoverflow.com/questions/21357157/is-there-any-solution-for-the-qtwebkit-memory-leak
# https://github.com/lycying/seeking
#self.page().setContentEditable(True)
#self.execute_js('document.designMode = "on"')
self.file_dialog_dir = '.'
# TO CHECK
# http://nullege.com/codes/show/src%40c%40a%40calibre-HEAD%40src%40calibre%40gui2%40viewer%40documentview.py/89/PyQt4.QtWebKit.QWebPage.setLinkDelegationPolicy/python
settings = self.settings()
# settings.setMaximumPagesInCache(0)
# settings.setObjectCacheCapacities(0, 0, 0)
# settings.setOfflineStorageDefaultQuota(0)
# settings.setOfflineWebApplicationCacheQuota(0)
# Security
settings.setAttribute(QWebSettings.JavaEnabled, False)
#settings.setAttribute(QWebSettings.PluginsEnabled, False)
#settings.setAttribute(QWebSettings.JavascriptCanOpenWindows, False)
#settings.setAttribute(QWebSettings.JavascriptCanAccessClipboard, False)
# Miscellaneous
settings.setAttribute(QWebSettings.LinksIncludedInFocusChain, True)
settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
# settings.setAttribute(QWebSettings.AutoLoadImages, False)
# Disable Hyperlinks following, open url on system browser
self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.page().linkClicked.connect(lambda url: webbrowser.open(str(url.toString())))
if html:
self.setHtml(html)
else:
self.set_readonly(True)
# config
config_file_path = os.path.join(os.path.dirname(__file__), 'config.json')
self.config = None
if os.path.isfile(config_file_path):
with open(config_file_path) as outfile:
self.config = json.load(outfile)
outfile.close()
self.context_menu_actions = []
# TO CHECK
# https://github.com/gen2brain/pyhtmleditor/blob/master/src/pyhtmleditor/htmleditor.py
# https://github.com/kovidgoyal/calibre/blob/master/src/calibre/gui2/comments_editor.py
#if css_file:
# self.apply_stylefile(css_file)
############# TO IMPLEMENT ##########
#self.note_editor.execute_js(self.functions.get_javascript_plugins())
#self.load_functions = []
#self.settings().setAttribute(QWebSettings.AutoLoadImages, False)
#QWebSettings.globalSettings()->setAttribute(QWebSettings::DeveloperExtrasEnabled, true);
#QWebSettings.globalSettings().setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
def get_config(self):
return self.config
def set_context_menu_append_actions(self, context_menu_actions):
self.context_menu_actions = context_menu_actions
def contextMenuEvent(self, event):
menu = self.page().createStandardContextMenu()
if 'default_context_menu_replace' in self.config:
if self.config['default_context_menu_replace'] == 'True':
menu = QtWidgets.QMenu(self)
if 'context_menu_actions' in self.config:
for action in self.context_menu_actions:
menu.addAction(action)
menu.exec_(QtGui.QCursor.pos())
def set_readonly(self, param=True):
if param == True:
self.execute_js('document.body.contentEditable = "false"')
elif param == False:
self.execute_js('document.body.contentEditable = "true"')
def set_writeable(self):
self.set_readonly(False)
def set_html(self, html=None):
if html:
self.setHtml(html)
def get_html(self,relative_path=None):
html = self.page().mainFrame().toHtml()
pd_content = pq(html)
if pd_content('img').length > 0:
num_img = 0
max_num_img = 0
            # Determines the number of images to download and process
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
max_num_img += 1
# There are image to download and process
if max_num_img > 0:
progress_dialog = QProgressDialog(self)
progress_dialog.setWindowTitle('Please Wait')
progress_dialog.setLabelText('Downloading and processing images. Please wait.')
progress_dialog.setRange(num_img, max_num_img)
progress_dialog.setValue(num_img)
progress_dialog.setCancelButton(None)
progress_dialog.show()
QApplication.processEvents()
for img in pd_content('img'):
if "base64" not in img.attrib['src']:
if 'http' in img.attrib['src'].lower() or 'ftp' in img.attrib['src'].lower():
# Downloads images
response = requests.get(img.attrib['src'])
# Generates base64 of the image
base64_img = base64.b64encode(response.content).decode('ascii')
# Build uri
uri = "data:" + response.headers['Content-Type'] + ";" + "base64," + base64_img
                        # Reassigns the src attribute with the data URI
img.attrib['src'] = uri
# Updates progress bar
num_img = num_img + 1
progress_dialog.setValue(num_img)
QApplication.processEvents()
html = pd_content.html()
return html
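    # Minimal standalone sketch of the embedding step performed in get_html()
    # above (illustrative only; the URL is a placeholder):
    #
    #     import base64, requests
    #
    #     response = requests.get("http://example.com/logo.png")
    #     uri = ("data:" + response.headers['Content-Type'] + ";base64," +
    #            base64.b64encode(response.content).decode('ascii'))
    #     # 'uri' can now replace the original src attribute, which makes the
    #     # saved HTML self-contained.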
def get_content(self):
return self.get_html()
def set_content(self, content):
if content:
self.set_html(content)
def open_file(self, file_path):
with open(file_path, encoding='UTF-8', errors="ignore") as fd:
base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path), ''))
self.setHtml(fd.read(), base_url)
fd.close()
        # Generates utf-8 bugs
# fd = QFile(file_path)
# if fd.open(QFile.ReadOnly):
# # Required for webkit to access local images
# base_url = QUrl.fromLocalFile(os.path.join(os.path.dirname(file_path),''))
# self.setContent(fd.readAll(), "text/html", base_url)
# fd.close()
def toggle_bold(self, parm=None):
self.page().triggerAction(QWebPage.ToggleBold)
def toggle_italic(self, parm=None):
self.page().triggerAction(QWebPage.ToggleItalic)
def heading(self, param=None):
if param and param in ['heading_1', 'heading_2', 'heading_3', 'heading_4', 'heading_5', 'heading_6']:
cmd_str = str("document.execCommand('formatblock', false, '%s');" % str('h'+param[8]))
self.execute_js(cmd_str)
def orderedlist(self, param=None):
self.page().triggerAction(QWebPage.InsertOrderedList)
def unorderedlist(self, param=None):
self.page().triggerAction(QWebPage.InsertUnorderedList)
def insert_horizontal_rule(self, param=None):
self.execute_js("document.execCommand('inserthorizontalrule', false, false);")
def block_quote(self, param=None):
self.execute_js("document.execCommand('formatblock', false, 'blockquote');")
def insert_html(self, param=None):
if param:
cmd_str = 'var range = document.getSelection().getRangeAt(0); \
document.execCommand("inserthtml",false,"' + param + '");'
self.execute_js(cmd_str)
def preformated_text(self, param=None):
self.execute_js("document.execCommand('formatblock', false, 'pre');")
# if self.page().hasSelection():
# #pass
#
# cmd_str = 'var range = document.getSelection().getRangeAt(0); \
# document.execCommand("inserthtml",false,"<pre><code>" + range + "</code></pre>");'
# self.execute_js(cmd_str)
def block_code(self, param=None):
if self.page().hasSelection():
cmd_str = 'var range = document.getSelection().getRangeAt(0); \
document.execCommand("inserthtml",false,"<code>" + range + "</code>");'
self.execute_js(cmd_str)
def insert_checkbox(self, param=None):
if self.page().hasSelection():
cmd_str = 'var range = document.getSelection().getRangeAt(0); \
                document.execCommand("inserthtml",false,"<input type=\'checkbox\' name=\'test\' checked>" + range);'
self.execute_js(cmd_str)
def indent(self, param=None):
self.execute_js("document.execCommand('indent', false, true);")
def outdent(self, param=None):
self.execute_js("document.execCommand('outdent', false, true);")
def undo(self, param=None):
self.page().triggerAction(QWebPage.Undo)
def redo(self, param=None):
self.page().triggerAction(QWebPage.Redo)
def cut(self, param=None):
self.page().triggerAction(QWebPage.Cut)
def copy(self, param=None):
self.page().triggerAction(QWebPage.Copy)
def paste(self, param=None):
self.page().triggerAction(QWebPage.Paste)
def remove_format(self, param=None):
self.page().triggerAction(QWebPage.RemoveFormat)
self.execute_js("document.execCommand('formatBlock', false, 'p');")
def insert_link(self, param=None):
link, ok = QInputDialog.getText(None, 'Insert Link','Enter a url for the link (ex: http://www.google.com).') #QLineEdit.Normal
if ok:
self.execute_js("document.execCommand('createLink', false, '%s');" % link)
def insert_embedded_image(self, param=None):
if param:
filename, fileextension = os.path.splitext(param)
fileextension = fileextension[1:]
image_encoded_data = base64.b64encode(open(param, "rb").read())
self.insert_html("<img src='data:image/" + fileextension + ";base64," + image_encoded_data.decode('ascii') + "' />")
def insert_image(self, image_path=None, new_image_path=None):
#image_path, extra = QFileDialog.getOpenFileName(None, 'Select Image', self.file_dialog_dir, "All files (*.*);;JPEG (*.jpg *.jpeg);;TIFF (*.tif)")
image_path_base, file_extension = os.path.splitext(image_path)
file_name = os.path.basename(image_path)
copied = False
if image_path and new_image_path:
if not os.path.isfile(os.path.join(new_image_path, file_name)):
try:
shutil.copy2(image_path, new_image_path)
copied = True
except (OSError, IOError):
print("Unable to copy the file to :" + str(new_image_path))
else:
try:
new_location = image_path_base + '_' + time.strftime("%Y%m%d") + "_" + time.strftime("%H%M%S") + file_extension
shutil.copy2(image_path, new_location)
copied = True
except (OSError, IOError):
print("Unable to copy the file to :" + str(new_location))
if copied:
# file_path = QUrl.fromLocalFile(new_location).toString()
# self.execute_js("document.execCommand('insertImage', false, '%s');" % file_path)
self.insert_html("<img src ='" + file_name + "' />")
def execute_js(self, param=None):
if param:
#print ("**************************************************")
#print (param)
self.page().mainFrame().evaluateJavaScript(param)
def execute_jsfile(self, param=None):
if param:
js_content = None
file_path = os.path.join(os.path.dirname(__file__), param)
if os.path.isfile(file_path):
with open(file_path, encoding='UTF-8') as fd:
js_content = fd.read()
fd.close()
if js_content:
self.execute_js(js_content)
def apply_style(self, style=None, class_name=None):
if style:
style = style.replace("\"", "'").replace("\n", " ")
js_code = ""
if class_name:
js_code += "var elements = document.getElementsByClassName('" + class_name + "'); "
js_code += "while (elements.length > 0){ elements[0].parentNode.removeChild(elements[0]); } "
js_code += "var css = document.createElement('style'); "
js_code += "css.type = 'text/css'; "
if class_name:
js_code += "css.className = '" + class_name + "'; "
js_code += "var styles = '" + style + "'; "
js_code += "if(css.styleSheet){ css.styleSheet.cssText = styles; }else{ css.appendChild(document.createTextNode(styles)); } "
js_code += "document.getElementsByTagName('head')[0].appendChild(css); \n"
self.execute_js(js_code)
# ORIGINAL CODE
# original_html = self.page().mainFrame().toHtml()
#
# try:
# soup = BeautifulSoup(original_html, "lxml")# "html.parser")
# head = soup.head
#
# if class_name:
# note_styles = soup.find_all("style", {'class': class_name})
# if note_styles:
# for note_style in note_styles:
# note_style.decompose()
#
# if style:
# new_style = soup.new_tag('style')
# new_style['type'] = 'text/css'
#
# if class_name:
# new_style['class'] = class_name
#
# new_style.append(style)
# head.append(new_style)
#
# #new_html = soup.prettify()#()(formatter="minimal")
# new_html=str(soup)
# self.set_content(new_html)
# except Exception as e:
# self.set_content(original_html)
def apply_stylefile(self, file_path=None, class_name=None):
if file_path and os.path.isfile(file_path):
css_file_content_content = ""
with open(file_path, encoding='UTF-8', errors="ignore") as fd:
file_content = fd.read() # self.convert_markup(fd.read(), file_name, 'import', 'open')
if file_content is not None:
css_file_content_content = file_content
fd.close()
self.apply_style(css_file_content_content, class_name) | marcoconstancio/yanta | plugins/viewers/html_editor/html_editor.py | Python | gpl-2.0 | 16,041 | 0.004239 |
# Copyright (C) 2004 Anthony Baxter
# This file is necessary to make this directory a package
__version__ = '0.3alpha0'
| braams/shtoom | shtoom/__init__.py | Python | lgpl-2.1 | 121 | 0 |
class NameTable:
def __init__(self, start, size):
self.start = start
self.size = size
| Hexadorsimal/pynes | nes/processors/ppu/name_table.py | Python | mit | 106 | 0 |
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
# import local # Build own local.py (used with postgres)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = [
'.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 5,
}
},
]
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'admin.common_auth',
'admin.base',
'admin.pre_reg',
'admin.spam',
'admin.metrics',
'admin.nodes',
'admin.users',
# 3rd party
'raven.contrib.django.raven_compat',
'webpack_loader',
'django_nose',
'ckeditor',
'password_reset',
)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'common_auth.MyUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'admin'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE_CLASSES = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.MongoConnectionMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.TokuTransactionMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
MESSAGE_TAGS = {
messages.SUCCESS: 'text-success',
messages.ERROR: 'text-danger',
messages.WARNING: 'text-warning',
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}
}]
# Database
# Postgres:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': local.POSTGRES_NAME,
# 'USER': local.POSTGRES_USER,
# 'PASSWORD': local.POSTGRES_PASSWORD,
# 'HOST': local.POSTGRES_HOST,
# 'PORT': '',
# }
# }
# Postgres settings in local.py
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'public/js/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Source'],
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList'],
['Link']
]
},
}
| TomHeatwole/osf.io | admin/base/settings/defaults.py | Python | apache-2.0 | 5,822 | 0.001546 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Mediaserver Launcher
# ------------------------------------------------------------
import os
import re
import sys
from core.item import Item
from core import logger
from core import config
from platformcode import platformtools
from core import channeltools
import channelselector
from core import servertools
from core import library
def start():
    """ First function executed when entering the plugin.
    Every call to the functions that should run as soon as the plugin
    is opened belongs inside this function.
    """
logger.info("pelisalacarta.platformcode.launcher start")
# Test if all the required directories are created
config.verify_directories_created()
import library_service
library_service.start()
def run(item):
itemlist = []
    # Logs the item:
PrintItems(item)
    # Parental control: checks whether the channel is adult-only
if item.action=="mainlist":
# Parental control
if channeltools.is_adult(item.channel) and config.get_setting("adult_pin")!="":
tecleado = platformtools.dialog_input("","PIN para canales de adultos",True)
if not tecleado==config.get_setting("adult_pin"):
return
    # Imports the channel for the item; every item must have a channel, otherwise the import is skipped
if item.channel: channelmodule = ImportarCanal(item)
# If item has no action, stops here
if item.action == "":
logger.info("pelisalacarta.platformcode.launcher Item sin accion")
itemlist = None
    # Action play: shows the menu with the playback options.
elif item.action=="play":
logger.info("pelisalacarta.platformcode.launcher play")
        # If the channel provides its own "play" action, it takes priority
if hasattr(channelmodule, 'play'):
logger.info("pelisalacarta.platformcode.launcher executing channel 'play' method")
itemlist = channelmodule.play(item)
b_favourite = item.isFavourite
if len(itemlist)>0 and isinstance(itemlist[0], Item):
item = itemlist[0]
if b_favourite:
item.isFavourite = True
play_menu(item)
elif len(itemlist)>0 and isinstance(itemlist[0], list):
item.video_urls = itemlist
play_menu(item)
else:
platformtools.dialog_ok("plugin", "No hay nada para reproducir")
else:
logger.info("pelisalacarta.platformcode.launcher no channel 'play' method, executing core method")
play_menu(item)
itemlist = None
    # Action search: shows the keyboard and launches the search with the entered text.
elif item.action=="search":
logger.info("pelisalacarta.platformcode.launcher search")
tecleado = platformtools.dialog_input()
if not tecleado is None:
itemlist = channelmodule.search(item,tecleado)
else:
itemlist = []
elif item.channel == "channelselector":
import channelselector
if item.action =="mainlist":
itemlist = channelselector.getmainlist("bannermenu")
if config.get_setting("check_for_plugin_updates") == "true":
logger.info("channelselector.mainlist Verificar actualizaciones activado")
from core import updater
try:
version = updater.checkforupdates()
if version:
platformtools.dialog_ok("Versión "+version+" disponible","Ya puedes descargar la nueva versión del plugin\ndesde el listado principal")
                        itemlist.insert(0,Item(title="Actualizar pelisalacarta a la versión "+version, version=version, channel="updater", action="update", thumbnail=os.path.join(config.get_runtime_path(),"resources","images","bannermenu","thumb_update.png")))
except:
platformtools.dialog_ok("No se puede conectar","No ha sido posible comprobar","si hay actualizaciones")
logger.info("channelselector.mainlist Fallo al verificar la actualización")
else:
logger.info("channelselector.mainlist Verificar actualizaciones desactivado")
if item.action =="getchanneltypes":
itemlist = channelselector.getchanneltypes("bannermenu")
if item.action =="filterchannels":
itemlist = channelselector.filterchannels(item.channel_type, "bannermenu")
    # Every other action is tried in the following order:
    # 1. In the channel
    # 2. In the launcher
    # 3. If it exists in neither the channel nor the launcher, an error is logged
else:
        # If the function exists in the channel, run it
        if hasattr(channelmodule, item.action):
            logger.info("Ejecutando accion: " + item.channel + "." + item.action + "(item)")
            exec "itemlist = channelmodule." + item.action + "(item)"
        # If the function exists in the launcher, run it
        elif hasattr(sys.modules[__name__], item.action):
            logger.info("Ejecutando accion: " + item.action + "(item)")
            exec "itemlist =" + item.action + "(item)"
        # Otherwise log an error
else:
logger.info("No se ha encontrado la accion ["+ item.action + "] en el canal ["+item.channel+"] ni en el launcher")
    # At this point itemlist must already hold the corresponding results.
    # Three different scenarios are possible:
    # 1. the function produced results and they are in itemlist
    # 2. the function produced no results, so itemlist contains 0 items, itemlist = []
    # 3. the function performs an action that generates no new items; in that case the result must be itemlist = None so the listing is not modified
    # The function has now run in the appropriate place; any post-processing of the results happens below.
    # Server filtering
if item.action== "findvideos" and config.get_setting('filter_servers') == 'true':
server_white_list, server_black_list = set_server_list()
itemlist = filtered_servers(itemlist, server_white_list, server_black_list)
    # If the action returned no results, add an item with the text "No hay elementos para mostrar"
if type(itemlist)==list:
if len(itemlist) ==0:
itemlist = [Item(title="No hay elementos para mostrar", thumbnail="http://media.tvalacarta.info/pelisalacarta/thumb_error.png")]
    # Logs the result
PrintItems(itemlist)
    # Renders the results on screen
platformtools.render_items(itemlist, item)
def ImportarCanal(item):
channel = item.channel
channelmodule=""
if os.path.exists(os.path.join( config.get_runtime_path(), "channels",channel+".py")):
exec "from channels import "+channel+" as channelmodule"
elif os.path.exists(os.path.join( config.get_runtime_path(),"core",channel+".py")):
exec "from core import "+channel+" as channelmodule"
elif os.path.exists(os.path.join( config.get_runtime_path(),channel+".py")):
exec "import "+channel+" as channelmodule"
return channelmodule
def PrintItems(itemlist):
if type(itemlist)==list:
if len(itemlist) >0:
logger.info("Items devueltos")
logger.info("-----------------------------------------------------------------------")
for item in itemlist:
logger.info(item.tostring())
logger.info("-----------------------------------------------------------------------")
else:
item = itemlist
logger.info("-----------------------------------------------------------------------")
logger.info(item.tostring())
logger.info("-----------------------------------------------------------------------")
def findvideos(item):
logger.info("pelisalacarta.platformcode.launcher findvideos")
itemlist = servertools.find_video_items(item)
return itemlist
def add_pelicula_to_library(item):
library.add_pelicula_to_library(item)
def add_serie_to_library(item):
channel = ImportarCanal(item)
library.add_serie_to_library(item, channel)
def download_all_episodes(item,first_episode="",preferred_server="vidspot",filter_language=""):
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, show="+item.show)
channel = ImportarCanal(item)
show_title = item.show
    # Gets the listing action this call came from
action = item.extra
    # This marker means the item carries extra data in its "extra" attribute
if "###" in item.extra:
action = item.extra.split("###")[0]
item.extra = item.extra.split("###")[1]
exec "episode_itemlist = channel."+action+"(item)"
    # Sorts the episodes so the first_episode filter works
episode_itemlist = sorted(episode_itemlist, key=lambda Item: Item.title)
from core import downloadtools
from core import scrapertools
best_server = preferred_server
worst_server = "moevideos"
    # For each episode
if first_episode=="":
empezar = True
else:
empezar = False
for episode_item in episode_itemlist:
try:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, episode="+episode_item.title)
episode_title = scrapertools.get_match(episode_item.title,"(\d+x\d+)")
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, episode="+episode_title)
except:
import traceback
logger.info(traceback.format_exc())
continue
if first_episode!="" and episode_title==first_episode:
empezar = True
if episodio_ya_descargado(show_title,episode_title):
continue
if not empezar:
continue
        # Extracts the mirrors
try:
mirrors_itemlist = channel.findvideos(episode_item)
except:
mirrors_itemlist = servertools.find_video_items(episode_item)
print mirrors_itemlist
descargado = False
new_mirror_itemlist_1 = []
new_mirror_itemlist_2 = []
new_mirror_itemlist_3 = []
new_mirror_itemlist_4 = []
new_mirror_itemlist_5 = []
new_mirror_itemlist_6 = []
for mirror_item in mirrors_itemlist:
            # Spanish-language mirrors go to the front, everything else to the end
if "(Español)" in mirror_item.title:
if best_server in mirror_item.title.lower():
new_mirror_itemlist_1.append(mirror_item)
else:
new_mirror_itemlist_2.append(mirror_item)
elif "(Latino)" in mirror_item.title:
if best_server in mirror_item.title.lower():
new_mirror_itemlist_3.append(mirror_item)
else:
new_mirror_itemlist_4.append(mirror_item)
elif "(VOS)" in mirror_item.title:
if best_server in mirror_item.title.lower():
new_mirror_itemlist_3.append(mirror_item)
else:
new_mirror_itemlist_4.append(mirror_item)
else:
if best_server in mirror_item.title.lower():
new_mirror_itemlist_5.append(mirror_item)
else:
new_mirror_itemlist_6.append(mirror_item)
mirrors_itemlist = new_mirror_itemlist_1 + new_mirror_itemlist_2 + new_mirror_itemlist_3 + new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6
for mirror_item in mirrors_itemlist:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, mirror="+mirror_item.title)
if "(Español)" in mirror_item.title:
idioma="(Español)"
codigo_idioma="es"
elif "(Latino)" in mirror_item.title:
idioma="(Latino)"
codigo_idioma="lat"
elif "(VOS)" in mirror_item.title:
idioma="(VOS)"
codigo_idioma="vos"
elif "(VO)" in mirror_item.title:
idioma="(VO)"
codigo_idioma="vo"
else:
idioma="(Desconocido)"
codigo_idioma="desconocido"
logger.info("pelisalacarta.platformcode.launcher filter_language=#"+filter_language+"#, codigo_idioma=#"+codigo_idioma+"#")
if filter_language=="" or (filter_language!="" and filter_language==codigo_idioma):
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror")
else:
logger.info("pelisalacarta.platformcode.launcher language "+codigo_idioma+" filtered, skipping")
continue
if hasattr(channel, 'play'):
video_items = channel.play(mirror_item)
else:
video_items = [mirror_item]
if len(video_items)>0:
video_item = video_items[0]
                # Checks that the video is available
video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( video_item.server , video_item.url , video_password="" , muestra_dialogo=False)
                # Adds it to the download list
if puedes:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror started...")
                    # The highest-quality video is the last one
mediaurl = video_urls[len(video_urls)-1][1]
devuelve = downloadtools.downloadbest(video_urls,show_title+" "+episode_title+" "+idioma+" ["+video_item.server+"]",continuar=False)
if devuelve==0:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, download ok")
descargado = True
break
elif devuelve==-1:
try:
platformtools.dialog_ok("plugin" , "Descarga abortada")
except:
pass
return
else:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, download error, try another mirror")
continue
else:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror not available... trying next")
if not descargado:
logger.info("pelisalacarta.platformcode.launcher download_all_episodes, EPISODIO NO DESCARGADO "+episode_title)
def set_server_list():
logger.info("pelisalacarta.platformcode.launcher.set_server_list start")
server_white_list = []
server_black_list = []
if len(config.get_setting('whitelist')) > 0:
server_white_list_key = config.get_setting('whitelist').replace(', ', ',').replace(' ,', ',')
server_white_list = re.split(',', server_white_list_key)
if len(config.get_setting('blacklist')) > 0:
server_black_list_key = config.get_setting('blacklist').replace(', ', ',').replace(' ,', ',')
server_black_list = re.split(',', server_black_list_key)
logger.info("set_server_list whiteList %s" % server_white_list)
logger.info("set_server_list blackList %s" % server_black_list)
logger.info("pelisalacarta.platformcode.launcher.set_server_list end")
return server_white_list, server_black_list
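# Note added for illustration (not in the original code): with a "whitelist" setting of
# "openload, streamcloud" the replace() calls above normalise it to "openload,streamcloud"
# and the split yields ['openload', 'streamcloud']. The server names are made up examples.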
def filtered_servers(itemlist, server_white_list, server_black_list):
logger.info("pelisalacarta.platformcode.launcher.filtered_servers start")
new_list = []
white_counter = 0
black_counter = 0
logger.info("filtered_servers whiteList %s" % server_white_list)
logger.info("filtered_servers blackList %s" % server_black_list)
if len(server_white_list) > 0:
logger.info("filtered_servers whiteList")
for item in itemlist:
logger.info("item.title " + item.title)
if any(server in item.title for server in server_white_list):
# if item.title in server_white_list:
logger.info("found")
new_list.append(item)
white_counter += 1
else:
logger.info("not found")
if len(server_black_list) > 0:
logger.info("filtered_servers blackList")
for item in itemlist:
logger.info("item.title " + item.title)
if any(server in item.title for server in server_black_list):
# if item.title in server_white_list:
logger.info("found")
black_counter += 1
else:
new_list.append(item)
logger.info("not found")
logger.info("whiteList server %s has #%d rows" % (server_white_list, white_counter))
logger.info("blackList server %s has #%d rows" % (server_black_list, black_counter))
if len(new_list) == 0:
new_list = itemlist
logger.info("pelisalacarta.platformcode.launcher.filtered_servers end")
return new_list
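# Minimal usage sketch (added, not part of the original launcher) of filtered_servers().
# The _FakeItem class and the server names below are invented purely for illustration.
def _filtered_servers_example():
    class _FakeItem:
        def __init__(self, title):
            self.title = title
    items = [_FakeItem("Ver en servidor openload"), _FakeItem("Ver en servidor streamcloud")]
    # Whitelisting "openload" keeps only the first item...
    only_whitelisted = filtered_servers(items, ["openload"], [])
    # ...while blacklisting "streamcloud" drops the second one.
    without_blacklisted = filtered_servers(items, [], ["streamcloud"])
    return only_whitelisted, without_blacklisted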
def add_to_favorites(item):
    # Coming from the context menu:
if "item_action" in item:
item.action = item.item_action
del item.item_action
item.context=[]
from channels import favoritos
from core import downloadtools
if not item.fulltitle: item.fulltitle = item.title
title = platformtools.dialog_input(default=downloadtools.limpia_nombre_excepto_1(item.fulltitle)+" ["+item.channel+"]")
if title is not None:
item.title = title
favoritos.addFavourite(item)
platformtools.dialog_ok("Pelisalacarta", config.get_localized_string(30102) +"\n"+ item.title +"\n"+ config.get_localized_string(30108))
return
def remove_from_favorites(item):
from channels import favoritos
    # "extra" holds the name of the file in favourites
favoritos.delFavourite(item.extra)
platformtools.dialog_ok("Pelisalacarta", config.get_localized_string(30102) +"\n"+ item.title +"\n"+ config.get_localized_string(30105))
platformtools.itemlist_refresh()
return
def download(item):
from channels import descargas
if item.contentType == "list" or item.contentType == "tvshow":
item.contentType = "video"
item.play_menu = True
descargas.save_download(item)
return
def add_to_library(item):
if "item_action" in item:
item.action = item.item_action
del item.item_action
if not item.fulltitle=="":
item.title = item.fulltitle
library.savelibrary(item)
platformtools.dialog_ok("Pelisalacarta", config.get_localized_string(30101) +"\n"+ item.title +"\n"+ config.get_localized_string(30135))
return
def delete_file(item):
os.remove(item.url)
platformtools.itemlist_refresh()
return
def send_to_jdownloader(item):
    # d = {"web": url}; urllib.urlencode(d)
from core import scrapertools
if item.subtitle!="":
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail + " " + item.subtitle)
else:
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail)
return
def send_to_pyload(item):
logger.info("Enviando a pyload...")
if item.Serie!="":
package_name = item.Serie
else:
package_name = "pelisalacarta"
from core import pyload_client
pyload_client.download(url=item.url,package_name=package_name)
return
def search_trailer(item):
config.set_setting("subtitulo", "false")
item.channel = "trailertools"
item.action ="buscartrailer"
item.contextual=True
run(item)
return
# Builds the list of options for the playback menu
def check_video_options(item, video_urls):
itemlist = []
    # Playback options
playable = (len(video_urls) > 0)
for video_url in video_urls:
itemlist.append(item.clone(option=config.get_localized_string(30151) + " " + video_url[0], video_url= video_url[1], action="play_video"))
if item.server=="local":
itemlist.append(item.clone(option=config.get_localized_string(30164), action="delete_file"))
if not item.server=="local" and playable:
itemlist.append(item.clone(option=config.get_localized_string(30153), action="download", video_urls = video_urls))
if item.channel=="favoritos":
itemlist.append(item.clone(option=config.get_localized_string(30154), action="remove_from_favorites"))
if not item.channel=="favoritos" and playable:
itemlist.append(item.clone(option=config.get_localized_string(30155), action="add_to_favorites", item_action = item.action))
if not item.strmfile and playable and item.contentType == "movie":
itemlist.append(item.clone(option=config.get_localized_string(30161), action="add_to_library", item_action = item.action))
if config.get_setting("jdownloader_enabled")=="true" and playable:
itemlist.append(item.clone(option=config.get_localized_string(30158), action="send_to_jdownloader"))
if config.get_setting("pyload_enabled")=="true" and playable:
itemlist.append(item.clone(option=config.get_localized_string(30158).replace("jdownloader","pyLoad"), action="send_to_pyload"))
if not item.channel in ["Trailer","ecarteleratrailers"] and playable:
itemlist.append(item.clone(option=config.get_localized_string(30162), action="search_trailer"))
return itemlist
# play_menu: opens the menu with the playback options
def play_menu(item):
if item.server=="": item.server="directo"
if item.video_urls:
video_urls,puedes,motivo = item.video_urls, True, ""
else:
video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(item.server,item.url,item.password,True)
if not "strmfile" in item: item.strmfile=False
    # TODO: unify show and Serie since they are used interchangeably.
if not "Serie" in item: item.Serie = item.show
if item.server=="": item.server="directo"
opciones = check_video_options(item, video_urls)
if not puedes:
if item.server!="directo":
motivo = motivo.replace("<br/>", "\n")
platformtools.dialog_ok("No puedes ver ese vídeo porque...",motivo+"\n"+item.url)
else:
platformtools.dialog_ok("No puedes ver ese vídeo porque...","El servidor donde está alojado no está\nsoportado en pelisalacarta todavía\n"+item.url)
if len(opciones)==0:
return
default_action = config.get_setting("default_action")
logger.info("default_action="+default_action)
    # If the default action is "Ask", ask the user
if default_action=="0":
seleccion = platformtools.dialog_select(config.get_localized_string(30163), [opcion.option for opcion in opciones])
elif default_action=="1":
seleccion = 0
elif default_action=="2":
seleccion = len(video_urls)-1
    elif default_action=="3":
        # The original line here was "seleccion = seleccion", which raises a
        # NameError; fall back to the first option instead.
        seleccion = 0
else:
seleccion=0
if seleccion > -1:
logger.info("seleccion=%d" % seleccion)
logger.info("seleccion=%s" % opciones[seleccion].option)
selecteditem = opciones[seleccion]
del selecteditem.option
run(opciones[seleccion])
return
# play_video: calls the platform-specific function to play the video
def play_video(item):
platformtools.play_video(item)
| Hernanarce/pelisalacarta | python/version-mediaserver/platformcode/launcher.py | Python | gpl-3.0 | 24,841 | 0.017736 |
"""empty message
Revision ID: 424f18f4c1df
Revises: 106e3631fe9
Create Date: 2015-06-23 11:31:08.548661
"""
# revision identifiers, used by Alembic.
revision = '424f18f4c1df'
down_revision = '106e3631fe9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ENUM
providers_list = ENUM('facebook', 'twitter', 'truenth', name='providers',
create_type=False)
def upgrade():
### commands auto generated by Alembic - please adjust! ###
providers_list.create(op.get_bind(), checkfirst=False)
op.create_table('auth_providers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', providers_list, nullable=True),
sa.Column('provider_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('auth_providers')
providers_list.drop(op.get_bind(), checkfirst=False)
### end Alembic commands ###
| uwcirg/true_nth_usa_portal | portal/migrations/versions/424f18f4c1df_.py | Python | bsd-3-clause | 1,316 | 0.006079 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 Luke Tucker
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Luke Tucker <voxluci@gmail.com>
#
from giblets.core import * | ltucker/giblets | giblets/__init__.py | Python | bsd-3-clause | 287 | 0.003484 |
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file 'InputChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:16 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(427, 220)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = GroupBox(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setSpacing(0)
self.gridLayout.setContentsMargins(5, 0, 0, 0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.recordCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordCheck.setFont(font)
self.recordCheck.setChecked(True)
self.recordCheck.setObjectName(_fromUtf8("recordCheck"))
self.gridLayout.addWidget(self.recordCheck, 0, 0, 1, 1)
self.displayCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.displayCheck.setFont(font)
self.displayCheck.setChecked(True)
self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
self.gridLayout.addWidget(self.displayCheck, 0, 1, 1, 1)
self.recordInitCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordInitCheck.setFont(font)
self.recordInitCheck.setObjectName(_fromUtf8("recordInitCheck"))
self.gridLayout.addWidget(self.recordInitCheck, 1, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "GroupBox", None))
self.recordCheck.setText(_translate("Form", "Record Trace", None))
self.displayCheck.setText(_translate("Form", "Display", None))
self.recordInitCheck.setText(_translate("Form", "Record Initial State", None))
from acq4.pyqtgraph import GroupBox
| meganbkratz/acq4 | acq4/devices/DAQGeneric/InputChannelTemplate.py | Python | mit | 3,321 | 0.001807 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2016 Vladimir Ermakov <vooon341@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from functools import reduce
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'rgb_led_ws281x'
name = 'RGB LED (WS281x)'
longname = 'RGB LED string decoder (WS281x)'
desc = 'RGB LED string protocol (WS281x).'
license = 'gplv3+'
inputs = ['logic']
outputs = []
tags = ['Display', 'IC']
channels = (
{'id': 'din', 'name': 'DIN', 'desc': 'DIN data line'},
)
annotations = (
('bit', 'Bit'),
('reset', 'RESET'),
('rgb', 'RGB'),
)
annotation_rows = (
('bit', 'Bits', (0, 1)),
('rgb', 'RGB', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.oldpin = None
self.ss_packet = None
self.ss = None
self.es = None
self.bits = []
self.inreset = False
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def handle_bits(self, samplenum):
if len(self.bits) == 24:
grb = reduce(lambda a, b: (a << 1) | b, self.bits)
rgb = (grb & 0xff0000) >> 8 | (grb & 0x00ff00) << 8 | (grb & 0x0000ff)
self.put(self.ss_packet, samplenum, self.out_ann,
[2, ['#%06x' % rgb]])
self.bits = []
self.ss_packet = None
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
# TODO: Come up with more appropriate self.wait() conditions.
(pin,) = self.wait()
if self.oldpin is None:
self.oldpin = pin
continue
# Check RESET condition (manufacturer recommends 50 usec minimal,
# but real minimum is ~10 usec).
if not self.inreset and not pin and self.es is not None and \
(self.samplenum - self.es) / self.samplerate > 50e-6:
# Decode last bit value.
tH = (self.es - self.ss) / self.samplerate
bit_ = True if tH >= 625e-9 else False
self.bits.append(bit_)
self.handle_bits(self.es)
self.put(self.ss, self.es, self.out_ann, [0, ['%d' % bit_]])
self.put(self.es, self.samplenum, self.out_ann,
[1, ['RESET', 'RST', 'R']])
self.inreset = True
self.bits = []
self.ss_packet = None
self.ss = None
if not self.oldpin and pin:
# Rising edge.
if self.ss and self.es:
period = self.samplenum - self.ss
duty = self.es - self.ss
# Ideal duty for T0H: 33%, T1H: 66%.
bit_ = (duty / period) > 0.5
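                    # Added reference comment: with nominal WS2812 timing the bit period is
                    # about 1.25 us; a "1" bit stays high for roughly 0.70 us (duty ~0.56 > 0.5)
                    # and a "0" bit for roughly 0.35 us (duty ~0.28 < 0.5), which is the
                    # distinction the threshold above captures.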
self.put(self.ss, self.samplenum, self.out_ann,
[0, ['%d' % bit_]])
self.bits.append(bit_)
self.handle_bits(self.samplenum)
if self.ss_packet is None:
self.ss_packet = self.samplenum
self.ss = self.samplenum
elif self.oldpin and not pin:
# Falling edge.
self.inreset = False
self.es = self.samplenum
self.oldpin = pin
| Entropy512/libsigrokdecode | decoders/rgb_led_ws281x/pd.py | Python | gpl-3.0 | 4,320 | 0.003472 |
#!/usr/bin/env python
import PyQt4.QtCore # <- this line causes the error
from multiprocessing import Process
class PTask(Process):
def __init__(self, func):
Process.__init__(self)
self._func = func
def run(self):
self._func()
def f():
try:
import numpy as np
import numpy.linalg as npl
for i in range(1000):
print "i: ", i
n = npl.pinv(np.random.rand(100,100))
# Sometimes the segfault or malloc error doesn't occur
# on the first use of pinv.
print "pinv success"
except:
# This just means the random matrix was not invertible
# but that pinv executed correctly.
print "exception success"
if __name__ == '__main__':
p = PTask(f)
print "start"
p.start()
print "wait"
p.join()
print "end"
| kcii/numpy-pyqt-multiproc-problem | fork.py | Python | mit | 868 | 0.009217 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo.config import cfg
from sahara import conductor as c
from sahara import context
from sahara.utils.openstack import keystone
conductor = c.API
CONF = cfg.CONF
def create_trust(cluster):
client = keystone.client()
ctx = context.current()
trustee_id = keystone.client_for_admin().user_id
trust = client.trusts.create(trustor_user=client.user_id,
trustee_user=trustee_id,
impersonation=True,
role_names=ctx.roles,
project=client.tenant_id)
conductor.cluster_update(ctx,
cluster,
{'trust_id': trust.id})
def use_os_admin_auth_token(cluster):
if cluster.trust_id:
ctx = context.current()
ctx.username = CONF.keystone_authtoken.admin_user
ctx.tenant_id = cluster.tenant_id
client = keystone.client_for_trusts(cluster.trust_id)
ctx.token = client.auth_token
ctx.service_catalog = json.dumps(
client.service_catalog.catalog['catalog'])
def delete_trust(cluster):
if cluster.trust_id:
keystone_client = keystone.client_for_trusts(cluster.trust_id)
keystone_client.trusts.delete(cluster.trust_id)
| tellesnobrega/storm_plugin | sahara/service/trusts.py | Python | apache-2.0 | 1,897 | 0 |
"""Iceland specific form helpers."""
from __future__ import unicode_literals
from django.forms import ValidationError
from django.forms.fields import RegexField
from django.forms.widgets import Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin
from .is_postalcodes import IS_POSTALCODES
class ISIdNumberField(EmptyValueCompatMixin, RegexField):
"""
Icelandic identification number (kennitala).
This is a number every citizen of Iceland has.
"""
default_error_messages = {
'invalid': _('Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.'),
'checksum': _('The Icelandic identification number is not valid.'),
}
def __init__(self, max_length=11, min_length=10, *args, **kwargs):
super(ISIdNumberField, self).__init__(r'^\d{6}(-| )?\d{4}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
value = super(ISIdNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
value = self._canonify(value)
if self._validate(value):
return self._format(value)
else:
raise ValidationError(self.error_messages['checksum'])
def _canonify(self, value):
"""Returns the value as only digits."""
return value.replace('-', '').replace(' ', '')
def _validate(self, value):
"""
Takes in the value in canonical form and checks the verifier digit.
The method is modulo 11.
"""
check = [3, 2, 7, 6, 5, 4, 3, 2, 1, 0]
return sum([int(value[i]) * check[i] for i in range(10)]) % 11 == 0
def _format(self, value):
"""Takes in the value in canonical form and returns it in the common display format."""
return force_text(value[:6] + '-' + value[6:])
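# Worked example (added for illustration, not part of the original module): for the
# sample number 010130-2989 the canonical form is "0101302989" and the weighted sum
# 0*3 + 1*2 + 0*7 + 1*6 + 3*5 + 0*4 + 2*3 + 9*2 + 8*1 + 9*0 = 55 is divisible by 11,
# so _validate() accepts it and _format() renders it as "010130-2989".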
class ISPhoneNumberField(EmptyValueCompatMixin, RegexField, DeprecatedPhoneNumberFormFieldMixin):
"""
Icelandic phone number.
Seven digits with an optional hyphen or space after the first three digits.
"""
def __init__(self, max_length=8, min_length=7, *args, **kwargs):
super(ISPhoneNumberField, self).__init__(r'^\d{3}(-| )?\d{4}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
value = super(ISPhoneNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
return value.replace('-', '').replace(' ', '')
class ISPostalCodeSelect(Select):
"""A Select widget that uses a list of Icelandic postal codes as its choices."""
def __init__(self, attrs=None):
super(ISPostalCodeSelect, self).__init__(attrs, choices=IS_POSTALCODES)
| jieter/django-localflavor | localflavor/is_/forms.py | Python | bsd-3-clause | 2,973 | 0.002018 |
import sys
if sys.version_info.major == 2:
from .six_py2 import (
urlparse,
urlunparse,
Generator,
)
else:
from .six_py3 import ( # noqa: #401
urlparse,
urlunparse,
Generator,
)
| pipermerriam/web3.py | web3/utils/six/__init__.py | Python | mit | 245 | 0 |
from api.callers.api_caller import ApiCaller
from exceptions import ResponseTextContentTypeError
from colors import Color
import os
from cli.arguments_builders.default_cli_arguments import DefaultCliArguments
import datetime
from cli.cli_file_writer import CliFileWriter
from cli.formatter.cli_json_formatter import CliJsonFormatter
from constants import CALLED_SCRIPT
class CliCaller:
api_object = None
action_name = None
help_description = ''
given_args = {}
result_msg_for_files = 'Response contains files. They were saved in the output folder ({}).'
result_msg_for_json = '{}'
cli_output_folder = ''
args_to_prevent_from_being_send = ['chosen_action', 'verbose', 'quiet']
def __init__(self, api_object: ApiCaller, action_name: str):
self.api_object = api_object
self.action_name = action_name
self.help_description = self.help_description.format(self.api_object.endpoint_url)
def init_verbose_mode(self):
self.result_msg_for_json = 'JSON:\n\n{}'
def build_argument_builder(self, child_parser):
return DefaultCliArguments(child_parser)
def add_parser_args(self, child_parser):
parser_argument_builder = self.build_argument_builder(child_parser)
parser_argument_builder.add_verbose_arg()
parser_argument_builder.add_help_opt()
parser_argument_builder.add_quiet_opt()
return parser_argument_builder
def attach_args(self, args):
self.given_args = args.copy()
args_to_send = args.copy()
for arg_to_remove in self.args_to_prevent_from_being_send:
if arg_to_remove in args_to_send:
del args_to_send[arg_to_remove]
if 'output' in args:
self.cli_output_folder = args['output']
del args_to_send['output']
args_to_send = {k: v for k, v in args_to_send.items() if v not in [None, '']} # Removing some 'empty' elements from dictionary
if 'file' in args:
del args_to_send['file'] # attaching file is handled by separated method
if self.api_object.request_method_name == ApiCaller.CONST_REQUEST_METHOD_GET:
self.api_object.attach_params(args_to_send)
else: # POST
self.api_object.attach_data(args_to_send)
def attach_file(self, file):
if isinstance(file, str):
file = open(file, 'rb')
self.api_object.attach_files({'file': file}) # it's already stored as file handler
def get_colored_response_status_code(self):
response_code = self.api_object.get_response_status_code()
return Color.success(response_code) if self.api_object.if_request_success() is True else Color.error(response_code)
def get_colored_prepared_response_msg(self):
response_msg = self.api_object.get_prepared_response_msg()
return Color.success(response_msg) if self.api_object.if_request_success() is True else Color.error(response_msg)
def get_result_msg(self):
if self.api_object.api_response.headers['Content-Type'] == 'text/html':
raise ResponseTextContentTypeError('Can\'t print result, since it\'s \'text/html\' instead of expected content type with \'{}\' on board.'.format(self.api_object.api_expected_data_type))
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_JSON:
return self.result_msg_for_json.format(CliJsonFormatter.format_to_pretty_string(self.api_object.get_response_json()))
elif self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE:
if self.api_object.if_request_success() is True:
return self.get_result_msg_for_files()
else:
error_msg = 'Error has occurred and your files were not saved.'
if self.given_args['verbose'] is False:
error_msg += ' To get more information, please run command in verbose mode. (add \'-v\')'
return error_msg
def get_processed_output_path(self):
output_path = self.cli_output_folder
if output_path.startswith('/') is True: # Given path is absolute
final_output_path = output_path
else:
path_parts = os.path.dirname(os.path.realpath(__file__)).split('/')[:-2]
called_script_dir = os.path.dirname(CALLED_SCRIPT)
            # Handles the case where the user calls the script from outside the root directory.
if called_script_dir != 'vxapi.py':
new_path_parts = []
bad_parts = called_script_dir.split('/')
for part in reversed(path_parts):
if part in bad_parts:
bad_parts.remove(part)
continue
new_path_parts.append(part)
new_path_parts.reverse()
path_parts = new_path_parts
prepared_file_path = path_parts + [self.cli_output_folder]
final_output_path = '/'.join(prepared_file_path)
if not final_output_path.startswith('/'):
final_output_path = '/' + final_output_path
return final_output_path
def get_result_msg_for_files(self):
return self.result_msg_for_files.format(self.get_processed_output_path())
def do_post_processing(self):
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE and self.api_object.if_request_success() is True:
self.save_files()
def get_date_string(self):
now = datetime.datetime.now()
return '{}_{}_{}_{}_{}_{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second)
def convert_file_hashes_to_array(self, args, file_arg='hash_list', key_of_array_arg='hashes'):
with args[file_arg] as file:
hashes = file.read().splitlines()
if not hashes:
raise Exception('Given file does not contain any data.')
for key, value in enumerate(hashes):
args['{}[{}]'.format(key_of_array_arg, key)] = value
del args[file_arg]
return args
def save_files(self):
api_response = self.api_object.api_response
identifier = None
if 'id' in self.given_args:
identifier = self.given_args['id']
elif 'sha256' in self.given_args:
identifier = self.given_args['sha256']
filename = '{}-{}-{}'.format(self.action_name, identifier, api_response.headers['Vx-Filename']) if identifier is not None else '{}-{}'.format(self.action_name, api_response.headers['Vx-Filename'])
return CliFileWriter.write(self.get_processed_output_path(), filename, api_response.content)
| PayloadSecurity/VxAPI | cli/wrappers/cli_caller.py | Python | gpl-3.0 | 6,741 | 0.003265 |
from django import template
from security import logic
register = template.Library()
# General role-based checks
@register.simple_tag(takes_context=True)
def is_author(context):
request = context['request']
return request.user.is_author(request)
@register.simple_tag(takes_context=True)
def is_editor(context):
request = context['request']
if request.user.is_anonymous():
return False
return request.user.is_editor(request)
@register.simple_tag(takes_context=True)
def is_section_editor(context):
request = context['request']
if request.user.is_anonymous():
return False
return request.user.is_section_editor(request)
@register.simple_tag(takes_context=True)
def is_production(context):
request = context['request']
return request.user.is_production(request)
@register.simple_tag(takes_context=True)
def is_reviewer(context):
request = context['request']
return request.user.is_reviewer(request)
@register.simple_tag(takes_context=True)
def is_proofreader(context):
request = context['request']
return request.user.is_proofreader(request)
# File-based checks
@register.simple_tag(takes_context=True)
def can_edit_file(context, file_object, article_object):
return logic.can_edit_file(context['request'], context['request'].user, file_object, article_object)
@register.simple_tag(takes_context=True)
def can_view_file_history(context, file_object, article_object):
return logic.can_view_file_history(context['request'], context['request'].user, file_object, article_object)
@register.simple_tag(takes_context=True)
def can_view_file(context, file_object):
return logic.can_view_file(context['request'], context['request'].user, file_object)
@register.simple_tag(takes_context=True)
def is_author(context):
request = context['request']
return request.user.is_author(request)
@register.simple_tag(takes_context=True)
def is_repository_manager(context):
request = context['request']
return request.user.is_repository_manager(request.repository)
@register.simple_tag(takes_context=True)
def is_preprint_editor(context):
request = context['request']
return request.user.is_preprint_editor(request)
| BirkbeckCTP/janeway | src/security/templatetags/securitytags.py | Python | agpl-3.0 | 2,227 | 0.001347 |
#!/usr/bin/python
import unittest
import apt_pkg
import apt.progress.base
class TestCache(unittest.TestCase):
"""Test invocation of apt_pkg.Cache()"""
def setUp(self):
apt_pkg.init_config()
apt_pkg.init_system()
def test_wrong_invocation(self):
"""cache_invocation: Test wrong invocation."""
apt_cache = apt_pkg.Cache(progress=None)
self.assertRaises(ValueError, apt_pkg.Cache, apt_cache)
self.assertRaises(ValueError, apt_pkg.Cache,
apt.progress.base.AcquireProgress())
self.assertRaises(ValueError, apt_pkg.Cache, 0)
def test_proper_invocation(self):
"""cache_invocation: Test correct invocation."""
apt_cache = apt_pkg.Cache(progress=None)
apt_depcache = apt_pkg.DepCache(apt_cache)
if __name__ == "__main__":
unittest.main()
| suokko/python-apt | tests/test_cache_invocation.py | Python | gpl-2.0 | 863 | 0.001159 |
"""
This sample shows how to update the
large thumbnail of an item
Python 2.x
ArcREST 3.0.1
"""
import arcrest
from arcresthelper import securityhandlerhelper
from arcresthelper import common
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
    import traceback, inspect, sys  # sys is needed for exc_info() below
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def main():
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfo['username'] = ""#<UserName>
securityinfo['password'] = ""#<Password>
securityinfo['org_url'] = "http://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
itemId = "" #Item ID
pathToImage = r"" #Path to image
try:
shh = securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
if shh.valid == False:
print shh.message
else:
admin = arcrest.manageorg.Administration(securityHandler=shh.securityhandler)
content = admin.content
item = content.getItem(itemId)
itemParams = arcrest.manageorg.ItemParameter()
itemParams.largeThumbnail = pathToImage
print item.userItem.updateItem(itemParameters=itemParams)
except (common.ArcRestHelperError),e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
if __name__ == "__main__":
main() | DShokes/ArcREST | samples/update_largethumbnail.py | Python | apache-2.0 | 2,476 | 0.008078 |
"""
handlers for transactional messaging service
"""
import json
# tornado imports
from tornado.queues import Queue
from tornado import websocket, gen, web
#local imports
from settings import DEBUG
#===============================================================================
# WEBSOCKETS SERVER
#===============================================================================
class messaging_server(web.Application):
"""listener application class"""
def __init__(self, q):
"""listener builder method"""
#define petition handlers to use
handlers = [
(r'/channel', channelHandler, dict(q=q)),
(r'/mirror', mirrorHandler),
]
web.Application.__init__(self, handlers)
#===============================================================================
# TESTING HANDLERS
#===============================================================================
class mirrorHandler(websocket.WebSocketHandler):
"""return to the sender the same message they sent"""
verbose = DEBUG
def open(self):
"""defines the websocket open method"""
pass
@gen.coroutine
def on_message(self, message):
"""mirror income data"""
yield self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
pass
class channelHandler(websocket.WebSocketHandler):
"""class that handles app websockets communication"""
verbose = DEBUG
def initialize(self, q):
"""initialize vigilante handler"""
self.q = q
self.service_functions = {
'create_user': self.create_user,
'login': self.login_user,
'logout': self.logout_user
}
def open(self):
"""defines the websocket open method"""
print('[channel]: started connection')
@gen.coroutine
def on_message(self, message):
"""defines the response to income messages"""
data = json.loads(message)
action = data.get('action')
if action:
print(message)
self.service_functions[action](message)
else:
print('[channelHandler]: must give an action')
self.write_message(
json.dumps({'error': [0, 'there is no action in request']})
)
self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
pass
def create_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
        # 1. validate that the information is complete
        #    at least name and password are required
        #    the email is also requested (work on the user DB model)
        # 2. validate that the user does not already exist
        #    query the database for the incoming user_name
        #    send an "already exists" message if it does
        # 3. validate that the password is acceptable
        #    at least 8 characters, with letters and numbers
        #    send a "bad password" message otherwise
        # 4. create the user object if every validation passes
        #    fill missing fields with defaults
        # 5. store the user information
        # 6. return a response to the client
        # TODO: define the database model (christian)
        # TODO: choose an ORM (edwin)
        # TODO: check whether the user exists (edwin)
        # TODO: create the user record (edwin)
        # TODO: fill in the JSON data for insertion (christian)
        # TODO: password validation function (christian)
pass
def login_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
pass
def logout_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
pass
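# Minimal sketch (added, not part of the original module) of the password check that the
# create_user TODOs above ask for: at least 8 characters containing both letters and digits.
# The function name and the exact rules are assumptions.
def is_valid_password(password):
    """Return True if password has at least 8 chars including a letter and a digit."""
    has_letter = any(c.isalpha() for c in password)
    has_digit = any(c.isdigit() for c in password)
    return len(password) >= 8 and has_letter and has_digit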
| pythonpopayan/bermoto | backend/handlers/transactional_messaging.py | Python | mit | 3,778 | 0.002649 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"Visual Property Editor (using wx PropertyGrid) of gui2py's components"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# some parts where inspired or borrowed from wxFormBuilders & wxPython examples
import sys, time, math, os, os.path
import wx
_ = wx.GetTranslation
import wx.propgrid as wxpg
from gui.component import InitSpec, StyleSpec, Spec, EventSpec, DimensionSpec
from gui.font import Font
DEBUG = False
class PropertyEditorPanel(wx.Panel):
def __init__( self, parent, log ):
wx.Panel.__init__(self, parent, wx.ID_ANY)
self.log = log
self.callback = None
self.panel = panel = wx.Panel(self, wx.ID_ANY)
topsizer = wx.BoxSizer(wx.VERTICAL)
# Difference between using PropertyGridManager vs PropertyGrid is that
# the manager supports multiple pages and a description box.
self.pg = pg = wxpg.PropertyGrid(panel,
style=wxpg.PG_SPLITTER_AUTO_CENTER |
wxpg.PG_AUTO_SORT |
wxpg.PG_TOOLBAR)
# Show help as tooltips
pg.SetExtraStyle(wxpg.PG_EX_HELP_AS_TOOLTIPS)
pg.Bind( wxpg.EVT_PG_CHANGED, self.OnPropGridChange )
pg.Bind( wxpg.EVT_PG_PAGE_CHANGED, self.OnPropGridPageChange )
pg.Bind( wxpg.EVT_PG_SELECTED, self.OnPropGridSelect )
pg.Bind( wxpg.EVT_PG_RIGHT_CLICK, self.OnPropGridRightClick )
##pg.AddPage( "Page 1 - Testing All" )
# store the property grid for future reference
self.pg = pg
# load empty object (just draws categories)
self.load_object(None)
# sizing stuff:
topsizer.Add(pg, 1, wx.EXPAND)
panel.SetSizer(topsizer)
topsizer.SetSizeHints(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
def load_object(self, obj, callback=None):
pg = self.pg # get the property grid reference
self.callback = callback # store the update method
# delete all properties
pg.Clear()
# clean references and aux structures
appended = set()
self.obj = obj
self.groups = {}
# loop on specs and append each property (categorized):
for i, cat, class_ in ((1, 'Init Specs', InitSpec),
(2, 'Dimension Specs', DimensionSpec),
(3, 'Style Specs', StyleSpec),
(5, 'Events', EventSpec),
(4, 'Basic Specs', Spec),
):
pg.Append(wxpg.PropertyCategory("%s - %s" % (i, cat)))
if obj is None:
continue
specs = sorted(obj._meta.specs.items(), key=lambda it: it[0])
for name, spec in specs:
if DEBUG: print "setting prop", spec, class_, spec.type
if isinstance(spec, class_):
prop = {'string': wxpg.StringProperty,
'integer': wxpg.IntProperty,
'float': wxpg.FloatProperty,
'boolean': wxpg.BoolProperty,
'text': wxpg.LongStringProperty,
'code': wxpg.LongStringProperty,
'enum': wxpg.EnumProperty,
'edit_enum': wxpg.EditEnumProperty,
'expr': wxpg.StringProperty,
'array': wxpg.ArrayStringProperty,
'font': wxpg.FontProperty,
'image_file': wxpg.ImageFileProperty,
'colour': wxpg.ColourProperty}.get(spec.type)
if prop and name not in appended:
value = getattr(obj, name)
if DEBUG: print "name", name, value
if spec.type == "code" and value is None:
value = ""
if spec.type == "boolean" and value is None:
value = False
if spec.type == "integer" and value is None:
value = -1
if spec.type in ("string", "text") and value is None:
value = ""
if spec.type == "expr":
value = repr(value)
if spec.type == "font":
if value is None:
value = wx.NullFont
else:
value = value.get_wx_font()
if callable(value):
# event binded at runtime cannot be modified:
value = str(value)
readonly = True
else:
readonly = False
if spec.type == "enum":
prop = prop(name, name,
spec.mapping.keys(),
spec.mapping.values(),
value=spec.mapping.get(value, 0))
elif spec.type == "edit_enum":
prop = prop(name, name,
spec.mapping.keys(),
range(len(spec.mapping.values())),
value=spec.mapping[value])
else:
try:
prop = prop(name, value=value)
except Exception, e:
print "CANNOT LOAD PROPERTY", name, value, e
prop.SetPyClientData(spec)
appended.add(name)
if spec.group is None:
pg.Append(prop)
if readonly:
pg.SetPropertyReadOnly(prop)
else:
# create a group hierachy (wxpg uses dot notation)
group = ""
prop_parent = None
for grp in spec.group.split("."):
prev_group = group # ancestor
group += ("." if group else "") + grp # path
if group in self.groups:
prop_parent = self.groups[group]
else:
prop_group = wxpg.StringProperty(grp,
value="<composed>")
if not prop_parent:
pg.Append(prop_group)
else:
pg.AppendIn(prev_group, prop_group)
prop_parent = prop_group
self.groups[group] = prop_parent
pg.SetPropertyReadOnly(group)
pg.AppendIn(spec.group, prop)
pg.Collapse(spec.group)
name = spec.group + "." + name
if spec.type == "boolean":
pg.SetPropertyAttribute(name, "UseCheckbox", True)
doc = spec.__doc__
if doc:
pg.SetPropertyHelpString(name, doc)
def edit(self, name=""):
"Programatically select a (default) property to start editing it"
# for more info see DoSelectAndEdit in propgrid.cpp
for name in (name, "label", "value", "text", "title", "filename",
"name"):
prop = self.pg.GetPropertyByName(name)
if prop is not None:
break
self.Parent.SetFocus()
self.Parent.Raise()
self.pg.SetFocus()
# give time to the ui to show the prop grid and set focus:
wx.CallLater(250, self.select, prop.GetName())
def select(self, name, flags=0):
"Select a property (and start the editor)"
# do not call this directly from another window, use edit() instead
# // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h-
wxPG_SEL_FOCUS=0x0001 # Focuses to created editor
wxPG_SEL_FORCE=0x0002 # Forces deletion and recreation of editor
flags |= wxPG_SEL_FOCUS # | wxPG_SEL_FORCE
prop = self.pg.GetPropertyByName(name)
self.pg.SelectProperty(prop, flags)
if DEBUG: print "selected!", prop
def OnPropGridChange(self, event):
p = event.GetProperty()
if DEBUG: print "change!", p
if p:
name = p.GetName()
spec = p.GetPyClientData()
if spec and 'enum' in spec.type:
value = p.GetValueAsString()
else:
value = p.GetValue()
#self.log.write(u'%s changed to "%s"\n' % (p,p.GetValueAsString()))
# if it a property child (parent.child), extract its name
if "." in name:
name = name[name.rindex(".") + 1:]
if spec and not name in self.groups:
if name == 'font': # TODO: detect property type
# create a gui font from the wx.Font
font = Font()
font.set_wx_font(value)
value = font
# expressions must be evaluated to store the python object
if spec.type == "expr":
value = eval(value)
# re-create the wx_object with the new property value
# (this is required at least to apply new styles and init specs)
if DEBUG: print "changed", self.obj.name
kwargs = {str(name): value}
wx.CallAfter(self.obj.rebuild, **kwargs)
if name == 'name':
wx.CallAfter(self.callback, **dict(name=self.obj.name))
def OnPropGridSelect(self, event):
p = event.GetProperty()
if p:
self.log.write(u'%s selected\n' % (event.GetProperty().GetName()))
else:
self.log.write(u'Nothing selected\n')
def OnDeleteProperty(self, event):
p = self.pg.GetSelectedProperty()
if p:
self.pg.DeleteProperty(p)
else:
wx.MessageBox("First select a property to delete")
def OnReserved(self, event):
pass
def OnPropGridRightClick(self, event):
p = event.GetProperty()
if p:
self.log.write(u'%s right clicked\n' % (event.GetProperty().GetName()))
else:
self.log.write(u'Nothing right clicked\n')
#self.obj.get_parent().Refresh()
def OnPropGridPageChange(self, event):
index = self.pg.GetSelectedPage()
self.log.write('Page Changed to \'%s\'\n' % (self.pg.GetPageName(index)))
if __name__ == '__main__':
import sys,os
app = wx.App()
f = wx.Frame(None)
from gui.controls import Button, Label, TextBox, CheckBox, ListBox, ComboBox
frame = wx.Frame(None)
#o = Button(frame, name="btnTest", label="click me!", default=True)
#o = Label(frame, name="lblTest", alignment="right", size=(-1, 500), text="hello!")
o = TextBox(frame, name="txtTest", border=False, text="hello world!")
#o = CheckBox(frame, name="chkTest", border='none', label="Check me!")
#o = ListBox(frame, name="lstTest", border='none',
# items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
# multiselect="--multiselect" in sys.argv)
#o = ComboBox(frame, name="cboTest",
# items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
# readonly='--readonly' in sys.argv,
# )
frame.Show()
log = sys.stdout
w = PropertyEditorPanel(f, log)
w.load_object(o)
f.Show()
app.MainLoop()
| reingart/gui2py | gui/tools/propeditor.py | Python | lgpl-3.0 | 12,658 | 0.006241 |
'''
Created on Sep 15, 2010
@author: duncantait
'''
from SimPy.Simulation import *
import numpy as np
import random
import math
class G():
#Settings for HF Stations
num_channels = 18
    num_stations = 10
    max_time = 100000  # total simulated time used by simulate() at the bottom; value assumed, not defined in the original
class Network():
stations = []
class Medium():
def __init__(self):
self.channels = []
for i in range(G.num_channels):
S = Store(name=i,capacity=1)
self.channels.append(S)
class StationContainer():
def __init__(self,ID):
self.ID = ID
self.Operator = Operator(ID)
self.StationSettings = StationSettings(ID)
self.Scanning = Scanning(ID)
self.Tx = Tx(ID)
def initComponents(self):
        # The component classes define initComponents(), not initCounterparts();
        # call the methods that actually exist (StationSettings defines none).
        self.Operator.initComponents()
        self.Scanning.initComponents()
        self.Tx.initComponents()
def activate(self):
activate(self.Operator,self.Operator.sendMessage(),at=0.0)
        activate(self.StationSettings,self.StationSettings.Sounding(),at=0.0)  # the method is defined as Sounding()
activate(self.Scanning,self.Scanning.scan(),at=0.0)
activate(self.Tx,self.Tx.sending(),at=0.0)
class Operator(Process):
def __init__(self, ID):
Process.__init__(self)
self.ID = ID
def initComponents(self):
        self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
        # sendMessage() below also needs a reference to this station's transmitter
        self.Tx = [N.Tx for N in Network.stations if N.ID==self.ID][0]
def sendMessage(self):
while True:
#every so often operator wants to send a message: adds to queue.
yield hold, self, random.uniform(0,1200)
#Create a Message of type 'CALL'
frameInfo = frameDetails(self.ID,self.decideDestination(),0,fType.CALL,False,-1,-1)
frameInfo.channels = self.ChannelOrder(frameInfo.destination)
yield put,self,self.Tx.sendQ,[frameInfo]
yield hold, self, random.uniform(0,1200)
def decideDestination(self):
while True:
            dest = random.randint(0,G.num_stations-1)  # the destination is a station ID (the original used num_channels)
if dest != self.ID:
return dest
def ChannelOrder(self,channel=-1,station=-1):
#sorts best channels best-worst
if channel==-1:
ordered = self.StationSettings.channelBER[station,:].argsort()
return ordered[::-1] #reverse order of array
if station==-1:
ordered = self.StationSettings.channelBER[:,channel].argsort()
return ordered[::-1]
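    # Mechanical example (added comment): for stored values [0.2, 0.9, 0.5] argsort()
    # returns [0, 2, 1] (ascending) and reversing gives [1, 2, 0], i.e. channel indices
    # ordered from the highest stored value to the lowest, matching the
    # "best channels first" intent of the comment above.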
class StationSettings(Process):
def __init__(self, ID):
Process.__init__(self)
self.ID = ID
self.state = sState.SCANNING #can be scanning, linking or linked.
self.sending = False
self.channelBER = np.zeros((G.num_channels,G.num_stations)) #LQA: Link Quality Analysis
self.timeout = 2 #current timeout counter for linking/linked mode, if this hits zero, go back to scanning
self.Td = 2 #dwell time per channel
self.Twce = 2 #wait for calling cycle to end
self.Twr = 2
self.minLQA = 0.2
self.bitrate = 392
self.hardwareTime = 20 #e.g. power up/down time, modulation/demodulation, encoding/decoding, crypto in ms.
#tune up/down time. Included in Twrt (wait for response and tune time)
def Sounding(self):
while True:
yield hold, self, random.uniform(0,120)
#Sound
yield hold, self, 1800
class Scanning(Process):
#Is HF ALWAYS scanning? No, either scanning, linking or linked
def __init__(self, ID):
self.ID = ID
Process.__init__(self)
self.currentChannel = 0
def initComponents(self):
self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
self.Tx = [N.Tx for N in Network.stations if N.ID==self.ID][0]
def scan(self):
while True:
#Different responses depending on mode.
#Rules: cannot receive while sending <-----------------
#Otherwise, packets will be interpreted as to the mode the station is in.
channel = Medium.channels[self.currentChannel]
yield (get,self,channel,1),(hold,self,self.StationSettings.timeout)
if self.acquired(channel):
signal = self.got
yield put, self , channel, signal
frameInfo = self.decode(signal) #This implies picking up the signal frame by frame from the channel
if (frameInfo.LQA > self.StationSettings.minLQA) and (frameInfo.destination==self.ID):
yield (put,self,channel,['PH:'+str(self.ID)]),(hold,self,self.StationSettings.Twce)
if self.stored(channel):
yield get,self,channel,1 #Yank sniffer packet back off channel.
if frameInfo.type== fType.CALL:
if self.StationSettings.state==sState.SCANNING:
yield put,self,self.Tx.sendQ,[frameInfo]
self.StationSettings.state=sState.LINKING
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.RESPONSE:
if self.StationSettings.state==sState.LINKING:
yield put,self,self.Tx.sendQ,[frameInfo]
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.ACK:
if self.StationSettings.state==sState.LINKING:
yield put,self,self.Tx.sendQ,[frameInfo]
self.StationSettings.state=sState.LINKED
yield waitevent,self,self.Tx.sE
if frameInfo.type== fType.QUICK_ID:
if (self.StationSettings.state==sState.SCANNING or sState.LINKED) and (frameInfo.terminate==False):
'I dont think you can have a QUICK ID out of the blue, and it doesnt need a reply...'
#yield put,self,self.Tx.sendQ,[frameInfo]
#yield waitevent,self,self.Tx.sE
elif frameInfo.terminate==True:
self.StationSettings.state=sState.SCANNING
if frameInfo.type== fType.MSG:
if self.StationSettings.state== sState.LINKED and frameInfo.terminate==False:
'again, why the reply? just keep channel open...'
elif frameInfo.terminate==True:
self.StationSettings.state=sState.SCANNING
#yield put,self,self.Tx.sendQ,[frameInfo]
#yield waitevent,self,self.Tx.sE
else:
print 'Invalid Packet'
self.StationSettings.state=sState.SCANNING
else:
print 'Timed out'
self.StationSettings.state=sState.SCANNING
else:
'Frame unsuitable: Continue Scan'
self.StationSettings.state=sState.SCANNING
else:
'Channel Empty: Continue Scan'
self.StationSettings.state=sState.SCANNING
if self.StationSettings.state==sState.SCANNING:
if self.currentChannel==G.num_channels-1:
self.currentChannel = 0
else:
self.currentChannel+=1
def decode(self,frameInfo):
#Return a packet useable to send straightaway. All data is known to achieve this.
returnInfo = self.convertReply(frameInfo)
returnInfo = self.responseSize(returnInfo)
returnInfo = self.calculate_LQA(returnInfo)
returnInfo.channels = self.currentChannel
#Messages and Acks/Responses always have to be on the same channel as before... which is all
#That is dealt with in 'Scanning'
returnInfo.terminate = False #This needs to be somewhat randomised, but for now this will do.
return returnInfo
#If LQA below certain amount, reject in PEM above
def convertReply(self, frameInfo): #Convert incoming packet into it's appropriate output type.
returnInfo = frameInfo
if frameInfo.type==fType.OUT:
returnInfo.type= fType.CALL
if frameInfo.type==fType.CALL:
returnInfo.origin = frameInfo.destination
returnInfo.destination = frameInfo.origin
returnInfo.type = fType.RESPONSE
elif frameInfo.type == fType.RESPONSE:
returnInfo.type = fType.ACK
returnInfo.origin = frameInfo.destination
returnInfo.destination = frameInfo.origin
elif frameInfo.type == fType.ACK:
returnInfo.type = fType.MSG
returnInfo.origin = frameInfo.destination
returnInfo.destination = frameInfo.origin
returnInfo = self.decidePayload(returnInfo) #Messages get a payload.
return returnInfo
def responseSize(self,frameInfo):
returnInfo = frameInfo
destination = self.get_address(frameInfo.destination)
origin = self.get_address(frameInfo.origin)
if returnInfo.type == fType.RESPONSE or fType.ACK:
returnInfo.size += len(destination)*2*49 + len(origin)*49 #each word is 49bits after encoding
return returnInfo
def decidePayload(self, frameInfo):
#Data Block Mode: Basic mode 0-572 bits, Extended 572-262820 bits (+18 each for cyclic redundancy check),
#Extended data blocks are 588 bits (49*12 + 16 FCS) Basic are 49 bits. note 572 bits = 81 ASCII chars.
#Other modes are AMD (auto msg display) and DTM (data text msg), but less efficient for larger data
#Upper bound performance = 375 * (588/1176) = 187.5bps
#Also, many many CMD words that do many things. (Important one being LQA transfer)
#See pages around 231, need to add CMD and extra necessary words to these data_blocks etc.
returnInfo = frameInfo
mode = random.randint(0,10)
if mode==0 or mode==1:
#basic data block mode
returnInfo.size += random.randint(1,81)*7 + 16
elif mode==2:
#extended data block mode (least likely)
returnInfo.size += random.randint(82,37260)*7 + 16
elif mode==3 or mode==4 or mode==5 or mode==6:
#CMD message
returnInfo.size += 24 #1 extra word
elif mode==7 or mode==8 or mode==9 or mode==10:
returnInfo.size += 0 #null
return returnInfo
def get_address(self, address):
words = []
div = math.floor(len(address)/3)
rem = len(address)%3
i = 0
rep = True
for word in range(div):
words.append(address[i:i+3])
if rep==False and i >= 3: words.append('DATA')
else: words.append('REP')
rep = not rep
i += 3
if rem>0:
final_word = address[i:i+rem] + '@'*(3-rem)
words.append(final_word)
return words
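    # Worked example (added comment): for a hypothetical string address "ABC123X" the loop
    # emits ['ABC', 'REP', '123', 'DATA'] and the 1-character remainder is padded to 'X@@',
    # giving ['ABC', 'REP', '123', 'DATA', 'X@@']. Note that the callers above pass numeric
    # station IDs, which would need str() before len() can be taken.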
#
#Instead of 'crafting messages' and spending ages about it. Merely use the functions written already
#(for making the words etc.) to calculate the SIZE of the signal, and make this a parameter of the
#frameInfo that sits on the channel. This can then be used to say HOW LONG it stays on the channel, and
#how long the receiver must receive for (although this doesn't matter too much as the receiver is effectively
#locked in one state once it enters linking/linked mode (line 101). This solves Response/Ack problem too
#
class Tx(Process):
def __init__(self,ID):
self.ID = ID
Process.__init__(self)
self.sendQ = Store(name=ID,capacity='unbounded')
self.sE = SimEvent(name='TxSent')
def initComponents(self):
self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
def sending(self):
while True:
yield get,self,self.sendQ,1
frameInfo = self.got[0] #data in form frameDetails()
signal_time = frameInfo.size*self.StationSettings.bitrate + self.StationSettings.hardwareTime
frameInfo.LQA = self.calculate_LQA(frameInfo.destination)
unSent = True
for chanNum in frameInfo.channels:
if unSent:
channel = Medium.channels(chanNum)
if channel.nrBuffered==0:
print 'Channel', chanNum, 'free, occupying..'
yield put,self,channel,[frameInfo]
unSent = False
if self.type == fType.CALL: #call cycle
yield hold,self,2*self.StationSettings.Td*G.num_stations
#THIS NEEDS ATTENTION AS IT IS DIFFERENT FROM THE REST - This could actually be ok... just needs some additions for propagation time
#could use 'signal_time' from 'size' but kind of backwards...
if self.interrupted():
print 'Collision occurred, station:', self.ID
else:
yield hold,self,signal_time #How long does it take to get there?!
if self.interrupted():
print 'Collision occurred, station:', self.ID
yield get,self,channel,1 #Message delivered.
#UNPASSIVATE SCANNING PEM
self.sE.signal(frameInfo)
self.StationSettings.timeout = self.StationSettings.Twr
#INVESTIGATE THIS TIMEOUT VARIABLE, WHAT DOES IT ACTUALLY DO? SEEM TO REMEMBER IT BEING A GOOD IDEA.
def calculate_LQA(self, destination):
#This algorithm has potential to be highly detailed
#Parameters needed: positions of 2 stations --> distance
#Ionospheric conditions
#Time of day, sunspot cycle.
#For now, stations closer in numbers are better connected.
#This should be in Rx as it needs to eventually interface with an Environment process
distance = abs(self.ID - destination)/G.num_stations
LQA = random.normalvariate(100-(distance*100),4)
if LQA > 1: LQA=1
if LQA < 0: LQA=0
##CATER FOR IF OUTGOING FRAME FAILS AND NEEDS TO REPEAT USING A DIFFERENT CHANNEL! (extra parameter?)
#class OutgoingFrame(Process):
# def __init__(self,ID,frameInfo,frame):
# #channels is a list of channels, for a response or single channel call, it will only contain 1 entry
# Process.__init__(self)
# self.ID = ID
# self.destination = frameInfo.destination
# self.channelOrder = frameInfo.channels
# self.type = frameInfo.type
# self.frame = frame
# def initComponents(self):
# self.StationSettings = [N.StationSettings for N in Network.stations if N.ID==self.ID][0]
# self.Tx = [N.Tx for N in Network.stations if N.ID==self.ID][0]
# def go(self):
# unSent = True
# for chanNum in self.channelOrder:
# if unSent:
# channel = Medium.channels(chanNum)
# if channel.nrBuffered==0:
# print 'Channel', chanNum, 'free, occupying..'
# yield put,self,channel,[self.frame]
# unSent = False
# if self.type == fType.OUT: #call cycle
# yield hold,self,2*self.StationSettings.Td*G.num_stations
# if self.interrupted():
# print 'Collision occurred, station:', self.ID
# if self.type == fType.RESPONSE:
# yield hold,self,self.StationSettings.Twr #How long does it take to get there?!
# if self.interrupted():
# print 'Collision occurred, station:', self.ID
# yield get,self,channel,1 #Message delivered.
# #UNPASSIVATE SCANNING PEM
# self.StationSettings.timeout = self.StationSettings.Twr
class frameDetails():
def __init__(self,origin,destination,size,type,terminate,channels,LQA):
self.origin = origin
self.destination = destination
self.size = size
self.type = type
self.terminate = terminate
self.channels = channels
self.LQA = LQA
class fType():
MSG = 1
QUICK_ID = 2
CALL = 3
RESPONSE = 4
ACK = 5
OUT = 6
class sState():
SCANNING = 1
LINKING = 2
LINKED = 3
initialize()
Medium = Medium()
Network.stations = [StationContainer(i) for i in range(G.num_stations)]
for N in Network.stations:
N.initComponents()
N.activate()
simulate(until=G.max_time)
| IncidentNormal/TestApps | ALE/HF_Sim_Book.py | Python | gpl-2.0 | 17,239 | 0.019375 |
#!/usr/bin/python3
# Small script to validate a given IP address
import socket
import sys
def validate_ip(ip):
try:
socket.inet_pton(socket.AF_INET, ip)
return (True,"IPv4")
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, ip)
return(True,"IPv6")
except socket.error:
return(False,"")
if __name__ == "__main__":
try:
ip = sys.argv[1]
state, version = validate_ip(ip)
if state:
print(ip + " is a valid " + version + " address")
except IndexError:
print("No IP given")
| Tijndagamer/bin | validate_ip.py | Python | mit | 609 | 0.00821 |
# Copyright (c) 2015-2016 Western Digital Corporation or its affiliates.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Chaitanya Kulkarni <chaitanya.kulkarni@hgst.com>
#
"""
NVMe Write Uncorrectable Testcase:-
1. Read a block of data successfully.
2. Issue write uncorrectable to a block of data.
3. Attempt to read from the same block; shall fail.
4. Issue a write command to the first block of data.
5. Read from the same block; shall pass.
"""
from nose.tools import assert_equal, assert_not_equal
from nvme_test_io import TestNVMeIO
class TestNVMeUncor(TestNVMeIO):
"""
Represents NVMe Write Uncorrectable testcase.
- Attributes:
- start_block : starting block to perform IO on.
- test_log_dir : directory for logs, temp files.
"""
def __init__(self):
""" Constructor TestNVMeUncor """
TestNVMeIO.__init__(self)
self.start_block = 1023
self.setup_log_dir(self.__class__.__name__)
self.write_file = self.test_log_dir + "/" + self.write_file
self.read_file = self.test_log_dir + "/" + self.read_file
self.create_data_file(self.write_file, self.data_size, "15")
open(self.read_file, 'a').close()
def __del__(self):
""" Post Section for TestNVMeUncor """
TestNVMeIO.__del__(self)
def write_uncor(self):
""" Wrapper for nvme write uncorrectable
- Args:
- None
- Returns:
- return code of nvme write uncorrectable command.
"""
write_uncor_cmd = "nvme write-uncor " + self.ns1 + \
" --start-block=" + str(self.start_block) + \
" --block-count=" + str(self.block_count)
return self.exec_cmd(write_uncor_cmd)
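# Illustrative only (hypothetical values): with start_block=1023 and, say,
# ns1="/dev/nvme0n1" and block_count=1 inherited from TestNVMeIO, the composed
# command would read roughly:
#   nvme write-uncor /dev/nvme0n1 --start-block=1023 --block-count=1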
def test_write_uncor(self):
""" Testcase main """
assert_equal(self.nvme_read(), 0)
assert_equal(self.write_uncor(), 0)
assert_not_equal(self.nvme_read(), 0)
assert_equal(self.nvme_write(), 0)
assert_equal(self.nvme_read(), 0)
| lsgunth/nvme-cli | tests/nvme_writeuncor_test.py | Python | gpl-2.0 | 2,752 | 0 |
"""This module contains a tile cache handler."""
__author__ = 'Aaron Steele'
# MOL imports
import cache
# Standard Python imports
import hashlib
import logging
import os
import urllib
import webapp2
# Google App Engine imports
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext.webapp.util import run_wsgi_app
if 'SERVER_SOFTWARE' in os.environ:
PROD = not os.environ['SERVER_SOFTWARE'].startswith('Development')
else:
PROD = True
app_id = os.environ['CURRENT_VERSION_ID'].split('.')[0]
if PROD:
host_prefix = 'http'
if os.environ['SERVER_PORT'] == '443':  # environ values are strings
host_prefix = 'https'
app_host = host_prefix + '://' + os.environ['SERVER_NAME']
else:
app_host = 'http://localhost:8080'
class TileHandler(webapp2.RequestHandler):
"""Request handler for cache requests."""
def get(self):
tile_url = self.request.url.replace(app_host, 'http://mol.cartodb.com')
tile_key = 'tile-%s' % hashlib.sha224(tile_url).hexdigest() # 'tile-' prefix marks tile cache entries
tile_png = memcache.get(tile_key) # Check memcache
if not tile_png:
tile_png = cache.get(tile_key, value_type='blob') # Check datastore cache
if not tile_png:
result = urlfetch.fetch(tile_url, deadline=60) # Check CartoDB
if result.status_code == 200 or result.status_code == 304:
tile_png = result.content
cache.add(tile_key, tile_png, value_type='blob')
memcache.add(tile_key, tile_png)
else:
memcache.add(tile_key, tile_png)
if not tile_png:
self.error(404)
else:
self.response.headers["Content-Type"] = "image/png"
self.response.headers["Cache-Control"] = "max-age=2629743" # Cache 1 month
self.response.out.write(tile_png)
class GridHandler(webapp2.RequestHandler):
"""Request handler for cache requests."""
def get(self):
grid_url = self.request.url.replace(app_host, 'http://mol.cartodb.com')
grid_key = 'utfgrid-%s' % hashlib.sha224(grid_url).hexdigest() # 'utfgrid-' prefix marks grid cache entries
grid_json = memcache.get(grid_key)
if not grid_json:
grid_json = cache.get(grid_key)
if not grid_json:
result = urlfetch.fetch(grid_url, deadline=60)
if result.status_code == 200 or result.status_code == 304:
grid_json = result.content
cache.add(grid_key, grid_json)
memcache.add(grid_key, grid_json)
else:
memcache.add(grid_key, grid_json)
if not grid_json:
self.error(404)
else:
self.response.headers["Content-Type"] = "application/json"
self.response.headers["Cache-Control"] = "max-age=2629743" # Cache 1 month
self.response.out.write(grid_json)
application = webapp2.WSGIApplication(
[('/tiles/[a-zA-Z0-9_-]+/[\d]+/[\d]+/[\d]+.png?.*', TileHandler),
('/tiles/[a-zA-Z0-9_-]+/[\d]+/[\d]+/[\d]+.grid.json?.*', GridHandler),],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| MapofLife/MOL | app/tile_handler.py | Python | bsd-3-clause | 3,335 | 0.009595 |
from django.test import TestCase
from django.core import signing
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.storage.cookie import CookieStorage
from django.contrib.formtools.tests.wizard.storage import get_request, TestStorage
@skipIfCustomUser
class TestCookieStorage(TestStorage, TestCase):
def get_storage(self):
return CookieStorage
def test_manipulated_cookie(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
cookie_signer = signing.get_cookie_signer(storage.prefix)
storage.request.COOKIES[storage.prefix] = cookie_signer.sign(
storage.encoder.encode({'key1': 'value1'}))
self.assertEqual(storage.load_data(), {'key1': 'value1'})
storage.request.COOKIES[storage.prefix] = 'i_am_manipulated'
self.assertRaises(SuspiciousOperation, storage.load_data)
def test_reset_cookie(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
storage.data = {'key1': 'value1'}
response = HttpResponse()
storage.update_response(response)
cookie_signer = signing.get_cookie_signer(storage.prefix)
signed_cookie_data = cookie_signer.sign(storage.encoder.encode(storage.data))
self.assertEqual(response.cookies[storage.prefix].value, signed_cookie_data)
storage.init_data()
storage.update_response(response)
unsigned_cookie_data = cookie_signer.unsign(response.cookies[storage.prefix].value)
self.assertJSONEqual(unsigned_cookie_data,
{"step_files": {}, "step": None, "extra_data": {}, "step_data": {}})
| edisonlz/fruit | web_project/base/site-packages/django/contrib/formtools/tests/wizard/test_cookiestorage.py | Python | apache-2.0 | 1,813 | 0.003309 |
default_app_config = 'ain7.annuaire.management.FillDb'
| ain7/www.ain7.org | ain7/annuaire/__init__.py | Python | lgpl-2.1 | 55 | 0 |
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, too-many-lines
"""Scikit-Learn Wrapper interface for XGBoost."""
import copy
import warnings
import json
import os
from typing import Union, Optional, List, Dict, Callable, Tuple, Any, TypeVar, Type, cast
from typing import Sequence
import numpy as np
from .core import Booster, DMatrix, XGBoostError
from .core import _deprecate_positional_args, _convert_ntree_limit
from .core import Metric
from .training import train
from .callback import TrainingCallback
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_array
# Do not use class names on scikit-learn directly. Re-define the classes on
# .compat to guarantee the behavior without scikit-learn
from .compat import (
SKLEARN_INSTALLED,
XGBModelBase,
XGBClassifierBase,
XGBRegressorBase,
XGBoostLabelEncoder,
)
array_like = Any
class XGBRankerMixIn: # pylint: disable=too-few-public-methods
"""MixIn for ranking, defines the _estimator_type usually defined in scikit-learn base
classes."""
_estimator_type = "ranker"
def _check_rf_callback(
early_stopping_rounds: Optional[int],
callbacks: Optional[Sequence[TrainingCallback]],
) -> None:
if early_stopping_rounds is not None or callbacks is not None:
raise NotImplementedError(
"`early_stopping_rounds` and `callbacks` are not implemented for"
" random forest."
)
_SklObjective = Optional[
Union[
str, Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
]
]
def _objective_decorator(
func: Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
) -> Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]:
"""Decorate an objective function
Converts an objective function that uses the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func:
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func:
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds: np.ndarray, dmatrix: DMatrix) -> Tuple[np.ndarray, np.ndarray]:
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
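# Illustrative sketch (hypothetical, not part of the original module): a
# sklearn-style squared-error objective and how ``_objective_decorator`` would
# turn it into the (preds, DMatrix) signature expected by ``train``.
def _example_squared_error_objective(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    # gradient and hessian of 0.5 * (y_pred - y_true) ** 2 with respect to y_pred
    grad = y_pred - y_true
    hess = np.ones_like(y_pred)
    return grad, hess
# _objective_decorator(_example_squared_error_objective) could then be passed as
# ``obj`` to xgboost.training.train, mirroring what ``fit`` does with a callable
# ``objective``.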
def _metric_decorator(func: Callable) -> Metric:
"""Decorate a metric function from sklearn.
Converts a metric function that uses the typical sklearn metric signature so that it
is compatible with :py:func:`train`.
"""
def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
y_true = dmatrix.get_label()
return func.__name__, func(y_true, y_score)
return inner
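# Illustrative sketch (hypothetical, not part of the original module): wrapping a
# scikit-learn metric so it can be passed as ``custom_metric`` to ``train``.
# ``mean_absolute_error`` is assumed to be importable from sklearn.metrics.
def _example_wrapped_mae() -> Metric:
    from sklearn.metrics import mean_absolute_error  # local import, sketch only
    # the wrapper returns ("mean_absolute_error", value) when called with
    # (y_score, DMatrix), which is the signature expected by the training loop
    return _metric_decorator(mean_absolute_error)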
__estimator_doc = '''
n_estimators : int
Number of gradient boosted trees. Equivalent to number of boosting
rounds.
'''
__model_doc = f'''
max_depth : Optional[int]
Maximum tree depth for base learners.
max_leaves :
Maximum number of leaves; 0 indicates no limit.
max_bin :
If using histogram-based algorithm, maximum number of bins per feature
grow_policy :
Tree growing policy. 0: favor splitting at nodes closest to the node, i.e. grow
depth-wise. 1: favor splitting at nodes with highest loss change.
learning_rate : Optional[float]
Boosting learning rate (xgb's "eta")
verbosity : Optional[int]
The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
objective : {_SklObjective}
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
booster: Optional[str]
Specify which booster to use: gbtree, gblinear or dart.
tree_method: Optional[str]
Specify which tree method to use. Default to auto. If this parameter is set to
default, XGBoost will choose the most conservative option available. It's
recommended to study this option from the parameters document :doc:`tree method
</treemethod>`
n_jobs : Optional[int]
Number of parallel threads used to run xgboost. When used with other Scikit-Learn
algorithms like grid search, you may choose which algorithm to parallelize and
balance the threads. Creating thread contention will significantly slow down both
algorithms.
gamma : Optional[float]
(min_split_loss) Minimum loss reduction required to make a further partition on a
leaf node of the tree.
min_child_weight : Optional[float]
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : Optional[float]
Maximum delta step we allow each tree's weight estimation to be.
subsample : Optional[float]
Subsample ratio of the training instance.
sampling_method :
Sampling method. Used only by `gpu_hist` tree method.
- `uniform`: select random training instances uniformly.
- `gradient_based` select random training instances with higher probability when
the gradient and hessian are larger. (cf. CatBoost)
colsample_bytree : Optional[float]
Subsample ratio of columns when constructing each tree.
colsample_bylevel : Optional[float]
Subsample ratio of columns for each level.
colsample_bynode : Optional[float]
Subsample ratio of columns for each split.
reg_alpha : Optional[float]
L1 regularization term on weights (xgb's alpha).
reg_lambda : Optional[float]
L2 regularization term on weights (xgb's lambda).
scale_pos_weight : Optional[float]
Balancing of positive and negative weights.
base_score : Optional[float]
The initial prediction score of all instances, global bias.
random_state : Optional[Union[numpy.random.RandomState, int]]
Random number seed.
.. note::
Using gblinear booster with shotgun updater is nondeterministic as
it uses Hogwild algorithm.
missing : float, default np.nan
Value in the data which needs to be present as a missing value.
num_parallel_tree: Optional[int]
Used for boosting random forest.
monotone_constraints : Optional[Union[Dict[str, int], str]]
Constraint of variable monotonicity. See :doc:`tutorial </tutorials/monotonic>`
for more information.
interaction_constraints : Optional[Union[str, List[Tuple[str]]]]
Constraints for interaction representing permitted interactions. The
constraints must be specified in the form of a nested list, e.g. ``[[0, 1], [2,
3, 4]]``, where each inner list is a group of indices of features that are
allowed to interact with each other. See :doc:`tutorial
</tutorials/feature_interaction_constraint>` for more information
importance_type: Optional[str]
The feature importance type for the feature_importances\\_ property:
* For tree model, it's either "gain", "weight", "cover", "total_gain" or
"total_cover".
* For linear model, only "weight" is defined and it's the normalized coefficients
without bias.
gpu_id : Optional[int]
Device ordinal.
validate_parameters : Optional[bool]
Give warnings for unknown parameter.
predictor : Optional[str]
Force XGBoost to use specific predictor, available choices are [cpu_predictor,
gpu_predictor].
enable_categorical : bool
.. versionadded:: 1.5.0
.. note:: This parameter is experimental
Experimental support for categorical data. When enabled, cudf/pandas.DataFrame
should be used to specify categorical data type. Also, JSON/UBJSON
serialization format is required.
max_cat_to_onehot : Optional[int]
.. versionadded:: 1.6.0
.. note:: This parameter is experimental
A threshold for deciding whether XGBoost should use one-hot encoding based split
for categorical data. When number of categories is lesser than the threshold
then one-hot encoding is chosen, otherwise the categories will be partitioned
into children nodes. Only relevant for regression and binary classification.
See :doc:`Categorical Data </tutorials/categorical>` for details.
eval_metric : Optional[Union[str, List[str], Callable]]
.. versionadded:: 1.6.0
Metric used for monitoring the training result and early stopping. It can be a
string or list of strings as names of predefined metric in XGBoost (See
doc/parameter.rst), one of the metrics in :py:mod:`sklearn.metrics`, or any other
user defined metric that looks like `sklearn.metrics`.
If custom objective is also provided, then custom metric should implement the
corresponding reverse link function.
Unlike the `scoring` parameter commonly used in scikit-learn, when a callable
object is provided, it's assumed to be a cost function and by default XGBoost will
minimize the result during early stopping.
For advanced usage on Early stopping like directly choosing to maximize instead of
minimize, see :py:obj:`xgboost.callback.EarlyStopping`.
See :doc:`Custom Objective and Evaluation Metric </tutorials/custom_metric_obj>`
for more.
.. note::
This parameter replaces `eval_metric` in :py:meth:`fit` method. The old one
receives un-transformed prediction regardless of whether custom objective is
being used.
.. code-block:: python
from sklearn.datasets import load_diabetes
from sklearn.metrics import mean_absolute_error
X, y = load_diabetes(return_X_y=True)
reg = xgb.XGBRegressor(
tree_method="hist",
eval_metric=mean_absolute_error,
)
reg.fit(X, y, eval_set=[(X, y)])
early_stopping_rounds : Optional[int]
.. versionadded:: 1.6.0
Activates early stopping. Validation metric needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training. Requires at least
one item in **eval_set** in :py:meth:`fit`.
The method returns the model from the last iteration (not the best one). If
there's more than one item in **eval_set**, the last entry will be used for early
stopping. If there's more than one metric in **eval_metric**, the last metric
will be used for early stopping.
If early stopping occurs, the model will have three additional fields:
:py:attr:`best_score`, :py:attr:`best_iteration` and
:py:attr:`best_ntree_limit`.
.. note::
This parameter replaces `early_stopping_rounds` in :py:meth:`fit` method.
callbacks : Optional[List[TrainingCallback]]
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
.. note::
States in callback are not preserved during training, which means callback
objects can not be reused for multiple training sessions without
reinitialization or deepcopy.
.. code-block:: python
for params in parameters_grid:
# be sure to (re)initialize the callbacks before each run
callbacks = [xgb.callback.LearningRateScheduler(custom_rates)]
xgboost.train(params, Xy, callbacks=callbacks)
kwargs : dict, optional
Keyword arguments for XGBoost Booster object. Full documentation of parameters
can be found :doc:`here </parameter>`.
Attempting to set a parameter via the constructor args and \\*\\*kwargs
dict simultaneously will result in a TypeError.
.. note:: \\*\\*kwargs unsupported by scikit-learn
\\*\\*kwargs is unsupported by scikit-learn. We do not guarantee
that parameters passed via this argument will interact properly
with scikit-learn.
'''
__custom_obj_note = '''
.. note:: Custom objective function
A custom objective function can be provided for the ``objective``
parameter. In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
grad: array_like of shape [n_samples]
The value of the gradient for each sample point.
hess: array_like of shape [n_samples]
The value of the second derivative for each sample point
'''
def xgboost_model_doc(
header: str, items: List[str],
extra_parameters: Optional[str] = None,
end_note: Optional[str] = None
) -> Callable[[Type], Type]:
'''Obtain documentation for Scikit-Learn wrappers
Parameters
----------
header: str
An introduction to the class.
items : list
A list of common doc items. Available items are:
- estimators: the meaning of n_estimators
- model: All the other parameters
- objective: note for customized objective
extra_parameters: str
Document for class specific parameters, placed at the head.
end_note: str
Extra notes put to the end.
'''
def get_doc(item: str) -> str:
'''Return selected item'''
__doc = {'estimators': __estimator_doc,
'model': __model_doc,
'objective': __custom_obj_note}
return __doc[item]
def adddoc(cls: Type) -> Type:
doc = ['''
Parameters
----------
''']
if extra_parameters:
doc.append(extra_parameters)
doc.extend([get_doc(i) for i in items])
if end_note:
doc.append(end_note)
full_doc = [header + '\n\n']
full_doc.extend(doc)
cls.__doc__ = ''.join(full_doc)
return cls
return adddoc
def _wrap_evaluation_matrices(
missing: float,
X: Any,
y: Any,
group: Optional[Any],
qid: Optional[Any],
sample_weight: Optional[Any],
base_margin: Optional[Any],
feature_weights: Optional[Any],
eval_set: Optional[Sequence[Tuple[Any, Any]]],
sample_weight_eval_set: Optional[Sequence[Any]],
base_margin_eval_set: Optional[Sequence[Any]],
eval_group: Optional[Sequence[Any]],
eval_qid: Optional[Sequence[Any]],
create_dmatrix: Callable,
enable_categorical: bool,
) -> Tuple[Any, List[Tuple[Any, str]]]:
"""Convert array_like evaluation matrices into DMatrix. Perform validation on the way.
"""
train_dmatrix = create_dmatrix(
data=X,
label=y,
group=group,
qid=qid,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=missing,
enable_categorical=enable_categorical,
)
n_validation = 0 if eval_set is None else len(eval_set)
def validate_or_none(meta: Optional[Sequence], name: str) -> Sequence:
if meta is None:
return [None] * n_validation
if len(meta) != n_validation:
raise ValueError(
f"{name}'s length does not equal `eval_set`'s length, " +
f"expecting {n_validation}, got {len(meta)}"
)
return meta
if eval_set is not None:
sample_weight_eval_set = validate_or_none(
sample_weight_eval_set, "sample_weight_eval_set"
)
base_margin_eval_set = validate_or_none(
base_margin_eval_set, "base_margin_eval_set"
)
eval_group = validate_or_none(eval_group, "eval_group")
eval_qid = validate_or_none(eval_qid, "eval_qid")
evals = []
for i, (valid_X, valid_y) in enumerate(eval_set):
# Skip the duplicated entry.
if all(
(
valid_X is X, valid_y is y,
sample_weight_eval_set[i] is sample_weight,
base_margin_eval_set[i] is base_margin,
eval_group[i] is group,
eval_qid[i] is qid
)
):
evals.append(train_dmatrix)
else:
m = create_dmatrix(
data=valid_X,
label=valid_y,
weight=sample_weight_eval_set[i],
group=eval_group[i],
qid=eval_qid[i],
base_margin=base_margin_eval_set[i],
missing=missing,
enable_categorical=enable_categorical,
)
evals.append(m)
nevals = len(evals)
eval_names = [f"validation_{i}" for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
if any(
meta is not None
for meta in [
sample_weight_eval_set,
base_margin_eval_set,
eval_group,
eval_qid,
]
):
raise ValueError(
"`eval_set` is not set but one of the other evaluation meta info is "
"not None."
)
evals = []
return train_dmatrix, evals
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
['estimators', 'model', 'objective'])
class XGBModel(XGBModelBase):
# pylint: disable=too-many-arguments, too-many-instance-attributes, missing-docstring
def __init__(
self,
max_depth: Optional[int] = None,
max_leaves: Optional[int] = None,
max_bin: Optional[int] = None,
grow_policy: Optional[str] = None,
learning_rate: Optional[float] = None,
n_estimators: int = 100,
verbosity: Optional[int] = None,
objective: _SklObjective = None,
booster: Optional[str] = None,
tree_method: Optional[str] = None,
n_jobs: Optional[int] = None,
gamma: Optional[float] = None,
min_child_weight: Optional[float] = None,
max_delta_step: Optional[float] = None,
subsample: Optional[float] = None,
sampling_method: Optional[str] = None,
colsample_bytree: Optional[float] = None,
colsample_bylevel: Optional[float] = None,
colsample_bynode: Optional[float] = None,
reg_alpha: Optional[float] = None,
reg_lambda: Optional[float] = None,
scale_pos_weight: Optional[float] = None,
base_score: Optional[float] = None,
random_state: Optional[Union[np.random.RandomState, int]] = None,
missing: float = np.nan,
num_parallel_tree: Optional[int] = None,
monotone_constraints: Optional[Union[Dict[str, int], str]] = None,
interaction_constraints: Optional[Union[str, Sequence[Sequence[str]]]] = None,
importance_type: Optional[str] = None,
gpu_id: Optional[int] = None,
validate_parameters: Optional[bool] = None,
predictor: Optional[str] = None,
enable_categorical: bool = False,
max_cat_to_onehot: Optional[int] = None,
eval_metric: Optional[Union[str, List[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
callbacks: Optional[List[TrainingCallback]] = None,
**kwargs: Any
) -> None:
if not SKLEARN_INSTALLED:
raise XGBoostError(
"sklearn needs to be installed in order to use this module"
)
self.n_estimators = n_estimators
self.objective = objective
self.max_depth = max_depth
self.max_leaves = max_leaves
self.max_bin = max_bin
self.grow_policy = grow_policy
self.learning_rate = learning_rate
self.verbosity = verbosity
self.booster = booster
self.tree_method = tree_method
self.gamma = gamma
self.min_child_weight = min_child_weight
self.max_delta_step = max_delta_step
self.subsample = subsample
self.sampling_method = sampling_method
self.colsample_bytree = colsample_bytree
self.colsample_bylevel = colsample_bylevel
self.colsample_bynode = colsample_bynode
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.scale_pos_weight = scale_pos_weight
self.base_score = base_score
self.missing = missing
self.num_parallel_tree = num_parallel_tree
self.random_state = random_state
self.n_jobs = n_jobs
self.monotone_constraints = monotone_constraints
self.interaction_constraints = interaction_constraints
self.importance_type = importance_type
self.gpu_id = gpu_id
self.validate_parameters = validate_parameters
self.predictor = predictor
self.enable_categorical = enable_categorical
self.max_cat_to_onehot = max_cat_to_onehot
self.eval_metric = eval_metric
self.early_stopping_rounds = early_stopping_rounds
self.callbacks = callbacks
if kwargs:
self.kwargs = kwargs
def _more_tags(self) -> Dict[str, bool]:
'''Tags used for scikit-learn data validation.'''
return {'allow_nan': True, 'no_validation': True}
def __sklearn_is_fitted__(self) -> bool:
return hasattr(self, "_Booster")
def get_booster(self) -> Booster:
"""Get the underlying xgboost Booster of this model.
This will raise an exception when fit was not called
Returns
-------
booster : a xgboost booster of underlying model
"""
if not self.__sklearn_is_fitted__():
from sklearn.exceptions import NotFittedError
raise NotFittedError('need to call fit or load_model beforehand')
return self._Booster
def set_params(self, **params: Any) -> "XGBModel":
"""Set the parameters of this estimator. Modification of the sklearn method to
allow unknown kwargs. This allows using the full range of xgboost
parameters that are not defined as member variables in sklearn grid
search.
Returns
-------
self
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
# this concatenates kwargs into parameters, enabling `get_params` for
# obtaining parameters from keyword parameters.
for key, value in params.items():
if hasattr(self, key):
setattr(self, key, value)
else:
if not hasattr(self, "kwargs"):
self.kwargs = {}
self.kwargs[key] = value
if hasattr(self, '_Booster'):
parameters = self.get_xgb_params()
self.get_booster().set_param(parameters)
return self
def get_params(self, deep: bool = True) -> Dict[str, Any]:
# pylint: disable=attribute-defined-outside-init
"""Get parameters."""
# Based on: https://stackoverflow.com/questions/59248211
# The basic flow in `get_params` is:
# 0. Return parameters in subclass first, by using inspect.
# 1. Return parameters in `XGBModel` (the base class).
# 2. Return whatever in `**kwargs`.
# 3. Merge them.
params = super().get_params(deep)
cp = copy.copy(self)
cp.__class__ = cp.__class__.__bases__[0]
params.update(cp.__class__.get_params(cp, deep))
# if kwargs is a dict, update params accordingly
if hasattr(self, "kwargs") and isinstance(self.kwargs, dict):
params.update(self.kwargs)
if isinstance(params['random_state'], np.random.RandomState):
params['random_state'] = params['random_state'].randint(
np.iinfo(np.int32).max)
def parse_parameter(value: Any) -> Optional[Union[int, float, str]]:
for t in (int, float, str):
try:
ret = t(value)
return ret
except ValueError:
continue
return None
# Get internal parameter values
try:
config = json.loads(self.get_booster().save_config())
stack = [config]
internal = {}
while stack:
obj = stack.pop()
for k, v in obj.items():
if k.endswith('_param'):
for p_k, p_v in v.items():
internal[p_k] = p_v
elif isinstance(v, dict):
stack.append(v)
for k, v in internal.items():
if k in params and params[k] is None:
params[k] = parse_parameter(v)
except ValueError:
pass
return params
def get_xgb_params(self) -> Dict[str, Any]:
"""Get xgboost specific parameters."""
params = self.get_params()
# Parameters that should not go into native learner.
wrapper_specific = {
"importance_type",
"kwargs",
"missing",
"n_estimators",
"use_label_encoder",
"enable_categorical",
"early_stopping_rounds",
"callbacks",
}
filtered = {}
for k, v in params.items():
if k not in wrapper_specific and not callable(v):
filtered[k] = v
return filtered
def get_num_boosting_rounds(self) -> int:
"""Gets the number of xgboost boosting rounds."""
return self.n_estimators
def _get_type(self) -> str:
if not hasattr(self, '_estimator_type'):
raise TypeError(
"`_estimator_type` undefined. "
"Please use appropriate mixin to define estimator type."
)
return self._estimator_type # pylint: disable=no-member
def save_model(self, fname: Union[str, os.PathLike]) -> None:
meta = {}
for k, v in self.__dict__.items():
if k == '_le':
meta['_le'] = self._le.to_json()
continue
if k == '_Booster':
continue
if k == 'classes_':
# numpy array is not JSON serializable
meta['classes_'] = self.classes_.tolist()
continue
try:
json.dumps({k: v})
meta[k] = v
except TypeError:
warnings.warn(str(k) + ' is not saved in Scikit-Learn meta.', UserWarning)
meta['_estimator_type'] = self._get_type()
meta_str = json.dumps(meta)
self.get_booster().set_attr(scikit_learn=meta_str)
self.get_booster().save_model(fname)
# Delete the attribute after save
self.get_booster().set_attr(scikit_learn=None)
save_model.__doc__ = f"""{Booster.save_model.__doc__}"""
def load_model(self, fname: Union[str, bytearray, os.PathLike]) -> None:
# pylint: disable=attribute-defined-outside-init
if not hasattr(self, '_Booster'):
self._Booster = Booster({'n_jobs': self.n_jobs})
self.get_booster().load_model(fname)
meta_str = self.get_booster().attr('scikit_learn')
if meta_str is None:
# FIXME(jiaming): This doesn't have to be a problem as most of the needed
# information like num_class and objective is in Learner class.
warnings.warn(
'Loading a native XGBoost model with Scikit-Learn interface.'
)
return
meta = json.loads(meta_str)
states = {}
for k, v in meta.items():
if k == '_le':
self._le = XGBoostLabelEncoder()
self._le.from_json(v)
continue
# FIXME(jiaming): This can be removed once label encoder is gone since we can
# generate it from `np.arange(self.n_classes_)`
if k == 'classes_':
self.classes_ = np.array(v)
continue
if k == "_estimator_type":
if self._get_type() != v:
raise TypeError(
"Loading an estimator with different type. "
f"Expecting: {self._get_type()}, got: {v}"
)
continue
states[k] = v
self.__dict__.update(states)
# Delete the attribute after load
self.get_booster().set_attr(scikit_learn=None)
load_model.__doc__ = f"""{Booster.load_model.__doc__}"""
# pylint: disable=too-many-branches
def _configure_fit(
self,
booster: Optional[Union[Booster, "XGBModel", str]],
eval_metric: Optional[Union[Callable, str, Sequence[str]]],
params: Dict[str, Any],
early_stopping_rounds: Optional[int],
callbacks: Optional[Sequence[TrainingCallback]],
) -> Tuple[
Optional[Union[Booster, str, "XGBModel"]],
Optional[Metric],
Dict[str, Any],
Optional[int],
Optional[Sequence[TrainingCallback]],
]:
"""Configure parameters for :py:meth:`fit`."""
if isinstance(booster, XGBModel):
model: Optional[Union[Booster, str]] = booster.get_booster()
else:
model = booster
def _deprecated(parameter: str) -> None:
warnings.warn(
f"`{parameter}` in `fit` method is deprecated for better compatibility "
f"with scikit-learn, use `{parameter}` in constructor or`set_params` "
"instead.",
UserWarning,
)
def _duplicated(parameter: str) -> None:
raise ValueError(
f"2 different `{parameter}` are provided. Use the one in constructor "
"or `set_params` instead."
)
# Configure evaluation metric.
if eval_metric is not None:
_deprecated("eval_metric")
if self.eval_metric is not None and eval_metric is not None:
_duplicated("eval_metric")
# - track where does the evaluation metric come from
if self.eval_metric is not None:
from_fit = False
eval_metric = self.eval_metric
else:
from_fit = True
# - configure callable evaluation metric
metric: Optional[Metric] = None
if eval_metric is not None:
if callable(eval_metric) and from_fit:
# No need to wrap the evaluation function for old parameter.
metric = eval_metric
elif callable(eval_metric):
# Parameter from constructor or set_params
metric = _metric_decorator(eval_metric)
else:
params.update({"eval_metric": eval_metric})
# Configure early_stopping_rounds
if early_stopping_rounds is not None:
_deprecated("early_stopping_rounds")
if early_stopping_rounds is not None and self.early_stopping_rounds is not None:
_duplicated("early_stopping_rounds")
early_stopping_rounds = (
self.early_stopping_rounds
if self.early_stopping_rounds is not None
else early_stopping_rounds
)
# Configure callbacks
if callbacks is not None:
_deprecated("callbacks")
if callbacks is not None and self.callbacks is not None:
_duplicated("callbacks")
callbacks = self.callbacks if self.callbacks is not None else callbacks
tree_method = params.get("tree_method", None)
cat_support = {"gpu_hist", "approx", "hist"}
if self.enable_categorical and tree_method not in cat_support:
raise ValueError(
"Experimental support for categorical data is not implemented for"
" current tree method yet."
)
return model, metric, params, early_stopping_rounds, callbacks
def _set_evaluation_result(self, evals_result: TrainingCallback.EvalsLog) -> None:
if evals_result:
self.evals_result_ = cast(Dict[str, Dict[str, List[float]]], evals_result)
@_deprecate_positional_args
def fit(
self,
X: array_like,
y: array_like,
*,
sample_weight: Optional[array_like] = None,
base_margin: Optional[array_like] = None,
eval_set: Optional[Sequence[Tuple[array_like, array_like]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[bool] = True,
xgb_model: Optional[Union[Booster, str, "XGBModel"]] = None,
sample_weight_eval_set: Optional[Sequence[array_like]] = None,
base_margin_eval_set: Optional[Sequence[array_like]] = None,
feature_weights: Optional[array_like] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None
) -> "XGBModel":
# pylint: disable=invalid-name,attribute-defined-outside-init
"""Fit gradient boosting model.
Note that calling ``fit()`` multiple times will cause the model object to be
re-fit from scratch. To resume training from a previous checkpoint, explicitly
pass ``xgb_model`` argument.
Parameters
----------
X :
Feature matrix
y :
Labels
sample_weight :
instance weights
base_margin :
global bias for each instance.
eval_set :
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_metric : str, list of str, or callable, optional
.. deprecated:: 1.6.0
Use `eval_metric` in :py:meth:`__init__` or :py:meth:`set_params` instead.
early_stopping_rounds : int
.. deprecated:: 1.6.0
Use `early_stopping_rounds` in :py:meth:`__init__` or
:py:meth:`set_params` instead.
verbose :
If `verbose` and an evaluation set is used, writes the evaluation metric
measured on the validation set to stderr.
xgb_model :
file name of stored XGBoost model or 'Booster' instance XGBoost model to be
loaded before training (allows training continuation).
sample_weight_eval_set :
A list of the form [L_1, L_2, ..., L_n], where each L_i is an array like
object storing instance weights for the i-th validation set.
base_margin_eval_set :
A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
object storing base margin for the i-th validation set.
feature_weights :
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown.
callbacks :
.. deprecated:: 1.6.0
Use `callbacks` in :py:meth:`__init__` or :py:meth:`set_params` instead.
"""
evals_result: TrainingCallback.EvalsLog = {}
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs),
enable_categorical=self.enable_categorical,
)
params = self.get_xgb_params()
if callable(self.objective):
obj: Optional[
Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]
] = _objective_decorator(self.objective)
params["objective"] = "reg:squarederror"
else:
obj = None
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
self._Booster = train(
params,
train_dmatrix,
self.get_num_boosting_rounds(),
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result,
obj=obj,
custom_metric=metric,
verbose_eval=verbose,
xgb_model=model,
callbacks=callbacks,
)
self._set_evaluation_result(evals_result)
return self
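# Hedged usage sketch (hypothetical variable names, not from the original source):
# fitting with a validation set and then resuming boosting from the previous
# booster via ``xgb_model``, as described in the docstring above.
#
#     reg = XGBRegressor(n_estimators=10, eval_metric="rmse")
#     reg.fit(X_train, y_train, eval_set=[(X_valid, y_valid)])
#     cont = XGBRegressor(n_estimators=10, eval_metric="rmse")
#     cont.fit(X_train, y_train, eval_set=[(X_valid, y_valid)],
#              xgb_model=reg.get_booster())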
def _can_use_inplace_predict(self) -> bool:
# When predictor is explicitly set, using `inplace_predict` might result into
# error with incompatible data type.
# Inplace predict doesn't handle as many data types as DMatrix, but it's
# sufficient for the dask interface, where the input is simpler.
predictor = self.get_params().get("predictor", None)
if predictor in ("auto", None) and self.booster != "gblinear":
return True
return False
def _get_iteration_range(
self, iteration_range: Optional[Tuple[int, int]]
) -> Tuple[int, int]:
if (iteration_range is None or iteration_range[1] == 0):
# Use best_iteration if defined.
try:
iteration_range = (0, self.best_iteration + 1)
except AttributeError:
iteration_range = (0, 0)
if self.booster == "gblinear":
iteration_range = (0, 0)
return iteration_range
def predict(
self,
X: array_like,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[array_like] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Predict with `X`. If the model is trained with early stopping, then `best_iteration`
is used automatically. For tree models, when data is on GPU, like cupy array or
cuDF dataframe and `predictor` is not specified, the prediction is run on GPU
automatically, otherwise it will run on CPU.
.. note:: This function is only thread safe for `gbtree` and `dart`.
Parameters
----------
X :
Data to predict with.
output_margin :
Whether to output the raw untransformed margin value.
ntree_limit :
Deprecated, use `iteration_range` instead.
validate_features :
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
base_margin :
Margin added to prediction.
iteration_range :
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying ``iteration_range=(10,
20)``, then only the forests built during [10, 20) (half open set) rounds are
used in this prediction.
.. versionadded:: 1.4.0
Returns
-------
prediction
"""
iteration_range = _convert_ntree_limit(
self.get_booster(), ntree_limit, iteration_range
)
iteration_range = self._get_iteration_range(iteration_range)
if self._can_use_inplace_predict():
try:
predts = self.get_booster().inplace_predict(
data=X,
iteration_range=iteration_range,
predict_type="margin" if output_margin else "value",
missing=self.missing,
base_margin=base_margin,
validate_features=validate_features,
)
if _is_cupy_array(predts):
import cupy # pylint: disable=import-error
predts = cupy.asnumpy(predts) # ensure numpy array is used.
return predts
except TypeError:
# coo, csc, dt
pass
test = DMatrix(
X, base_margin=base_margin,
missing=self.missing,
nthread=self.n_jobs,
enable_categorical=self.enable_categorical
)
return self.get_booster().predict(
data=test,
iteration_range=iteration_range,
output_margin=output_margin,
validate_features=validate_features,
)
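# Illustrative only (hypothetical names): per the ``iteration_range`` note above,
# predicting with just the trees built during rounds [10, 20) of a 100-round
# model would look like:
#
#     preds = model.predict(X_test, iteration_range=(10, 20))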
def apply(
self, X: array_like,
ntree_limit: int = 0,
iteration_range: Optional[Tuple[int, int]] = None
) -> np.ndarray:
"""Return the predicted leaf every tree for each sample. If the model is trained with
early stopping, then `best_iteration` is used automatically.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
iteration_range :
See :py:meth:`predict`.
ntree_limit :
Deprecated, use ``iteration_range`` instead.
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
"""
iteration_range = _convert_ntree_limit(
self.get_booster(), ntree_limit, iteration_range
)
iteration_range = self._get_iteration_range(iteration_range)
test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
return self.get_booster().predict(
test_dmatrix,
pred_leaf=True,
iteration_range=iteration_range
)
def evals_result(self) -> Dict[str, Dict[str, List[float]]]:
"""Return the evaluation results.
If **eval_set** is passed to the :py:meth:`fit` function, you can call
``evals_result()`` to get evaluation results for all passed **eval_sets**. When
**eval_metric** is also passed to the :py:meth:`fit` function, the
**evals_result** will contain the **eval_metrics** passed to the :py:meth:`fit`
function.
The returned evaluation result is a dictionary:
.. code-block:: python
{'validation_0': {'logloss': ['0.604835', '0.531479']},
'validation_1': {'logloss': ['0.41965', '0.17686']}}
Returns
-------
evals_result
"""
if getattr(self, "evals_result_", None) is not None:
evals_result = self.evals_result_
else:
raise XGBoostError(
"No evaluation result, `eval_set` is not used during training."
)
return evals_result
@property
def n_features_in_(self) -> int:
"""Number of features seen during :py:meth:`fit`."""
booster = self.get_booster()
return booster.num_features()
@property
def feature_names_in_(self) -> np.ndarray:
"""Names of features seen during :py:meth:`fit`. Defined only when `X` has feature
names that are all strings."""
feature_names = self.get_booster().feature_names
if feature_names is None:
raise AttributeError(
"`feature_names_in_` is defined only when `X` has feature names that "
"are all strings."
)
return np.array(feature_names)
def _early_stopping_attr(self, attr: str) -> Union[float, int]:
booster = self.get_booster()
try:
return getattr(booster, attr)
except AttributeError as e:
raise AttributeError(
f'`{attr}` in only defined when early stopping is used.'
) from e
@property
def best_score(self) -> float:
"""The best score obtained by early stopping."""
return float(self._early_stopping_attr('best_score'))
@property
def best_iteration(self) -> int:
"""The best iteration obtained by early stopping. This attribute is 0-based,
for instance if the best iteration is the first round, then best_iteration is 0.
"""
return int(self._early_stopping_attr('best_iteration'))
@property
def best_ntree_limit(self) -> int:
return int(self._early_stopping_attr('best_ntree_limit'))
@property
def feature_importances_(self) -> np.ndarray:
"""
Feature importances property, return depends on `importance_type` parameter.
Returns
-------
feature_importances_ : array of shape ``[n_features]`` except for multi-class
linear model, which returns an array with shape `(n_features, n_classes)`
"""
b: Booster = self.get_booster()
def dft() -> str:
return "weight" if self.booster == "gblinear" else "gain"
score = b.get_score(
importance_type=self.importance_type if self.importance_type else dft()
)
if b.feature_names is None:
feature_names = [f"f{i}" for i in range(self.n_features_in_)]
else:
feature_names = b.feature_names
# gblinear returns all features so the `get` in next line is only for gbtree.
all_features = [score.get(f, 0.) for f in feature_names]
all_features_arr = np.array(all_features, dtype=np.float32)
total = all_features_arr.sum()
if total == 0:
return all_features_arr
return all_features_arr / total
@property
def coef_(self) -> np.ndarray:
"""
Coefficients property
.. note:: Coefficients are defined only for linear learners
Coefficients are only defined when the linear model is chosen as
base learner (`booster=gblinear`). It is not defined for other base
learner types, such as tree learners (`booster=gbtree`).
Returns
-------
coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
"""
if self.get_params()['booster'] != 'gblinear':
raise AttributeError(
f"Coefficients are not defined for Booster type {self.booster}"
)
b = self.get_booster()
coef = np.array(json.loads(b.get_dump(dump_format='json')[0])['weight'])
# Logic for multiclass classification
n_classes = getattr(self, 'n_classes_', None)
if n_classes is not None:
if n_classes > 2:
assert len(coef.shape) == 1
assert coef.shape[0] % n_classes == 0
coef = coef.reshape((n_classes, -1))
return coef
@property
def intercept_(self) -> np.ndarray:
"""
Intercept (bias) property
.. note:: Intercept is defined only for linear learners
Intercept (bias) is only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types,
such as tree learners (`booster=gbtree`).
Returns
-------
intercept_ : array of shape ``(1,)`` or ``[n_classes]``
"""
if self.get_params()['booster'] != 'gblinear':
raise AttributeError(
f"Intercept (bias) is not defined for Booster type {self.booster}"
)
b = self.get_booster()
return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias'])
PredtT = TypeVar("PredtT", bound=np.ndarray)
def _cls_predict_proba(n_classes: int, prediction: PredtT, vstack: Callable) -> PredtT:
assert len(prediction.shape) <= 2
if len(prediction.shape) == 2 and prediction.shape[1] == n_classes:
# multi-class
return prediction
if (
len(prediction.shape) == 2
and n_classes == 2
and prediction.shape[1] >= n_classes
):
# multi-label
return prediction
# binary logistic function
classone_probs = prediction
classzero_probs = 1.0 - classone_probs
return vstack((classzero_probs, classone_probs)).transpose()
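# Worked sketch (editor illustration): for a binary model the booster returns a
# 1-D vector of P(class 1); the vstack and transpose above expand it into the
# (n_samples, 2) layout scikit-learn expects:
#
#     _cls_predict_proba(2, np.array([0.25, 0.75]), np.vstack)
#     # -> array([[0.75, 0.25],
#     #           [0.25, 0.75]])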
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost classification.",
['model', 'objective'], extra_parameters='''
n_estimators : int
Number of boosting rounds.
''')
class XGBClassifier(XGBModel, XGBClassifierBase):
# pylint: disable=missing-docstring,invalid-name,too-many-instance-attributes
@_deprecate_positional_args
def __init__(
self,
*,
objective: _SklObjective = "binary:logistic",
use_label_encoder: bool = False,
**kwargs: Any
) -> None:
# must match the parameters for `get_params`
self.use_label_encoder = use_label_encoder
if use_label_encoder is True:
raise ValueError("Label encoder was removed in 1.6.")
super().__init__(objective=objective, **kwargs)
@_deprecate_positional_args
def fit(
self,
X: array_like,
y: array_like,
*,
sample_weight: Optional[array_like] = None,
base_margin: Optional[array_like] = None,
eval_set: Optional[Sequence[Tuple[array_like, array_like]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[bool] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[array_like]] = None,
base_margin_eval_set: Optional[Sequence[array_like]] = None,
feature_weights: Optional[array_like] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None
) -> "XGBClassifier":
# pylint: disable = attribute-defined-outside-init,too-many-statements
evals_result: TrainingCallback.EvalsLog = {}
if _is_cudf_df(y) or _is_cudf_ser(y):
import cupy as cp # pylint: disable=E0401
self.classes_ = cp.unique(y.values)
self.n_classes_ = len(self.classes_)
expected_classes = cp.arange(self.n_classes_)
elif _is_cupy_array(y):
import cupy as cp # pylint: disable=E0401
self.classes_ = cp.unique(y)
self.n_classes_ = len(self.classes_)
expected_classes = cp.arange(self.n_classes_)
else:
self.classes_ = np.unique(np.asarray(y))
self.n_classes_ = len(self.classes_)
expected_classes = np.arange(self.n_classes_)
if (
self.classes_.shape != expected_classes.shape
or not (self.classes_ == expected_classes).all()
):
raise ValueError(
f"Invalid classes inferred from unique values of `y`. "
f"Expected: {expected_classes}, got {self.classes_}"
)
params = self.get_xgb_params()
if callable(self.objective):
obj: Optional[
Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]
] = _objective_decorator(self.objective)
# Use default value. Is it really not used ?
params["objective"] = "binary:logistic"
else:
obj = None
if self.n_classes_ > 2:
# Switch to using a multiclass objective in the underlying XGB instance
if params.get("objective", None) != "multi:softmax":
params["objective"] = "multi:softprob"
params["num_class"] = self.n_classes_
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs),
enable_categorical=self.enable_categorical,
)
self._Booster = train(
params,
train_dmatrix,
self.get_num_boosting_rounds(),
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result,
obj=obj,
custom_metric=metric,
verbose_eval=verbose,
xgb_model=model,
callbacks=callbacks,
)
if not callable(self.objective):
self.objective = params["objective"]
self._set_evaluation_result(evals_result)
return self
assert XGBModel.fit.__doc__ is not None
fit.__doc__ = XGBModel.fit.__doc__.replace(
'Fit gradient boosting model',
'Fit gradient boosting classifier', 1)
def predict(
self,
X: array_like,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[array_like] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
class_probs = super().predict(
X=X,
output_margin=output_margin,
ntree_limit=ntree_limit,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
if output_margin:
# If output_margin is active, simply return the scores
return class_probs
if len(class_probs.shape) > 1 and self.n_classes_ != 2:
# multi-class, turns softprob into softmax
column_indexes: np.ndarray = np.argmax(class_probs, axis=1) # type: ignore
elif len(class_probs.shape) > 1 and class_probs.shape[1] != 1:
# multi-label
column_indexes = np.zeros(class_probs.shape)
column_indexes[class_probs > 0.5] = 1
elif self.objective == "multi:softmax":
return class_probs.astype(np.int32)
else:
# turns soft logit into class label
column_indexes = np.repeat(0, class_probs.shape[0])
column_indexes[class_probs > 0.5] = 1
if hasattr(self, '_le'):
return self._le.inverse_transform(column_indexes)
return column_indexes
def predict_proba(
self,
X: array_like,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[array_like] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
""" Predict the probability of each `X` example being of a given class.
.. note:: This function is only thread safe for `gbtree` and `dart`.
Parameters
----------
X : array_like
Feature matrix.
ntree_limit : int
Deprecated, use `iteration_range` instead.
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
base_margin : array_like
Margin added to prediction.
iteration_range :
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying `iteration_range=(10,
20)`, then only the forests built during [10, 20) (half open set) rounds are
used in this prediction.
Returns
-------
prediction :
a numpy array of shape (n_samples, n_classes) with the
probability of each data example being of a given class.
"""
# custom obj: Do nothing as we don't know what to do.
# softprob: Do nothing, output is proba.
# softmax: Unsupported by predict_proba()
# binary:logistic: Expand the prob vector into 2-class matrix after predict.
# binary:logitraw: Unsupported by predict_proba()
if self.objective == "multi:softmax":
# We need to run a Python implementation of softmax for it. Just ask user to
# use softprob since XGBoost's implementation has mitigation for floating
# point overflow. No need to reinvent the wheel.
raise ValueError(
"multi:softmax doesn't support `predict_proba`. "
"Switch to `multi:softproba` instead"
)
class_probs = super().predict(
X=X,
ntree_limit=ntree_limit,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range
)
# If model is loaded from a raw booster there's no `n_classes_`
return _cls_predict_proba(getattr(self, "n_classes_", 0), class_probs, np.vstack)
@xgboost_model_doc(
"scikit-learn API for XGBoost random forest classification.",
['model', 'objective'],
extra_parameters='''
n_estimators : int
Number of trees in random forest to fit.
''')
class XGBRFClassifier(XGBClassifier):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self, *,
learning_rate: float = 1.0,
subsample: float = 0.8,
colsample_bynode: float = 0.8,
reg_lambda: float = 1e-5,
**kwargs: Any
):
super().__init__(learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs)
_check_rf_callback(self.early_stopping_rounds, self.callbacks)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params['num_parallel_tree'] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: array_like,
y: array_like,
*,
sample_weight: Optional[array_like] = None,
base_margin: Optional[array_like] = None,
eval_set: Optional[Sequence[Tuple[array_like, array_like]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[bool] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[array_like]] = None,
base_margin_eval_set: Optional[Sequence[array_like]] = None,
feature_weights: Optional[array_like] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None
) -> "XGBRFClassifier":
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost regression.",
['estimators', 'model', 'objective'])
class XGBRegressor(XGBModel, XGBRegressorBase):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self, *, objective: _SklObjective = "reg:squarederror", **kwargs: Any
) -> None:
super().__init__(objective=objective, **kwargs)
@xgboost_model_doc(
"scikit-learn API for XGBoost random forest regression.",
['model', 'objective'], extra_parameters='''
n_estimators : int
Number of trees in random forest to fit.
''')
class XGBRFRegressor(XGBRegressor):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: float = 1.0,
subsample: float = 0.8,
colsample_bynode: float = 0.8,
reg_lambda: float = 1e-5,
**kwargs: Any
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs
)
_check_rf_callback(self.early_stopping_rounds, self.callbacks)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: array_like,
y: array_like,
*,
sample_weight: Optional[array_like] = None,
base_margin: Optional[array_like] = None,
eval_set: Optional[Sequence[Tuple[array_like, array_like]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[bool] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[array_like]] = None,
base_margin_eval_set: Optional[Sequence[array_like]] = None,
feature_weights: Optional[array_like] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None
) -> "XGBRFRegressor":
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
@xgboost_model_doc(
'Implementation of the Scikit-Learn API for XGBoost Ranking.',
['estimators', 'model'],
end_note='''
.. note::
A custom objective function is currently not supported by XGBRanker.
Likewise, a custom metric function is not supported either.
.. note::
    For ranking tasks, query group information is required, and can be supplied
    through either the `group` parameter or the `qid` parameter of the `fit` method.
    Before fitting the model, your data needs to be sorted by query group. When fitting
    the model, you need to provide an additional array that contains the size of each
    query group.
For example, if your original data look like:
+-------+-----------+---------------+
| qid | label | features |
+-------+-----------+---------------+
| 1 | 0 | x_1 |
+-------+-----------+---------------+
| 1 | 1 | x_2 |
+-------+-----------+---------------+
| 1 | 0 | x_3 |
+-------+-----------+---------------+
| 2 | 0 | x_4 |
+-------+-----------+---------------+
| 2 | 1 | x_5 |
+-------+-----------+---------------+
| 2 | 1 | x_6 |
+-------+-----------+---------------+
| 2 | 1 | x_7 |
+-------+-----------+---------------+
then your group array should be ``[3, 4]``. Sometimes using query id (`qid`)
instead of group can be more convenient.
''')
class XGBRanker(XGBModel, XGBRankerMixIn):
# pylint: disable=missing-docstring,too-many-arguments,invalid-name
@_deprecate_positional_args
def __init__(self, *, objective: str = "rank:pairwise", **kwargs: Any):
super().__init__(objective=objective, **kwargs)
if callable(self.objective):
raise ValueError("custom objective function not supported by XGBRanker")
if "rank:" not in objective:
raise ValueError("please use XGBRanker for ranking task")
@_deprecate_positional_args
def fit(
self,
X: array_like,
y: array_like,
*,
group: Optional[array_like] = None,
qid: Optional[array_like] = None,
sample_weight: Optional[array_like] = None,
base_margin: Optional[array_like] = None,
eval_set: Optional[Sequence[Tuple[array_like, array_like]]] = None,
eval_group: Optional[Sequence[array_like]] = None,
eval_qid: Optional[Sequence[array_like]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[bool] = False,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[array_like]] = None,
base_margin_eval_set: Optional[Sequence[array_like]] = None,
feature_weights: Optional[array_like] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None
) -> "XGBRanker":
# pylint: disable = attribute-defined-outside-init,arguments-differ
"""Fit gradient boosting ranker
Note that calling ``fit()`` multiple times will cause the model object to be
re-fit from scratch. To resume training from a previous checkpoint, explicitly
pass ``xgb_model`` argument.
Parameters
----------
X :
Feature matrix
y :
Labels
group :
Size of each query group of training data. Should have as many elements as the
            query groups in the training data. If this is set to None, then the user must
            provide qid.
        qid :
            Query ID for each training sample. Should have the size of n_samples. If
            this is set to None, then the user must provide group.
sample_weight :
Query group weights
.. note:: Weights are per-group for ranking tasks
In ranking task, one weight is assigned to each query group/id (not each
data point). This is because we only care about the relative ordering of
data points within each group, so it doesn't make sense to assign weights
to individual data points.
base_margin :
Global bias for each instance.
eval_set :
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_group :
A list in which ``eval_group[i]`` is the list containing the sizes of all
query groups in the ``i``-th pair in **eval_set**.
eval_qid :
A list in which ``eval_qid[i]`` is the array containing query ID of ``i``-th
pair in **eval_set**.
eval_metric : str, list of str, optional
.. deprecated:: 1.6.0
use `eval_metric` in :py:meth:`__init__` or :py:meth:`set_params` instead.
early_stopping_rounds : int
.. deprecated:: 1.6.0
use `early_stopping_rounds` in :py:meth:`__init__` or
:py:meth:`set_params` instead.
verbose :
If `verbose` and an evaluation set is used, writes the evaluation metric
measured on the validation set to stderr.
xgb_model :
file name of stored XGBoost model or 'Booster' instance XGBoost model to be
loaded before training (allows training continuation).
sample_weight_eval_set :
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
group weights on the i-th validation set.
.. note:: Weights are per-group for ranking tasks
In ranking task, one weight is assigned to each query group (not each
data point). This is because we only care about the relative ordering of
data points within each group, so it doesn't make sense to assign
weights to individual data points.
base_margin_eval_set :
A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
object storing base margin for the i-th validation set.
feature_weights :
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown.
callbacks :
.. deprecated:: 1.6.0
Use `callbacks` in :py:meth:`__init__` or :py:meth:`set_params` instead.
"""
# check if group information is provided
if group is None and qid is None:
raise ValueError("group or qid is required for ranking task")
if eval_set is not None:
if eval_group is None and eval_qid is None:
raise ValueError(
"eval_group or eval_qid is required if eval_set is not None")
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=group,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=eval_group,
eval_qid=eval_qid,
create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs),
enable_categorical=self.enable_categorical,
)
evals_result: TrainingCallback.EvalsLog = {}
params = self.get_xgb_params()
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
if callable(metric):
raise ValueError(
'Custom evaluation metric is not yet supported for XGBRanker.'
)
self._Booster = train(
params,
train_dmatrix,
self.get_num_boosting_rounds(),
early_stopping_rounds=early_stopping_rounds,
evals=evals,
evals_result=evals_result,
custom_metric=metric,
verbose_eval=verbose, xgb_model=model,
callbacks=callbacks
)
self.objective = params["objective"]
self._set_evaluation_result(evals_result)
return self
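# --- Hedged usage sketch (not part of the upstream module) ---
# Demonstrates the query group layout described in the XGBRanker docs above:
# the `qid` array below is equivalent to passing `group=[3, 4]`, and the rows
# are already sorted by query id as required. All values are made up.
def _example_ranker_usage():  # pragma: no cover
    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(7, 4))              # 7 samples, 4 features
    y = np.array([0, 1, 0, 0, 1, 1, 1])      # relevance labels
    qid = np.array([1, 1, 1, 2, 2, 2, 2])    # two query groups of sizes 3 and 4

    ranker = XGBRanker(n_estimators=10)
    ranker.fit(X, y, qid=qid)
    return ranker.predict(X)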
| dmlc/xgboost | python-package/xgboost/sklearn.py | Python | apache-2.0 | 71,896 | 0.002031 |
import os
import cv2
import time
drone_output_path_string = "./images/intruder-detection/drone-output/drone-output.png"
detected_image_path_string = "./images/intruder-detection/detected/intruder-detected.png"
full_body_haar_cascade_path_string = "./node_modules/opencv/data/haarcascade_fullbody.xml"
def clear_directories():
if os.path.exists(drone_output_path_string):
os.remove(drone_output_path_string)
if os.path.exists(detected_image_path_string):
os.remove(detected_image_path_string)
def detect_intruders():
time.sleep(0.5)
drone_output_image = cv2.imread(drone_output_path_string)
intruder_classifier = cv2.CascadeClassifier(full_body_haar_cascade_path_string)
intruders = intruder_classifier.detectMultiScale(drone_output_image)
if len(intruders) > 0:
for (x, y, w, h) in intruders:
cv2.rectangle(drone_output_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite(detected_image_path_string, drone_output_image)
os.remove(drone_output_path_string)
def main():
clear_directories()
    while True:
        if os.path.exists(drone_output_path_string):
            detect_intruders()
        # Sleep briefly so the polling loop does not spin at 100% CPU.
        time.sleep(0.1)
if __name__ == '__main__':
main()
| CianDuffy/nodecopter-security | python/pedestrian_detect.py | Python | mit | 1,228 | 0.004886 |
"""
This is a direct translation of nvvm.h
"""
import logging
import re
import sys
from ctypes import (c_void_p, c_int, POINTER, c_char_p, c_size_t, byref,
c_char)
import threading
from llvmlite import ir
from .error import NvvmError, NvvmSupportError
from .libs import get_libdevice, open_libdevice, open_cudalib
from numba.core import config
logger = logging.getLogger(__name__)
ADDRSPACE_GENERIC = 0
ADDRSPACE_GLOBAL = 1
ADDRSPACE_SHARED = 3
ADDRSPACE_CONSTANT = 4
ADDRSPACE_LOCAL = 5
# Opaque handle for compilation unit
nvvm_program = c_void_p
# Result code
nvvm_result = c_int
RESULT_CODE_NAMES = '''
NVVM_SUCCESS
NVVM_ERROR_OUT_OF_MEMORY
NVVM_ERROR_PROGRAM_CREATION_FAILURE
NVVM_ERROR_IR_VERSION_MISMATCH
NVVM_ERROR_INVALID_INPUT
NVVM_ERROR_INVALID_PROGRAM
NVVM_ERROR_INVALID_IR
NVVM_ERROR_INVALID_OPTION
NVVM_ERROR_NO_MODULE_IN_PROGRAM
NVVM_ERROR_COMPILATION
'''.split()
for i, k in enumerate(RESULT_CODE_NAMES):
setattr(sys.modules[__name__], k, i)
def is_available():
"""
    Return whether libNVVM is available
"""
try:
NVVM()
except NvvmSupportError:
return False
else:
return True
_nvvm_lock = threading.Lock()
class NVVM(object):
'''Process-wide singleton.
'''
_PROTOTYPES = {
# nvvmResult nvvmVersion(int *major, int *minor)
'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)),
# nvvmResult nvvmCreateProgram(nvvmProgram *cu)
'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmDestroyProgram(nvvmProgram *cu)
'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer,
# size_t size, const char *name)
'nvvmAddModuleToProgram': (
nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p),
# nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions,
# const char **options)
'nvvmCompileProgram': (
nvvm_result, nvvm_program, c_int, POINTER(c_char_p)),
# nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetCompiledResultSize': (
nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer)
'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p),
# nvvmResult nvvmGetProgramLogSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer)
'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p),
}
# Singleton reference
__INSTANCE = None
def __new__(cls):
with _nvvm_lock:
if cls.__INSTANCE is None:
cls.__INSTANCE = inst = object.__new__(cls)
try:
inst.driver = open_cudalib('nvvm')
except OSError as e:
cls.__INSTANCE = None
errmsg = ("libNVVM cannot be found. Do `conda install "
"cudatoolkit`:\n%s")
raise NvvmSupportError(errmsg % e)
# Find & populate functions
for name, proto in inst._PROTOTYPES.items():
func = getattr(inst.driver, name)
func.restype = proto[0]
func.argtypes = proto[1:]
setattr(inst, name, func)
return cls.__INSTANCE
def get_version(self):
major = c_int()
minor = c_int()
err = self.nvvmVersion(byref(major), byref(minor))
self.check_error(err, 'Failed to get version.')
return major.value, minor.value
def check_error(self, error, msg, exit=False):
if error:
exc = NvvmError(msg, RESULT_CODE_NAMES[error])
if exit:
print(exc)
sys.exit(1)
else:
raise exc
class CompilationUnit(object):
def __init__(self):
self.driver = NVVM()
self._handle = nvvm_program()
err = self.driver.nvvmCreateProgram(byref(self._handle))
self.driver.check_error(err, 'Failed to create CU')
def __del__(self):
driver = NVVM()
err = driver.nvvmDestroyProgram(byref(self._handle))
driver.check_error(err, 'Failed to destroy CU', exit=True)
def add_module(self, buffer):
"""
Add a module level NVVM IR to a compilation unit.
- The buffer should contain an NVVM module IR either in the bitcode
representation (LLVM3.0) or in the text representation.
"""
err = self.driver.nvvmAddModuleToProgram(self._handle, buffer,
len(buffer), None)
self.driver.check_error(err, 'Failed to add module')
def compile(self, **options):
"""Perform Compilation
The valid compiler options are
* - -g (enable generation of debugging information)
* - -opt=
* - 0 (disable optimizations)
* - 3 (default, enable optimizations)
* - -arch=
* - compute_20 (default)
* - compute_30
* - compute_35
* - -ftz=
* - 0 (default, preserve denormal values, when performing
* single-precision floating-point operations)
* - 1 (flush denormal values to zero, when performing
* single-precision floating-point operations)
* - -prec-sqrt=
* - 0 (use a faster approximation for single-precision
* floating-point square root)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point square root)
* - -prec-div=
* - 0 (use a faster approximation for single-precision
* floating-point division and reciprocals)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point division and reciprocals)
* - -fma=
* - 0 (disable FMA contraction)
* - 1 (default, enable FMA contraction)
*
"""
# stringify options
opts = []
if 'debug' in options:
if options.pop('debug'):
opts.append('-g')
if 'opt' in options:
opts.append('-opt=%d' % options.pop('opt'))
if options.get('arch'):
opts.append('-arch=%s' % options.pop('arch'))
other_options = (
'ftz',
'prec_sqrt',
'prec_div',
'fma',
)
for k in other_options:
if k in options:
v = int(bool(options.pop(k)))
opts.append('-%s=%d' % (k.replace('_', '-'), v))
# If there are any option left
if options:
optstr = ', '.join(map(repr, options.keys()))
raise NvvmError("unsupported option {0}".format(optstr))
# compile
c_opts = (c_char_p * len(opts))(*[c_char_p(x.encode('utf8'))
for x in opts])
err = self.driver.nvvmCompileProgram(self._handle, len(opts), c_opts)
self._try_error(err, 'Failed to compile\n')
# get result
reslen = c_size_t()
err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen))
self._try_error(err, 'Failed to get size of compiled result.')
ptxbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetCompiledResult(self._handle, ptxbuf)
self._try_error(err, 'Failed to get compiled result.')
# get log
self.log = self.get_log()
return ptxbuf[:]
def _try_error(self, err, msg):
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
def get_log(self):
reslen = c_size_t()
err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen))
self.driver.check_error(err, 'Failed to get compilation log size.')
if reslen.value > 1:
logbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetProgramLog(self._handle, logbuf)
self.driver.check_error(err, 'Failed to get compilation log.')
return logbuf.value.decode('utf8') # populate log attribute
return ''
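# --- Hedged usage sketch (not part of the upstream module) ---
# Shows the compilation workflow documented in `CompilationUnit.compile`:
# add an NVVM IR module plus libdevice, then compile to PTX with a couple of
# the documented options. `nvvm_ir` stands in for a real module string and a
# working libNVVM installation is assumed, so this is illustrative only.
def _example_compile_to_ptx(nvvm_ir):  # pragma: no cover
    cu = CompilationUnit()
    cu.add_module(nvvm_ir.encode('utf8'))
    cu.add_module(LibDevice(arch='compute_35').get())
    # -opt=3 and -arch are the options described in the docstring above;
    # ftz=True flushes single-precision denormals to zero.
    return cu.compile(opt=3, arch='compute_35', ftz=True)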
data_layout = {
32: ('e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-'
'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64'),
64: ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-'
'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64')}
default_data_layout = data_layout[tuple.__itemsize__ * 8]
_supported_cc = None
def get_supported_ccs():
global _supported_cc
if _supported_cc:
return _supported_cc
try:
from numba.cuda.cudadrv.runtime import runtime
cudart_version_major = runtime.get_version()[0]
except: # noqa: E722
# The CUDA Runtime may not be present
cudart_version_major = 0
# List of supported compute capability in sorted order
if cudart_version_major == 0:
_supported_cc = (),
elif cudart_version_major < 9:
# CUDA 8.x
_supported_cc = (2, 0), (2, 1), (3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1), (6, 2) # noqa: E501
elif cudart_version_major < 10:
# CUDA 9.x
_supported_cc = (3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1), (6, 2), (7, 0) # noqa: E501
elif cudart_version_major < 11:
# CUDA 10.x
_supported_cc = (3, 0), (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1), (6, 2), (7, 0), (7, 2), (7, 5) # noqa: E501
else:
# CUDA 11.0 and later
_supported_cc = (3, 5), (5, 0), (5, 2), (5, 3), (6, 0), (6, 1), (6, 2), (7, 0), (7, 2), (7, 5), (8, 0) # noqa: E501
return _supported_cc
def find_closest_arch(mycc):
"""
Given a compute capability, return the closest compute capability supported
by the CUDA toolkit.
:param mycc: Compute capability as a tuple ``(MAJOR, MINOR)``
:return: Closest supported CC as a tuple ``(MAJOR, MINOR)``
"""
supported_cc = get_supported_ccs()
for i, cc in enumerate(supported_cc):
if cc == mycc:
# Matches
return cc
elif cc > mycc:
# Exceeded
if i == 0:
# CC lower than supported
msg = "GPU compute capability %d.%d is not supported" \
"(requires >=%d.%d)" % (mycc + cc)
raise NvvmSupportError(msg)
else:
# return the previous CC
return supported_cc[i - 1]
# CC higher than supported
return supported_cc[-1] # Choose the highest
def get_arch_option(major, minor):
"""Matches with the closest architecture option
"""
if config.FORCE_CUDA_CC:
arch = config.FORCE_CUDA_CC
else:
arch = find_closest_arch((major, minor))
return 'compute_%d%d' % arch
MISSING_LIBDEVICE_FILE_MSG = '''Missing libdevice file for {arch}.
Please ensure you have package cudatoolkit >= 8.
Install package by:
conda install cudatoolkit
'''
class LibDevice(object):
_cache_ = {}
_known_arch = [
"compute_20",
"compute_30",
"compute_35",
"compute_50",
]
def __init__(self, arch):
"""
arch --- must be result from get_arch_option()
"""
if arch not in self._cache_:
arch = self._get_closest_arch(arch)
if get_libdevice(arch) is None:
raise RuntimeError(MISSING_LIBDEVICE_FILE_MSG.format(arch=arch))
self._cache_[arch] = open_libdevice(arch)
self.arch = arch
self.bc = self._cache_[arch]
def _get_closest_arch(self, arch):
res = self._known_arch[0]
for potential in self._known_arch:
if arch >= potential:
res = potential
return res
def get(self):
return self.bc
ir_numba_cas_hack = """
define internal i32 @___numba_cas_hack(i32* %ptr, i32 %cmp, i32 %val) alwaysinline {
%out = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %val monotonic
ret i32 %out
}
""" # noqa: E501
# Translation of code from CUDA Programming Guide v6.5, section B.12
ir_numba_atomic_double_add = """
define internal double @___numba_atomic_double_add(double* %ptr, double %val) alwaysinline {
entry:
%iptr = bitcast double* %ptr to i64*
%old2 = load volatile i64, i64* %iptr
br label %attempt
attempt:
%old = phi i64 [ %old2, %entry ], [ %cas, %attempt ]
%dold = bitcast i64 %old to double
%dnew = fadd double %dold, %val
%new = bitcast double %dnew to i64
%cas = cmpxchg volatile i64* %iptr, i64 %old, i64 %new monotonic
%repeat = icmp ne i64 %cas, %old
br i1 %repeat, label %attempt, label %done
done:
%result = bitcast i64 %old to double
ret double %result
}
""" # noqa: E501
ir_numba_atomic_minmax = """
define internal {T} @___numba_atomic_{T}_{NAN}{FUNC}({T}* %ptr, {T} %val) alwaysinline {{
entry:
%ptrval = load volatile {T}, {T}* %ptr
; Return early when:
; - For nanmin / nanmax when val is a NaN
; - For min / max when val or ptr is a NaN
%early_return = fcmp uno {T} %val, %{PTR_OR_VAL}val
br i1 %early_return, label %done, label %lt_check
lt_check:
%dold = phi {T} [ %ptrval, %entry ], [ %dcas, %attempt ]
; Continue attempts if dold less or greater than val (depending on whether min or max)
; or if dold is NaN (for nanmin / nanmax)
%cmp = fcmp {OP} {T} %dold, %val
br i1 %cmp, label %attempt, label %done
attempt:
; Attempt to swap in the value
%iold = bitcast {T} %dold to {Ti}
%iptr = bitcast {T}* %ptr to {Ti}*
%ival = bitcast {T} %val to {Ti}
%cas = cmpxchg volatile {Ti}* %iptr, {Ti} %iold, {Ti} %ival monotonic
%dcas = bitcast {Ti} %cas to {T}
br label %lt_check
done:
ret {T} %ptrval
}}
""" # noqa: E501
def _replace_datalayout(llvmir):
"""
Find the line containing the datalayout and replace it
"""
lines = llvmir.splitlines()
for i, ln in enumerate(lines):
if ln.startswith("target datalayout"):
tmp = 'target datalayout = "{0}"'
lines[i] = tmp.format(default_data_layout)
break
return '\n'.join(lines)
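# Hedged illustration of `_replace_datalayout` on a made-up IR fragment: the
# existing `target datalayout = "..."` line is swapped for the layout string
# NVVM expects and everything else is left untouched.
def _example_replace_datalayout():  # pragma: no cover
    sample_ir = (
        'target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"\n'
        'target triple = "nvptx64-nvidia-cuda"\n'
    )
    return _replace_datalayout(sample_ir)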
def llvm_to_ptx(llvmir, **opts):
if opts.pop('fastmath', False):
opts.update({
'ftz': True,
'fma': True,
'prec_div': False,
'prec_sqrt': False,
})
cu = CompilationUnit()
libdevice = LibDevice(arch=opts.get('arch', 'compute_20'))
    # Newer LLVM generates a shorthand for the datalayout that NVVM does not understand
llvmir = _replace_datalayout(llvmir)
# Replace with our cmpxchg and atomic implementations because LLVM 3.5 has
# a new semantic for cmpxchg.
replacements = [
('declare i32 @___numba_cas_hack(i32*, i32, i32)',
ir_numba_cas_hack),
('declare double @___numba_atomic_double_add(double*, double)',
ir_numba_atomic_double_add),
('declare float @___numba_atomic_float_max(float*, float)',
ir_numba_atomic_minmax.format(T='float', Ti='i32', NAN='',
OP='nnan olt', PTR_OR_VAL='ptr',
FUNC='max')),
('declare double @___numba_atomic_double_max(double*, double)',
ir_numba_atomic_minmax.format(T='double', Ti='i64', NAN='',
OP='nnan olt', PTR_OR_VAL='ptr',
FUNC='max')),
('declare float @___numba_atomic_float_min(float*, float)',
ir_numba_atomic_minmax.format(T='float', Ti='i32', NAN='',
OP='nnan ogt', PTR_OR_VAL='ptr',
FUNC='min')),
('declare double @___numba_atomic_double_min(double*, double)',
ir_numba_atomic_minmax.format(T='double', Ti='i64', NAN='',
OP='nnan ogt', PTR_OR_VAL='ptr',
FUNC='min')),
('declare float @___numba_atomic_float_nanmax(float*, float)',
ir_numba_atomic_minmax.format(T='float', Ti='i32', NAN='nan',
OP='ult', PTR_OR_VAL='', FUNC='max')),
('declare double @___numba_atomic_double_nanmax(double*, double)',
ir_numba_atomic_minmax.format(T='double', Ti='i64', NAN='nan',
OP='ult', PTR_OR_VAL='', FUNC='max')),
('declare float @___numba_atomic_float_nanmin(float*, float)',
ir_numba_atomic_minmax.format(T='float', Ti='i32', NAN='nan',
OP='ugt', PTR_OR_VAL='', FUNC='min')),
('declare double @___numba_atomic_double_nanmin(double*, double)',
ir_numba_atomic_minmax.format(T='double', Ti='i64', NAN='nan',
OP='ugt', PTR_OR_VAL='', FUNC='min')),
('immarg', '')
]
for decl, fn in replacements:
llvmir = llvmir.replace(decl, fn)
# llvm.numba_nvvm.atomic is used to prevent LLVM 9 onwards auto-upgrading
# these intrinsics into atomicrmw instructions, which are not recognized by
# NVVM. We can now replace them with the real intrinsic names, ready to
# pass to NVVM.
llvmir = llvmir.replace('llvm.numba_nvvm.atomic', 'llvm.nvvm.atomic')
llvmir = llvm39_to_34_ir(llvmir)
cu.add_module(llvmir.encode('utf8'))
cu.add_module(libdevice.get())
ptx = cu.compile(**opts)
# XXX remove debug_pubnames seems to be necessary sometimes
return patch_ptx_debug_pubnames(ptx)
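# --- Hedged usage sketch (not part of the upstream module) ---
# `llvm_to_ptx` is normally handed the textual LLVM IR of a numba-generated
# module. The `fastmath` flag expands into the ftz/fma/prec-div/prec-sqrt
# options handled above. `llvm_ir` is a placeholder for a real module string,
# so this sketch is illustrative only.
def _example_llvm_to_ptx(llvm_ir):  # pragma: no cover
    ptx = llvm_to_ptx(llvm_ir, arch='compute_35', fastmath=True)
    return ptx.decode('utf-8', errors='replace')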
def patch_ptx_debug_pubnames(ptx):
"""
Patch PTX to workaround .debug_pubnames NVVM error::
ptxas fatal : Internal error: overlapping non-identical data
"""
while True:
# Repeatedly remove debug_pubnames sections
start = ptx.find(b'.section .debug_pubnames')
if start < 0:
break
stop = ptx.find(b'}', start)
if stop < 0:
raise ValueError('missing "}"')
ptx = ptx[:start] + ptx[stop + 1:]
return ptx
re_metadata_def = re.compile(r"\!\d+\s*=")
re_metadata_correct_usage = re.compile(r"metadata\s*\![{'\"0-9]")
re_metadata_ref = re.compile(r"\!\d+")
debuginfo_pattern = r"\!{i32 \d, \!\"Debug Info Version\", i32 \d}"
re_metadata_debuginfo = re.compile(debuginfo_pattern.replace(' ', r'\s+'))
re_attributes_def = re.compile(r"^attributes #\d+ = \{ ([\w\s]+)\ }")
supported_attributes = {'alwaysinline', 'cold', 'inlinehint', 'minsize',
'noduplicate', 'noinline', 'noreturn', 'nounwind',
                        'optnone', 'optsize', 'readnone', 'readonly'}
re_getelementptr = re.compile(r"\bgetelementptr\s(?:inbounds )?\(?")
re_load = re.compile(r"=\s*\bload\s(?:\bvolatile\s)?")
re_call = re.compile(r"(call\s[^@]+\))(\s@)")
re_range = re.compile(r"\s*!range\s+!\d+")
re_type_tok = re.compile(r"[,{}()[\]]")
re_annotations = re.compile(r"\bnonnull\b")
re_unsupported_keywords = re.compile(r"\b(local_unnamed_addr|writeonly)\b")
re_parenthesized_list = re.compile(r"\((.*)\)")
def llvm39_to_34_ir(ir):
"""
Convert LLVM 3.9 IR for LLVM 3.4.
"""
def parse_out_leading_type(s):
par_level = 0
pos = 0
# Parse out the first <ty> (which may be an aggregate type)
while True:
m = re_type_tok.search(s, pos)
if m is None:
# End of line
raise RuntimeError("failed parsing leading type: %s" % (s,))
pos = m.end()
tok = m.group(0)
if tok == ',':
if par_level == 0:
# End of operand
break
elif tok in '{[(':
par_level += 1
elif tok in ')]}':
par_level -= 1
return s[pos:].lstrip()
buf = []
for line in ir.splitlines():
# Fix llvm.dbg.cu
if line.startswith('!numba.llvm.dbg.cu'):
line = line.replace('!numba.llvm.dbg.cu', '!llvm.dbg.cu')
# We insert a dummy inlineasm to put debuginfo
if (line.lstrip().startswith('tail call void asm sideeffect "// dbg')
and '!numba.dbg' in line):
# Fix the metadata
line = line.replace('!numba.dbg', '!dbg')
if re_metadata_def.match(line):
# Rewrite metadata since LLVM 3.7 dropped the "metadata" type prefix
if None is re_metadata_correct_usage.search(line):
# Reintroduce the "metadata" prefix
line = line.replace('!{', 'metadata !{')
line = line.replace('!"', 'metadata !"')
assigpos = line.find('=')
lhs, rhs = line[:assigpos + 1], line[assigpos + 1:]
# Fix metadata reference
def fix_metadata_ref(m):
return 'metadata ' + m.group(0)
line = ' '.join((lhs,
re_metadata_ref.sub(fix_metadata_ref, rhs)))
if line.startswith('source_filename ='):
continue # skip line
if re_unsupported_keywords.search(line) is not None:
line = re_unsupported_keywords.sub(lambda m: '', line)
if line.startswith('attributes #'):
# Remove function attributes unsupported pre-3.8
m = re_attributes_def.match(line)
attrs = m.group(1).split()
attrs = ' '.join(a for a in attrs if a in supported_attributes)
line = line.replace(m.group(1), attrs)
if 'getelementptr ' in line:
# Rewrite "getelementptr ty, ty* ptr, ..."
# to "getelementptr ty *ptr, ..."
m = re_getelementptr.search(line)
if m is None:
raise RuntimeError("failed parsing getelementptr: %s" % (line,))
pos = m.end()
line = line[:pos] + parse_out_leading_type(line[pos:])
if 'load ' in line:
# Rewrite "load ty, ty* ptr"
# to "load ty *ptr"
m = re_load.search(line)
if m:
pos = m.end()
line = line[:pos] + parse_out_leading_type(line[pos:])
if 'call ' in line:
# Rewrite "call ty (...) @foo"
# to "call ty (...)* @foo"
line = re_call.sub(r"\1*\2", line)
# no !range metadata on calls
line = re_range.sub('', line).rstrip(',')
if '@llvm.memset' in line:
line = re_parenthesized_list.sub(
_replace_llvm_memset_usage,
line,
)
if 'declare' in line:
if '@llvm.memset' in line:
line = re_parenthesized_list.sub(
_replace_llvm_memset_declaration,
line,
)
# Remove unknown annotations
line = re_annotations.sub('', line)
buf.append(line)
return '\n'.join(buf)
def _replace_llvm_memset_usage(m):
"""Replace `llvm.memset` usage for llvm7+.
    Used as the replacement function for `re.sub`.
"""
params = list(m.group(1).split(','))
align_attr = re.search(r'align (\d+)', params[0])
if not align_attr:
raise ValueError("No alignment attribute found on memset dest")
else:
align = align_attr.group(1)
params.insert(-1, 'i32 {}'.format(align))
out = ', '.join(params)
return '({})'.format(out)
def _replace_llvm_memset_declaration(m):
"""Replace `llvm.memset` declaration for llvm7+.
    Used as the replacement function for `re.sub`.
"""
params = list(m.group(1).split(','))
params.insert(-1, 'i32')
out = ', '.join(params)
return '({})'.format(out)
def set_cuda_kernel(lfunc):
from llvmlite.llvmpy.core import MetaData, MetaDataString, Constant, Type
m = lfunc.module
ops = lfunc, MetaDataString.get(m, "kernel"), Constant.int(Type.int(), 1)
md = MetaData.get(m, ops)
nmd = m.get_or_insert_named_metadata('nvvm.annotations')
nmd.add(md)
# set nvvm ir version
i32 = ir.IntType(32)
md_ver = m.add_metadata([i32(1), i32(2), i32(2), i32(0)])
m.add_named_metadata('nvvmir.version', md_ver)
def fix_data_layout(module):
module.data_layout = default_data_layout
| sklam/numba | numba/cuda/cudadrv/nvvm.py | Python | bsd-2-clause | 24,855 | 0.000523 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| edisonlz/fruit | web_project/base/site-packages/django/conf/locale/mk/formats.py | Python | apache-2.0 | 1,758 | 0.001138 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for implementing the `datalab create` command."""
from __future__ import absolute_import
import json
import os
import subprocess
import sys
import tempfile
from . import connect, utils
try:
# If we are running in Python 2, builtins is available in 'future'.
from builtins import input as read_input
except Exception:
# We don't want to require the installation of future, so fallback
# to using raw_input from Py2.
read_input = raw_input # noqa: F821
description = ("""`{0} {1}` creates a new Datalab instances running in a Google
Compute Engine VM.
This command also creates the 'datalab-network' network if necessary.
By default, the command creates a persistent connection to the newly
created instance. You can disable that behavior by passing in the
'--no-connect' flag.""")
_DATALAB_NETWORK = 'datalab-network'
_DATALAB_NETWORK_DESCRIPTION = 'Network for Google Cloud Datalab instances'
_DATALAB_FIREWALL_RULE_TEMPLATE = '{0}-allow-ssh'
_DATALAB_FIREWALL_RULE_DESCRIPTION = 'Allow SSH access to Datalab instances'
_DATALAB_UNEXPECTED_FIREWALLS_WARNING_TEMPLATE = (
'The network `{0}` has firewall rules that were not created by the '
'`datalab` command line tool. Instances created in that network may '
'be open to traffic that they should not be exposed to.')
_DATALAB_DEFAULT_DISK_SIZE_GB = 200
_DATALAB_DISK_DESCRIPTION = (
'Persistent disk for a Google Cloud Datalab instance')
_DATALAB_NOTEBOOKS_REPOSITORY = 'datalab-notebooks'
_DATALAB_STARTUP_SCRIPT = """#!/bin/bash
# First, make sure the `datalab` and `logger` users exist with their
# home directories setup correctly.
useradd datalab -u 2000 || useradd datalab
useradd logger -u 2001 || useradd logger
# In case the instance has started before, the `/home/datalab` directory
# may already exist, but with the incorrect user ID (since `/etc/passwd`
# is saved in a tmpfs and changes after restarts). To account for that,
# we should force the file ownership under `/home/datalab` to match
# the current UID for the `datalab` user.
chown -R datalab /home/datalab
chown -R logger /home/logger
PERSISTENT_DISK_DEV="/dev/disk/by-id/google-datalab-pd"
MOUNT_DIR="/mnt/disks/datalab-pd"
MOUNT_CMD="mount -o discard,defaults ${{PERSISTENT_DISK_DEV}} ${{MOUNT_DIR}}"
download_docker_image() {{
# Since /root/.docker is not writable on the default image,
# we need to set HOME to be a writable directory. This same
# directory is used later on by the datalab.service.
export OLD_HOME=$HOME
export HOME=/home/datalab
echo "Getting Docker credentials"
docker-credential-gcr configure-docker
echo "Pulling latest image: {0}"
docker pull {0}
export HOME=$OLD_HOME
}}
clone_repo() {{
echo "Creating the datalab directory"
mkdir -p ${{MOUNT_DIR}}/content/datalab
echo "Cloning the repo {1}"
docker run --rm -v "${{MOUNT_DIR}}/content:/content" \
--entrypoint "/bin/bash" {0} \
gcloud source repos clone {1} /content/datalab/notebooks
}}
repo_is_populated() {{
cd ${{MOUNT_DIR}}/content/datalab/notebooks
git show-ref --quiet
}}
populate_repo() {{
echo "Populating datalab-notebooks repo"
docker run --rm -v "${{MOUNT_DIR}}/content:/content" \
--workdir=/content/datalab/notebooks \
--entrypoint "/bin/bash" {0} -c "\
echo '.ipynb_checkpoints' >> .gitignore; \
echo '*.pyc' >> .gitignore; \
echo '# Project Notebooks' >> README.md; \
git add .gitignore README.md; \
git -c user.email=nobody -c user.name=Datalab \
commit --message='Set up initial notebook repo.'; \
git push origin master; \
"
}}
format_disk() {{
echo "Formatting the persistent disk"
mkfs.ext4 -F \
-E lazy_itable_init=0,lazy_journal_init=0,discard \
${{PERSISTENT_DISK_DEV}}
${{MOUNT_CMD}}
clone_repo
if ! repo_is_populated; then
populate_repo
fi
}}
checked_format_disk() {{
echo "Checking if the persistent disk needs to be formatted"
if [ -z "$(blkid ${{PERSISTENT_DISK_DEV}})" ]; then
format_disk
else
echo "Disk already formatted, but mounting failed; rebooting..."
# The mount failed, but the disk seems to already
# be formatted. Reboot the machine to try again.
reboot now
fi
}}
mount_and_prepare_disk() {{
echo "Trying to mount the persistent disk"
mkdir -p "${{MOUNT_DIR}}"
${{MOUNT_CMD}} || checked_format_disk
if [ -z "$(mount | grep ${{MOUNT_DIR}})" ]; then
echo "Failed to mount the persistent disk; rebooting..."
reboot now
fi
chmod a+w "${{MOUNT_DIR}}"
mkdir -p "${{MOUNT_DIR}}/content"
old_dir="${{MOUNT_DIR}}/datalab"
new_dir="${{MOUNT_DIR}}/content/datalab"
if [ -d "${{old_dir}}" ] && [ ! -d "${{new_dir}}" ]; then
echo "Moving ${{old_dir}} to ${{new_dir}}"
mv "${{old_dir}}" "${{new_dir}}"
else
echo "Creating ${{new_dir}}"
mkdir -p "${{new_dir}}"
fi
}}
configure_swap() {{
if [ "{2}" == "false" ]; then
return
fi
mem_total_line=`cat /proc/meminfo | grep MemTotal`
mem_total_value=`echo "${{mem_total_line}}" | cut -d ':' -f 2`
memory_kb=`echo "${{mem_total_value}}" | cut -d 'k' -f 1 | tr -d '[:space:]'`
# Before proceeding, check if we have more disk than memory.
# Specifically, if the free space on disk is not N times the
# size of memory, then enabling swap makes no sense.
#
# Arbitrarily choosing a value of N=10
disk_kb_cutoff=`expr 10 "*" ${{memory_kb}}`
disk_kb_available=`df --output=avail ${{MOUNT_DIR}} | tail -n 1`
if [ "${{disk_kb_available}}" -lt "${{disk_kb_cutoff}}" ]; then
return
fi
swapfile="${{MOUNT_DIR}}/swapfile"
# Create the swapfile if it is either missing or not big enough
current_size="0"
if [ -e "${{swapfile}}" ]; then
current_size=`ls -s ${{swapfile}} | cut -d ' ' -f 1`
fi
if [ "${{memory_kb}}" -gt "${{current_size}}" ]; then
echo "Creating a ${{memory_kb}} kilobyte swapfile at ${{swapfile}}"
dd if=/dev/zero of="${{swapfile}}" bs=1024 count="${{memory_kb}}"
fi
chmod 0600 "${{swapfile}}"
mkswap "${{swapfile}}"
# Enable swap
sysctl vm.disk_based_swap=1
swapon "${{swapfile}}"
}}
cleanup_tmp() {{
tmpdir="${{MOUNT_DIR}}/tmp"
# First, make sure the temporary directory exists.
mkdir -p "${{tmpdir}}"
# Remove all files from it.
#
# We do not remove the directory itself, as that could lead to a broken
# volume mount if the Docker container has already started).
#
# We also do not just use `rm -rf ${{tmpdir}}/*`, as that would leave
# behind any hidden files.
find "${{tmpdir}}/" -mindepth 1 -delete
}}
download_docker_image
mount_and_prepare_disk
configure_swap
cleanup_tmp
journalctl -u google-startup-scripts --no-pager > /var/log/startupscript.log
"""
_DATALAB_CLOUD_CONFIG = """
#cloud-config
users:
- name: datalab
uid: 2000
groups: docker
- name: logger
uid: 2001
groups: docker
write_files:
- path: /etc/systemd/system/wait-for-startup-script.service
permissions: 0755
owner: root
content: |
[Unit]
Description=Wait for the startup script to setup required directories
Requires=network-online.target gcr-online.target
After=network-online.target gcr-online.target
[Service]
User=root
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/bash -c 'while [ ! -e /mnt/disks/datalab-pd/tmp ]; do \
sleep 1; \
done'
- path: /etc/systemd/system/datalab.service
permissions: 0644
owner: root
content: |
[Unit]
Description=datalab docker container
Requires=network-online.target gcr-online.target \
wait-for-startup-script.service
After=network-online.target gcr-online.target \
wait-for-startup-script.service
[Service]
Environment="HOME=/home/datalab"
ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
ExecStart=/usr/bin/docker run --rm -u 0 \
--name=datalab \
-p 127.0.0.1:8080:8080 \
-v /mnt/disks/datalab-pd/content:/content \
-v /mnt/disks/datalab-pd/tmp:/tmp \
--env=HOME=/content \
--env=DATALAB_ENV=GCE \
--env=DATALAB_DEBUG=true \
--env='DATALAB_SETTINGS_OVERRIDES={{ \
"enableAutoGCSBackups": {1}, \
"consoleLogLevel": "{2}" \
}}' \
--env='DATALAB_GIT_AUTHOR={3}' \
--env='DATALAB_INITIAL_USER_SETTINGS={4}' \
{0}
Restart=always
RestartSec=1
- path: /etc/google-fluentd/fluentd.conf
permissions: 0644
owner: root
content: |
# This config comes from a heavily trimmed version of the
# container-engine-customize-fluentd project. The upstream config is here:
# https://github.com/GoogleCloudPlatform/container-engine-customize-fluentd/blob/6a46d72b29f3d8e8e495713bc3382ce28caf744e/kubernetes/fluentd-configmap.yaml
<source>
type tail
format json
time_key time
path /var/lib/docker/containers/*/*.log
pos_file /var/log/google-fluentd/containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%N%Z
tag containers
read_from_head true
</source>
<match **>
@type copy
<store>
@type google_cloud
# Set the buffer type to file to improve the reliability
# and reduce the memory consumption
buffer_type file
buffer_path /var/log/google-fluentd/cos-system.buffer
# Set queue_full action to block because we want to pause gracefully
# in case of the off-the-limits load instead of throwing an exception
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the GCL limit
# of 10MiB per write request.
buffer_chunk_limit 2M
# Cap the combined memory usage of this buffer and the one below to
# 2MiB/chunk * (6 + 2) chunks = 16 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error
# case.
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
</store>
</match>
- path: /etc/systemd/system/logger.service
permissions: 0644
owner: root
content: |
[Unit]
Description=logging docker container
Requires=network-online.target
After=network-online.target
[Service]
Environment="HOME=/home/logger"
ExecStartPre=/usr/share/google/dockercfg_update.sh
ExecStartPre=/bin/mkdir -p /var/log/google-fluentd/
ExecStartPre=-/usr/bin/docker rm -fv logger
ExecStart=/usr/bin/docker run --rm -u 0 \
--name=logger \
-v /var/log/:/var/log/ \
-v /var/lib/docker/containers:/var/lib/docker/containers \
-v /etc/google-fluentd/:/etc/fluent/config.d/ \
--env='FLUENTD_ARGS=-q' \
gcr.io/google-containers/fluentd-gcp:2.0.17
Restart=always
RestartSec=1
runcmd:
- systemctl daemon-reload
- systemctl start datalab.service
- systemctl start logger.service
"""
class RepositoryException(Exception):
_MESSAGE = (
'Failed to find or create the repository `{}`.'
'\n\n'
'Ask a project owner to create it for you.')
def __init__(self, repo_name):
super(RepositoryException, self).__init__(
RepositoryException._MESSAGE.format(repo_name))
class SubnetException(Exception):
_MESSAGE = (
'Failed to find the subnet `{}`.'
'\n\n'
'Ask a project owner to create it for you, '
'or double check your gcloud config for the correct region.')
def __init__(self, subnet_name):
super(SubnetException, self).__init__(
SubnetException._MESSAGE.format(subnet_name))
class NoSubnetsFoundException(Exception):
_MESSAGE = (
'Failed to find a subnet for the network `{}` in the region `{}`.'
'\n\n'
'Ask a network admin to check it for you.'
'\n\n'
'Note that if this is a legacy network, then an '
'external IP address is required.')
def __init__(self, network_name, region):
super(NoSubnetsFoundException, self).__init__(
NoSubnetsFoundException._MESSAGE.format(network_name, region))
class PrivateIpGoogleAccessException(Exception):
_MESSAGE = (
'The subnet `{}` in the region `{}` is not configured to '
'allow private IP addresses to access Google services.'
'\n\n'
'Either ask a network admin to configure it for you, or '
'create the instance with an external IP address.')
def __init__(self, subnet_name, region):
super(PrivateIpGoogleAccessException, self).__init__(
PrivateIpGoogleAccessException._MESSAGE.format(
subnet_name, region))
class CancelledException(Exception):
_MESSAGE = 'Operation cancelled.'
def __init__(self):
super(CancelledException, self).__init__(CancelledException._MESSAGE)
def flags(parser):
"""Add command line flags for the `create` subcommand.
Args:
parser: The argparse parser to which to add the flags.
"""
parser.add_argument(
'instance',
metavar='NAME',
help='a name for the newly created instance')
parser.add_argument(
'--image-name',
dest='image_name',
default='gcr.io/cloud-datalab/datalab:latest',
help=(
'name of the Datalab image to run.'
'\n\n'
'If not specified, this defaults to the most recently\n'
'published image.'))
parser.add_argument(
'--disk-name',
dest='disk_name',
default=None,
help=(
'name of the persistent disk used to store notebooks.'
'\n\n'
'If not specified, this defaults to having a name based\n'
'on the instance name.'))
parser.add_argument(
'--disk-size-gb',
type=int,
dest='disk_size_gb',
default=_DATALAB_DEFAULT_DISK_SIZE_GB,
help='size of the persistent disk in GB.')
parser.add_argument(
'--network-name',
dest='network_name',
default=_DATALAB_NETWORK,
help='name of the network to which the instance will be attached.')
parser.add_argument(
'--subnet-name',
dest='subnet_name',
default=None,
help='name of the subnet to which the instance will be attached.')
parser.add_argument(
'--idle-timeout',
dest='idle_timeout',
default=None,
help=(
'interval after which an idle Datalab instance will shut down.'
'\n\n'
'You can specify a mix of days, hours, minutes and seconds\n'
'using those names or d, h, m and s, for example "1h 30m".\n'
'Specify 0s to disable.'))
parser.add_argument(
'--machine-type',
dest='machine_type',
default='n1-standard-1',
help=(
'the machine type of the instance.'
'\n\n'
'To get a list of available machine types, run '
'\'gcloud compute machine-types list\'.'
'\n\n'
'If not specified, the default type is n1-standard-1.'))
parser.add_argument(
'--no-connect',
dest='no_connect',
action='store_true',
default=False,
help='do not connect to the newly created instance')
parser.add_argument(
'--no-swap',
dest='no_swap',
action='store_true',
default=False,
help='do not enable swap on the newly created instance')
parser.add_argument(
'--no-backups',
dest='no_backups',
action='store_true',
default=False,
help='do not automatically backup the disk contents to GCS')
parser.add_argument(
'--beta-no-external-ip',
dest='no_external_ip',
action='store_true',
default=False,
help=(
'do not assign the instance an external IP address.'
'\n\n'
'If specified, you must make sure that the machine where you '
'run `datalab connect` is on the same VPC as the instance '
'(the one specified via the `--network-name` flag).'
'\n\n'
'Additionally, you must pass the `--beta-internal-ip` flag '
'to the `datalab connect` command.'
'\n\n'
'Note that this is a beta feature and unsupported.'))
parser.add_argument(
'--no-create-repository',
dest='no_create_repository',
action='store_true',
default=False,
help='do not create the datalab-notebooks repository if it is missing')
parser.add_argument(
'--log-level',
dest='log_level',
choices=['trace', 'debug', 'info', 'warn', 'error', 'fatal'],
default='warn',
help=(
'the log level for Datalab instance.'
'\n\n'
'This is the threshold under which log entries from the '
'Datalab instance will not be written to StackDriver logging.'
'\n\n'
'The default log level is "warn".'))
parser.add_argument(
'--for-user',
dest='for_user',
help='create the datalab instance on behalf of the specified user')
parser.add_argument(
'--service-account',
dest='service_account',
help=('A service account is an identity attached to the instance. '
'Its access tokens can be accessed through the instance '
'metadata server and are used to authenticate API calls made '
'from Datalab. The account can be either an email address or '
'an alias corresponding to a service account. You can '
'explicitly specify the Compute Engine default service account '
'using the \'default\' alias.'
'\n\n'
'If not provided, the instance will get project\'s default '
'service account.'))
connect.connection_flags(parser)
return
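# --- Hedged usage sketch (not part of the real CLI wiring) ---
# The `datalab` tool registers these flags on its own subcommand parser; the
# standalone parser below exists only to illustrate the flags defined above.
# The argument values are made up, and the connection flags added by
# `connect.connection_flags` are assumed to all be optional.
def _example_create_args():  # pragma: no cover
    import argparse

    parser = argparse.ArgumentParser(prog='datalab create')
    flags(parser)
    return parser.parse_args([
        'my-datalab-vm',
        '--machine-type', 'n1-standard-2',
        '--disk-size-gb', '100',
        '--no-connect',
    ])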
def get_region_name(args, gcloud_compute):
"""Lookup the name of the GCP region.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
Raises:
subprocess.CalledProcessError: If a `gcloud` command fails
"""
get_zone_cmd = ['zones', 'describe', '--format=value(region)', args.zone]
with tempfile.TemporaryFile() as stdout, \
tempfile.TemporaryFile() as stderr:
try:
gcloud_compute(args, get_zone_cmd, stdout=stdout, stderr=stderr)
stdout.seek(0)
region_uri = stdout.read().decode('utf-8').strip()
except subprocess.CalledProcessError:
stderr.seek(0)
sys.stderr.write(stderr.read())
raise
get_region_cmd = [
'regions', 'describe', '--format=value(name)', region_uri]
with tempfile.TemporaryFile() as stdout, \
tempfile.TemporaryFile() as stderr:
try:
gcloud_compute(args, get_region_cmd, stdout=stdout, stderr=stderr)
stdout.seek(0)
return stdout.read().decode('utf-8').strip()
except subprocess.CalledProcessError:
stderr.seek(0)
sys.stderr.write(stderr.read())
raise
def create_network(args, gcloud_compute, network_name):
"""Create the specified network.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
network_name: The name of the network
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
if utils.print_info_messages(args):
print('Creating the network {0}'.format(network_name))
create_cmd = [
'networks', 'create', network_name,
'--description', _DATALAB_NETWORK_DESCRIPTION]
utils.call_gcloud_quietly(args, gcloud_compute, create_cmd)
return
def ensure_network_exists(args, gcloud_compute, network_name):
"""Create the specified network if it does not already exist.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
network_name: The name of the network
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
get_cmd = ['networks', 'describe', '--format', 'value(name)', network_name]
try:
utils.call_gcloud_quietly(
args, gcloud_compute, get_cmd, report_errors=False)
except subprocess.CalledProcessError:
create_network(args, gcloud_compute, network_name)
return
def get_subnet_name(args, gcloud_compute, network_name, region):
"""Lookup the name of the subnet.
The specified network must be either an `auto` or `custom` mode network;
legacy networks are not supported.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
network_name: Name of the VPC network
region: Name of the GCP region
Raises:
subprocess.CalledProcessError: If a `gcloud` command fails
"""
get_subnet_cmd = ['networks', 'subnets', 'list',
'--filter=network~/{}$ region~/{}$'.format(
network_name, region),
'--format=value(name)']
with tempfile.TemporaryFile() as stdout, \
tempfile.TemporaryFile() as stderr:
try:
gcloud_compute(args, get_subnet_cmd, stdout=stdout, stderr=stderr)
stdout.seek(0)
subnet_name = stdout.read().decode('utf-8').strip()
if not subnet_name:
raise NoSubnetsFoundException(network_name, region)
if utils.print_debug_messages(args):
print('Using the subnet {0}'.format(subnet_name))
return subnet_name
except subprocess.CalledProcessError:
stderr.seek(0)
sys.stderr.write(stderr.read())
raise
def ensure_private_ip_google_access(args, gcloud_compute, subnet_name, region):
"""Ensure that the subnet allows private IPs to access Google services.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
subnet_name: Name of the VPC sub-network
region: Name of the GCP region
Raises:
subprocess.CalledProcessError: If a `gcloud` command fails
subprocess.PrivateIpGoogleAccessException: If the check fails
"""
if utils.print_debug_messages(args):
print('Checking private IP access to Google services for '
'the subnet `{0}` in the region `{1}`'.format(
subnet_name, region))
get_subnet_cmd = ['networks', 'subnets', 'describe', subnet_name,
'--region', region,
'--format=get(privateIpGoogleAccess)']
with tempfile.TemporaryFile() as stdout, \
tempfile.TemporaryFile() as stderr:
try:
gcloud_compute(args, get_subnet_cmd, stdout=stdout, stderr=stderr)
stdout.seek(0)
has_access = stdout.read().decode('utf-8').strip()
if utils.print_debug_messages(args):
print('Private IP Google access allowed: `{0}`'.format(
has_access))
if not (has_access == 'True'):
raise PrivateIpGoogleAccessException(subnet_name, region)
except subprocess.CalledProcessError:
stderr.seek(0)
sys.stderr.write(stderr.read())
raise
def ensure_subnet_exists(args, gcloud_compute, subnet_region, subnet_name):
"""Check the specified subnet if it does not exit with error.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
subnet_region: The name of the region of the subnet
subnet_name: The name of the subnet
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
get_cmd = [
'networks', 'subnets', 'describe',
'--format', 'value(name)', '--region', subnet_region, subnet_name]
try:
utils.call_gcloud_quietly(
args, gcloud_compute, get_cmd, report_errors=False)
except subprocess.CalledProcessError:
raise SubnetException(subnet_name)
return
def create_firewall_rule(args, gcloud_compute, network_name, rule_name):
"""Create the specified firewall rule to allow SSH access.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
network_name: The name of the network on which to allow SSH access
rule_name: The name of the firewall rule
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
if utils.print_info_messages(args):
print('Creating the firewall rule {0}'.format(rule_name))
create_cmd = [
'firewall-rules', 'create', rule_name,
'--allow', 'tcp:22',
'--network', network_name,
'--description', _DATALAB_FIREWALL_RULE_DESCRIPTION]
utils.call_gcloud_quietly(args, gcloud_compute, create_cmd)
return
def has_unexpected_firewall_rules(args, gcloud_compute, network_name):
rule_name = _DATALAB_FIREWALL_RULE_TEMPLATE.format(network_name)
list_cmd = [
'firewall-rules', 'list',
'--filter', 'network~.^*{0}$'.format(network_name),
'--format', 'value(name)']
with tempfile.TemporaryFile() as tf:
gcloud_compute(args, list_cmd, stdout=tf)
tf.seek(0)
matching_rules = tf.read().decode('utf-8').strip()
if matching_rules and (matching_rules != rule_name):
return True
return False
def prompt_on_unexpected_firewall_rules(args, gcloud_compute, network_name):
if has_unexpected_firewall_rules(args, gcloud_compute, network_name):
warning = _DATALAB_UNEXPECTED_FIREWALLS_WARNING_TEMPLATE.format(
network_name)
print(warning)
resp = read_input('Do you still want to use this network? (y/[n]): ')
if len(resp) < 1 or (resp[0] != 'y' and resp[0] != 'Y'):
raise CancelledException()
return
def ensure_firewall_rule_exists(args, gcloud_compute, network_name):
"""Create a firewall rule to allow SSH access if necessary.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
network_name: The name of the network on which to allow SSH access
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
rule_name = _DATALAB_FIREWALL_RULE_TEMPLATE.format(network_name)
get_cmd = [
'firewall-rules', 'describe', rule_name, '--format', 'value(name)']
try:
utils.call_gcloud_quietly(
args, gcloud_compute, get_cmd, report_errors=False)
except subprocess.CalledProcessError:
create_firewall_rule(args, gcloud_compute, network_name, rule_name)
return
def create_disk(args, gcloud_compute, disk_name):
"""Create the user's persistent disk.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
disk_name: The name of the persistent disk to create
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
if utils.print_info_messages(args):
print('Creating the disk {0}'.format(disk_name))
create_cmd = ['disks', 'create']
if args.zone:
create_cmd.extend(['--zone', args.zone])
create_cmd.extend([
'--size', str(args.disk_size_gb) + 'GB',
'--description', _DATALAB_DISK_DESCRIPTION,
disk_name])
utils.call_gcloud_quietly(args, gcloud_compute, create_cmd)
return
def ensure_disk_exists(args, gcloud_compute, disk_name):
"""Create the given persistent disk if it does not already exist.
Args:
args: The Namespace returned by argparse
gcloud_compute: Function that can be used for invoking `gcloud compute`
disk_name: The name of the persistent disk
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
get_cmd = [
'disks', 'describe', disk_name, '--format', 'value(name)']
if args.zone:
get_cmd.extend(['--zone', args.zone])
try:
utils.call_gcloud_quietly(
args, gcloud_compute, get_cmd, report_errors=False)
except subprocess.CalledProcessError:
create_disk(args, gcloud_compute, disk_name)
return
def create_repo(args, gcloud_repos, repo_name):
"""Create the given repository.
Args:
args: The Namespace returned by argparse
gcloud_repos: Function that can be used for invoking
`gcloud source repos`
repo_name: The name of the repository to create
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
if utils.print_info_messages(args):
print('Creating the repository {0}'.format(repo_name))
create_cmd = ['create', repo_name]
utils.call_gcloud_quietly(args, gcloud_repos, create_cmd)
def ensure_repo_exists(args, gcloud_repos, repo_name):
"""Create the given repository if it does not already exist.
Args:
args: The Namespace returned by argparse
gcloud_repos: Function that can be used for invoking
`gcloud source repos`
repo_name: The name of the repository to check
Raises:
subprocess.CalledProcessError: If the `gcloud` command fails
"""
list_cmd = ['list', '--quiet',
'--filter', 'name~^.*/repos/{}$'.format(repo_name),
'--format', 'value(name)']
with tempfile.TemporaryFile() as tf:
gcloud_repos(args, list_cmd, stdout=tf)
tf.seek(0)
matching_repos = tf.read().decode('utf-8').strip()
if not matching_repos:
try:
create_repo(args, gcloud_repos, repo_name)
except Exception:
raise RepositoryException(repo_name)
def prepare(args, gcloud_compute, gcloud_repos):
"""Run preparation steps for VM creation.
Args:
args: The Namespace instance returned by argparse
gcloud_compute: Function that can be used to invoke `gcloud compute`
gcloud_repos: Function that can be used to invoke
`gcloud source repos`
Returns:
      The disk configuration string to pass to the `--disk` flag of
      `gcloud compute instances create`
Raises:
      subprocess.CalledProcessError: If a nested `gcloud` call fails
"""
network_name = args.network_name
ensure_network_exists(args, gcloud_compute, network_name)
prompt_on_unexpected_firewall_rules(args, gcloud_compute, network_name)
ensure_firewall_rule_exists(args, gcloud_compute, network_name)
disk_name = args.disk_name or '{0}-pd'.format(args.instance)
ensure_disk_exists(args, gcloud_compute, disk_name)
disk_cfg = (
'auto-delete=no,boot=no,device-name=datalab-pd,mode=rw,name=' +
disk_name)
region = get_region_name(args, gcloud_compute)
if args.subnet_name:
ensure_subnet_exists(args, gcloud_compute, region, args.subnet_name)
if args.no_external_ip:
subnet_name = args.subnet_name or get_subnet_name(
args, gcloud_compute, network_name, region)
ensure_private_ip_google_access(
args, gcloud_compute, subnet_name, region)
if not args.no_create_repository:
ensure_repo_exists(args, gcloud_repos, _DATALAB_NOTEBOOKS_REPOSITORY)
return disk_cfg
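# A hedged illustration of the value prepare() hands back: for a hypothetical
# instance named `mydatalab`, when no disk name was given, the returned string
# would be
# auto-delete=no,boot=no,device-name=datalab-pd,mode=rw,name=mydatalab-pd
# which run() forwards unchanged to the `--disk` flag of
# `gcloud compute instances create`.
def _example_disk_cfg(instance_name):
    """Illustrative only: rebuild the --disk value for a default disk name."""
    disk_name = '{0}-pd'.format(instance_name)
    return ('auto-delete=no,boot=no,device-name=datalab-pd,mode=rw,name=' +
            disk_name)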
def run(args, gcloud_compute, gcloud_repos,
email='', in_cloud_shell=False, gcloud_zone=None,
sdk_version='UNKNOWN', datalab_version='UNKNOWN', **kwargs):
"""Implementation of the `datalab create` subcommand.
Args:
args: The Namespace instance returned by argparse
gcloud_compute: Function that can be used to invoke `gcloud compute`
gcloud_repos: Function that can be used to invoke
`gcloud source repos`
email: The user's email address
in_cloud_shell: Whether or not the command is being run in the
Google Cloud Shell
gcloud_zone: The zone that gcloud is configured to use
sdk_version: The version of the Cloud SDK being used
datalab_version: The version of the datalab CLI being used
Raises:
      subprocess.CalledProcessError: If a nested `gcloud` call fails
"""
if (not args.zone) and (not args.disk_name):
args.zone = gcloud_zone
if (not args.zone) and (not args.quiet):
args.zone = utils.prompt_for_zone(args, gcloud_compute)
disk_cfg = prepare(args, gcloud_compute, gcloud_repos)
print('Creating the instance {0}'.format(args.instance))
cmd = ['instances', 'create']
if args.zone:
cmd.extend(['--zone', args.zone])
if args.subnet_name:
cmd.extend(['--subnet', args.subnet_name])
enable_swap = "false" if args.no_swap else "true"
enable_backups = "false" if args.no_backups else "true"
idle_timeout = args.idle_timeout
console_log_level = args.log_level or "warn"
user_email = args.for_user or email
service_account = args.service_account or "default"
# We have to escape the user's email before using it in the YAML template.
escaped_email = user_email.replace("'", "''")
initial_user_settings = json.dumps({"idleTimeoutInterval": idle_timeout}) \
if idle_timeout else ''
with tempfile.NamedTemporaryFile(mode='w', delete=False) \
as startup_script_file, \
tempfile.NamedTemporaryFile(mode='w', delete=False) \
as user_data_file, \
tempfile.NamedTemporaryFile(mode='w', delete=False) \
as for_user_file, \
tempfile.NamedTemporaryFile(mode='w', delete=False) \
as os_login_file, \
tempfile.NamedTemporaryFile(mode='w', delete=False) \
as sdk_version_file, \
tempfile.NamedTemporaryFile(mode='w', delete=False) \
as datalab_version_file:
try:
startup_script_file.write(_DATALAB_STARTUP_SCRIPT.format(
args.image_name, _DATALAB_NOTEBOOKS_REPOSITORY, enable_swap))
startup_script_file.close()
user_data_file.write(_DATALAB_CLOUD_CONFIG.format(
args.image_name, enable_backups,
console_log_level, escaped_email, initial_user_settings))
user_data_file.close()
for_user_file.write(user_email)
for_user_file.close()
os_login_file.write("FALSE")
os_login_file.close()
sdk_version_file.write(sdk_version)
sdk_version_file.close()
datalab_version_file.write(datalab_version)
datalab_version_file.close()
metadata_template = (
'startup-script={0},' +
'user-data={1},' +
'for-user={2},' +
'enable-oslogin={3},' +
'created-with-sdk-version={4},' +
'created-with-datalab-version={5}')
metadata_from_file = (
metadata_template.format(
startup_script_file.name,
user_data_file.name,
for_user_file.name,
os_login_file.name,
sdk_version_file.name,
datalab_version_file.name))
cmd.extend([
'--format=none',
'--boot-disk-size=20GB',
'--network', args.network_name,
'--image-family', 'cos-stable',
'--image-project', 'cos-cloud',
'--machine-type', args.machine_type,
'--metadata-from-file', metadata_from_file,
'--tags', 'datalab',
'--disk', disk_cfg,
'--service-account', service_account,
'--scopes', 'cloud-platform',
args.instance])
if args.no_external_ip:
cmd.extend(['--no-address'])
gcloud_compute(args, cmd)
finally:
os.remove(startup_script_file.name)
os.remove(user_data_file.name)
os.remove(for_user_file.name)
os.remove(os_login_file.name)
os.remove(sdk_version_file.name)
os.remove(datalab_version_file.name)
if (not args.no_connect) and (not args.for_user):
if args.no_external_ip:
args.internal_ip = True
connect.connect(args, gcloud_compute, email, in_cloud_shell)
return
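# run() passes instance metadata with `--metadata-from-file`: each value is
# written to its own temporary file, the flag is a comma-separated list of
# key=path pairs, and the files are deleted once `gcloud` returns. A hedged,
# self-contained sketch of that pattern; in the real command the keys and
# values are the startup script, cloud-config, and version strings above.
def _example_metadata_from_file(values):
    """Illustrative only: build a --metadata-from-file argument from a dict."""
    paths = {}
    for key, value in values.items():
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
            f.write(value)
        paths[key] = f.name
    flag = ','.join(
        '{0}={1}'.format(key, path) for key, path in sorted(paths.items()))
    # The real command keeps the files until gcloud has read them; this
    # sketch removes them immediately because nothing consumes the flag.
    for path in paths.values():
        os.remove(path)
    return flag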
| parthea/datalab | tools/cli/commands/create.py | Python | apache-2.0 | 37,422 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gdb
import pwndbg.abi
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.color.theme as theme
import pwndbg.enhance
import pwndbg.memory
import pwndbg.symbol
import pwndbg.typeinfo
import pwndbg.vmmap
LIMIT = pwndbg.config.Parameter('dereference-limit', 5, 'max number of pointers to dereference in a chain')
def get(address, limit=LIMIT, offset=0, hard_stop=None, hard_end=0, include_start=True):
"""
    Recursively dereferences an address. For bare metal targets, it stops as soon as the address falls outside all vmmap pages, to avoid redundant dereferences.
Arguments:
address(int): the first address to begin dereferencing
limit(int): number of valid pointers
offset(int): offset into the address to get the next pointer
hard_stop(int): address to stop at
hard_end: value to append when hard_stop is reached
include_start(bool): whether to include starting address or not
Returns:
        A list representing the chain of pointers dereferenced starting from ```address```
"""
limit = int(limit)
result = [address] if include_start else []
for i in range(limit):
# Don't follow cycles, except to stop at the second occurrence.
if result.count(address) >= 2:
break
if hard_stop is not None and address == hard_stop:
result.append(hard_end)
break
try:
address = address + offset
            # Avoid redundant dereferences in bare-metal mode by checking
            # that the address lies in one of the vmmap pages
if not pwndbg.abi.linux and not pwndbg.vmmap.find(address):
break
address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
address &= pwndbg.arch.ptrmask
result.append(address)
except gdb.MemoryError:
break
return result
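# A hedged usage sketch (requires a live GDB session with pwndbg loaded):
# chase at most a few pointers starting from the stack pointer, e.g. a chain
# 0x7fffffffe000 -> 0x602010 -> 0x41414141 comes back as
# [0x7fffffffe000, 0x602010, 0x41414141]. The register access through
# pwndbg.regs is an assumption about the surrounding API; this module does
# not import it itself.
def _example_stack_chain(limit=3):
    """Illustrative only: dereference a short chain rooted at the stack pointer."""
    import pwndbg.regs
    return get(pwndbg.regs.sp, limit=limit)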
config_arrow_left = theme.Parameter('chain-arrow-left', '◂—', 'left arrow of chain formatting')
config_arrow_right = theme.Parameter('chain-arrow-right', '—▸', 'right arrow of chain formatting')
config_contiguous = theme.Parameter('chain-contiguous-marker', '...', 'contiguous marker of chain formatting')
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
"""
Recursively dereferences an address into string representation, or convert the list representation
of address dereferences into string representation.
Arguments:
value(int|list): Either the starting address to be sent to get, or the result of get (a list)
limit(int): Number of valid pointers
code(bool): Hint that indicates the value may be an instruction
offset(int): Offset into the address to get the next pointer
hard_stop(int): Value to stop on
        hard_end: Value to append when hard_stop is reached (e.g. None, the hard_stop value itself, or a string).
Returns:
A string representing pointers of each address and reference
        String format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
"""
limit = int(limit)
# Allow results from get function to be passed to format
if isinstance(value, list):
chain = value
else:
chain = get(value, limit, offset, hard_stop, hard_end)
arrow_left = C.arrow(' %s ' % config_arrow_left)
arrow_right = C.arrow(' %s ' % config_arrow_right)
# Colorize the chain
rest = []
for link in chain:
symbol = pwndbg.symbol.get(link) or None
if symbol:
symbol = '%#x (%s)' % (link, symbol)
rest.append(M.get(link, symbol))
# If the dereference limit is zero, skip any enhancements.
if limit == 0:
return rest[0]
# Otherwise replace last element with the enhanced information.
rest = rest[:-1]
# Enhance the last entry
# If there are no pointers (e.g. eax = 0x41414141), then enhance
# the only element there is.
if len(chain) == 1:
enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
# Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used,
    # the chain failed at that offset, so display that offset.
elif len(chain) < limit + 1:
enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
else:
enhanced = C.contiguous('%s' % config_contiguous)
if len(chain) == 1:
return enhanced
return arrow_right.join(rest) + arrow_left + enhanced
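# A hedged usage sketch: format() accepts either a start address or a list
# already produced by get(), and renders a single colorized line such as
# "0x804a010 —▸ 0x8061000 ◂— 0x41414141" (addresses are hypothetical).
def _example_format_chain(start):
    """Illustrative only: render the pointer chain rooted at `start`."""
    return format(get(start))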
| anthraxx/pwndbg | pwndbg/chain.py | Python | mit | 4,555 | 0.003305 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from indra.java_vm import autoclass, JavaException
logger = logging.getLogger('reach_reader')
class ReachReader(object):
"""The ReachReader wraps a singleton instance of the REACH reader.
This allows calling the reader many times without having to wait for it to
start up each time.
Attributes
----------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
def __init__(self):
self.api_ruler = None
def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException:
# This second autoclass is needed because of a jnius
# issue in which the first JavaException is not raised.
try:
autoclass('java.lang.String')
except JavaException as e:
logger.error(e)
pass
return None
return self.api_ruler
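# A hedged usage sketch: callers are expected to keep a single ReachReader
# around and call get_api_ruler() whenever they need the JVM-backed ApiRuler,
# so the REACH startup cost is paid only once per process. The module-level
# instance below is illustrative, not part of this file.
_shared_reader = ReachReader()

def _example_api_ruler():
    """Illustrative only: return the lazily started, shared ApiRuler."""
    return _shared_reader.get_api_ruler()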
| jmuhlich/indra | indra/reach/reach_reader.py | Python | bsd-2-clause | 1,451 | 0.000689 |