text (string, lengths 6-947k) | repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
from dependencywatcher.crawler.detectors import Detector
import urllib2, json, logging, os
logger = logging.getLogger(__name__)
class RubyGemsDetector(Detector):
""" rubygems.org API based information detector """
url = "https://rubygems.org/api/v1/gems/%s.json"
auth = "af93e383246a774566bcf661f9c9f591"
def __init__(self, manifest):
self.json = None
super(RubyGemsDetector, self).__init__(manifest)
def get(self, library_name):
url = RubyGemsDetector.url % library_name
logger.debug("Opening URL: %s" % url)
request = urllib2.Request(url)
request.add_header("Authorization", RubyGemsDetector.auth)
return json.load(urllib2.urlopen(request))
def detect(self, what, options, result):
if self.json is None:
self.json = self.get(self.manifest["name"])
try:
if what == "url":
result[what] = self.normalize(what, self.json["homepage_uri"])
elif what == "license":
result[what] = self.normalize(what, ", ".join(self.json["licenses"]))
            elif what == "version":
                result[what] = self.normalize(what, self.json[what])
            elif what == "description":
                result[what] = self.normalize(what, self.json["info"])
except KeyError:
pass
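# A minimal usage sketch (not part of the original module): it assumes the
# crawler supplies a manifest dict carrying the gem name and that
# Detector.normalize() passes these values through unchanged.
if __name__ == "__main__":
    detector = RubyGemsDetector({"name": "rails"})
    result = {}
    for field in ("url", "license", "version", "description"):
        detector.detect(field, None, result)
    print(result)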
| DependencyWatcher/crawler | dependencywatcher/crawler/rubygems.py | Python | apache-2.0 | 1,359 | 0.003679 |
'''
Python bindings for bgfx.
'''
__author__ = "Jason Nadro"
__copyright__ = "Copyright 2016, Jason Nadro"
__credits__ = ["Jason Nadro"]
__license__ = "BSD 2-clause"
__version__ = "0.0.1"
__maintainer__ = "Jason Nadro"
__email__ = ""
__status__ = "Development"
import ctypes
from ctypes import Structure, POINTER, cast, byref, CFUNCTYPE
from ctypes import c_bool, c_int, c_int8, c_int16, c_int32, c_int64, c_uint8, c_uint16, c_uint32, c_uint64, c_float, c_char_p, c_void_p, c_size_t, c_char
import os
bgfx_dll_path = os.path.dirname(__file__) + "\\bgfx-shared-libRelease"
_bgfx = ctypes.CDLL(bgfx_dll_path)
enum_type = c_int
# bgfx_renderer_type
bgfx_renderer_type = enum_type
(
BGFX_RENDERER_TYPE_NOOP,
BGFX_RENDERER_TYPE_DIRECT3D9,
BGFX_RENDERER_TYPE_DIRECT3D11,
BGFX_RENDERER_TYPE_DIRECT3D12,
BGFX_RENDERER_TYPE_GNM,
BGFX_RENDERER_TYPE_METAL,
BGFX_RENDERER_TYPE_OPENGLES,
BGFX_RENDERER_TYPE_OPENGL,
BGFX_RENDERER_TYPE_VULKAN,
BGFX_RENDERER_TYPE_COUNT
) = map(bgfx_renderer_type,range(10))
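# Every "enum" in this module is emulated the same way: map() wraps consecutive
# integers in the ctypes enum alias and tuple unpacking binds them to
# module-level names, so BGFX_RENDERER_TYPE_DIRECT3D11 holds c_int(2) and the
# trailing *_COUNT member equals the number of real values.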
# bgfx_access
bgfx_access = enum_type
(
BGFX_ACCESS_READ,
BGFX_ACCESS_WRITE,
BGFX_ACCESS_READWRITE,
BGFX_ACCESS_COUNT
) = map(bgfx_access, range(4))
# bgfx_attrib
bgfx_attrib = enum_type
(
BGFX_ATTRIB_POSITION,
BGFX_ATTRIB_NORMAL,
BGFX_ATTRIB_TANGENT,
BGFX_ATTRIB_BITANGENT,
BGFX_ATTRIB_COLOR0,
BGFX_ATTRIB_COLOR1,
BGFX_ATTRIB_COLOR2,
BGFX_ATTRIB_COLOR3,
BGFX_ATTRIB_INDICES,
BGFX_ATTRIB_WEIGHT,
BGFX_ATTRIB_TEXCOORD0,
BGFX_ATTRIB_TEXCOORD1,
BGFX_ATTRIB_TEXCOORD2,
BGFX_ATTRIB_TEXCOORD3,
BGFX_ATTRIB_TEXCOORD4,
BGFX_ATTRIB_TEXCOORD5,
BGFX_ATTRIB_TEXCOORD6,
BGFX_ATTRIB_TEXCOORD7,
BGFX_ATTRIB_COUNT
) = map(bgfx_attrib, range(19))
# bgfx_attrib_type
bgfx_attrib_type = enum_type
(
BGFX_ATTRIB_TYPE_UINT8,
BGFX_ATTRIB_TYPE_UINT10,
BGFX_ATTRIB_TYPE_INT16,
BGFX_ATTRIB_TYPE_HALF,
BGFX_ATTRIB_TYPE_FLOAT,
BGFX_ATTRIB_TYPE_COUNT
) = map(bgfx_attrib_type, range(6))
# bgfx_texture_format
bgfx_texture_format = enum_type
(
BGFX_TEXTURE_FORMAT_BC1,
BGFX_TEXTURE_FORMAT_BC2,
BGFX_TEXTURE_FORMAT_BC3,
BGFX_TEXTURE_FORMAT_BC4,
BGFX_TEXTURE_FORMAT_BC5,
BGFX_TEXTURE_FORMAT_BC6H,
BGFX_TEXTURE_FORMAT_BC7,
BGFX_TEXTURE_FORMAT_ETC1,
BGFX_TEXTURE_FORMAT_ETC2,
BGFX_TEXTURE_FORMAT_ETC2A,
BGFX_TEXTURE_FORMAT_ETC2A1,
BGFX_TEXTURE_FORMAT_PTC12,
BGFX_TEXTURE_FORMAT_PTC14,
BGFX_TEXTURE_FORMAT_PTC12A,
BGFX_TEXTURE_FORMAT_PTC14A,
BGFX_TEXTURE_FORMAT_PTC22,
BGFX_TEXTURE_FORMAT_PTC24,
BGFX_TEXTURE_FORMAT_ATC,
BGFX_TEXTURE_FORMAT_ATCE,
BGFX_TEXTURE_FORMAT_ATCI,
BGFX_TEXTURE_FORMAT_ASTC4x4,
BGFX_TEXTURE_FORMAT_ASTC5x5,
BGFX_TEXTURE_FORMAT_ASTC6x6,
BGFX_TEXTURE_FORMAT_ASTC8x5,
BGFX_TEXTURE_FORMAT_ASTC8x6,
BGFX_TEXTURE_FORMAT_ASTC10x5,
BGFX_TEXTURE_FORMAT_UNKNOWN,
BGFX_TEXTURE_FORMAT_R1,
BGFX_TEXTURE_FORMAT_A8,
BGFX_TEXTURE_FORMAT_R8,
BGFX_TEXTURE_FORMAT_R8I,
BGFX_TEXTURE_FORMAT_R8U,
BGFX_TEXTURE_FORMAT_R8S,
BGFX_TEXTURE_FORMAT_R16,
BGFX_TEXTURE_FORMAT_R16I,
BGFX_TEXTURE_FORMAT_R16U,
BGFX_TEXTURE_FORMAT_R16F,
BGFX_TEXTURE_FORMAT_R16S,
BGFX_TEXTURE_FORMAT_R32I,
BGFX_TEXTURE_FORMAT_R32U,
BGFX_TEXTURE_FORMAT_R32F,
BGFX_TEXTURE_FORMAT_RG8,
BGFX_TEXTURE_FORMAT_RG8I,
BGFX_TEXTURE_FORMAT_RG8U,
BGFX_TEXTURE_FORMAT_RG8S,
BGFX_TEXTURE_FORMAT_RG16,
BGFX_TEXTURE_FORMAT_RG16I,
BGFX_TEXTURE_FORMAT_RG16U,
BGFX_TEXTURE_FORMAT_RG16F,
BGFX_TEXTURE_FORMAT_RG16S,
BGFX_TEXTURE_FORMAT_RG32I,
BGFX_TEXTURE_FORMAT_RG32U,
BGFX_TEXTURE_FORMAT_RG32F,
BGFX_TEXTURE_FORMAT_RGB8,
BGFX_TEXTURE_FORMAT_RGB8I,
BGFX_TEXTURE_FORMAT_RGB8U,
BGFX_TEXTURE_FORMAT_RGB8S,
BGFX_TEXTURE_FORMAT_RGB9E5F,
BGFX_TEXTURE_FORMAT_BGRA8,
BGFX_TEXTURE_FORMAT_RGBA8,
BGFX_TEXTURE_FORMAT_RGBA8I,
BGFX_TEXTURE_FORMAT_RGBA8U,
BGFX_TEXTURE_FORMAT_RGBA8S,
BGFX_TEXTURE_FORMAT_RGBA16,
BGFX_TEXTURE_FORMAT_RGBA16I,
BGFX_TEXTURE_FORMAT_RGBA16U,
BGFX_TEXTURE_FORMAT_RGBA16F,
BGFX_TEXTURE_FORMAT_RGBA16S,
BGFX_TEXTURE_FORMAT_RGBA32I,
BGFX_TEXTURE_FORMAT_RGBA32U,
BGFX_TEXTURE_FORMAT_RGBA32F,
BGFX_TEXTURE_FORMAT_R5G6B5,
BGFX_TEXTURE_FORMAT_RGBA4,
BGFX_TEXTURE_FORMAT_RGB5A1,
BGFX_TEXTURE_FORMAT_RGB10A2,
BGFX_TEXTURE_FORMAT_RG11B10F,
BGFX_TEXTURE_FORMAT_UNKNOWN_DEPTH,
BGFX_TEXTURE_FORMAT_D16,
BGFX_TEXTURE_FORMAT_D24,
BGFX_TEXTURE_FORMAT_D24S8,
BGFX_TEXTURE_FORMAT_D32,
BGFX_TEXTURE_FORMAT_D16F,
BGFX_TEXTURE_FORMAT_D24F,
BGFX_TEXTURE_FORMAT_D32F,
BGFX_TEXTURE_FORMAT_D0S8,
BGFX_TEXTURE_FORMAT_COUNT
) = map(bgfx_texture_format, range(86))
# bgfx_uniform_type
bgfx_uniform_type = enum_type
(
BGFX_UNIFORM_TYPE_SAMPLER,
BGFX_UNIFORM_TYPE_END,
BGFX_UNIFORM_TYPE_VEC4,
BGFX_UNIFORM_TYPE_MAT3,
BGFX_UNIFORM_TYPE_MAT4,
BGFX_UNIFORM_TYPE_COUNT
) = map(bgfx_uniform_type, range(6))
# backbuffer_ratio
backbuffer_ratio = enum_type
(
BGFX_BACKBUFFER_RATIO_EQUAL,
BGFX_BACKBUFFER_RATIO_HALF,
BGFX_BACKBUFFER_RATIO_QUARTER,
BGFX_BACKBUFFER_RATIO_EIGHTH,
BGFX_BACKBUFFER_RATIO_SIXTEENTH,
BGFX_BACKBUFFER_RATIO_DOUBLE,
BGFX_BACKBUFFER_RATIO_COUNT
) = map(backbuffer_ratio, range(7))
# occlusion_query_result
occlusion_query_result = enum_type
(
BGFX_OCCLUSION_QUERY_RESULT_INVISIBLE,
BGFX_OCCLUSION_QUERY_RESULT_VISIBLE,
BGFX_OCCLUSION_QUERY_RESULT_NORESULT,
BGFX_OCCLUSION_QUERY_RESULT_COUNT
) = map(occlusion_query_result, range(4))
# topology
topology = enum_type
(
BGFX_TOPOLOGY_TRI_LIST,
BGFX_TOPOLOGY_TRI_STRIP,
BGFX_TOPOLOGY_LINE_LIST,
BGFX_TOPOLOGY_LINE_STRIP,
BGFX_TOPOLOGY_POINT_LIST,
BGFX_TOPOLOGY_COUNT
) = map(topology, range(6))
# topology_convert
topology_convert = enum_type
(
BGFX_TOPOLOGY_CONVERT_TRI_LIST_FLIP_WINDING,
BGFX_TOPOLOGY_CONVERT_TRI_STRIP_FLIP_WINDING,
BGFX_TOPOLOGY_CONVERT_TRI_LIST_TO_LINE_LIST,
BGFX_TOPOLOGY_CONVERT_TRI_STRIP_TO_TRI_LIST,
BGFX_TOPOLOGY_CONVERT_LINE_STRIP_TO_LINE_LIST,
BGFX_TOPOLOGY_CONVERT_COUNT
) = map(topology_convert, range(6))
# topology_sort
topology_sort = enum_type
(
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_MIN,
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_AVG,
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_MAX,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_MIN,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_AVG,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_MAX,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_MIN,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_AVG,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_MAX,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_MIN,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_AVG,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_MAX,
BGFX_TOPOLOGY_SORT_COUNT
) = map(topology_sort, range(13))
# view_mode
view_mode = enum_type
(
BGFX_VIEW_MODE_DEFAULT,
BGFX_VIEW_MODE_SEQUENTIAL,
BGFX_VIEW_MODE_DEPTH_ASCENDING,
BGFX_VIEW_MODE_DEPTH_DESCENDING,
    BGFX_VIEW_MODE_COUNT
) = map(view_mode, range(5))
BGFX_PCI_ID_NONE = 0x0000
BGFX_PCI_ID_SOFTWARE_RASTERIZER = 0x0001
BGFX_PCI_ID_AMD = 0x1002
BGFX_PCI_ID_INTEL = 0x8086
BGFX_PCI_ID_NVIDIA = 0x10de
BGFX_RESET_NONE = 0x00000000 # //!< No reset flags.
BGFX_RESET_FULLSCREEN = 0x00000001 # //!< Not supported yet.
BGFX_RESET_FULLSCREEN_SHIFT = 0 # //!< Fullscreen bit shift.
BGFX_RESET_FULLSCREEN_MASK = 0x00000001 # //!< Fullscreen bit mask.
BGFX_RESET_MSAA_X2 = 0x00000010 # //!< Enable 2x MSAA.
BGFX_RESET_MSAA_X4 = 0x00000020 # //!< Enable 4x MSAA.
BGFX_RESET_MSAA_X8 = 0x00000030 # //!< Enable 8x MSAA.
BGFX_RESET_MSAA_X16 = 0x00000040 # //!< Enable 16x MSAA.
BGFX_RESET_MSAA_SHIFT = 4 # //!< MSAA mode bit shift.
BGFX_RESET_MSAA_MASK = 0x00000070 # //!< MSAA mode bit mask.
BGFX_RESET_VSYNC = 0x00000080 # //!< Enable V-Sync.
BGFX_RESET_MAXANISOTROPY = 0x00000100 # //!< Turn on/off max anisotropy.
BGFX_RESET_CAPTURE = 0x00000200 # //!< Begin screen capture.
BGFX_RESET_HMD = 0x00000400 # //!< HMD stereo rendering.
BGFX_RESET_HMD_DEBUG = 0x00000800 # //!< HMD stereo rendering debug mode.
BGFX_RESET_HMD_RECENTER = 0x00001000 # //!< HMD calibration.
# //!< Flush rendering after submitting to GPU.
BGFX_RESET_FLUSH_AFTER_RENDER = 0x00002000
# //!< This flag specifies where flip occurs. Default behavior is that flip occurs before rendering new frame. This flag only has effect when `BGFX_CONFIG_MULTITHREADED=0`.
BGFX_RESET_FLIP_AFTER_RENDER = 0x00004000
BGFX_RESET_SRGB_BACKBUFFER = 0x00008000 # //!< Enable sRGB backbuffer.
BGFX_RESET_HIDPI = 0x00010000 # //!< Enable HiDPI rendering.
BGFX_RESET_DEPTH_CLAMP = 0x00020000 # //!< Enable depth clamp.
BGFX_RESET_RESERVED_SHIFT = 31 # //!< Internal bits shift.
BGFX_RESET_RESERVED_MASK = 0x80000000 # //!< Internal bits mask.
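# The reset flags above are plain bit masks and are meant to be OR-ed together
# when calling the reset() binding defined further below, e.g. (usage sketch):
#   reset(1280, 720, BGFX_RESET_VSYNC | BGFX_RESET_MSAA_X4, BGFX_TEXTURE_FORMAT_COUNT)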
BGFX_CLEAR_NONE = 0x0000
BGFX_CLEAR_COLOR = 0x0001
BGFX_CLEAR_DEPTH = 0x0002
BGFX_CLEAR_STENCIL = 0x0004
BGFX_CLEAR_DISCARD_COLOR_0 = 0x0008
BGFX_CLEAR_DISCARD_COLOR_1 = 0x0010
BGFX_CLEAR_DISCARD_COLOR_2 = 0x0020
BGFX_CLEAR_DISCARD_COLOR_3 = 0x0040
BGFX_CLEAR_DISCARD_COLOR_4 = 0x0080
BGFX_CLEAR_DISCARD_COLOR_5 = 0x0100
BGFX_CLEAR_DISCARD_COLOR_6 = 0x0200
BGFX_CLEAR_DISCARD_COLOR_7 = 0x0400
BGFX_CLEAR_DISCARD_DEPTH = 0x0800
BGFX_CLEAR_DISCARD_STENCIL = 0x1000
BGFX_CLEAR_DISCARD_COLOR_MASK = 0 | BGFX_CLEAR_DISCARD_COLOR_0 | BGFX_CLEAR_DISCARD_COLOR_1 | BGFX_CLEAR_DISCARD_COLOR_2 | BGFX_CLEAR_DISCARD_COLOR_3 | BGFX_CLEAR_DISCARD_COLOR_4 | BGFX_CLEAR_DISCARD_COLOR_5 | BGFX_CLEAR_DISCARD_COLOR_6 | BGFX_CLEAR_DISCARD_COLOR_7
BGFX_CLEAR_DISCARD_MASK = 0 | BGFX_CLEAR_DISCARD_COLOR_MASK | BGFX_CLEAR_DISCARD_DEPTH | BGFX_CLEAR_DISCARD_STENCIL
BGFX_DEBUG_NONE = 0x00000000 # //!< No debug.
BGFX_DEBUG_WIREFRAME = 0x00000001 # //!< Enable wireframe for all primitives.
BGFX_DEBUG_IFH = 0x00000002
BGFX_DEBUG_STATS = 0x00000004 # //!< Enable statistics display.
BGFX_DEBUG_TEXT = 0x00000008 # //!< Enable debug text display.
BGFX_BUFFER_NONE = 0x0000
BGFX_INVALID_HANDLE = 0xFFFF
class bgfx_dynamic_index_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_dynamic_vertex_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_frame_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_index_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_indirect_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_occlusion_query_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_program_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_shader_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_texture_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_uniform_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_vertex_buffer_handle(Structure):
_fields_ = [("idx", c_uint16)]
class bgfx_vertex_decl_handle(Structure):
_fields_ = [("idx", c_uint16)]
RELEASEFUNC = CFUNCTYPE(None, c_void_p, c_void_p)
def bgfx_release_fn(ptr, user_data):
return
class bgfx_memory(Structure):
_fields_ = [("data", POINTER(c_uint8)),
("size", c_uint32)]
class bgfx_transform(Structure):
_fields_ = [("data", POINTER(c_float)),
("num", c_uint16)]
bgfx_view_id = c_uint16
class bgfx_view_stats(Structure):
_fields_ = [("name", c_char * 256),
("view", bgfx_view_id),
("cpuTimeElapsed", c_int64),
("gpuTimeElapsed", c_int64)]
class bgfx_encoder_stats(Structure):
_fields_ = [("cpuTimeBegin", c_int64),
("cpuTimeEnd", c_int64)]
class bgfx_stats(Structure):
_fields_ = [
("cpuTimeFrame", c_int64),
("cpuTimeBegin", c_int64),
("cpuTimeEnd", c_int64),
("cpuTimerFreq", c_int64),
("gpuTimeBegin", c_int64),
("gpuTimeEnd", c_int64),
("gpuTimerFreq", c_int64),
("waitRender", c_int64),
("waitSubmit", c_int64),
("numDraw", c_uint32),
("numCompute", c_uint32),
("numBlit", c_uint32),
("maxGpuLatency", c_uint32),
("numDynamicIndexBuffers", c_uint16),
("numDynamicVertexBuffers", c_uint16),
("numFrameBuffers", c_uint16),
("numIndexBuffers", c_uint16),
("numOcclusionQueries", c_uint16),
("numPrograms", c_uint16),
("numShaders", c_uint16),
("numTextures", c_uint16),
("numUniforms", c_uint16),
("numVertexBuffers", c_uint16),
("numVertexDecls", c_uint16),
("textureMemoryUsed", c_int64),
("rtMemoryUsed", c_int64),
("transientVbUsed", c_uint32),
("transientIbUsed", c_uint32),
("numPrims", c_uint32 * BGFX_TOPOLOGY_COUNT.value),
("gpuMemoryMax", c_int64),
("gpuMemoryUsed", c_int64),
("width", c_uint16),
("height", c_uint16),
("textWidth", c_uint16),
("textHeight", c_uint16),
("numViews", c_uint16),
("viewStats", POINTER(bgfx_view_stats)),
("numEncoders", c_uint8),
("encoderStats", POINTER(bgfx_encoder_stats))]
class vertex_decl(Structure):
_fields_ = [("hash", c_uint32),
("stride", c_uint16),
("offset", c_uint16 * BGFX_ATTRIB_COUNT.value),
("attributes", c_uint16 * BGFX_ATTRIB_COUNT.value)]
class bgfx_transient_index_buffer(Structure):
_fields_ = [("data", POINTER(c_uint8)),
("size", c_uint32),
("handle", bgfx_index_buffer_handle),
("startIndex", c_uint32)]
class transient_vertex_buffer(Structure):
_fields_ = [("data", POINTER(c_uint8)),
("size", c_uint32),
("startVertex", c_uint32),
("stride", c_uint16),
("handle", bgfx_vertex_buffer_handle),
("decl", bgfx_vertex_decl_handle)]
class bgfx_instance_data_buffer(Structure):
_fields_ = [
("data", c_uint8),
("size", c_uint32),
("offset", c_uint32),
("num", c_uint32),
("stride", c_uint16),
("handle", bgfx_vertex_buffer_handle)
]
class texture_info(Structure):
_fields_ = [
("format", bgfx_texture_format),
("storageSize", c_uint32),
("width", c_uint16),
("height", c_uint16),
("depth", c_uint16),
("numLayers", c_uint16),
("numMips", c_uint8),
("bitsPerPixel", c_uint8),
("cubeMap", c_bool)
]
class uniform_info(Structure):
_fields_ = [
("name", c_char * 256),
("type", bgfx_uniform_type),
("num", c_uint16)
]
class attachment(Structure):
_fields_ = [
("access", bgfx_access),
("handle", bgfx_texture_handle),
("mip", c_uint16),
("layer", c_uint16),
("resolve", c_uint8)
]
class caps_gpu(Structure):
_fields_ = [
("vendorId", c_uint16),
("deviceId", c_uint16)
]
class cap_limits(Structure):
_fields_ = [
("maxDrawCalls", c_uint32),
("maxBlits", c_uint32),
("maxTextureSize", c_uint32),
("maxTextureLayers", c_uint32),
("maxViews", c_uint32),
("maxFrameBuffers", c_uint32),
("maxFBAttachments", c_uint32),
("maxPrograms", c_uint32),
("maxShaders", c_uint32),
("maxTextures", c_uint32),
("maxTextureSamplers", c_uint32),
("maxComputeBindings", c_uint32),
("maxVertexDecls", c_uint32),
("maxVertexStreams", c_uint32),
("maxIndexBuffers", c_uint32),
("maxVertexBuffers", c_uint32),
("maxDynamicIndexBuffers", c_uint32),
("maxDynamicVertexBuffers", c_uint32),
("maxUniforms", c_uint32),
("maxOcclusionQueries", c_uint32),
("maxEncoders", c_uint32),
("transientVbSize", c_uint32),
("transientIbSize", c_uint32)
]
class caps(Structure):
_fields_ = [
("rendererType", bgfx_renderer_type),
("supported", c_uint64),
("vendorId", c_uint16),
("deviceId", c_uint16),
("homogeneousDepth", c_bool),
("originBottomLeft", c_bool),
("numGPUs", c_uint8),
("gpu", caps_gpu * 4),
("limits", cap_limits),
("formats", c_uint16 * BGFX_TEXTURE_FORMAT_COUNT.value)
]
# bgfx_fatal
bgfx_fatal = enum_type
(
BGFX_FATAL_DEBUG_CHECK,
BGFX_FATAL_INVALID_SHADER,
BGFX_FATAL_UNABLE_TO_INITIALIZE,
BGFX_FATAL_UNABLE_TO_CREATE_TEXTURE,
BGFX_FATAL_DEVICE_LOST,
BGFX_FATAL_COUNT
) = map(bgfx_fatal, range(6))
#fatal = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_char_p, c_uint16, c_int, c_char_p)
#trace_vargs = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_char_p, c_uint16, c_char_p) # BUG va_list _argList
#profiler_begin = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_char_p, c_uint32, c_char_p, c_uint16)
#profiler_begin_literal = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_char_p, c_uint32, c_char_p, c_uint16)
#profiler_end = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s))
#cache_read_size = CFUNCTYPE(c_uint32, POINTER(bgfx_callback_interface_s), c_uint64)
#cache_read = CFUNCTYPE(c_bool, POINTER(bgfx_callback_interface_s), c_uint64, c_void_p, c_uint32)
#cache_write = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_uint64, c_void_p, c_uint32)
#screen_shot = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_char_p, c_uint32, c_uint32, c_uint32, c_void_p, c_uint32, c_bool)
#capture_begin = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_uint32, c_uint32, c_uint32, c_int, c_bool)
#capture_end = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s))
#capture_frame = CFUNCTYPE(None, POINTER(bgfx_callback_interface_s), c_void_p, c_uint32)
#class bgfx_callback_vtbl_s(Structure):
# _fields_ = [
# ("fatal", fatal)
# ("trace_vargs", trace_vargs)
# ("profiler_begin", profiler_begin)
# ("profiler_begin_literal", profiler_begin_literal)
# ("profiler_end", profiler_end)
# ("cache_read_size", cache_read_size)
# ("cache_read", cache_read)
# ("cache_write", cache_write)
# ("screen_shot", screen_shot)
# ("capture_begin", capture_begin)
# ("capture_end", capture_end)
# ("capture_frame", capture_frame)
# ]
#class bgfx_callback_interface_s(Structure):
# _fields_ = [
# ("vtbl", POINTER(bgfx_callback_vtbl_s))
# ]
class bgfx_callback_interface_t(Structure):
_fields_ = []
#realloc = CFUNCTYPE(c_void_p, POINTER(bgfx_allocator_interface_s), c_void_p, c_size_t, c_size_t, c_char_p, c_uint32)
#
#class bgfx_allocator_interface_s(Structure):
# _fields_ = [
# ("realloc", realloc)
# ]
class bgfx_allocator_interface_t(Structure):
_fields_ = []
class bgfx_platform_data(Structure):
_fields_ = [
("ndt", c_void_p),
("nwh", c_void_p),
("context", c_void_p),
("backBuffer", c_void_p),
("backBufferDS", c_void_p)
]
class bgfx_resolution_s(Structure):
_fields_ = [
("format", c_int),
("width", c_uint32),
("height", c_uint32),
("reset", c_uint32),
("numBackBuffers", c_uint8),
("maxFrameLatency", c_uint8)
]
class bgfx_init_limits_s(Structure):
_fields_ = [
("maxEncoders", c_uint16),
("transientVbSize", c_uint32),
("transientIbSize", c_uint32)
]
# https://bkaradzic.github.io/bgfx/bgfx.html#_CPPv2N4bgfx4InitE
class bgfx_init_t(Structure):
_fields_ = [
("type", c_int),
("vendorId", c_uint16),
("deviceId", c_uint16),
("debug", c_bool),
("profile", c_bool),
("platformData", bgfx_platform_data),
("resolution", bgfx_resolution_s),
("limits", bgfx_init_limits_s),
("callback", POINTER(bgfx_callback_interface_t)),
("allocator", POINTER(bgfx_allocator_interface_t))
]
def _bind(funcname, args=None, returns=None):
func = getattr(_bgfx, funcname)
func.argtypes = args
func.restype = returns
return func
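# _bind attaches ctypes argtypes/restype metadata to a symbol exported by the
# shared library; every binding below follows this pattern, e.g. frame below
# wraps bgfx_frame, taking a c_bool and returning the frame number as c_uint32.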
vertex_decl_begin = _bind("bgfx_vertex_decl_begin",
args=[POINTER(vertex_decl), bgfx_renderer_type],
returns=None)
vertex_decl_add = _bind("bgfx_vertex_decl_add",
args=[POINTER(vertex_decl), bgfx_attrib, c_uint8, bgfx_attrib_type, c_bool, c_bool],
returns=None)
vertex_decl_decode = _bind("bgfx_vertex_decl_decode",
args=[POINTER(vertex_decl), bgfx_attrib, POINTER(c_uint8), POINTER(bgfx_attrib_type), POINTER(c_bool), POINTER(c_bool)],
returns=None)
vertex_decl_has = _bind("bgfx_vertex_decl_has",
args=[POINTER(vertex_decl), c_uint8],
returns=c_bool)
vertex_decl_skip = _bind("bgfx_vertex_decl_skip",
args=[POINTER(vertex_decl), c_uint8],
returns=None)
vertex_decl_end = _bind("bgfx_vertex_decl_end",
args=[POINTER(vertex_decl)],
returns=None)
vertex_pack = _bind("bgfx_vertex_pack",
args=[POINTER(c_float), c_bool, bgfx_attrib, POINTER(vertex_decl), c_void_p, c_uint32],
returns=None)
vertex_unpack = _bind("bgfx_vertex_unpack",
args=[POINTER(c_float), bgfx_attrib, POINTER(vertex_decl), c_void_p, c_uint32],
returns=None)
vertex_convert = _bind("bgfx_vertex_convert",
args=[POINTER(vertex_decl), c_void_p, POINTER(vertex_decl), c_void_p, c_uint32],
returns=None)
weld_vertices = _bind("bgfx_weld_vertices",
args=[POINTER(c_uint16), POINTER(vertex_decl), c_void_p, c_uint16, c_float],
returns=c_uint16)
topology_convert = _bind("bgfx_topology_convert",
args=[topology_convert, c_void_p, c_uint32, c_void_p, c_uint32, c_bool],
returns=None)
topology_sort_tri_list = _bind("bgfx_topology_sort_tri_list",
args=[topology_sort, c_void_p, c_uint32, POINTER(c_float), POINTER(c_float), c_void_p, c_uint32, c_void_p, c_uint32, c_bool],
returns=None)
get_supported_renderers = _bind("bgfx_get_supported_renderers",
args=[c_uint8, POINTER(bgfx_renderer_type)],
returns=c_uint8)
get_renderer_name = _bind("bgfx_get_renderer_name",
args=[bgfx_renderer_type],
returns=c_char_p)
init_ctor = _bind("bgfx_init_ctor",
args=[POINTER(bgfx_init_t)],
returns=None)
# bgfx_init
# https://bkaradzic.github.io/bgfx/bgfx.html#_CPPv2N4bgfx4initERK4Init
init = _bind("bgfx_init",
args=[POINTER(bgfx_init_t)],
returns=c_bool)
shutdown = _bind("bgfx_shutdown")
reset = _bind("bgfx_reset",
args=[c_uint32, c_uint32, c_uint32, bgfx_texture_format])
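# A minimal initialization sketch (illustrative only, not part of the original
# bindings): `native_window_handle` is assumed to come from the host windowing
# toolkit (e.g. SDL2 or GLFW), and error handling is omitted.
def _example_init(native_window_handle, width=1280, height=720):
    init_data = bgfx_init_t()
    init_ctor(byref(init_data))  # fill the struct with bgfx defaults
    init_data.platformData.nwh = native_window_handle
    init_data.resolution.width = width
    init_data.resolution.height = height
    init_data.resolution.reset = BGFX_RESET_VSYNC
    if not init(byref(init_data)):
        raise RuntimeError("bgfx initialization failed")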
class bgfx_encoder(Structure):
_fields_ = []
#begin = _bind("bgfx_begin",
# args=[],
# returns=POINTER(bgfx_encoder))
#end = _bind("bgfx_end",
# args=[POINTER(bgfx_encoder)],
# returns=None)
frame = _bind("bgfx_frame",
args=[c_bool],
returns=c_uint32)
get_renderer_type = _bind("bgfx_get_renderer_type",
args=[],
returns=bgfx_renderer_type)
get_caps = _bind("bgfx_get_caps",
args=[],
returns=POINTER(caps))
get_stats = _bind("bgfx_get_stats",
args=[],
returns=POINTER(bgfx_stats))
alloc = _bind("bgfx_alloc",
args=[c_uint32],
returns=POINTER(bgfx_memory))
copy = _bind("bgfx_copy",
args=[c_void_p, c_uint32],
returns=POINTER(bgfx_memory))
make_ref = _bind("bgfx_make_ref",
args=[c_void_p, c_uint32],
returns=POINTER(bgfx_memory))
make_ref_release = _bind("bgfx_make_ref_release",
args=[c_void_p, c_uint32, RELEASEFUNC, c_void_p],
returns=POINTER(bgfx_memory))
set_debug = _bind("bgfx_set_debug",
args=[c_uint32],
returns=None)
dbg_text_clear = _bind("bgfx_dbg_text_clear",
args=[c_uint8, c_bool])
dbg_text_printf = _bind("bgfx_dbg_text_printf",
args=[c_uint16, c_uint16, c_uint8, c_char_p])
dbg_text_image = _bind("bgfx_dbg_text_image",
args=[c_uint16, c_uint16, c_uint16, c_uint16, c_void_p, c_uint16])
dbg_text_vprintf = _bind("bgfx_dbg_text_vprintf",
args=[c_uint16, c_uint16, c_uint8, c_char_p],
returns=None)
dbg_text_image = _bind("bgfx_dbg_text_image",
args=[c_uint16, c_uint16, c_uint16, c_uint16, c_void_p, c_uint16],
returns=None)
create_index_buffer = _bind("bgfx_create_index_buffer",
args=[POINTER(bgfx_memory), c_uint16],
returns=bgfx_index_buffer_handle)
set_index_buffer_name = _bind("bgfx_set_index_buffer_name",
args=[bgfx_index_buffer_handle, c_char_p, c_int32],
returns=None)
destroy_index_buffer = _bind("bgfx_destroy_index_buffer",
args=[bgfx_index_buffer_handle])
create_vertex_buffer = _bind("bgfx_create_vertex_buffer",
args=[POINTER(bgfx_memory), POINTER(vertex_decl), c_uint16],
returns=bgfx_vertex_buffer_handle)
set_vertex_buffer_name = _bind("bgfx_set_vertex_buffer_name",
args=[bgfx_vertex_buffer_handle, c_char_p, c_int32],
returns=None)
destroy_vertex_buffer = _bind("bgfx_destroy_vertex_buffer",
args=[bgfx_vertex_buffer_handle])
create_dynamic_index_buffer = _bind("bgfx_create_dynamic_index_buffer",
args=[c_uint32, c_uint16],
returns=bgfx_dynamic_index_buffer_handle)
create_dynamic_index_buffer_mem = _bind("bgfx_create_dynamic_index_buffer_mem",
args=[POINTER(bgfx_memory), c_uint16],
returns=bgfx_dynamic_index_buffer_handle)
update_dynamic_index_buffer = _bind("bgfx_update_dynamic_index_buffer",
args=[bgfx_dynamic_index_buffer_handle, c_uint32, POINTER(bgfx_memory)],
returns=None)
destroy_dynamic_index_buffer = _bind("bgfx_destroy_dynamic_index_buffer",
args=[bgfx_dynamic_index_buffer_handle],
returns=None)
create_dynamic_vertex_buffer = _bind("bgfx_create_dynamic_vertex_buffer",
args=[c_uint32, POINTER(vertex_decl), c_uint16],
returns=bgfx_dynamic_vertex_buffer_handle)
create_dynamic_vertex_buffer_mem = _bind("bgfx_create_dynamic_vertex_buffer_mem",
args=[POINTER(bgfx_memory), POINTER(vertex_decl), c_uint16],
returns=bgfx_dynamic_vertex_buffer_handle)
update_dynamic_vertex_buffer = _bind("bgfx_update_dynamic_vertex_buffer",
args=[bgfx_dynamic_vertex_buffer_handle, c_uint32, POINTER(bgfx_memory)],
returns=None)
destroy_dynamic_vertex_buffer = _bind("bgfx_destroy_dynamic_vertex_buffer",
args=[bgfx_dynamic_vertex_buffer_handle],
returns=None)
get_avail_transient_index_buffer = _bind("bgfx_get_avail_transient_index_buffer",
args=[c_uint32],
returns=c_uint32)
get_avail_transient_vertex_buffer = _bind("bgfx_get_avail_transient_vertex_buffer",
args=[c_uint32, POINTER(vertex_decl)],
returns=c_uint32)
get_avail_instance_data_buffer = _bind("bgfx_get_avail_instance_data_buffer",
args=[c_uint32, c_uint16],
returns=c_uint32)
alloc_transient_index_buffer = _bind("bgfx_alloc_transient_index_buffer",
args=[POINTER(bgfx_transient_index_buffer), c_uint32],
returns=None)
alloc_transient_vertex_buffer = _bind("bgfx_alloc_transient_vertex_buffer",
args=[POINTER(transient_vertex_buffer), c_uint32, POINTER(vertex_decl)],
returns=None)
alloc_transient_buffers = _bind("bgfx_alloc_transient_buffers",
args=[POINTER(transient_vertex_buffer), POINTER(vertex_decl), c_uint32, POINTER(bgfx_transient_index_buffer), c_uint32],
returns=c_bool)
alloc_instance_data_buffer = _bind("bgfx_alloc_instance_data_buffer",
args=[POINTER(bgfx_instance_data_buffer), c_uint32, c_uint16],
returns=None)
create_indirect_buffer = _bind("bgfx_create_indirect_buffer",
args=[c_uint32],
returns=bgfx_indirect_buffer_handle)
destroy_indirect_buffer = _bind("bgfx_destroy_indirect_buffer",
args=[bgfx_indirect_buffer_handle],
returns=None)
create_shader = _bind("bgfx_create_shader",
args=[POINTER(bgfx_memory)],
returns=bgfx_shader_handle)
get_shader_uniforms = _bind("bgfx_get_shader_uniforms",
args=[bgfx_shader_handle, POINTER(bgfx_uniform_handle), c_uint16],
returns=c_uint16)
get_uniform_info = _bind("bgfx_get_uniform_info",
args=[bgfx_uniform_handle, POINTER(uniform_info)],
returns=None)
set_shader_name = _bind("bgfx_set_shader_name",
args=[bgfx_shader_handle, c_char_p, c_int32],
returns=None)
destroy_shader = _bind("bgfx_destroy_shader",
args=[bgfx_shader_handle],
returns=None)
create_program = _bind("bgfx_create_program",
args=[bgfx_shader_handle, bgfx_shader_handle, c_bool],
returns=bgfx_program_handle)
create_compute_program = _bind("bgfx_create_compute_program",
args=[bgfx_shader_handle, c_bool],
returns=bgfx_program_handle)
destroy_program = _bind("bgfx_destroy_program",
args=[bgfx_program_handle],
returns=None)
is_texture_valid = _bind("bgfx_is_texture_valid",
args=[c_uint16, c_bool, c_uint16, bgfx_texture_format, c_uint64],
returns=c_bool)
calc_texture_size = _bind("bgfx_calc_texture_size",
args=[POINTER(texture_info), c_uint16, c_uint16, c_uint16, c_bool, c_bool, c_uint16, bgfx_texture_format],
returns=None)
create_texture = _bind("bgfx_create_texture",
args=[POINTER(bgfx_memory), c_uint64, c_uint8, POINTER(texture_info)],
returns=bgfx_texture_handle)
create_texture_2d = _bind("bgfx_create_texture_2d",
args=[c_uint16, c_uint16, c_bool, c_uint16, bgfx_texture_format, c_uint64, POINTER(bgfx_memory)],
returns=bgfx_texture_handle)
create_texture_2d_scaled = _bind("bgfx_create_texture_2d_scaled",
args=[backbuffer_ratio, c_bool, c_uint16, bgfx_texture_format, c_uint64],
returns=bgfx_texture_handle)
create_texture_3d = _bind("bgfx_create_texture_3d",
args=[c_uint16, c_uint16, c_uint16, c_bool, bgfx_texture_format, c_uint64, POINTER(bgfx_memory)],
returns=bgfx_texture_handle)
create_texture_cube = _bind("bgfx_create_texture_cube",
args=[c_uint16, c_bool, c_uint16, bgfx_texture_format, c_uint64, POINTER(bgfx_memory)],
returns=bgfx_texture_handle)
update_texture_2d = _bind("bgfx_update_texture_2d",
args=[bgfx_texture_handle, c_uint16, c_uint8, c_uint16, c_uint16, c_uint16, c_uint16, POINTER(bgfx_memory), c_uint16],
returns=None)
update_texture_3d = _bind("bgfx_update_texture_3d",
args=[bgfx_texture_handle, c_uint8, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16, POINTER(bgfx_memory)],
returns=None)
update_texture_cube = _bind("bgfx_update_texture_cube",
args=[bgfx_texture_handle, c_uint16, c_uint8, c_uint8, c_uint16, c_uint16, c_uint16, c_uint16, POINTER(bgfx_memory), c_uint16],
returns=None)
read_texture = _bind("bgfx_read_texture",
args=[bgfx_texture_handle, c_void_p, c_uint8],
returns=c_uint32)
set_texture_name = _bind("bgfx_set_texture_name",
    args=[bgfx_texture_handle, c_char_p, c_int32],
    returns=None)
destroy_texture = _bind("bgfx_destroy_texture",
args=[bgfx_texture_handle],
returns=None)
create_frame_buffer = _bind("bgfx_create_frame_buffer",
args=[c_uint16, c_uint16, bgfx_texture_format, c_uint64],
returns=bgfx_frame_buffer_handle)
set_frame_buffer_name = _bind("bgfx_set_frame_buffer_name",
args=[bgfx_frame_buffer_handle, c_char_p, c_int32],
returns=None)
create_frame_buffer_scaled = _bind("bgfx_create_frame_buffer_scaled",
args=[backbuffer_ratio, bgfx_texture_format, c_uint64],
returns=bgfx_frame_buffer_handle)
create_frame_buffer_from_handles = _bind("bgfx_create_frame_buffer_from_handles",
args=[c_uint8, POINTER(bgfx_texture_handle), c_bool],
returns=bgfx_frame_buffer_handle)
create_frame_buffer_from_attachment = _bind("bgfx_create_frame_buffer_from_attachment",
args=[c_uint8, POINTER(attachment), c_bool],
returns=bgfx_frame_buffer_handle)
create_frame_buffer_from_nwh = _bind("bgfx_create_frame_buffer_from_nwh",
args=[c_void_p, c_uint16, c_uint16, bgfx_texture_format, bgfx_texture_format],
returns=bgfx_frame_buffer_handle)
get_texture = _bind("bgfx_get_texture",
args=[bgfx_frame_buffer_handle, c_uint8],
returns=bgfx_texture_handle)
destroy_frame_buffer = _bind("bgfx_destroy_frame_buffer",
args=[bgfx_frame_buffer_handle],
returns=None)
create_uniform = _bind("bgfx_create_uniform",
args=[c_char_p, bgfx_uniform_type, c_uint16],
returns=bgfx_uniform_handle)
destroy_uniform = _bind("bgfx_destroy_uniform",
args=[bgfx_uniform_handle],
returns=None)
create_occlusion_query = _bind("bgfx_create_occlusion_query",
args=[],
returns=bgfx_occlusion_query_handle)
get_result = _bind("bgfx_get_result",
args=[bgfx_occlusion_query_handle, POINTER(c_int32)],
returns=occlusion_query_result)
destroy_occlusion_query = _bind("bgfx_destroy_occlusion_query",
args=[bgfx_occlusion_query_handle],
returns=None)
set_palette_color = _bind("bgfx_set_palette_color",
args=[c_uint8, POINTER(c_float)],
returns=None)
set_view_name = _bind("bgfx_set_view_name",
args=[bgfx_view_id, c_char_p],
returns=None)
set_view_rect = _bind("bgfx_set_view_rect",
args=[bgfx_view_id, c_uint16, c_uint16, c_uint16, c_uint16],
returns=None)
set_view_rect_auto = _bind("bgfx_set_view_rect_auto",
args=[bgfx_view_id, c_uint16, c_uint16, backbuffer_ratio],
returns=None)
set_view_scissor = _bind("bgfx_set_view_scissor",
    args=[bgfx_view_id, c_uint16, c_uint16, c_uint16, c_uint16],
    returns=None)
set_view_clear = _bind("bgfx_set_view_clear",
args=[bgfx_view_id, c_uint16, c_uint32, c_float, c_uint8],
returns=None)
set_view_clear_mrt = _bind("bgfx_set_view_clear_mrt",
args=[bgfx_view_id, c_uint16, c_float, c_uint8, c_uint8, c_uint8, c_uint8, c_uint8, c_uint8, c_uint8, c_uint8, c_uint8],
returns=None)
set_view_mode = _bind("bgfx_set_view_mode",
args=[bgfx_view_id, view_mode],
returns=None)
set_view_frame_buffer = _bind("bgfx_set_view_frame_buffer",
args=[bgfx_view_id, bgfx_frame_buffer_handle],
returns=None)
set_view_transform = _bind("bgfx_set_view_transform",
args=[bgfx_view_id, c_void_p, c_void_p],
returns=None)
set_view_order = _bind("bgfx_set_view_order",
args=[bgfx_view_id, c_uint16, POINTER(bgfx_view_id)],
returns=None)
reset_view = _bind("bgfx_reset_view",
args=[bgfx_view_id],
returns=None)
set_marker = _bind("bgfx_set_marker",
args=[c_char_p],
returns=None)
set_state = _bind("bgfx_set_state",
args=[c_uint64, c_uint32],
returns=None)
set_condition = _bind("bgfx_set_condition",
    args=[bgfx_occlusion_query_handle, c_bool],
    returns=None)
set_stencil = _bind("bgfx_set_stencil",
args=[c_uint32, c_uint32],
returns=None)
set_scissor = _bind("bgfx_set_scissor",
args=[c_uint16, c_uint16, c_uint16, c_uint16],
returns=c_uint16)
set_scissor_cache = _bind("bgfx_set_scissor_cached",
args=[c_uint16],
returns=None)
_set_transform = _bind("bgfx_set_transform",
args=[c_void_p, c_uint16],
returns=c_uint32)
def set_transform(mtx, count):
    # `mtx` is expected to expose the NumPy ndarray.ctypes interface.
    return _set_transform(mtx.ctypes.data_as(POINTER(c_void_p)), count)
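# Usage sketch (assumes `model` is a contiguous 4x4 float32 NumPy array):
#   cache_index = set_transform(model, 1)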
alloc_transform = _bind("bgfx_alloc_transform",
args=[POINTER(bgfx_transform), c_uint16],
returns=c_uint32)
set_transform_cached = _bind("bgfx_set_transform_cached",
args=[c_uint32, c_uint16],
returns=None)
set_uniform = _bind("bgfx_set_uniform",
args=[bgfx_uniform_handle, c_void_p, c_uint16],
returns=None)
set_index_buffer = _bind("bgfx_set_index_buffer",
args=[bgfx_index_buffer_handle, c_uint32, c_uint32],
returns=None)
set_dynamic_index_buffer = _bind("bgfx_set_dynamic_index_buffer",
args=[bgfx_dynamic_index_buffer_handle, c_uint32, c_uint32],
returns=None)
set_transient_index_buffer = _bind("bgfx_set_transient_index_buffer",
args=[POINTER(bgfx_transient_index_buffer), c_uint32, c_uint32],
returns=None)
set_vertex_buffer = _bind("bgfx_set_vertex_buffer",
args=[c_uint8, bgfx_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
set_dynamic_vertex_buffer = _bind("bgfx_set_dynamic_vertex_buffer",
    args=[c_uint8, bgfx_dynamic_vertex_buffer_handle, c_uint32, c_uint32],
    returns=None)
set_transient_vertex_buffer = _bind("bgfx_set_transient_vertex_buffer",
args=[c_uint8, POINTER(transient_vertex_buffer), c_uint32, c_uint32],
returns=None)
set_vertex_count = _bind("bgfx_set_vertex_count",
args=[c_uint32],
returns=None)
set_instance_data_buffer = _bind("bgfx_set_instance_data_buffer",
args=[POINTER(bgfx_instance_data_buffer), c_uint32, c_uint32],
returns=None)
set_instance_data_from_vertex_buffer = _bind("bgfx_set_instance_data_from_vertex_buffer",
args=[bgfx_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
set_instance_data_from_dynamic_vertex_buffer = _bind("bgfx_set_instance_data_from_dynamic_vertex_buffer",
args=[bgfx_dynamic_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
set_instance_count = _bind("bgfx_set_instance_count",
args=[c_uint32],
returns=None)
set_texture = _bind("bgfx_set_texture",
args=[c_uint8, bgfx_uniform_handle, bgfx_texture_handle, c_uint32],
returns=None)
touch = _bind("bgfx_touch",
args=[bgfx_view_id],
returns=None)
submit = _bind("bgfx_submit",
args=[bgfx_view_id, bgfx_program_handle, c_uint32, c_bool],
returns=None)
submit_occlusion_query = _bind("bgfx_submit_occlusion_query",
args=[bgfx_view_id, bgfx_program_handle, bgfx_occlusion_query_handle, c_uint32, c_bool],
returns=None)
submit_indirect = _bind("bgfx_submit_indirect",
args=[bgfx_view_id, bgfx_program_handle, bgfx_indirect_buffer_handle, c_uint16, c_uint16, c_uint32, c_bool],
returns=None)
set_image = _bind("bgfx_set_image",
args=[c_uint8, bgfx_texture_handle, c_uint8, bgfx_access, bgfx_texture_format],
returns=None)
set_compute_index_buffer = _bind("bgfx_set_compute_index_buffer",
args=[c_uint8, bgfx_index_buffer_handle, bgfx_access],
returns=None)
set_compute_vertex_buffer = _bind("bgfx_set_compute_vertex_buffer",
args=[c_uint8, bgfx_vertex_buffer_handle, bgfx_access],
returns=None)
set_compute_dynamic_index_buffer = _bind("bgfx_set_compute_dynamic_index_buffer",
args=[c_uint8, bgfx_dynamic_index_buffer_handle, bgfx_access],
returns=None)
set_compute_dynamic_vertex_buffer = _bind("bgfx_set_compute_dynamic_vertex_buffer",
args=[c_uint8, bgfx_dynamic_vertex_buffer_handle, bgfx_access],
returns=None)
set_compute_indirect_buffer = _bind("bgfx_set_compute_indirect_buffer",
args=[c_uint8, bgfx_indirect_buffer_handle, bgfx_access],
returns=None)
dispatch = _bind("bgfx_dispatch",
args=[bgfx_view_id, bgfx_program_handle, c_uint32, c_uint32, c_uint32],
returns=None)
dispatch_indirect = _bind("bgfx_dispatch_indirect",
args=[bgfx_view_id, bgfx_program_handle, bgfx_indirect_buffer_handle, c_uint16, c_uint16],
returns=None)
discard = _bind("bgfx_discard",
args=[c_void_p],
returns=None)
blit = _bind("bgfx_blit",
args=[bgfx_view_id, bgfx_texture_handle, c_uint8, c_uint16, c_uint16, c_uint16, bgfx_texture_handle, c_uint8, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16],
returns=None)
encoder_set_marker = _bind("bgfx_encoder_set_marker",
args=[POINTER(bgfx_encoder), POINTER(c_char)],
returns=None)
encoder_set_state = _bind("bgfx_encoder_set_state",
args=[POINTER(bgfx_encoder), c_uint64, c_uint32],
returns=None)
encoder_set_condition = _bind("bgfx_encoder_set_condition",
args=[POINTER(bgfx_encoder), bgfx_occlusion_query_handle, c_bool],
returns=None)
encoder_set_stencil = _bind("bgfx_encoder_set_stencil",
args=[POINTER(bgfx_encoder), c_uint32, c_uint32],
returns=None)
encoder_set_scissor = _bind("bgfx_encoder_set_scissor",
args=[POINTER(bgfx_encoder), c_uint16, c_uint16, c_uint16, c_uint16],
returns=c_uint16)
encoder_set_scissor_cached = _bind("bgfx_encoder_set_scissor_cached",
args=[POINTER(bgfx_encoder), c_uint16],
returns=None)
encoder_set_transform = _bind("bgfx_encoder_set_transform",
args=[POINTER(bgfx_encoder), POINTER(c_void_p), c_uint16],
returns=c_uint32)
encoder_alloc_transform = _bind("bgfx_encoder_alloc_transform",
args=[POINTER(bgfx_encoder), POINTER(bgfx_transform), c_uint16],
returns=c_uint32)
encoder_set_transform_cached = _bind("bgfx_encoder_set_transform_cached",
args=[POINTER(bgfx_encoder), c_uint32, c_uint16],
returns=None)
encoder_set_uniform = _bind("bgfx_encoder_set_uniform",
args=[POINTER(bgfx_encoder), bgfx_uniform_handle, POINTER(c_void_p), c_uint16],
returns=None)
encoder_set_index_buffer = _bind("bgfx_encoder_set_index_buffer",
args=[POINTER(bgfx_encoder), bgfx_index_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_dynamic_index_buffer = _bind("bgfx_encoder_set_dynamic_index_buffer",
args=[POINTER(bgfx_encoder), bgfx_dynamic_index_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_transient_index_buffer = _bind("bgfx_encoder_set_transient_index_buffer",
args=[POINTER(bgfx_encoder), POINTER(bgfx_transient_index_buffer), c_uint32, c_uint32],
returns=None)
encoder_set_vertex_buffer = _bind("bgfx_encoder_set_vertex_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_dynamic_vertex_buffer = _bind("bgfx_encoder_set_dynamic_vertex_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_dynamic_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_transient_vertex_buffer = _bind("bgfx_encoder_set_transient_vertex_buffer",
    args=[POINTER(bgfx_encoder), c_uint8, POINTER(transient_vertex_buffer), c_uint32, c_uint32],
    returns=None)
encoder_set_vertex_count = _bind("bgfx_encoder_set_vertex_count",
args=[POINTER(bgfx_encoder), c_uint32],
returns=None)
encoder_set_instance_data_buffer = _bind("bgfx_encoder_set_instance_data_buffer",
args=[POINTER(bgfx_encoder), POINTER(bgfx_instance_data_buffer), c_uint32, c_uint32],
returns=None)
encoder_set_instance_data_from_vertex_buffer = _bind("bgfx_encoder_set_instance_data_from_vertex_buffer",
args=[POINTER(bgfx_encoder), bgfx_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_instance_data_from_dynamic_vertex_buffer = _bind("bgfx_encoder_set_instance_data_from_dynamic_vertex_buffer",
args=[POINTER(bgfx_encoder), bgfx_dynamic_vertex_buffer_handle, c_uint32, c_uint32],
returns=None)
encoder_set_texture = _bind("bgfx_encoder_set_texture",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_uniform_handle, bgfx_texture_handle, c_uint32],
returns=None)
encoder_touch = _bind("bgfx_encoder_touch",
args=[POINTER(bgfx_encoder), bgfx_view_id],
returns=None)
encoder_submit = _bind("bgfx_encoder_submit",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_program_handle, c_uint32, c_bool],
returns=None)
encoder_submit_occlusion_query = _bind("bgfx_encoder_submit_occlusion_query",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_program_handle, bgfx_occlusion_query_handle, c_uint32, c_bool],
returns=None)
encoder_submit_indirect = _bind("bgfx_encoder_submit_indirect",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_program_handle, bgfx_indirect_buffer_handle, c_uint16, c_uint16, c_uint32, c_bool],
returns=None)
encoder_set_image = _bind("bgfx_encoder_set_image",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_texture_handle, c_uint8, bgfx_access, bgfx_texture_format],
returns=None)
encoder_set_compute_index_buffer = _bind("bgfx_encoder_set_compute_index_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_index_buffer_handle, bgfx_access],
returns=None)
encoder_set_compute_vertex_buffer = _bind("bgfx_encoder_set_compute_vertex_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_vertex_buffer_handle, bgfx_access],
returns=None)
encoder_set_compute_dynamic_index_buffer = _bind("bgfx_encoder_set_compute_dynamic_index_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_dynamic_index_buffer_handle, bgfx_access],
returns=None)
encoder_set_compute_dynamic_vertex_buffer = _bind("bgfx_encoder_set_compute_dynamic_vertex_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_dynamic_vertex_buffer_handle, bgfx_access],
returns=None)
encoder_set_compute_indirect_buffer = _bind("bgfx_encoder_set_compute_indirect_buffer",
args=[POINTER(bgfx_encoder), c_uint8, bgfx_indirect_buffer_handle, bgfx_access],
returns=None)
encoder_dispatch = _bind("bgfx_encoder_dispatch",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_program_handle, c_uint32, c_uint32, c_uint32],
returns=None)
encoder_dispatch_indirect = _bind("bgfx_encoder_dispatch_indirect",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_program_handle, bgfx_indirect_buffer_handle, c_uint16, c_uint16],
returns=None)
encoder_discard = _bind("bgfx_encoder_discard",
args=[POINTER(bgfx_encoder)],
returns=None)
encoder_blit = _bind("bgfx_encoder_blit",
args=[POINTER(bgfx_encoder), bgfx_view_id, bgfx_texture_handle, c_uint8, c_uint16, c_uint16, c_uint16, bgfx_texture_handle, c_uint8, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16],
returns=None)
request_screen_shot = _bind("bgfx_request_screen_shot",
args=[bgfx_frame_buffer_handle, POINTER(c_char)],
returns=None)
class BGFX_PLATFORM_DATA(Structure):
_fields_ = [("ndt", c_void_p),
("nwh", c_void_p),
("context", c_void_p),
("backBuffer", c_void_p),
("backBufferDS", c_void_p)]
_bgfx.bgfx_set_platform_data.argtypes = [POINTER(BGFX_PLATFORM_DATA)]
def set_platform_data(handle):
platform_data = BGFX_PLATFORM_DATA(None, handle, None, None, None)
_bgfx.bgfx_set_platform_data(byref(platform_data))
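# Usage sketch (assumes `hwnd` is a native window handle obtained from the host
# windowing toolkit): set_platform_data(hwnd)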
| jnadro/pybgfx | pybgfx/bgfx.py | Python | bsd-2-clause | 45,223 | 0.010548 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import Blueprint, render_template, Markup, url_for
from flask_toolbox.models import Package
package_page = Blueprint('package_page', __name__,
template_folder='templates')
@package_page.route('/packages')
def index():
packages = Package.query.order_by(Package.name).filter(Package.category_id != None).all()
sidebar_title = "All the packages"
package_list = [package.name for package in packages]
print(len(package_list))
return render_template(
'packages.html', packages=packages,
sidebar_title=sidebar_title, package_list=package_list)
@package_page.route('/packages/<package>')
def show(package):
the_package = Package.query.filter_by(name=package).first_or_404()
category = the_package.category
related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all()
if item.name != package]
sidebar_title = (
Markup("Other related packages in the <a href='{0}'>{1}</a> category".format(
url_for('category_page.show', category=category.name),
category.name
))
)
return render_template(
'package.html', package=the_package,
related_packages=related_packages, sidebar_title=sidebar_title)
@package_page.route('/packages/<package>/score')
def score(package):
flask = Package.query.filter_by(name="Flask").first()
the_package = Package.query.filter_by(name=package).first_or_404()
category = the_package.category
related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all()
if item.name != package]
sidebar_title = (
Markup("Other related packages in the <a href='{0}'>{1}</a> category".format(
url_for('category_page.index', category=category.name),
category.name
))
)
return render_template(
'score.html', package=the_package, flask=flask,
related_packages=related_packages, sidebar_title=sidebar_title)
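# A wiring sketch (illustrative; the real application setup lives elsewhere in
# flask_toolbox):
#
#     from flask import Flask
#     from flask_toolbox.views.package import package_page
#
#     app = Flask(__name__)
#     app.register_blueprint(package_page)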
| lord63/flask_toolbox | flask_toolbox/views/package.py | Python | mit | 2,151 | 0.002789 |
import copy
import json
import os
import asyncio
import pytest
import webdriver
from urllib.parse import urlunsplit
from tests.support import defaults
from tests.support.helpers import cleanup_session, deep_update
from tests.support.inline import build_inline
from tests.support.http_request import HTTPRequest
# The webdriver session can outlive a pytest session
_current_session = None
# The event loop needs to outlive the webdriver session
_event_loop = None
_custom_session = False
def pytest_configure(config):
# register the capabilities marker
config.addinivalue_line(
"markers",
"capabilities: mark test to use capabilities"
)
@pytest.fixture
def capabilities():
"""Default capabilities to use for a new WebDriver session."""
return {}
def pytest_generate_tests(metafunc):
if "capabilities" in metafunc.fixturenames:
marker = metafunc.definition.get_closest_marker(name="capabilities")
if marker:
metafunc.parametrize("capabilities", marker.args, ids=None)
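# Usage sketch for the marker registered above (the test body is illustrative):
#
#     @pytest.mark.capabilities({"acceptInsecureCerts": True})
#     def test_with_custom_capabilities(session):
#         ...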
@pytest.fixture(scope="session")
def event_loop():
"""Change event_loop fixture to global."""
global _event_loop
if _event_loop is None:
_event_loop = asyncio.get_event_loop_policy().new_event_loop()
return _event_loop
@pytest.fixture
def http(configuration):
return HTTPRequest(configuration["host"], configuration["port"])
@pytest.fixture
def server_config():
with open(os.environ.get("WD_SERVER_CONFIG_FILE"), "r") as f:
return json.load(f)
@pytest.fixture(scope="session")
def configuration():
host = os.environ.get("WD_HOST", defaults.DRIVER_HOST)
port = int(os.environ.get("WD_PORT", str(defaults.DRIVER_PORT)))
capabilities = json.loads(os.environ.get("WD_CAPABILITIES", "{}"))
return {
"host": host,
"port": port,
"capabilities": capabilities
}
async def reset_current_session_if_necessary(caps):
global _current_session
# If there is a session with different requested capabilities active than
# the one we would like to create, end it now.
if _current_session is not None:
if not _current_session.match(caps):
is_bidi = isinstance(_current_session, webdriver.BidiSession)
if is_bidi:
await _current_session.end()
else:
_current_session.end()
_current_session = None
@pytest.fixture(scope="function")
async def session(capabilities, configuration):
"""Create and start a session for a test that does not itself test session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test.
"""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps)
_current_session.start()
# Enforce a fixed default window size and position
if _current_session.capabilities.get("setWindowRect"):
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session
cleanup_session(_current_session)
@pytest.fixture(scope="function")
async def bidi_session(capabilities, configuration):
"""Create and start a bidi session.
Can be used for a test that does not itself test bidi session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test.
"""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
caps.update({"webSocketUrl": True})
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps,
enable_bidi=True)
_current_session.start()
await _current_session.bidi_session.start()
# Enforce a fixed default window size and position
if _current_session.capabilities.get("setWindowRect"):
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session.bidi_session
await _current_session.bidi_session.end()
cleanup_session(_current_session)
@pytest.fixture(scope="function")
def current_session():
return _current_session
@pytest.fixture
def url(server_config):
def url(path, protocol="http", domain="", subdomain="", query="", fragment=""):
domain = server_config["domains"][domain][subdomain]
port = server_config["ports"][protocol][0]
host = "{0}:{1}".format(domain, port)
return urlunsplit((protocol, host, path, query, fragment))
return url
@pytest.fixture
def inline(url):
"""Take a source extract and produces well-formed documents.
Based on the desired document type, the extract is embedded with
predefined boilerplate in order to produce well-formed documents.
The media type and character set may also be individually configured.
This helper function originally used data URLs, but since these
are not universally supported (or indeed standardised!) across
browsers, it now delegates the serving of the document to wptserve.
This file also acts as a wptserve handler (see the main function
below) which configures the HTTP response using query parameters.
This function returns a URL to the wptserve handler, which in turn
will serve an HTTP response with the requested source extract
inlined in a well-formed document, and the Content-Type header
optionally configured using the desired media type and character set.
Any additional keyword arguments are passed on to the build_url
function, which comes from the url fixture.
"""
def inline(src, **kwargs):
return build_inline(url, src, **kwargs)
return inline
@pytest.fixture
def iframe(inline):
"""Inline document extract as the source document of an <iframe>."""
def iframe(src, **kwargs):
return "<iframe src='{}'></iframe>".format(inline(src, **kwargs))
return iframe
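# A sketch of how these fixtures compose in a test module (the test body is
# illustrative only):
#
#     def test_example(session, inline):
#         session.url = inline("<div id='greeting'>Hello</div>")
#         # drive the document through the session's WebDriver commands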
| nwjs/chromium.src | third_party/blink/web_tests/external/wpt/webdriver/tests/support/fixtures.py | Python | bsd-3-clause | 7,196 | 0.001112 |
from __future__ import absolute_import, print_function, division
from copy import copy
from itertools import product as itertools_product
from unittest import TestCase
import numpy
from numpy import (arange, array, common_type, complex64, complex128, float32,
float64, newaxis, shape, transpose, zeros)
from numpy.testing import assert_array_almost_equal
from six.moves import xrange
import theano
import theano.tensor as T
from theano import tensor, In, shared, config
from theano.compat import exc_message
from theano.printing import pp
from theano.tensor.blas import (_dot22, _dot22scalar, res_is_a, _as_scalar,
_is_real_matrix, _gemm_canonicalize,
_factor_canonicalized, Gemm, Gemv,
gemm_inplace, gemm_no_inplace,
InconsistencyError, Ger, ger, ger_destructive)
from theano.tests import unittest_tools
from .test_basic import (as_tensor_variable, inplace_func,
compile, inplace)
import theano.tensor.blas_scipy
from theano.tests.unittest_tools import attr
if config.mode == 'FAST_COMPILE':
mode_not_fast_compile = 'FAST_RUN'
else:
mode_not_fast_compile = config.mode
mode_blas_opt = theano.compile.get_default_mode().including(
'BlasOpt', 'specialize', 'InplaceBlasOpt')
mode_blas_opt = mode_blas_opt.excluding('c_blas')
def test_dot_eq():
assert T.Dot() == T.Dot()
def sharedX(x, name):
return theano.shared(numpy.asarray(x, config.floatX), name=name)
class t_gemm(TestCase):
"""This test suite is supposed to establish that gemm works as it
is supposed to.
"""
def setUp(self):
unittest_tools.seed_rng()
Gemm.debug = False
@staticmethod
def _gemm(z, a, x, y, b):
assert a.shape == ()
assert b.shape == ()
return b * z + a * numpy.dot(x, y)
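    # _gemm is the NumPy reference for the BLAS GEMM contract,
    # Z <- b * Z + a * dot(X, Y) with scalar a and b; cmp() below checks
    # Theano's gemm_inplace against it across dtypes and linkers.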
@staticmethod
def rand(*args):
return numpy.random.rand(*args)
def cmp(self, z_, a_, x_, y_, b_):
for dtype in ['float32', 'float64', 'complex64', 'complex128']:
z = numpy.asarray(z_, dtype=dtype)
a = numpy.asarray(a_, dtype=dtype)
x = numpy.asarray(x_, dtype=dtype)
y = numpy.asarray(y_, dtype=dtype)
b = numpy.asarray(b_, dtype=dtype)
def cmp_linker(z, a, x, y, b, l):
z, a, x, y, b = [numpy.asarray(p) for p in (z, a, x, y, b)]
z_orig = z.copy()
tz, ta, tx, ty, tb = [as_tensor_variable(p).type()
for p in (z, a, x, y, b)]
f = inplace_func([tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l))
new_z = f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b)
# print z_orig, z_after, z, type(z_orig), type(z_after), type(z)
unittest_tools.assert_allclose(z_after, z)
if a == 0.0 and b == 1.0:
return
elif z_orig.size == 0:
self.assertTrue(z.size == 0)
else:
self.assertFalse(numpy.all(z_orig == z))
cmp_linker(copy(z), a, x, y, b, 'c|py')
cmp_linker(copy(z), a, x, y, b, 'py')
if (not dtype.startswith("complex")
and theano.config.cxx):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
cmp_linker(copy(z), a, x, y, b, 'c')
def test0a(self):
Gemm.debug = True
try:
g = gemm_inplace([1.], 1., [1.], [1.], 1.)
except TypeError as e:
if exc_message(e) is Gemm.E_rank:
return
self.fail()
def test0(self):
try:
self.cmp(1., 0., 1.0, 1.0, 1.0)
except TypeError as e:
if exc_message(e) is Gemm.E_rank:
return
self.fail()
def test2(self):
try:
self.cmp(2., 1.0, [3, 2, 1.], [[1], [2], [3.]], 1.0)
except TypeError as e:
self.assertTrue(exc_message(e) == Gemm.E_rank)
return
self.fail()
def test4(self):
self.cmp(self.rand(3, 4), 1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test5(self):
self.cmp(self.rand(3, 4), 1.0,
self.rand(3, 5), self.rand(5, 4), 1.0)
def test6(self):
self.cmp(self.rand(3, 4), 1.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test7(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), 0.0)
def test8(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), 0.6)
def test9(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test10(self):
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test11(self):
self.cmp(self.rand(3, 4), -1.0,
self.rand(3, 5), self.rand(5, 4), 1.0)
def test12(self):
self.cmp(self.rand(3, 4), -1.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test_shape_0(self):
self.cmp(self.rand(0, 4), -1.0, self.rand(0, 5), self.rand(5, 4), -1.0)
self.cmp(self.rand(3, 0), -1.0, self.rand(3, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 0), self.rand(0, 4), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 0), self.rand(0, 0), -1.0)
def test_factorised_scalar(self):
a = T.matrix()
b = T.matrix()
c = T.matrix()
s = theano.shared(numpy.zeros((5, 5)).astype(config.floatX))
lr1 = T.constant(0.01).astype(config.floatX)
lr2 = T.constant(2).astype(config.floatX)
l2_reg = T.constant(0.0001).astype(config.floatX)
# test constant merge with gemm
f = theano.function([a, b], updates=[(s, lr1 * T.dot(a, b) +
l2_reg * lr2 * s)],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge
f = theano.function([a, b], updates=[(s, lr1 * (T.dot(a, b) -
l2_reg * s))],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# -2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge and neg
f = theano.function([a, b],
updates=[(s, s - lr1 * (s * .0002 + T.dot(a, b)))],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 0.999998)]
assert len(f) == 1
assert f[0].op == gemm_inplace
def test_destroy_map0(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, Z, Z, 1.0)
except InconsistencyError as e:
if exc_message(e) == Gemm.E_z_uniq:
return
self.fail()
def test_destroy_map1(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)
except InconsistencyError as e:
if exc_message(e) == Gemm.E_z_uniq:
return
self.fail()
def test_destroy_map2(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)
except InconsistencyError as e:
if exc_message(e) == Gemm.E_z_uniq:
return
self.fail()
def test_destroy_map3(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, Z, A, 1.0)
except InconsistencyError as e:
if exc_message(e) == Gemm.E_z_uniq:
return
self.fail()
def test_destroy_map4(self):
"""test that dot args can be aliased"""
Z = shared(self.rand(2, 2), name='Z')
A = shared(self.rand(2, 2), name='A')
one = T.constant(1.0).astype(Z.dtype)
f = inplace_func([], gemm_inplace(Z, one, A, A, one))
f()
f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))
f()
def test_transposes(self):
# three square matrices which are not contiguous
A = self.rand(4, 5)[:, :4]
B = self.rand(4, 5)[:, :4]
C = self.rand(4, 5)[:, :4]
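        # Slicing off the last column of a (4, 5) array yields 4x4 views that
        # are not C-contiguous (one element is skipped between rows), so gemm
        # must cope with strided input here.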
def t(z, x, y, a=1.0, b=0.0, l='c|py', dt='float64'):
z, a, x, y, b = [theano._asarray(p, dtype=dt)
for p in (z, a, x, y, b)]
z_orig = z.copy()
z_after = self._gemm(z, a, x, y, b)
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = compile.Mode(optimizer = None, linker=l))
#f(z, a, x, y, b)
f = inplace_func([], gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l))
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
# tz.value *= 0 # clear z's value
y_T = ty.get_value(borrow=True).T
ty.set_value(tx.get_value(borrow=True).T, borrow=True)
tx.set_value(y_T, borrow=True)
f()
# test that the transposed version of multiplication gives
# same answer
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True).T)
t(C, A, B)
t(C.T, A, B)
t(C, A.T, B, dt='float32')
t(C, A, B.T)
t(C.T, A.T, B)
t(C, A.T, B.T, dt='float32')
t(C.T, A, B.T)
t(C.T, A.T, B.T, dt='float32')
t(C, A[:, :2], B[:2, :])
t(C.T, A[:, :2], B[:2, :], dt='float32')
t(C, A[:2, :].T, B[:2, :])
t(C.T, A[:2, :].T, B[:2, :], dt='float32')
t(C, A[:2, :].T, B[:, :2].T)
t(C.T, A[:2, :].T, B[:, :2].T)
try:
t(C.T, A[:2, :], B[:, :2].T)
except ValueError as e:
if exc_message(e).find('aligned') >= 0:
return
self.fail()
def test_non_contiguous(self):
        # Like test_transposes but with matrices without any
        # contiguous dimension
A = self.rand(4, 4, 3)
B = self.rand(4, 4, 3)
C = self.rand(4, 4, 3)
def t(z, x, y, a=1.0, b=0.0, l='c|py', dt='float64'):
z, a, x, y, b = [theano._asarray(p, dtype=dt)
for p in (z, a, x, y, b)]
z_orig = z.copy()
z_after = numpy.zeros_like(z_orig)
for i in xrange(3):
z_after[:, :, i] = self._gemm(z[:, :, i], a,
x[:, :, i], y[:, :, i], b)
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
for i in xrange(3):
f_i = inplace_func([],
gemm_inplace(tz[:, :, i],
ta, tx[:, :, i], ty[:, :, i], tb),
mode=compile.Mode(optimizer=None, linker=l))
for j in xrange(3):
# tz will not _always_ be overwritten,
# and adding update={...} in the call to function()
# will create cycles, so we update by hand.
z_i = f_i()
z = tz.get_value(borrow=True, return_internal_type=True)
z[:, :, i] = z_i
unittest_tools.assert_allclose(z_after[:, :, i],
tz.get_value(borrow=True)[:, :, i])
tz_i = gemm_no_inplace(tz[:, :, i], ta, tx[
:, :, i], ty[:, :, i], tb)
g_i = theano.function([], tz_i,
updates=[(tz, T.set_subtensor(tz[:, :, i], tz_i))],
mode=compile.Mode(optimizer=None, linker=l))
for j in xrange(3):
g_i()
unittest_tools.assert_allclose(z_after[:, :, i],
tz.get_value(borrow=True)[:, :, i])
t(C, A, B)
t(C.transpose((1, 0, 2)), A, B)
t(C, A.transpose((1, 0, 2)), B, dt='float32')
t(C, A, B.transpose((1, 0, 2)))
t(C.transpose((1, 0, 2)), A.transpose((1, 0, 2)), B)
t(C, A.transpose((1, 0, 2)), B.transpose((1, 0, 2)), dt='float32')
t(C.transpose((1, 0, 2)), A, B.transpose((1, 0, 2)))
t(C.transpose((1, 0, 2)), A.transpose((1, 0, 2)), B.transpose((
1, 0, 2)), dt='float32')
def test_res_is_a():
X, Y, Z, a, b = XYZab()
assert not res_is_a(a, T.sqrt)
assert not res_is_a(a + a, T.sqrt)
assert res_is_a(T.sqrt(a + a), T.sqrt)
# leave the maxclients stuff untested because it requires being in an fgraph.
class t_as_scalar(TestCase):
def test0(self):
"""Test that it works on scalar constants"""
a = T.constant(2.5)
b = T.constant(numpy.asarray([[[0.5]]]))
b2 = b.dimshuffle()
assert b2.ndim == 0
d_a = T.DimShuffle([], [])(a)
d_b = T.DimShuffle([True, True, True], [0, 2, 1])(b)
d_a2 = T.DimShuffle([], ['x', 'x', 'x'])(a)
self.assertTrue(_as_scalar(a) == a)
self.assertTrue(_as_scalar(b) != b)
self.assertTrue(_as_scalar(d_a) != d_a)
self.assertTrue(_as_scalar(d_b) != d_b)
self.assertTrue(_as_scalar(d_a2) != d_a2)
def test1(self):
"""Test that it fails on nonscalar constants"""
a = T.constant(numpy.ones(5))
self.assertTrue(None == _as_scalar(a))
self.assertTrue(None == _as_scalar(T.DimShuffle([False], [0, 'x'])(a)))
def test2(self):
"""Test that it works on scalar variables"""
a = T.dscalar()
d_a = T.DimShuffle([], [])(a)
d_a2 = T.DimShuffle([], ['x', 'x'])(a)
self.assertTrue(_as_scalar(a) is a)
self.assertTrue(_as_scalar(d_a) is a)
self.assertTrue(_as_scalar(d_a2) is a)
def test3(self):
"""Test that it fails on nonscalar variables"""
a = T.matrix()
self.assertTrue(None == _as_scalar(a))
self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],
[0, 'x', 1])(a)))
class T_real_matrix(TestCase):
def test0(self):
self.assertTrue(_is_real_matrix(T.DimShuffle([False, False],
[1, 0])(T.matrix())))
self.assertTrue(not _is_real_matrix(T.DimShuffle([False],
['x', 0])
(T.dvector())))
def fail(msg):
print('FAIL', msg)
assert False
"""This test suite ensures that Gemm is inserted where it belongs, and
that the resulting functions compute the same things as the
originals.
"""
def XYZab():
return T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()
class Failure(Exception):
pass
def just_gemm(i, o, ishapes=[(4, 3), (3, 5), (4, 5), (), ()],
max_graphlen=0, expected_nb_gemm=1):
try:
f = inplace_func(
[In(ii, mutable=True, allow_downcast=True) for ii in i],
o,
mode='FAST_RUN',
on_unused_input='ignore')
nb_gemm = 0
for node in f.maker.fgraph.apply_nodes:
if isinstance(node.op, T.Dot):
raise Failure('dot not changed to gemm_inplace in graph')
if node.op == _dot22:
raise Failure('_dot22 not changed to gemm_inplace in graph')
if node.op == gemm_inplace:
nb_gemm += 1
assert nb_gemm == expected_nb_gemm, (nb_gemm, expected_nb_gemm)
g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),
allow_input_downcast=True, on_unused_input='ignore')
for node in g.maker.fgraph.apply_nodes:
if node.op == gemm_inplace:
raise Exception('gemm_inplace in original graph')
graphlen = len(f.maker.fgraph.toposort())
if max_graphlen and (graphlen <= max_graphlen):
# theano.printing.debugprint(f)
assert False, 'graphlen=%i>%i' % (graphlen, max_graphlen)
rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)
for sh in ishapes])
rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)
for sh in ishapes])
max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))
eps = 1.0e-8
if config.floatX == 'float32':
eps = 1.0e-6
if max_abs_err > eps:
            raise Failure('GEMM is computing the wrong output. max_abs_err =',
max_abs_err)
except Failure:
for node in f.maker.fgraph.toposort():
print('GRAPH', node)
raise
def test_gemm_opt0():
"""Many subgraphs whose dots can be eliminated"""
X, Y, Z, a, b = XYZab()
just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])
just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])
just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])
just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])
just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])
# with transposes (transposes should be pushed through dot in canonicalize)
just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])
just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])
just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],
ishapes=[(5, 3), (3, 4), (4, 5), (), ()])
# with N multiplications instead of just one
just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])
just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])
just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])
def test_gemm_opt_double_gemm():
"""This is the pattern that shows up in the autoencoder"""
X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()
R, S, c = T.matrix(), T.matrix(), T.scalar()
just_gemm([X, Y, Z, a, b, R, S, c],
[Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],
ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],
expected_nb_gemm=2)
ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]
i = [X, Y, Z, a, b, R, S, c]
o = [(a * T.dot(X, Y)
+ gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]
try:
f = inplace_func([In(ii, mutable=True) for ii in i], o,
mode='FAST_RUN', on_unused_input='ignore')
for node in f.maker.fgraph.apply_nodes:
if isinstance(node.op, T.Dot):
raise Failure('dot in graph')
if node.op == _dot22:
raise Failure('_dot22 in graph')
g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),
on_unused_input='ignore')
# for node in g.maker.fgraph.apply_nodes:
# if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')
rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)
for sh in ishapes])
rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))
r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)
for sh in ishapes])
max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))
eps = 1.0e-8
if config.floatX == 'float32':
eps = 1.0e-6
if max_abs_err > eps:
raise Failure(
                'GEMM is computing the wrong output. max_abs_err =',
max_abs_err)
except Failure:
for node in f.maker.fgraph.toposort():
print('GRAPH', node)
raise
def test_gemm_canonicalize():
X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar(
'a'), T.scalar('b')
R, S, U, c, d = T.matrix('R'), T.matrix('S'), T.matrix('U'), T.scalar(
'c'), T.scalar('d')
u = T.row('u')
v = T.vector('v')
w = T.col('w')
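    # _gemm_canonicalize flattens a sum into a list of (scalar, term) pairs,
    # e.g. X + Y + Z -> [(1.0, X), (1.0, Y), (1.0, Z)]; the asserts below
    # check several such decompositions, including negated and scaled terms.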
can = []
_gemm_canonicalize(X + Y + Z, 1.0, can, 0)
assert can == [(1.0, X), (1.0, Y), (1.0, Z)]
can = []
_gemm_canonicalize(X + Y + u, 1.0, can, 0)
assert can == [(1.0, X), (1.0, Y), (1.0, u)], can
can = []
_gemm_canonicalize(X + Y + v, 1.0, can, 0)
# [(1.0, X), (1.0, Y), (1.0, InplaceDimShuffle{x,0}(v))]
assert can[:2] == [(1.0, X), (1.0, Y)]
assert isinstance(can[2], tuple)
assert len(can[2]) == 2
assert can[2][0] == 1.0
assert can[2][1].owner
assert isinstance(can[2][1].owner.op, T.DimShuffle)
assert can[2][1].owner.inputs == [v]
can = []
_gemm_canonicalize(X + Y + w, 1.0, can, 0)
assert can == [(1.0, X), (1.0, Y), (1.0, w)], can
can = []
_gemm_canonicalize(a * X + Y - b * Z * c, 1.0, can, 0)
assert can[0] == (a, X)
assert can[1] == (1.0, Y)
assert can[2][0].owner.op == T.mul
assert can[2][0].owner.inputs[0].owner.op == T.neg
assert can[2][0].owner.inputs[0].owner.inputs[0] == c
assert can[2][0].owner.inputs[1] == b
can = []
_gemm_canonicalize((-d) * X - (a * X + Y - b * Z * c), 1.0, can, 0)
# print can
assert can[0][0].owner.op == T.neg
assert can[0][0].owner.inputs[0] == d
assert can[0][1] == X
assert can[1][0].owner.op == T.neg
assert can[1][0].owner.inputs[0] == a
assert can[2] == (-1.0, Y)
assert can[3][0].owner.op == T.mul
assert can[3][0].owner.inputs == [c, b]
def test_gemm_factor():
X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar(
'a'), T.scalar('b')
R, S, U, c, d = T.matrix('R'), T.matrix('S'), T.matrix('U'), T.scalar(
'c'), T.scalar('d')
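    # _factor_canonicalized merges duplicate terms of the canonical list by
    # summing their coefficients, e.g. X + X -> 2.0 * X, as checked below.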
assert [(1.0, X), (1.0, Y)] == _factor_canonicalized([(1.0, X), (1.0, Y)])
assert [(2.0, X)] == _factor_canonicalized([(1.0, X), (1.0, X)])
def test_upcasting_scalar_nogemm():
# Test that the optimization does not crash when the scale has an incorrect
# dtype, and forces upcasting of the result
v = T.fmatrix('v')
w = T.fmatrix('w')
t = T.fmatrix('t')
alpha = T.dscalar('a')
rval = T.dot(w, v) * alpha + t
f = theano.function([w, v, t, alpha], rval)
t = f.maker.fgraph.toposort()
assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
#theano.printing.debugprint(f, print_type=True)
v = T.fmatrix('v')
w = T.fmatrix('w')
t = T.fmatrix('t')
alpha = T.cscalar('a')
on_opt_error = config.on_opt_error
try:
config.on_opt_error = 'raise'
rval = T.dot(w, v) * alpha + t
f = theano.function([w, v, t, alpha], rval)
finally:
config.on_opt_error = on_opt_error
t = f.maker.fgraph.toposort()
assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
#theano.printing.debugprint(f, print_type=True)
def test_gemm_nested():
X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar(
'a'), T.scalar('b')
R, S, U, c, d = T.matrix('R'), T.matrix('S'), T.matrix('U'), T.scalar(
'c'), T.scalar('d')
just_gemm([X, Y, Z, R, S, U, a, b, c, d],
[a * Z - b * (c * T.dot(X, Y) + d * Z)],
ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (
2, 4), (), (), (), ()],
max_graphlen=1)
# print "---------------------"
just_gemm([X, Y, Z, R, S, U, a, b, c, d],
[a * Z - b * (c * T.dot(X, Y) + d * Z + c * Z)],
ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (
2, 4), (), (), (), ()],
max_graphlen=1)
# print "---------------------"
just_gemm([X, Y, Z, R, S, U, a, b, c, d],
[a * Z - b * (c * T.dot(X, Y) + d * Z + c * U)],
ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (
2, 4), (), (), (), ()],
max_graphlen=3)
def test_gemm_opt_wishlist():
X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()
    # with > 2 additions of the same T.dot(X, Y) term
just_gemm([X, Y, Z, a, b],
[(b * b) * Z * a + (a * a) * T.dot(X, Y) + b * T.dot(X, Y)])
just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y) + T.dot(X, Y)])
def test_gemm_with_vector():
"""Many subgraphs whose dots can be eliminated. This adds a
    vector to the previous test, which triggers the long-sought GEMM
bug.
"""
X, Y, Z, a, b = XYZab()
v = T.vector()
def my_just_gemm(o):
i = [X, Y, Z, a, b, v]
ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]
rval = just_gemm(i, o, ishapes=ishapes)
my_just_gemm([v + T.dot(X, Y) * a + Z * b])
my_just_gemm([v + a * T.dot(X, Y) + b * Z])
my_just_gemm([v + b * Z + a * T.dot(X, Y)])
my_just_gemm([v + T.dot(X, Y) * a - Z * b])
my_just_gemm([v + a * T.dot(X, Y) - b * Z])
my_just_gemm([v + b * Z - a * T.dot(X, Y)])
# with N multiplications instead of just one
my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])
my_just_gemm([v + Z + T.dot(X, Y)])
my_just_gemm([v + Z * b + T.dot(X, Y)])
my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])
my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])
my_just_gemm([Z - T.dot(X, Y) + v])
my_just_gemm([Z * b - T.dot(X, Y) + v])
my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])
def test_gemm_opt_vector_stuff():
X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()
u, v = T.vector(), T.vector()
f = inplace_func([a, u, v], a + T.dot(u, v), mode='FAST_RUN')
if gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]:
raise Failure('gemm_inplace in graph')
f = inplace_func([a, u, X, Y], a * u + T.dot(X, Y), mode='FAST_RUN')
if (gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]):
raise Failure('gemm_inplace in graph')
def test_gemm_unrolled():
"""This test that the gemm optimizer remove the dot22 that was
present in the graph. Otherwise, this add a gemm, but still
compute the dot22.
This was not always the case in the with this the following code.
"""
batch_size = 100
rep_size = 40
rng = numpy.random.RandomState([1, 2, 3])
for num_rounds in range(1, 10):
W = sharedX(rng.randn(rep_size, rep_size), name='W')
V = sharedX(numpy.zeros((batch_size, rep_size)), name='V')
H = sharedX(numpy.zeros((batch_size, rep_size)), name='H')
G = sharedX(numpy.zeros((batch_size, rep_size)), name='G')
init_V = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_V')
init_H = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_H')
cur_V = V
cur_H = H
def update_V(cur_H):
return T.nnet.sigmoid(T.dot(cur_H, W.T))
def update_H(cur_V):
return T.nnet.sigmoid(T.dot(cur_V, W) + T.dot(G, W.T))
for i in xrange(num_rounds):
cur_V = update_V(cur_H)
cur_H = update_H(cur_V)
unrolled_theano = theano.function([], updates=[(V, cur_V), (H, cur_H)],
name='unrolled_theano')
nb_dot = sum([1 for node in unrolled_theano.maker.fgraph.toposort()
if isinstance(node.op, (theano.tensor.Dot,
theano.tensor.blas.Dot22,
theano.tensor.blas.Gemm))])
        # Each round adds 3 dot-like ops, but one of them is always the same,
        # so the final graph should have 1 + 2 * num_rounds dot variant ops.
assert nb_dot == num_rounds * 2 + 1, nb_dot
unrolled_theano()
def test_inplace0():
# should fail to insert gemm_inplace because gemm_inplace would
# create cycles
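    # Z also feeds the outer multiplication below, so an in-place gemm that
    # overwrites Z would create a cycle; the optimizer must fall back to
    # gemm_no_inplace here.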
X, Y, Z, a, b = T.matrix('X'), T.matrix('Y'), T.matrix('Z'), T.scalar(
'a'), T.scalar('b')
R, S, c = T.matrix('R'), T.matrix('S'), T.scalar('c')
f = inplace_func([Z, b, R, S],
[Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN')
if (gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]):
print(pp(f.maker.fgraph.outputs[0]))
raise Failure('gemm_inplace in graph')
assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]
# gemm_inplace should be inserted here, to work in-place on Z*c
f = inplace_func([X, Y, Z, a, b, R, S, c],
[Z * (c * Z + a * T.dot(X, Y) + b * T.dot(R, S).T)],
mode='FAST_RUN')
if (not gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]):
theano.printing.debugprint(f)
raise Failure('no gemm_inplace in graph')
def test_inplace1():
X, Y, Z, a, b = XYZab()
# with > 2 terms in the overall addition
f = inplace_func([X, Y, Z],
[Z + Z + T.dot(X, Y)], mode='FAST_RUN')
# theano.printing.debugprint(f)
    # it doesn't work inplace because we didn't mark Z as a mutable input
assert [n.op for n in f.maker.fgraph.apply_nodes] == [gemm_no_inplace]
def test_dot22():
for dtype1 in ['float32', 'float64', 'complex64', 'complex128']:
a = T.matrix(dtype=dtype1)
for dtype2 in ['float32', 'float64', 'complex64', 'complex128']:
b = T.matrix(dtype=dtype2)
f = theano.function([a, b], T.dot(a, b), mode=mode_blas_opt)
topo = f.maker.fgraph.toposort()
if dtype1 == dtype2:
assert _dot22 in [x.op for x in topo], (dtype1, dtype2)
else:
check = [isinstance(x.op, T.Dot) for x in topo]
assert any(check), (dtype1, dtype2)
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
def cmp(a_shp, b_shp):
av = rng.uniform(size=a_shp).astype(dtype1)
bv = rng.uniform(size=b_shp).astype(dtype2)
f(av, bv)
cmp((3, 4), (4, 5))
cmp((0, 4), (4, 5))
cmp((3, 0), (0, 5))
cmp((3, 4), (4, 0))
cmp((0, 4), (4, 0))
cmp((0, 0), (0, 0))
@attr('slow')
def test_dot22scalar():
# including does not seem to work for 'local_dot_to_dot22' and
# 'local_dot22_to_dot22scalar'
# TODO: exclude other optimizations in BlasOpt?
# m = theano.compile.get_default_mode().including('local_dot_to_dot22',
# 'local_dot22_to_dot22scalar','specialize')
#m = theano.compile.get_default_mode().including('BlasOpt', 'specialize')
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
for dtype1 in ['complex64', 'complex128']:
a = T.matrix('a', dtype=dtype1)
for dtype2 in ['complex64', 'complex128']:
b = T.matrix('b', dtype=dtype2)
for dtype3 in ['complex64', 'complex128']:
c = T.matrix('c', dtype=dtype3)
for dtype4 in ['complex64', 'complex128']:
cst = theano.tensor.basic.constant(.2, dtype=dtype4)
cst2 = theano.tensor.basic.constant(.1, dtype=dtype4)
def check_dot22scalar(func, len_topo_scalar=-1):
topo = func.maker.fgraph.toposort()
ops = [x.op for x in topo]
classes = [type(x.op) for x in topo]
dtype4_upcast = theano.scalar.upcast(dtype4, dtype1,
dtype2)
if dtype1 == dtype2 == dtype3 == dtype4_upcast:
if len_topo_scalar > 0:
assert len(topo) == len_topo_scalar
assert _dot22scalar in ops, (dtype1, dtype2,
dtype3, dtype4)
elif dtype1 == dtype2 == dtype4_upcast:
if not (len_topo_scalar > 0):
assert len(topo) == len_topo_scalar
assert _dot22scalar in ops, (dtype1, dtype2,
dtype3, dtype4)
else:
                            # Currently there is a problem with
                            # optimization order: the constant gets
                            # upcasted to float64 before we try to
                            # merge it with the dot22 of
                            # float32, which prevents the merge.
assert _dot22scalar in ops or _dot22 in ops, (
dtype1, dtype2, dtype3, dtype4)
elif dtype1 == dtype2:
assert _dot22 in ops, (dtype1, dtype2,
dtype3, dtype4)
else:
check = [isinstance(o, T.Dot) for o in ops]
assert any(check), (dtype1, dtype2, dtype3, dtype4)
def cmp(a_shp, b_shp, c_shp, sqr_shp=(5, 5)):
av = rng.uniform(size=a_shp).astype(dtype1)
bv = rng.uniform(size=b_shp).astype(dtype2)
cv = rng.uniform(size=c_shp).astype(dtype3)
sv = rng.uniform(size=sqr_shp).astype(dtype1)
if False:
f = theano.function([a, b], cst * T.dot(a, b),
mode=mode_blas_opt)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 1)
f(av, bv)
if True:
f = theano.function([a, b, c],
cst * c * T.dot(a, b),
mode=mode_blas_opt)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 2)
f(av, bv, cv)
f = theano.function([a, b, c],
c * cst * T.dot(a, b),
mode=mode_blas_opt)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 2)
f(av, bv, cv)
# Here, canonicalize also seems needed
# TODO: add only the optimizations needed?
m2 = mode_blas_opt.including('canonicalize')
f = theano.function([a, b, c],
cst2 * c * cst * T.dot(a, b),
mode=m2)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 2)
f(av, bv, cv)
if dtype1 == dtype2 == dtype3:
f = theano.function([a, b, c],
c * cst * a * T.dot(a, b),
mode=m2)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 2)
f(sv, sv, sv)
f = theano.function([a, b, c],
cst * c * a * T.dot(a, b),
mode=mode_blas_opt)
topo = f.maker.fgraph.toposort()
                            # Currently the canonizer doesn't always
                            # merge all Muls together... the dot22scalar
                            # optimizer does not do a recursive search,
                            # so it doesn't find potential
                            # matches of the scalar. TODO: combine
                            # with the 'canonicalization' that is part
                            # of the Gemm optimizer.
#
# assert _dot22scalar in [x.op for x in topo]
# assert len(topo)==2
f(sv, sv, sv)
f = theano.function([a, b, c],
c * a * cst * T.dot(a, b),
mode=m2)
topo = f.maker.fgraph.toposort()
check_dot22scalar(f, 2)
f(sv, sv, sv)
cmp((3, 4), (4, 5), (3, 5))
cmp((0, 4), (4, 5), (0, 5))
cmp((3, 0), (0, 5), (3, 5))
cmp((3, 4), (4, 0), (3, 0), (0, 0))
cmp((0, 4), (4, 0), (0, 0))
cmp((0, 0), (0, 0), (0, 0))
def test_dot22scalar_cast():
"""
Test that in `dot22_to_dot22scalar` we properly cast integers to floats.
"""
# Note that this test was failing before d5ff6904.
A = T.dmatrix()
for scalar_int_type in T.int_dtypes:
y = T.scalar(dtype=scalar_int_type)
f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
A = T.fmatrix()
for scalar_int_type in T.int_dtypes:
y = T.scalar(dtype=scalar_int_type)
f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
if scalar_int_type in ['int32', 'int64']:
assert _dot22 in [x.op for x in f.maker.fgraph.toposort()]
else:
assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
def test_local_dot22_to_dot22scalar():
"""
    This tests that the bug in gh-1507 is really fixed.
"""
A = T.dmatrix()
mode = theano.compile.mode.get_default_mode()
opt = theano.tensor.opt.in2out(
theano.tensor.blas.local_dot22_to_dot22scalar)
mode = mode.__class__(optimizer=opt)
x = T.dscalar()
y = T.dscalar()
z = T.dscalar()
    # make sure not to have a dimshuffle, as we don't optimize those cases
m = T.dmatrix()
r = T.drow()
for idx, node in enumerate([
# Old working cases
T.mul(_dot22(A, A), x),
T.mul(_dot22(A, A), x, y),
T.mul(_dot22(A, A), x, r),
T.mul(_dot22(A, A), m, x),
T.mul(_dot22(A, A), x, m),
T.mul(_dot22(A, A), x, (m * y)),
T.mul(_dot22(A, A), (m * y), x),
T.mul(_dot22(A, A), x, (r * y)),
T.mul(_dot22(A, A), (r * y), x),
T.mul(_dot22(A, A), (x * y), (m * x)),
T.mul(_dot22(A, A), (r * y), (y * x)),
# Case that was raising an assert that is fixed in gh-1507
T.mul(_dot22(A, A), (m * y), m),
T.mul(_dot22(A, A), m, (m * y)),
T.mul(_dot22(A, A), (r * y), (m * x)),
# assert fixed in gh-1507 and opt case added in gh-1515
T.mul(_dot22(A, A), (m * y * z), m),
T.mul(_dot22(A, A), m, (m * y * z)),
# Opt case added in gh-1515
T.mul(_dot22(A, A), T.mul(m, y, z), m),
T.mul(_dot22(A, A), m, T.mul(m, y, z)),
# Case that opt later in gh-1515
T.mul(_dot22(A, A), (r * m), (m * x)),
]):
node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(
node.owner)
assert node2
f = theano.function([x, y, z, m, r, A], node,
mode=mode, on_unused_input='ignore')
f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])
def test_dot_w_self():
    # This can trigger problems in the optimization because what would
    # normally be a gemm must not become one, since the output is
    # aliased to one of the inputs.
A = shared(value=numpy.ones((2, 2)))
B = T.matrix()
p = T.dot(A, A) * B
grad = T.grad(T.mean(p), A)
f = theano.function([B], p, updates=[(A, A - grad)])
# tests correctness in debugmode
f(numpy.asarray([[0, 1], [2, 3]], dtype=config.floatX))
###############################################################################
# Tests for Gemv
###############################################################################
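# Gemv implements the BLAS gemv-style update
#     y <- beta * y + alpha * dot(A, x)
# The tests below check that dot(matrix, vector) patterns are rewritten to
# Gemv and that the results match numpy.dot, including on strided inputs.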
class TestGemv(TestCase, unittest_tools.TestOptimizationMixin):
def test_dot_vv(self):
''' Currently we generate a gemv for that case'''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
w = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
f = theano.function([], theano.dot(v, w), mode=mode_blas_opt)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, T.dot)
self.assertFunctionContains1(f, Gemv(True))
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(v.get_value(), w.get_value()))
def test_dot_vm(self):
''' Test vector dot matrix '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
m = theano.shared(numpy.array(rng.uniform(size=(2, 3)),
dtype='float32'))
f = theano.function([], theano.dot(v, m), mode=mode_blas_opt)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, T.dot)
self.assertFunctionContains1(f, Gemv(True))
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
def test_dot_mv(self):
''' Test matrix dot vector '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
m = theano.shared(numpy.array(rng.uniform(size=(3, 2)),
dtype='float32'))
f = theano.function([], theano.dot(m, v), mode=mode_blas_opt)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, T.dot)
self.assertFunctionContains1(f, Gemv(True))
# Assert they produce the same output
assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
@staticmethod
def t_gemv1(m_shp):
''' test vector2+dot(matrix,vector1) '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(numpy.array(rng.uniform(size=(m_shp[1],)
), dtype='float32'))
v2_orig = numpy.array(rng.uniform(size=(m_shp[0],)), dtype='float32')
v2 = theano.shared(v2_orig)
m = theano.shared(numpy.array(rng.uniform(size=m_shp),
dtype='float32'))
f = theano.function([], v2 + theano.dot(m, v1), mode=mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Gemv)
assert topo[0].op.inplace == False
# test the inplace version
g = theano.function([], [], updates=[(v2, v2 + theano.dot(m, v1))],
mode=mode_blas_opt)
# Assert they produce the same output
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = g.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Gemv)
if config.mode != 'FAST_COMPILE':
assert topo[0].op.inplace == True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
@attr('slow')
def test_gemv1(self):
self.t_gemv1((3, 2))
self.t_gemv1((0, 2))
self.t_gemv1((3, 0))
self.t_gemv1((0, 0))
def test_gemv2(self):
''' test vector2+dot(vector1,matrix) '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(numpy.array(rng.uniform(size=(2,)),
dtype='float32'))
v2_orig = numpy.array(rng.uniform(size=(3,)), dtype='float32')
v2 = theano.shared(v2_orig)
m = theano.shared(numpy.array(rng.uniform(size=(2, 3)),
dtype='float32'))
f = theano.function([], v2 + theano.dot(v1, m), mode=mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(f(),
numpy.dot(v1.get_value(), m.get_value()) + v2.get_value())
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
assert topo[-1].op.inplace == False
# test the inplace version
g = theano.function([], [], updates=[(v2, v2 + theano.dot(v1, m))],
mode=mode_blas_opt)
# Assert they produce the same output
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(v1.get_value(), m.get_value()) + v2_orig)
topo = g.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
if config.mode != 'FAST_COMPILE':
assert topo[-1].op.inplace == True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(
m.get_value(borrow=True)[::-1, ::-1],
borrow=True)
v2.set_value(v2_orig)
assert numpy.allclose(f(),
numpy.dot(v1.get_value(), m.get_value()) + v2.get_value())
g()
assert numpy.allclose(v2.get_value(),
numpy.dot(v1.get_value(), m.get_value()) + v2_orig)
def test_gemv_broadcast(self):
''' test gemv with some broadcasted input '''
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(numpy.array(rng.uniform(size=(2,)),
dtype='float32'))
v2_orig = numpy.array(rng.uniform(size=(1,)), dtype='float32')
v2 = theano.shared(v2_orig)
m = theano.shared(numpy.array(rng.uniform(size=(1, 2)),
dtype='float32'),
broadcastable=(True, False))
o = theano.dot(m, v1)
f = theano.function([], o + v2, mode=mode_blas_opt)
# Assert they produce the same output
assert numpy.allclose(
f(),
numpy.dot(m.get_value(), v1.get_value()) + v2.get_value())
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
# call gemv directly for mixed broadcast pattern.
o = theano.tensor.blas.gemv_no_inplace(v2, 0.5, m, v1, 0.25)
f = theano.function([], o, mode=mode_blas_opt)
assert numpy.allclose(
f(),
0.5*numpy.dot(m.get_value(), v1.get_value()) + 0.25*v2.get_value())
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
def test_gemv_dimensions(self):
A = T.matrix('A')
x, y = T.vectors('x', 'y')
alpha = theano.shared(theano._asarray(1.0, dtype=config.floatX),
name='alpha')
beta = theano.shared(theano._asarray(1.0, dtype=config.floatX),
name='beta')
z = beta * y + alpha * T.dot(A, x)
f = theano.function([A, x, y], z)
# Matrix value
A_val = numpy.ones((5, 3), dtype=config.floatX)
# Different vector length
ones_3 = numpy.ones(3, dtype=config.floatX)
ones_4 = numpy.ones(4, dtype=config.floatX)
ones_5 = numpy.ones(5, dtype=config.floatX)
ones_6 = numpy.ones(6, dtype=config.floatX)
f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
self.assertRaises(ValueError, f, A_val, ones_4, ones_6)
# The following gemv tests were added in March 2011 by Ian Goodfellow
# and are based on the gemv tests from scipy
# http://projects.scipy.org/scipy/browser/trunk/scipy/linalg/tests/test_fblas.py?rev=6803
# NOTE: At the time these tests were written, theano did not have a
# conjugate function. If such a thing is ever added, the tests involving
# conjugate should be ported over as well.
def matrixmultiply(a, b):
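    """Naive reference matrix multiplication used to validate BLAS results.

    `b` may be a matrix or a vector; a 1-d `b` is treated as a column and
    the result is flattened back to 1-d.
    """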
if len(b.shape) == 1:
b_is_vector = True
b = b[:, newaxis]
else:
b_is_vector = False
assert a.shape[1] == b.shape[0]
c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
for i in xrange(a.shape[0]):
for j in xrange(b.shape[1]):
s = 0
for k in xrange(a.shape[1]):
s += a[i, k] * b[k, j]
c[i, j] = s
if b_is_vector:
c = c.reshape((a.shape[0],))
return c
class BaseGemv(object):
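    """Gemv tests shared across dtypes.

    Subclasses provide `dtype`, `gemv` and `gemv_inplace` (see TestSgemv and
    TestDgemv below); `shared` and `mode` may also be overridden.
    """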
mode = mode_blas_opt # can be overridden with self.mode
shared = staticmethod(theano.shared)
def get_data(self, x_stride=1, y_stride=1):
rng = numpy.random.RandomState(unittest_tools.fetch_seed())
mult = array(1, dtype=self.dtype)
if self.dtype in [complex64, complex128]:
mult = array(1 + 1j, dtype=self.dtype)
alpha = array(1., dtype=self.dtype) * mult
beta = array(1., dtype=self.dtype) * mult
a = rng.randn(3, 3).astype(self.dtype) * mult
x = arange(shape(a)[0] * x_stride, dtype=self.dtype) * mult
y = arange(shape(a)[1] * y_stride, dtype=self.dtype) * mult
return alpha, beta, a, x, y
def test_simple(self):
alpha, beta, a, x, y = [self.shared(value)
for value in self.get_data()]
        desired_oy = alpha.get_value() * matrixmultiply(
            a.get_value(), x.get_value()) + beta.get_value() * y.get_value()
oy = alpha * T.dot(a, x) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
topo = oy_func.maker.fgraph.toposort()
self.assertFunctionContains1(oy_func, self.gemv)
oy_val = oy_func()
assert_array_almost_equal(desired_oy, oy_val)
def test_default_beta_y(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
a = self.shared(a_v)
x = self.shared(x_v)
desired_oy = matrixmultiply(a_v, x_v)
oy = T.dot(a, x)
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv_inplace)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_simple_transpose(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
desired_oy = alpha_v * matrixmultiply(transpose(a_v),
x_v) + beta_v * y_v
oy = alpha * T.dot(a.T, x) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_x_stride(self):
vs = self.get_data(x_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
desired_oy = alpha_v * matrixmultiply(a_v, x_v[::2]) + beta_v * y_v
oy = alpha * T.dot(a, x[::2]) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_x_stride_transpose(self):
vs = self.get_data(x_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(transpose(a_v),
                                              x_v[::2]) + beta_v * y_v
oy = alpha * T.dot(a.T, x[::2]) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_y_stride(self):
vs = self.get_data(y_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v[::2]
oy = alpha * T.dot(a, x) + beta * y[::2]
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_y_stride_transpose(self):
vs = self.get_data(y_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
desired_oy = alpha_v * matrixmultiply(transpose(a_v),
x_v) + beta_v * y_v[::2]
oy = alpha * T.dot(a.T, x) + beta * y[::2]
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_a_strides(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
a_v = a_v[::-1, ::-1]
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v
oy = alpha * T.dot(a, x) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_a_strides_transpose(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha, beta, a, x, y = [self.shared(v) for v in vs]
a_v = a_v[::-1, ::-1]
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
desired_oy = alpha_v * matrixmultiply(transpose(a_v),
x_v) + beta_v * y_v
oy = alpha * T.dot(a.T, x) + beta * y
oy_func = theano.function([], oy, mode=self.mode)
self.assertFunctionContains1(oy_func, self.gemv)
oy_v = oy_func()
assert_array_almost_equal(desired_oy, oy_v)
def test_upcasting_scalar_nogemv(self):
# Test that the optimization does not crash when the scale has
# an incorrect dtype, and forces upcasting of the result
# We put this test in this class to test it on the gpu too.
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
alpha_v = alpha_v.astype("float64")
a_v = a_v.astype("float32")
x_v = x_v.astype("float32")
y_v = y_v.astype("float32")
alpha = T.dscalar('alpha')
a = self.shared(a_v)
x = self.shared(x_v)
y = self.shared(y_v)
rval = T.dot(a, x) * alpha + y
f = theano.function([alpha], rval, mode=self.mode)
        # this function is currently optimized so that the gemv is
        # done inplace on a temporarily allocated buffer, which is
        # then scaled by alpha and added to y with a fused elemwise.
n_gemvs = 0
#theano.printing.debugprint(f, print_type=True)
for node in f.maker.fgraph.toposort():
if node.op == self.gemv_inplace:
n_gemvs += 1
assert node.outputs[0].dtype == 'float32'
assert n_gemvs == 1, n_gemvs
self.assertFunctionContains1(f, self.gemv_inplace)
f(alpha_v)
class TestSgemv(TestCase, BaseGemv, unittest_tools.TestOptimizationMixin):
dtype = float32
gemv = theano.tensor.blas.gemv_no_inplace
gemv_inplace = theano.tensor.blas.gemv_inplace
class TestDgemv(TestCase, BaseGemv, unittest_tools.TestOptimizationMixin):
dtype = float64
gemv = theano.tensor.blas.gemv_no_inplace
gemv_inplace = theano.tensor.blas.gemv_inplace
# The optimization to insert Gemv doesn't work for complex types for now.
# See ticket 653.
# class TestCgemv(TestCase, BaseGemv):
# dtype = complex64
# class TestZgemv(TestCase, BaseGemv):
# dtype = complex128
###############################################################################
# Tests for Ger
###############################################################################
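# Ger implements the BLAS rank-1 update
#     A <- A + alpha * outer(x, y)
# The tests below cover make_node's type checking and the rewrite of
# outer-product patterns to Ger (destructive when the update is in place).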
class TestGer_make_node(TestCase):
def setUp(self):
self.iv = T.tensor(dtype='int32', broadcastable=(False,))
self.fv = T.tensor(dtype='float32', broadcastable=(False,))
self.fv1 = T.tensor(dtype='float32', broadcastable=(True,))
self.dv = T.tensor(dtype='float64', broadcastable=(False,))
self.dv1 = T.tensor(dtype='float64', broadcastable=(True,))
self.cv = T.tensor(dtype='complex64', broadcastable=(False,))
self.zv = T.tensor(dtype='complex128', broadcastable=(False,))
self.fv_2 = T.tensor(dtype='float32', broadcastable=(False,))
self.fv1_2 = T.tensor(dtype='float32', broadcastable=(True,))
self.dv_2 = T.tensor(dtype='float64', broadcastable=(False,))
self.dv1_2 = T.tensor(dtype='float64', broadcastable=(True,))
self.cv_2 = T.tensor(dtype='complex64', broadcastable=(False,))
self.zv_2 = T.tensor(dtype='complex128', broadcastable=(False,))
self.fm = T.fmatrix()
self.dm = T.dmatrix()
self.cm = T.cmatrix()
self.zm = T.zmatrix()
self.fa = T.fscalar()
self.da = T.dscalar()
self.ca = T.cscalar()
self.za = T.zscalar()
def test_works_on_all_valid_dtypes(self):
self.assertEqual(self.fm.type,
ger(self.fm, self.fa, self.fv, self.fv_2).type)
self.assertEqual(self.fm.type,
ger(self.fm, self.fa, self.fv, self.fv_2).type)
self.assertEqual(self.fm.type,
ger(self.fm, self.fa, self.fv, self.fv_2).type)
self.assertEqual(self.fm.type,
ger(self.fm, self.fa, self.fv, self.fv_2).type)
def test_fails_on_invalid_dtypes(self):
self.assertRaises(TypeError,
ger, T.imatrix(), T.iscalar(), T.ivector(),
T.ivector())
def test_fails_for_nonscalar_alpha(self):
self.assertRaises(TypeError,
ger, self.fm, self.fm, self.fv, self.fv_2)
# boundary case - fv1 has the right dtype and could be dimshuffled to a
# scalar, but that's not make_node's job.
self.assertRaises(TypeError,
ger, self.fm, self.fv1, self.fv, self.fv_2)
# actually doing the aforementioned dimshuffle makes it work
self.assertEqual(self.fm.type,
ger(self.fm, self.fv1.dimshuffle(), self.fv, self.fv_2).type)
def test_fails_for_nonmatrix_A(self):
self.assertRaises(TypeError,
ger, self.fv, self.fa, self.fv, self.fv_2)
def test_fails_for_nonvector_x_or_y(self):
self.assertRaises(TypeError,
ger, self.fm, self.fa, self.fv.dimshuffle('x', 0), self.fv_2)
self.assertRaises(TypeError,
ger, self.fm, self.fa, self.fv, self.fv_2.dimshuffle('x', 0))
def test_fails_for_mixed_dtypes(self):
self.assertRaises(TypeError, ger, self.dm, self.fa, self.fv, self.fv_2)
self.assertRaises(TypeError, ger, self.fm, self.da, self.fv, self.fv_2)
self.assertRaises(TypeError, ger, self.fm, self.fa, self.dv, self.fv_2)
self.assertRaises(TypeError, ger, self.fm, self.fa, self.fv, self.dv_2)
self.assertRaises(TypeError, ger, self.cm, self.fa, self.fv, self.dv_2)
self.assertRaises(TypeError, ger, self.cm, self.fa, self.fv, self.zv_2)
class TestGer_OpContract(TestCase, unittest_tools.T_OpContractMixin):
def setUp(self):
self.ops = [ger, ger_destructive]
def clone(self, op):
return Ger(op.destructive)
class TestGer(TestCase, unittest_tools.TestOptimizationMixin):
shared = staticmethod(theano.shared)
def setUp(self):
self.mode = theano.compile.get_default_mode().including('fast_run')
self.mode = self.mode.excluding('c_blas', 'scipy_blas')
dtype = self.dtype = 'float64' # optimization isn't dtype-dependent
self.A = T.tensor(dtype=dtype, broadcastable=(False, False))
self.a = T.tensor(dtype=dtype, broadcastable=())
self.x = T.tensor(dtype=dtype, broadcastable=(False,))
self.y = T.tensor(dtype=dtype, broadcastable=(False,))
self.ger = ger
self.ger_destructive = ger_destructive
self.gemm = gemm_no_inplace
def function(self, inputs, outputs, updates=None):
if updates is None:
updates = []
return theano.function(inputs, outputs, self.mode, updates=updates)
def b(self, bval):
return T.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
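    # local_gemm_to_ger recognizes gemm(A, a, x.dimshuffle(0, 'x'),
    # y.dimshuffle('x', 0), b) with b a constant 0 or 1 as a rank-1 update
    # and rewrites it to Ger; other values of b leave the Gemm in place, as
    # the next four tests check.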
def test_b_0_triggers_ger(self):
""" test local_gemm_to_ger opt"""
assert T.blas.local_gemm_to_ger.transform(
gemm_no_inplace(
self.A, self.a, self.x.dimshuffle(0, 'x'),
self.y.dimshuffle('x', 0), self.b(0)).owner)
def test_b_1_triggers_ger(self):
""" test local_gemm_to_ger opt"""
assert T.blas.local_gemm_to_ger.transform(
gemm_no_inplace(
self.A, self.a, self.x.dimshuffle(0, 'x'),
self.y.dimshuffle('x', 0), self.b(1)).owner)
def test_b_other_does_not_triggers_ger(self):
""" test local_gemm_to_ger opt"""
assert not T.blas.local_gemm_to_ger.transform(
gemm_no_inplace(
self.A, self.a, self.x.dimshuffle(0, 'x'),
self.y.dimshuffle('x', 0), self.b(1.5)).owner)
def test_b_nonconst_does_not_triggers_ger(self):
""" test local_gemm_to_ger opt"""
assert not T.blas.local_gemm_to_ger.transform(
gemm_no_inplace(
self.A, self.a, self.x.dimshuffle(0, 'x'),
self.y.dimshuffle('x', 0), self.a).owner)
def test_outer(self):
f = self.function([self.x, self.y], T.outer(self.x, self.y))
self.assertFunctionContains(f, self.ger_destructive)
f(numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def test_A_plus_outer(self):
f = self.function([self.A, self.x, self.y],
self.A + T.outer(self.x, self.y))
self.assertFunctionContains(f, self.ger)
f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def test_A_plus_scaled_outer(self):
f = self.function([self.A, self.x, self.y],
self.A + 0.1 * T.outer(self.x, self.y))
self.assertFunctionContains(f, self.ger)
f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def test_scaled_A_plus_scaled_outer(self):
f = self.function([self.A, self.x, self.y],
numpy.asarray(0.2, self.dtype) * self.A +
numpy.asarray(0.1, self.dtype) * T.outer(
self.x, self.y))
        # Why gemm? It makes the graph simpler, but did we test that it
        # makes it faster?
self.assertFunctionContains(f, self.gemm)
f(numpy.random.rand(5, 4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
f(numpy.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(5).astype(self.dtype),
numpy.random.rand(4).astype(self.dtype))
def given_dtype(self, dtype, M, N):
""" test corner case shape and dtype"""
f = self.function([self.A, self.x, self.y],
self.A + 0.1 * T.outer(self.x, self.y))
self.assertFunctionContains(f, self.ger)
f(numpy.random.rand(M, N).astype(self.dtype),
numpy.random.rand(M).astype(self.dtype),
numpy.random.rand(N).astype(self.dtype))
f(numpy.random.rand(M, N).astype(self.dtype)[::-1, ::-1],
numpy.random.rand(M).astype(self.dtype),
numpy.random.rand(N).astype(self.dtype))
def test_f32_0_0(self):
return self.given_dtype('float32', 0, 0)
def test_f32_1_0(self):
return self.given_dtype('float32', 1, 0)
def test_f32_0_1(self):
return self.given_dtype('float32', 0, 1)
def test_f32_1_1(self):
return self.given_dtype('float32', 1, 1)
def test_f32_4_4(self):
return self.given_dtype('float32', 4, 4)
def test_f32_7_1(self):
return self.given_dtype('float32', 7, 1)
def test_f32_1_2(self):
return self.given_dtype('float32', 1, 2)
def test_f64_4_5(self):
return self.given_dtype('float64', 4, 5)
def test_c64_7_1(self):
return self.given_dtype('complex64', 7, 1)
def test_c128_1_9(self):
return self.given_dtype('complex128', 1, 9)
def test_inplace(self):
A = self.shared(numpy.random.rand(4, 5).astype(self.dtype))
f = self.function([self.x, self.y], [],
updates=[(A, A + T.constant(0.1, dtype=self.dtype) *
T.outer(self.x, self.y))])
self.assertFunctionContains(f, self.ger_destructive)
f(numpy.random.rand(4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype))
A.set_value(
A.get_value(borrow=True, return_internal_type=True)[::-1, ::-1],
borrow=True)
f(numpy.random.rand(4).astype(self.dtype),
numpy.random.rand(5).astype(self.dtype))
class TestBlasStrides(TestCase):
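    """Check the BLAS Ops on strided, reversed and transposed inputs by
    comparing each result against plain numpy on the same views."""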
dtype = 'float64'
shared = staticmethod(tensor._shared)
mode = theano.compile.get_default_mode()
mode = mode.including('fast_run').excluding('gpu', 'c_blas', 'scipy_blas')
rng = numpy.random.RandomState(seed=unittest_tools.fetch_seed())
def rand(self, *shape):
return theano._asarray(self.rng.rand(*shape), dtype=self.dtype)
def cmp_dot22(self, b_shp, c_shp):
av = numpy.zeros((0, 0), dtype=self.dtype)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
a = self.shared(av, 'a')
b = self.shared(bv, 'b')
c = self.shared(cv, 'c')
b_t = self.shared(bv.T, 'b.T')
c_t = self.shared(cv.T, 'c.T')
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nn = theano.function([], [], updates=[(a, tensor.dot(b, c))],
mode=self.mode)
# print 'class name:', self.__class__.__name__
# theano.printing.debugprint(f_nn)
f_nt = theano.function([], [], updates=[(a, tensor.dot(b, c_t.T))],
mode=self.mode)
f_tn = theano.function([], [], updates=[(a, tensor.dot(b_t.T, c))],
mode=self.mode)
f_tt = theano.function([], [], updates=[(a, tensor.dot(b_t.T, c_t.T))],
mode=self.mode)
        # Try with all stride patterns, and all transposed patterns
for step_signs in itertools_product((-1, 1), repeat=4):
for step in (1, 2):
b_step1, b_step2, c_step1, c_step2 = (s * step
for s in step_signs)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy result
a_n = numpy.dot(bv[::b_step1, ::b_step2],
cv[::c_step1, ::c_step2])
f_nn()
assert numpy.allclose(a.get_value(), a_n)
f_nt()
assert numpy.allclose(a.get_value(), a_n)
f_tn()
assert numpy.allclose(a.get_value(), a_n)
f_tt()
assert numpy.allclose(a.get_value(), a_n)
def test_dot22(self):
self.cmp_dot22((3, 4), (4, 5))
self.cmp_dot22((1, 4), (4, 5))
self.cmp_dot22((3, 4), (4, 1))
self.cmp_dot22((3, 1), (1, 1))
self.cmp_dot22((1, 4), (4, 1))
self.cmp_dot22((3, 1), (1, 5))
self.cmp_dot22((0, 4), (4, 5))
self.cmp_dot22((0, 4), (4, 1))
self.cmp_dot22((0, 1), (1, 5))
self.cmp_dot22((3, 4), (4, 0))
self.cmp_dot22((3, 0), (0, 5))
self.cmp_dot22((0, 4), (4, 0))
self.cmp_dot22((0, 0), (0, 0))
def cmp_dot22scalar(self, b_shp, c_shp):
av = numpy.zeros((0, 0), dtype=self.dtype)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
l = numpy.float32(0.2)
a = self.shared(av, 'a')
b = self.shared(bv, 'b')
c = self.shared(cv, 'c')
b_t = self.shared(bv.T, 'b.T')
c_t = self.shared(cv.T, 'c.T')
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nn = theano.function([], [], updates=[(a, l * tensor.dot(b, c))],
mode=self.mode)
f_nt = theano.function([], [], updates=[(a, l * tensor.dot(b, c_t.T))],
mode=self.mode)
f_tn = theano.function([], [], updates=[(a, l * tensor.dot(b_t.T, c))],
mode=self.mode)
f_tt = theano.function([], [],
updates=[(a, l * tensor.dot(b_t.T, c_t.T))],
mode=self.mode)
        # Try with all stride patterns, and all transposed patterns
for step_signs in itertools_product((-1, 1), repeat=4):
for step in (1, 2):
b_step1, b_step2, c_step1, c_step2 = (s * step
for s in step_signs)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy result
a_n = l * numpy.dot(bv[::b_step1, ::b_step2],
cv[::c_step1, ::c_step2])
f_nn()
assert numpy.allclose(a.get_value(), a_n)
f_nt()
assert numpy.allclose(a.get_value(), a_n)
f_tn()
assert numpy.allclose(a.get_value(), a_n)
f_tt()
assert numpy.allclose(a.get_value(), a_n)
def test_dot22scalar(self):
self.cmp_dot22scalar((3, 4), (4, 5))
self.cmp_dot22scalar((1, 4), (4, 5))
self.cmp_dot22scalar((3, 4), (4, 1))
self.cmp_dot22scalar((3, 1), (1, 1))
self.cmp_dot22scalar((1, 4), (4, 1))
self.cmp_dot22scalar((3, 1), (1, 5))
self.cmp_dot22scalar((0, 4), (4, 5))
self.cmp_dot22scalar((0, 4), (4, 1))
self.cmp_dot22scalar((0, 1), (1, 5))
self.cmp_dot22scalar((3, 4), (4, 0))
self.cmp_dot22scalar((3, 0), (0, 5))
self.cmp_dot22scalar((0, 4), (4, 0))
self.cmp_dot22scalar((0, 0), (0, 0))
def cmp_gemm(self, a_shp, b_shp, c_shp):
av = self.rand(*a_shp)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
l = numpy.float32(0.2)
a = self.shared(av, 'a')
b = self.shared(bv, 'b')
c = self.shared(cv, 'c')
a_t = self.shared(av.T, 'a.T')
b_t = self.shared(bv.T, 'b.T')
c_t = self.shared(cv.T, 'c.T')
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nnn = theano.function([], [],
updates=[(a, (l * a + tensor.dot(b, c)))],
mode=self.mode)
f_nnt = theano.function([], [],
updates=[(a, (l * a + tensor.dot(b, c_t.T)))],
mode=self.mode)
f_ntn = theano.function([], [],
updates=[(a, (l * a + tensor.dot(b_t.T, c)))],
mode=self.mode)
f_ntt = theano.function([], [],
updates=[(a, (l * a + tensor.dot(b_t.T, c_t.T)))],
mode=self.mode)
f_tnn = theano.function([], [],
updates=[(a_t, (l * a_t + tensor.dot(b, c).T))],
mode=self.mode)
f_tnt = theano.function([], [],
updates=[(a_t, (l * a_t + tensor.dot(b, c_t.T).T))],
mode=self.mode)
f_ttn = theano.function([], [],
updates=[(a_t, (l * a_t + tensor.dot(b_t.T, c).T))],
mode=self.mode)
f_ttt = theano.function([], [],
updates=[(a_t, (l * a_t + tensor.dot(b_t.T, c_t.T).T))],
mode=self.mode)
        # Try with all stride patterns, and all transposed patterns
for step_signs in itertools_product((-1, 1), repeat=6):
for step in (1, 2):
a_step1, a_step2, b_step1, b_step2, c_step1, c_step2 = \
(s * step for s in step_signs)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy results
a_n = (l * av[::a_step1, ::a_step2]
+ numpy.dot(bv[::b_step1, ::b_step2],
cv[::c_step1, ::c_step2]))
at_n = (l * av[::a_step1, ::a_step2].T
+ numpy.dot(bv[::b_step1, ::b_step2],
cv[::c_step1, ::c_step2]).T)
# a's value is updated, so we need to reinitialize it each time
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_nnn()
assert numpy.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_nnt()
assert numpy.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_ntn()
assert numpy.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_ntt()
assert numpy.allclose(a.get_value(), a_n)
a_t.set_value(transpose(a_dev.copy())[::a_step2, ::a_step1],
borrow=True)
f_tnn()
assert numpy.allclose(a_t.get_value(), at_n)
a_t.set_value(transpose(a_dev.copy())[::a_step2, ::a_step1],
borrow=True)
f_tnt()
assert numpy.allclose(a_t.get_value(), at_n)
a_t.set_value(transpose(a_dev.copy())[::a_step2, ::a_step1],
borrow=True)
f_ttn()
assert numpy.allclose(a_t.get_value(), at_n)
a_t.set_value(transpose(a_dev.copy())[::a_step2, ::a_step1],
borrow=True)
f_ttt()
assert numpy.allclose(a_t.get_value(), at_n)
def test_gemm(self):
self.cmp_gemm((3, 5), (3, 4), (4, 5))
self.cmp_gemm((1, 5), (1, 4), (4, 5))
self.cmp_gemm((3, 1), (3, 4), (4, 1))
self.cmp_gemm((3, 1), (3, 1), (1, 1))
self.cmp_gemm((1, 1), (1, 4), (4, 1))
self.cmp_gemm((3, 5), (3, 1), (1, 5))
self.cmp_gemm((0, 5), (0, 4), (4, 5))
self.cmp_gemm((0, 1), (0, 4), (4, 1))
self.cmp_gemm((0, 5), (0, 1), (1, 5))
self.cmp_gemm((3, 0), (3, 4), (4, 0))
self.cmp_gemm((3, 5), (3, 0), (0, 5))
self.cmp_gemm((0, 0), (0, 4), (4, 0))
self.cmp_gemm((0, 0), (0, 0), (0, 0))
def cmp_gemv(self, a_shp, b_shp, c_shp):
av = self.rand(a_shp)
bv = self.rand(*b_shp)
cv = self.rand(c_shp)
l = numpy.float32(0.2)
a = self.shared(av, 'a')
b = self.shared(bv, 'b')
c = self.shared(cv, 'c')
b_t = self.shared(bv.T, 'b.T')
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
f_n = theano.function([], [], updates=[(a, (a + l * tensor.dot(b, c)))],
mode=self.mode)
f_t = theano.function([], [],
updates=[(a, (a + l * tensor.dot(b_t.T, c)))],
mode=self.mode)
        # Try with all stride patterns, and all transposed patterns
for step_signs in itertools_product((1, -1), repeat=4):
for step in (1, 2):
a_step, b_step1, b_step2, c_step = (s * step
for s in step_signs)
a.set_value(a_dev.copy()[::a_step], borrow=True)
b.set_value(b_dev.copy()[::b_step1, ::b_step2],
borrow=True)
b_t.set_value(transpose(b_dev.copy())[::b_step2, ::b_step1],
borrow=True)
c.set_value(c_dev.copy()[::c_step], borrow=True)
a_n = (av[::a_step]
+ l * numpy.dot(bv[::b_step1, ::b_step2],
cv[::c_step]))
f_n()
assert numpy.allclose(a.get_value(), a_n), (a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step], borrow=True)
f_t()
assert numpy.allclose(a.get_value(), a_n), (a.get_value(), a_n)
def test_gemv(self):
self.cmp_gemv(3, (3, 5), 5)
self.cmp_gemv(1, (1, 5), 5)
self.cmp_gemv(3, (3, 1), 1)
self.cmp_gemv(0, (0, 5), 5)
self.cmp_gemv(3, (3, 0), 0)
self.cmp_gemv(0, (0, 1), 1)
self.cmp_gemv(1, (1, 0), 0)
self.cmp_gemv(0, (0, 0), 0)
def cmp_ger(self, a_shp, b_shp, c_shp):
av = self.rand(*a_shp)
bv = self.rand(b_shp)
cv = self.rand(c_shp)
l = numpy.float32(0.2)
a = self.shared(av, 'a')
b = self.shared(bv, 'b')
c = self.shared(cv, 'c')
a_t = self.shared(av.T, 'a.T')
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
f_n = theano.function([], [],
updates=[(a, (a + l * tensor.outer(b, c)))],
mode=self.mode)
f_t = theano.function([], [],
updates=[(a_t, (a_t + l * tensor.outer(b, c).T))],
mode=self.mode)
# Try with all stride patterns, and all transposed patterns
for step_signs in itertools_product((1, -1), repeat=4):
for step in (1, 2):
a_step1, a_step2, b_step, c_step = (s * step
for s in step_signs)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
a_t.set_value(transpose(a_dev.copy())[::a_step1, ::a_step2],
borrow=True)
b.set_value(b_dev.copy()[::b_step], borrow=True)
c.set_value(c_dev.copy()[::c_step], borrow=True)
f_n()
n_n = (av[::a_step1, ::a_step2]
+ l * numpy.outer(bv[::b_step], cv[::c_step]))
assert numpy.allclose(a.get_value(), n_n), (a.get_value(), n_n)
f_t()
n_t = (av.T[::a_step1, ::a_step2]
+ l * numpy.outer(bv[::b_step], cv[::c_step]).T)
assert numpy.allclose(a_t.get_value(), n_t),\
(a_t.get_value(), n_t)
def test_ger_strides(self):
self.cmp_ger((3, 5), 3, 5)
self.cmp_ger((1, 5), 1, 5)
self.cmp_ger((3, 1), 3, 1)
self.cmp_ger((0, 5), 0, 5)
self.cmp_ger((3, 0), 3, 0)
self.cmp_ger((0, 1), 0, 1)
self.cmp_ger((1, 0), 1, 0)
self.cmp_ger((0, 0), 0, 0)
def test_gemm_non_contiguous(self):
"""test_gemm_non_contiguous: Test if GEMM works well with non-contiguous matrices."""
aval = numpy.ones((6, 2))
bval = numpy.ones((2, 7))
cval = numpy.arange(7) + numpy.arange(0, .6, .1)[:, numpy.newaxis]
a = theano.shared(aval[:3], borrow=True)
b = theano.shared(bval[:, :5], borrow=True)
c = theano.shared(cval[:3, :5], borrow=True)
s = theano.tensor.scalar()
upd_c = s * c + theano.tensor.dot(a, b)
f = theano.function([s], [], updates={c: upd_c})
f(0)
ref_output = numpy.ones((3, 5)) * 2
unittest_tools.assert_allclose(c.get_value(), ref_output)
class test_infer_shape(unittest_tools.InferShapeTester):
def test_dot22(self):
x, y = T.matrices('xy')
self._compile_and_check(
[x, y], [T.blas._dot22(x, y)],
[numpy.random.random((2, 3)).astype(config.floatX),
numpy.random.random((3, 4)).astype(config.floatX)],
T.blas.Dot22)
def test_dot22scalar(self):
x, y = T.matrices('xy')
a = T.scalar('a')
self._compile_and_check(
[x, y, a], [T.blas._dot22scalar(x, y, a)],
[numpy.random.random((2, 3)).astype(config.floatX),
numpy.random.random((3, 4)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX)],
T.blas.Dot22Scalar)
def test_gemm(self):
x, y, z = T.matrices('xyz')
a = T.scalar('a')
b = T.scalar('b')
self._compile_and_check(
[x, y, a, z, b], [T.blas.gemm(z, a, x, y, b)],
[numpy.random.random((2, 3)).astype(config.floatX),
numpy.random.random((3, 4)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX),
numpy.random.random((2, 4)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX)],
T.blas.Gemm)
def test_gemv(self):
A = T.matrix('A')
x, y = T.vectors('xy')
a = T.scalar('a')
b = T.scalar('b')
self._compile_and_check(
[y, a, A, x, b], [T.blas.gemv(y, a, A, x, b)],
[numpy.random.random((2,)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX),
numpy.random.random((2, 3)).astype(config.floatX),
numpy.random.random((3,)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX)],
T.blas.Gemv)
def test_ger(self):
A = T.matrix('A')
x, y = T.vectors('xy')
a = T.scalar('a')
self._compile_and_check(
[A, a, x, y], [T.blas.ger(A, a, x, y)],
[numpy.random.random((2, 3)).astype(config.floatX),
numpy.asarray(0.5, dtype=config.floatX),
numpy.random.random((2,)).astype(config.floatX),
numpy.random.random((3,)).astype(config.floatX)],
T.blas.Ger)
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/theano/tensor/tests/test_blas.py
|
Python
|
agpl-3.0
| 87,605 | 0.00145 |
from __future__ import print_function
import logging
import mock
import pytest
from faker import Factory
from website import settings as website_settings
from framework.celery_tasks import app as celery_app
logger = logging.getLogger(__name__)
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
'api.caching.tasks',
'factory.generate',
'factory.containers',
'framework.analytics',
'framework.auth.core',
'website.app',
'website.archiver.tasks',
'website.mails',
'website.notifications.listeners',
'website.search.elastic_search',
'website.search_migration.migrate',
'website.util.paths',
'requests_oauthlib.oauth2_session',
'raven.base.Client',
'raven.contrib.django.client.DjangoClient',
'transitions.core',
'MARKDOWN',
'elasticsearch',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
@pytest.fixture(autouse=True)
def override_settings():
"""Override settings for the test environment.
"""
# Make tasks run synchronously, and make sure exceptions get propagated
celery_app.conf.update({
'task_always_eager': True,
'task_eager_propagates': True,
})
website_settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
# TODO: Remove if this is unused?
website_settings.BCRYPT_LOG_ROUNDS = 1
# Make sure we don't accidentally send any emails
website_settings.SENDGRID_API_KEY = None
# Set this here instead of in SILENT_LOGGERS, in case developers
# call setLevel in local.py
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
@pytest.fixture()
def fake():
return Factory.create()
_MOCKS = {
'osf.models.user.new_bookmark_collection': {
'mark': 'enable_bookmark_creation',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.user._create_quickfiles_project': {
'mark': 'enable_quickfiles_creation',
'replacement': lambda *args, **kwargs: None,
},
'framework.celery_tasks.handlers._enqueue_task': {
'mark': 'enable_enqueue_task',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.base.BaseModel.full_clean': {
'mark': 'enable_implicit_clean',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.base._check_blacklist': {
'mark': 'enable_blacklist_check',
'replacement': lambda *args, **kwargs: False,
},
'website.search.search.search_engine': {
'mark': 'enable_search',
'replacement': mock.MagicMock()
},
'website.search.elastic_search': {
'mark': 'enable_search',
'replacement': mock.MagicMock()
}
}
@pytest.fixture(autouse=True, scope='session')
def _test_speedups():
mocks = {}
for target, config in _MOCKS.items():
mocks[target] = mock.patch(target, config['replacement'])
mocks[target].start()
yield mocks
for patcher in mocks.values():
patcher.stop()
@pytest.fixture(autouse=True)
def _test_speedups_disable(request, settings, _test_speedups):
patchers = []
for target, config in _MOCKS.items():
if not request.node.get_marker(config['mark']):
continue
patchers.append(_test_speedups[target])
patchers[-1].stop()
yield
for patcher in patchers:
patcher.start()
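# Usage sketch (illustrative, not part of this conftest; the test name and
# fixture are made up): a test opts back into behaviour that is patched out
# above by carrying the matching marker from _MOCKS, e.g.
#
#     @pytest.mark.enable_search
#     def test_project_gets_indexed(project):
#         ...
#
# _test_speedups_disable then stops the corresponding patcher for the
# duration of that test and restarts it afterwards.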
|
mattclark/osf.io
|
conftest.py
|
Python
|
apache-2.0
| 3,405 | 0.000881 |
import urllib.request
import re
def getHtml(url):
page = urllib.request.urlopen(url)
html = page.read().decode('utf-8')
return html
def getImg(html):
reg = r'src="(.+?\.jpg)" pic_ext'
imgre = re.compile(reg)
imglist = re.findall(imgre,html)
x = 0
for imgurl in imglist:
urllib.request.urlretrieve(imgurl,'pic/%s.jpg' % x)
x+=1
return imglist
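# Note (assumption, not in the original script): urlretrieve writes into a
# relative 'pic/' directory, so that directory must already exist; creating
# it up front, e.g.
#     import os
#     os.makedirs('pic', exist_ok=True)
# avoids a FileNotFoundError on the first download.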
html = getHtml("http://tieba.baidu.com/p/2460150866")
list=getImg(html)
for i in list:
print(i)
|
Octoberr/swmcdh
|
cong/zijixieyige.py
|
Python
|
apache-2.0
| 496 | 0.014113 |
import time
from robograph.datamodel.nodes.lib import buffers
def test_buffer():
instance = buffers.Buffer()
assert instance.requirements == []
expected = dict(a=1, b=2, c=3)
instance.input(expected)
instance.set_output_label('any')
assert instance.output() == expected
def test_delayed_buffer():
delay = 2.5
instance = buffers.DelayedBuffer(seconds=delay)
assert instance.requirements == ['seconds']
expected = dict(a=1, b=2, c=3)
instance.input(expected)
instance.set_output_label('any')
start_time = time.time()
result = instance.output()
end_time = time.time()
assert result == expected
assert end_time - start_time >= delay
|
csparpa/robograph
|
robograph/datamodel/tests/test_buffers.py
|
Python
|
apache-2.0
| 703 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class PersonGroupOperations(object):
"""PersonGroupOperations operations.
    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create(
self, person_group_id, name, user_data=None, recognition_model="recognition_01", custom_headers=None, raw=False, **operation_config):
"""Create a new person group with specified personGroupId, name,
user-provided userData and recognitionModel.
<br /> A person group is the container of the uploaded person data,
including face recognition features.
<br /> After creation, use [PersonGroup Person -
Create](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/create)
to add persons into the group, and then call [PersonGroup -
Train](https://docs.microsoft.com/rest/api/faceapi/persongroup/train)
to get this group ready for [Face -
Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify).
<br /> No image will be stored. Only the person's extracted face
features and userData will be stored on server until [PersonGroup
Person -
Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete)
or [PersonGroup -
Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete)
is called.
<br/>'recognitionModel' should be specified to associate with this
person group. The default value for 'recognitionModel' is
        'recognition_01'. If the latest model is needed, please explicitly specify
the model you need in this parameter. New faces that are added to an
existing person group will use the recognition model that's already
associated with the collection. Existing face features in a person
group can't be updated to features extracted by another version of
recognition model.
Person group quota:
* Free-tier subscription quota: 1,000 person groups. Each holds up to
1,000 persons.
* S0-tier subscription quota: 1,000,000 person groups. Each holds up to
10,000 persons.
* to handle larger scale face identification problem, please consider
using
[LargePersonGroup](https://docs.microsoft.com/rest/api/faceapi/largepersongroup).
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param recognition_model: Possible values include: 'recognition_01',
'recognition_02', 'recognition_03', 'recognition_04'
:type recognition_model: str or
~azure.cognitiveservices.vision.face.models.RecognitionModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.MetaDataContract(name=name, user_data=user_data, recognition_model=recognition_model)
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'MetaDataContract')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
create.metadata = {'url': '/persongroups/{personGroupId}'}
def delete(
self, person_group_id, custom_headers=None, raw=False, **operation_config):
"""Delete an existing person group. Persisted face features of all people
in the person group will also be deleted.
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/persongroups/{personGroupId}'}
def get(
self, person_group_id, return_recognition_model=False, custom_headers=None, raw=False, **operation_config):
"""Retrieve person group name, userData and recognitionModel. To get
person information under this personGroup, use [PersonGroup Person -
List](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/list).
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param return_recognition_model: A value indicating whether the
operation should return 'recognitionModel' in response.
:type return_recognition_model: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PersonGroup or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.PersonGroup or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if return_recognition_model is not None:
query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PersonGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/persongroups/{personGroupId}'}
def update(
self, person_group_id, name=None, user_data=None, custom_headers=None, raw=False, **operation_config):
"""Update an existing person group's display name and userData. The
        properties that do not appear in the request body will not be updated.
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.NameAndUserDataContract(name=name, user_data=user_data)
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'NameAndUserDataContract')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
update.metadata = {'url': '/persongroups/{personGroupId}'}
def get_training_status(
self, person_group_id, custom_headers=None, raw=False, **operation_config):
"""Retrieve the training status of a person group (completed or ongoing).
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TrainingStatus or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.face.models.TrainingStatus or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.get_training_status.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrainingStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_training_status.metadata = {'url': '/persongroups/{personGroupId}/training'}
def list(
self, start=None, top=1000, return_recognition_model=False, custom_headers=None, raw=False, **operation_config):
"""List person groups’ personGroupId, name, userData and
recognitionModel.<br />
* Person groups are stored in alphabetical order of personGroupId.
* "start" parameter (string, optional) is a user-provided personGroupId
value that returned entries have larger ids by string comparison.
"start" set to empty to indicate return from the first item.
* "top" parameter (int, optional) specifies the number of entries to
        return. A maximum of 1000 entries can be returned in one call. To fetch
more, you can specify "start" with the last returned entry’s Id of the
current call.
<br />
For example, total 5 person groups: "group1", ..., "group5".
<br /> "start=&top=" will return all 5 groups.
<br /> "start=&top=2" will return "group1", "group2".
<br /> "start=group2&top=3" will return "group3", "group4", "group5".
.
:param start: List person groups from the least personGroupId greater
than the "start".
:type start: str
:param top: The number of person groups to list.
:type top: int
:param return_recognition_model: A value indicating whether the
operation should return 'recognitionModel' in response.
:type return_recognition_model: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.PersonGroup]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start is not None:
query_parameters['start'] = self._serialize.query("start", start, 'str', max_length=64)
if top is not None:
query_parameters['top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
if return_recognition_model is not None:
query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[PersonGroup]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list.metadata = {'url': '/persongroups'}
def train(
self, person_group_id, custom_headers=None, raw=False, **operation_config):
"""Queue a person group training task, the training task may not be
started immediately.
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.train.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
train.metadata = {'url': '/persongroups/{personGroupId}/train'}
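# Usage sketch (illustrative only; the client class name and how it is
# constructed are assumptions, not defined in this file): the typical flow
# with these operations is create -> add persons/faces -> train -> poll the
# training status, e.g.
#
#     client.person_group.create(person_group_id='mygroup', name='My group')
#     client.person_group.train(person_group_id='mygroup')
#     status = client.person_group.get_training_status(person_group_id='mygroup')
#
# where `client` is assumed to be a FaceClient whose `person_group` attribute
# is this PersonGroupOperations object.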
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_operations.py
|
Python
|
mit
| 21,247 | 0.001459 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lineinfile
author:
- "Daniel Hokka Zakrissoni (@dhozac)"
- "Ahti Kitsik (@ahtik)"
extends_documentation_fragment:
- files
- validate
short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in
a file only. See the M(replace) module if you want to change
multiple, similar lines or check M(blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(copy) or M(template) modules.
version_added: "0.7"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: false
version_added: 1.7
description:
- The regular expression to look for in every line of the file. For
C(state=present), the pattern to replace if found; only the last line
found will be replaced. For C(state=absent), the pattern of the line
to remove. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
state:
required: false
choices: [ present, absent ]
default: "present"
aliases: []
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the
file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.1"
description:
- Used with C(state=present). If set, line can contain backreferences
(both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly;
C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
If specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
before the last match of specified regular expression. A value is
available; C(BOF) for inserting the line at the beginning of the file.
If specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. By default it will fail if the file
is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- lineinfile:
dest: /etc/selinux/config
regexp: '^SELINUX='
line: 'SELINUX=enforcing'
- lineinfile:
dest: /etc/sudoers
state: absent
regexp: '^%wheel'
- lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
owner: root
group: root
mode: 0644
- lineinfile:
dest: /etc/httpd/conf/httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: 'Listen 8080'
- lineinfile:
dest: /etc/services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
# Add a line to a file if it does not exist, without passing regexp
- lineinfile:
dest: /tmp/testfile
line: '192.168.1.99 foo.lab.net foo'
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- lineinfile: "
dest: /etc/sudoers
state: present
regexp: '^%wheel'
line: '%wheel ALL=(ALL) NOPASSWD: ALL'
- lineinfile:
dest: /opt/jboss-as/bin/standalone.conf
regexp: '^(.*)Xms(\d+)m(.*)$'
line: '\1Xms${xms}m\3'
backrefs: yes
# Validate the sudoers file before saving
- lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%ADMIN ALL='
line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
"""
import re
import os
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
from ansible.module_utils._text import to_bytes, to_native
def write_changes(module, b_lines, dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.writelines(b_lines)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile,
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False, diff=diff):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if not os.path.exists(b_destpath) and not module.check_mode:
os.makedirs(b_destpath)
b_lines = []
else:
f = open(b_dest, 'rb')
b_lines = f.readlines()
f.close()
if module._diff:
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
# index[0] is the line num where regexp has been found
# index[1] is the line num where insertafter/inserbefore has been found
index = [-1, -1]
m = None
b_line = to_bytes(line, errors='surrogate_or_strict')
for lineno, b_cur_line in enumerate(b_lines):
if regexp is not None:
match_found = bre_m.search(b_cur_line)
else:
match_found = b_line == b_cur_line.rstrip(b('\r\n'))
if match_found:
index[0] = lineno
m = match_found
elif bre_ins is not None and bre_ins.search(b_cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if insertbefore:
# + 1 for the previous line
index[1] = lineno
msg = ''
changed = False
# Regexp matched a line in the file
b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
if index[0] != -1:
if backrefs:
b_new_line = m.expand(b_line)
else:
# Don't do backref expansion if not asked.
b_new_line = b_line
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
if b_lines[index[0]] != b_new_line:
b_lines[index[0]] = b_new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
b_lines.insert(0, b_line + b_linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if len(b_lines) > 0 and not b_lines[-1][-1:] in (b('\n'), b('\r')):
b_lines.append(b_linesep)
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
# insert* matched, but not the regexp
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
if module._diff:
diff['after'] = to_native(b('').join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(b_dest):
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if module.check_mode and not os.path.exists(b_dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def absent(module, dest, regexp, line, backup):
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
module.exit_json(changed=False, msg="file not present")
msg = ''
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
f = open(b_dest, 'rb')
b_lines = f.readlines()
f.close()
if module._diff:
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
found = []
b_line = to_bytes(line, errors='surrogate_or_strict')
def matcher(b_cur_line):
if regexp is not None:
match_found = bre_c.search(b_cur_line)
else:
match_found = b_line == b_cur_line.rstrip(b('\r\n'))
if match_found:
found.append(b_cur_line)
return not match_found
b_lines = [l for l in b_lines if matcher(l)]
changed = len(found) > 0
if module._diff:
diff['after'] = to_native(b('').join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup:
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if changed:
msg = "%s line(s) removed" % len(found)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
state=dict(default='present', choices=['absent', 'present']),
regexp=dict(default=None),
line=dict(aliases=['value']),
insertafter=dict(default=None),
insertbefore=dict(default=None),
backrefs=dict(default=False, type='bool'),
create=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
create = params['create']
backup = params['backup']
backrefs = params['backrefs']
dest = params['dest']
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.isdir(b_dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
if params['state'] == 'present':
if backrefs and params['regexp'] is None:
module.fail_json(msg='regexp= is required with backrefs=true')
if params.get('line', None) is None:
module.fail_json(msg='line= is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
line = params['line']
present(module, dest, params['regexp'], line,
ins_aft, ins_bef, create, backup, backrefs)
else:
if params['regexp'] is None and params.get('line', None) is None:
module.fail_json(msg='one of line= or regexp= is required with state=absent')
absent(module, dest, params['regexp'], params.get('line', None), backup)
if __name__ == '__main__':
main()
|
chrismeyersfsu/ansible-modules-core
|
files/lineinfile.py
|
Python
|
gpl-3.0
| 15,603 | 0.001795 |
# -*- coding: UTF-8 -*-
from django.template.context import RequestContext
from project.tramitacao.models import Tbpecastecnicas, \
Tbprocessorural,Tbchecklistprocessobase, Tbprocessosanexos, Tbprocessobase,Tbprocessourbano, Tbcaixa, AuthUser, Tbmunicipio, Tbprocessoclausula, Tbpendencia, Tbetapa, Tbtransicao
from project.geoinformacao.models import TbparcelaGeo
from project.tramitacao.relatorio_base import relatorio_ods_base_header,\
relatorio_ods_base
from django.db.models import Q
from django.contrib.auth.decorators import permission_required
from django.http.response import HttpResponse
from odslib import ODS
from django.shortcuts import render_to_response
from django.db.models import Q
from project.livro.models import Tbtituloprocesso
import datetime
import urllib2
import json
def lista(request):
return render_to_response('sicop/relatorio/lista.html',{}, context_instance = RequestContext(request))
# Processes that have a technical report (peca tecnica)
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_peca(request):
if request.method == "POST":
p_rural = []
        # Query ordered and/or based on search filters
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_com_peca = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
for r in p_rural:
if Tbpecastecnicas.objects.filter( nrcpfrequerente = r.nrcpfrequerente.replace('.','').replace('-','') ):
p_rural_com_peca.append( r )
        # Report generation
nome_relatorio = "relatorio-processos-com-peca"
titulo_relatorio = "RELATORIO DOS PROCESSOS COM PECAS TECNICAS"
planilha_relatorio = "Processos com peca"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_com_peca), ods)
        # Column headers
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
        # Query data
x = 5
for obj in p_rural_com_peca:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
            # fetch all of the process's pending issues that have not been resolved
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
            # collecting the descriptions of the issues still pending
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
            # collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
        # Document generation
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_peca.html',{}, context_instance = RequestContext(request))
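# Shared pattern note (illustrative, not part of the original module; the
# variable names below are placeholders): every report view in this file
# follows the same odslib recipe used above -- build the header sheet, fill
# the cells, then stream the spreadsheet as the HTTP response, e.g.
#
#     ods = ODS()
#     sheet = relatorio_ods_base_header(planilha, titulo, total_linhas, ods)
#     sheet.getCell(0, 7).setAlignHorizontal('center').stringValue('valor')
#     relatorio_ods_base(ods, planilha)
#     response = HttpResponse(mimetype=ods.mimetype.toString())
#     response['Content-Disposition'] = 'attachment; filename=relatorio.ods'
#     ods.save(response)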
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_processo(request):
if request.method == "POST":
pecas = []
        # Query ordered and/or based on search filters
consulta = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_com_proc = []
pecas = consulta.order_by( request.POST['ordenacao'] )
for p in pecas:
if len(Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente )) > 0:
pecas_com_proc.append(p)
        # Report generation
nome_relatorio = "relatorio-pecas-com-processo"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS COM PROCESSO"
planilha_relatorio = "Pecas com processo"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas_com_proc), ods)
        # Column headers
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
        # Query data
x = 5
for obj in pecas_com_proc:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
if obj.tbmunicipio is None:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue('')
else:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbmunicipio.nome_mun)
x += 1
        # Document generation
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_processo.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_gleba(request):
    # fetch the technical reports grouped by gleba (land tract)
pecas = Tbpecastecnicas.objects.distinct('tbgleba')
glebas = []
    # collecting the glebas that have technical reports
for obj in pecas:
glebas.append( obj.tbgleba )
    # all technical reports
# pecas = Tbpecastecnicas.objects.all()
for g in glebas:
qtd = 0
for p in pecas:
if p.tbgleba.id == g.id:
qtd += 1
print 'Total: '+str(qtd)
context = dict(
titulo='Relatório das Peças Técnicas por Gleba',
total=len(glebas),
glebas=glebas
)
return render_to_response('relatorio/pecas-por-gleba.odt',dictionary=context,format='odt',filename='relatorio-pecas-por-gleba.odt')
# Technical reports not approved
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_nao_aprovada(request):
if request.method == "POST":
pecas = []
        # Query ordered and/or based on search filters
consulta = Tbpecastecnicas.objects.filter( Q(stpecatecnica = False, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id) )
pecas = consulta.order_by( request.POST['ordenacao'] )
        # Report generation
nome_relatorio = "relatorio-pecas-nao-aprovadas"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS NAO APROVADAS"
planilha_relatorio = "Pecas nao aprovadas"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas), ods)
        # Column headers
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
        # Query data
x = 5
for obj in pecas:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
x += 1
        # Document generation
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_nao_aprovada.html',{}, context_instance = RequestContext(request))
# Rejected technical reports
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_rejeitada(request):
if request.method == "POST":
pecas = []
        # Query ordered and/or based on search filters
consulta = Tbpecastecnicas.objects.filter( Q(stenviadobrasilia = False, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id) )
pecas = consulta.order_by( request.POST['ordenacao'] )
        # Report generation
nome_relatorio = "relatorio-pecas-rejeitadas"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS REJEITADAS"
planilha_relatorio = "Pecas rejeitadas"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas), ods)
        # Column headers
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
#QUERY DATA
x = 5
for obj in pecas:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_rejeitada.html',{}, context_instance = RequestContext(request))
#TECHNICAL PIECES WITHOUT A PROCESS
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_sem_processo(request):
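# Lists the technical pieces of the user's division whose applicant CPF has no
# matching Tbprocessorural record, i.e. pieces not tied to any rural process.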
if request.method == "POST":
pecas = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_sem_proc = []
pecas = consulta.order_by( request.POST['ordenacao'] )
for p in pecas:
if len(Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente )) == 0:
pecas_sem_proc.append(p)
#REPORT GENERATION
nome_relatorio = "relatorio-pecas-sem-processo"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS SEM PROCESSO"
planilha_relatorio = "Pecas sem processo"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas_sem_proc), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
#QUERY DATA
x = 5
for obj in pecas_sem_proc:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
if obj.tbmunicipio is None:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue('')
else:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbmunicipio.nome_mun)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_sem_processo.html',{}, context_instance = RequestContext(request))
#PROCESSES WITHOUT A TECHNICAL PIECE - TO BE FINISHED
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_sem_peca(request):
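# Lists rural processes (classification 1) of the user's division whose
# applicant CPF has no Tbpecastecnicas record; the sentinel CPFs
# '99999999999' and '00000000000' are skipped.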
if request.method == "POST":
p_rural = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbclassificacaoprocesso__id = 1, tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_sem_peca = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
x = 0
for rr in p_rural:
if not Tbpecastecnicas.objects.filter( nrcpfrequerente = rr.nrcpfrequerente ):
if rr.nrcpfrequerente != '99999999999' and rr.nrcpfrequerente != '00000000000':
p_rural_sem_peca.append(rr)
#REPORT GENERATION
nome_relatorio = "relatorio-processos-sem-peca"
titulo_relatorio = "RELATORIO DOS PROCESSOS SEM PECAS TECNICAS"
planilha_relatorio = "Processos sem peca"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_sem_peca), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#QUERY DATA
x = 5
for obj in p_rural_sem_peca:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
# fetch all pending issues of the process that have not been resolved yet
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_sem_peca.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_sem_peca_com_parcela_sigef(request):
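# Same selection as processo_sem_peca, but each remaining CPF is also looked up
# in the public SIGEF API and only processes with at least one parcel
# registered there are kept.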
if request.method == "POST":
p_rural = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbclassificacaoprocesso__id = 1, tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_sem_peca = []
p_rural_sem_peca_com_parcela_sigef = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
x = 0
for rr in p_rural:
if not Tbpecastecnicas.objects.filter( nrcpfrequerente = rr.nrcpfrequerente ):
if rr.nrcpfrequerente != '99999999999' and rr.nrcpfrequerente != '00000000000':
p_rural_sem_peca.append(rr)
for r in p_rural_sem_peca:
# process r has no technical piece in SICOP
#look the CPF up in SIGEF
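# The SIGEF endpoint answers with JSON containing 'status' and 'parcelas' keys;
# a non-empty 'parcelas' list means the CPF has at least one registered parcel.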
try:
response = urllib2.urlopen('https://sigef.incra.gov.br/api/destinacao/parcelas/?cpf='+r.nrcpfrequerente)
retorno = json.loads(response.read())
if retorno['status'] == 'OK':
if retorno['parcelas']:
p_rural_sem_peca_com_parcela_sigef.append(r)
print r.nrcpfrequerente
x += 1
except Exception:
# the SIGEF request or the JSON decode failed for this CPF: skip it and keep counting
x += 1
print str( x )+' - '+str(len(p_rural_sem_peca))
#REPORT GENERATION
nome_relatorio = "relatorio-processos-sem-peca-com-parcela-sigef"
titulo_relatorio = "RELATORIO DOS PROCESSOS SEM PECAS TECNICAS COM PARCELA(S) NO SIGEF"
planilha_relatorio = "Processos sem peca com parcela(s) no SIGEF"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_sem_peca_com_parcela_sigef), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#QUERY DATA
x = 5
for obj in p_rural_sem_peca_com_parcela_sigef:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
# fetch all pending issues of the process that have not been resolved yet
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_sem_peca_com_parcela_sigef.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processos(request):
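# Exports every rural process of the user's division (sentinel CPFs excluded),
# including the descriptions of its unresolved pending and notified issues.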
if request.method == "POST":
p_rural = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_com_peca = []
if request.POST['ordenacao'] == 'nmrequerente':
p_rural = consulta.order_by( request.POST['ordenacao'] )
else:
p_rural = consulta.order_by( 'tbprocessobase__'+request.POST['ordenacao'] )
for r in p_rural:
if r.nrcpfrequerente != '99999999999' and r.nrcpfrequerente != '00000000000':
p_rural_com_peca.append( r )
#REPORT GENERATION
nome_relatorio = "relatorio-todos-processos-rurais"
titulo_relatorio = "RELATORIO DOS PROCESSOS RURAIS"
planilha_relatorio = "Processos Rurais"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_com_peca), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#QUERY DATA
x = 5
for obj in p_rural_com_peca:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
# fetch all pending issues of the process that have not been resolved yet
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processos.html',{}, context_instance = RequestContext(request))
#fetch the technical pieces that are not linked to any process
pecas = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_sem_proc = []
for p in pecas:
if not Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente ):
pecas_sem_proc.append(p)
context = dict(
titulo='Relatório das Peças Técnicas sem processo',
total=len(pecas_sem_proc),
lista=pecas_sem_proc
)
return render_to_response('relatorio/pecas-sem-processo.odt',dictionary=context,format='odt',filename='relatorio-pecas-sem-processo.odt')
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def varredura_processos(request):
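# Sweep report: every base process whose number contains the posted filter,
# with applicant/CPF resolved per process type, its attachments and its
# unresolved pending and notified issues.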
if request.method == "POST":
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessobase.objects.filter( nrprocesso__icontains=request.POST['processo'],tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
processos = consulta.order_by( request.POST['ordenacao'] )
#REPORT GENERATION
nome_relatorio = "relatorio-todos-processos"
titulo_relatorio = "RELATORIO DE TODOS OS PROCESSOS"
planilha_relatorio = "Todos os Processos"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(processos), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'CPF/CNPJ' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Anexos' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Tipo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("3in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("2.5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("5in")
sheet.getColumn(6).setWidth("5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("1.5in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(10).setWidth("2in")
#QUERY DATA
x = 5
for obj in processos:
#check whether a typed process record exists for this base process
if ( Tbprocessorural.objects.filter( tbprocessobase__id = obj.id ) or Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id ) or Tbprocessourbano.objects.filter( tbprocessobase__id = obj.id ) ) and obj.nrprocesso != '99999999999999999':
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
#print str(obj.id)
#fetch the applicant name (rural, clausula) or the settlement name (urbano)
requerente = ''
cpfcnpj = ''
if obj.tbtipoprocesso.id == 1:
requerente = Tbprocessorural.objects.filter( tbprocessobase__id = obj.id )[0].nmrequerente
cpfcnpj = Tbprocessorural.objects.filter( tbprocessobase__id = obj.id )[0].nrcpfrequerente
elif obj.tbtipoprocesso.id == 2:
requerente = Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id )[0].nminteressado
cpfcnpj = Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id )[0].nrcpfinteressado
else:
requerente = Tbprocessourbano.objects.filter( tbprocessobase__id = obj.id )[0].nmpovoado
cpfcnpj = Tbprocessourbano.objects.filter( tbprocessobase__id = obj.id )[0].nrcnpj
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(requerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(cpfcnpj)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nrprocesso)
#fetch the attachments of this process and concatenate their number with the applicant or settlement name
anexo = ''
anexos = Tbprocessosanexos.objects.filter( tbprocessobase__id = obj.id )
for an in anexos:
if an.tbprocessobase_id_anexo.tbtipoprocesso.id == 1:
objan = Tbprocessorural.objects.filter( tbprocessobase__id = an.tbprocessobase_id_anexo.id )
anexo += str(objan[0].tbprocessobase.nrprocesso.encode("utf-8"))+':'+objan[0].nmrequerente.encode("utf-8")+"|"
elif an.tbprocessobase_id_anexo.tbtipoprocesso.id == 2:
objan = Tbprocessoclausula.objects.filter( tbprocessobase__id = an.tbprocessobase_id_anexo.id )
anexo += str(objan[0].tbprocessobase.nrprocesso.encode("utf-8"))+':'+str(objan[0].nmrequerente.encode("utf-8"))+"|"
else:
# urbano attachments: nmpovoado lives on Tbprocessourbano, not Tbprocessorural
objan = Tbprocessourbano.objects.filter( tbprocessobase__id = an.tbprocessobase_id_anexo.id )
anexo += str(objan[0].tbprocessobase.nrprocesso.encode("utf-8"))+':'+str(objan[0].nmpovoado.encode("utf-8"))+"|"
#print anexo
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(anexo.decode("utf-8"))
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.tbmunicipio.nome_mun.encode("utf-8"))
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbtipoprocesso.nome.encode("utf-8"))
# fetch all pending issues of the process that have not been resolved yet
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.id, tbstatuspendencia__id = 3)
)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processos_varredura.html',{}, context_instance = RequestContext(request))
#fetch the technical pieces that are not linked to any process
pecas = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_sem_proc = []
for p in pecas:
if not Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente ):
pecas_sem_proc.append(p)
context = dict(
titulo='Relatório das Peças Técnicas sem processo',
total=len(pecas_sem_proc),
lista=pecas_sem_proc
)
return render_to_response('relatorio/pecas-sem-processo.odt',dictionary=context,format='odt',filename='relatorio-pecas-sem-processo.odt')
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processos_agrupados(request):
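# Exports all base processes of the division grouped so that each parent
# process (classification 1) is immediately followed by its attached processes.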
if request.method == "POST":
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
processos = Tbprocessobase.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
#REPORT GENERATION
nome_relatorio = "relatorio-todos-processos-agrupados"
titulo_relatorio = "RELATORIO DE TODOS OS PROCESSOS AGRUPADOS"
planilha_relatorio = "Todos os Processos Agrupados"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(processos), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Principal' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Tipo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Conjuge/Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Telefone' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("3in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("2.5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("5in")
sheet.getColumn(6).setWidth("5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("1.5in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#group each parent process next to its attachments
lp = []
for p in processos:
if p.tbclassificacaoprocesso.id == 1:
lp.append( p )
anexos = Tbprocessosanexos.objects.filter( tbprocessobase__id = p.id )
if anexos:
for an in anexos:
objan = Tbprocessobase.objects.filter( id = an.tbprocessobase_id_anexo.id )
lp.append( objan[0] )
processos = lp
#QUERY DATA
x = 5
for obj in lp:
#check whether a typed process record exists for this base process
if ( Tbprocessorural.objects.filter( tbprocessobase__id = obj.id ) or Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id ) ) and obj.nrprocesso != '99999999999999999':
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
#print str(obj.id)
#fetch the applicant name (rural, clausula) or the settlement name (urbano)
requerente = ''
cpfcnpj = ''
conjuge_titulado = ''
if obj.tbtipoprocesso.id == 1:
requerente = Tbprocessorural.objects.filter( tbprocessobase__id = obj.id )[0].nmrequerente
conjuge_titulado = Tbprocessorural.objects.filter( tbprocessobase__id = obj.id )[0].nmconjuge
cpfcnpj = Tbprocessorural.objects.filter( tbprocessobase__id = obj.id )[0].nrcpfrequerente
elif obj.tbtipoprocesso.id == 2:
requerente = Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id )[0].nminteressado
conjuge_titulado = Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id )[0].nmrequerente
cpfcnpj = Tbprocessoclausula.objects.filter( tbprocessobase__id = obj.id )[0].nrcpfinteressado
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(requerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(cpfcnpj)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue(conjuge_titulado)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nrprocesso)
if obj.tbclassificacaoprocesso.id == 1:
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrprocesso.decode("utf-8"))
else:
an = Tbprocessosanexos.objects.filter( tbprocessobase_id_anexo = obj.id )[0]
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(an.tbprocessobase.nrprocesso.decode("utf-8"))
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.tbmunicipio.nome_mun.encode("utf-8"))
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbtipoprocesso.nome.encode("utf-8"))
# fetch all pending issues of the process that have not been resolved yet
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.id, tbstatuspendencia__id = 2)
)
#pendencias_notificado = Tbpendencia.objects.filter(
# Q(tbprocessobase__id = obj.id, tbstatuspendencia__id = 3)
# )
#sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# collecting the descriptions of the notified issues
#desc_pendencias = ''
#for pend in pendencias_notificado:
# desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
#sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( obj.nmendereco )
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( obj.nmcontato )
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processos_agrupados.html',{}, context_instance = RequestContext(request))
#fetch the technical pieces that are not linked to any process
pecas = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_sem_proc = []
for p in pecas:
if not Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente ):
pecas_sem_proc.append(p)
context = dict(
titulo='Relatório das Peças Técnicas sem processo',
total=len(pecas_sem_proc),
lista=pecas_sem_proc
)
return render_to_response('relatorio/pecas-sem-processo.odt',dictionary=context,format='odt',filename='relatorio-pecas-sem-processo.odt')
#VALIDATED TECHNICAL PIECES
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_validada(request):
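# Exports the technical pieces of the user's division that were already sent to
# Brasilia (stenviadobrasilia=True).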
if request.method == "POST":
pecas = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbpecastecnicas.objects.filter( Q(stenviadobrasilia = True, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id) )
pecas = consulta.order_by( request.POST['ordenacao'] )
#REPORT GENERATION
nome_relatorio = "relatorio-pecas-validadas"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS VALIDADAS"
planilha_relatorio = "Pecas validadas"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
#QUERY DATA
x = 5
for obj in pecas:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_validada.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def pecas(request):
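# Exports every technical piece registered for the user's division.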
if request.method == "POST":
pecas = []
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas = consulta.order_by( request.POST['ordenacao'] )
#REPORT GENERATION
nome_relatorio = "relatorio-todas-as-pecas-tecnicas"
titulo_relatorio = "RELATORIO DE TODAS AS PECAS TECNICAS CADASTRADAS"
planilha_relatorio = "Pecas Tecnicas"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Contrato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Entrega' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Pasta' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Perimetro' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("5in")
sheet.getColumn(3).setWidth("3.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
#QUERY DATA
x = 5
for obj in pecas:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbcontrato.nrcontrato)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nrentrega)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbcaixa.nmlocalarquivo)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nrarea)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrperimetro)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbgleba.nmgleba)
if obj.tbmunicipio is None:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue('')
else:
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbmunicipio.nome_mun)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/pecas.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def etapa_p23(request):
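# Rural (P23) processes currently at the workflow stage selected in the form;
# for now the view only prints the matching process numbers to the console.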
etapas = Tbetapa.objects.filter(
tbtipoprocesso__id = 1,
tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id ).order_by( 'ordem', 'nmfase' )
if request.method == 'POST':
etapa = request.POST['etapa']
p23 = Tbprocessorural.objects.filter(
tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id,
tbprocessobase__tbetapaatual__id = etapa
)
for obj in p23 :
print obj.tbprocessobase.nrprocesso
return render_to_response('sicop/relatorio/etapa_p23.html',{'etapas':etapas}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def etapa_p80(request):
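# Clausula (P80) processes currently at the workflow stage selected in the
# form, exported as an ODS spreadsheet.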
etapas = Tbetapa.objects.filter( tbtipoprocesso__id = 2 ).order_by( 'ordem', 'nmfase' )
if request.method == 'POST':
etapa = request.POST['etapa']
consulta = Tbprocessoclausula.objects.filter(
tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id,
tbprocessobase__tbetapaatual__id = etapa
)
#REPORT GENERATION
nome_relatorio = "relatorio-processos-etapa"
titulo_relatorio = "RELATORIO DOS PROCESSOS DA ETAPA"
planilha_relatorio = "Processos na Etapa"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(consulta), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'CPF Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Imovel' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Domicilio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("3in")
sheet.getColumn(1).setWidth("2.5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("2in")
sheet.getColumn(4).setWidth("4in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#QUERY DATA
x = 5
for obj in consulta:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nminteressado)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfinteressado)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.nmimovel)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
if obj.tbprocessobase.tbmunicipiodomicilio is not None:
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipiodomicilio.nome_mun)
else:
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue('')
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/etapa_p80.html',{'etapas':etapas}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def etapa_urbano(request):
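# Urban processes currently at the selected workflow stage; for now the view
# only prints the matching process numbers to the console.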
etapas = Tbetapa.objects.filter( tbtipoprocesso__id = 3 ).order_by( 'ordem', 'nmfase' )
if request.method == 'POST':
etapa = request.POST['etapa']
urb = Tbprocessourbano.objects.filter(
tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id,
tbprocessobase__tbetapaatual__id = etapa
)
for obj in urb :
print obj.tbprocessobase.nrprocesso
return render_to_response('sicop/relatorio/etapa_urbano.html',{'etapas':etapas}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def titulos(request):
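# Title report: Tbtituloprocesso records stored in boxes of type 'TIT',
# optionally restricted to the boxes checked in the form.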
caixa = Tbcaixa.objects.filter( blativo = True, tbtipocaixa__nmtipocaixa = 'TIT' ).order_by( 'nmlocalarquivo' )
if request.method == "POST":
ids = []
for obj in caixa:
if request.POST.get(str(obj.id), False):
ids.append(obj.id)
if ids:
titulos = Tbtituloprocesso.objects.filter( tbtitulo__tbcaixa__tbtipocaixa__nmtipocaixa = 'TIT', tbtitulo__tbcaixa__pk__in = ids ).order_by( 'tbtitulo__cdtitulo' )
else:
titulos = Tbtituloprocesso.objects.filter( tbtitulo__tbcaixa__tbtipocaixa__nmtipocaixa = 'TIT' ).order_by( 'tbtitulo__cdtitulo' )
if titulos:
#REPORT GENERATION
nome_relatorio = "relatorio-titulos"
titulo_relatorio = "RELATORIO DE TITULOS "
planilha_relatorio = "Titulos"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(titulos), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Titulo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Tipo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Imovel em' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("1.5in")
sheet.getColumn(2).setWidth("2in")
sheet.getColumn(3).setWidth("4.5in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("4in")
sheet.getColumn(6).setWidth("4in")
sheet.getColumn(7).setWidth("4in")
#QUERY DATA
x = 5
for obj in titulos:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbtitulo.cdtitulo)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.tbtitulo.tbtipotitulo.cdtipo)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue( obj.tbtitulo.tbcaixa.nmlocalarquivo )
r = Tbprocessorural.objects.get( tbprocessobase__id = obj.tbprocessobase.id )
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(r.nmrequerente)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(r.nrcpfrequerente)
x += 1
#DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/titulos.html',{"caixa":caixa}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def em_programacao_p80(request):
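# Clausula (P80) parent processes flagged as "em programacao" on a checklist
# item that allows scheduling (blprogramacao=True), exported as an ODS file.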
#if request.method == "POST":
#QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = []
checks = Tbchecklistprocessobase.objects.filter( tbprocessobase__tbtipoprocesso__id = 2, tbprocessobase__tbclassificacaoprocesso__id = 1, tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id, bl_em_programacao = True, tbchecklist__blprogramacao = True ).order_by('tbprocessobase')
for c in checks:
consulta.append( Tbprocessoclausula.objects.filter(tbprocessobase__id = c.tbprocessobase.id)[0] )
#REPORT GENERATION
nome_relatorio = "relatorio-todos-processos-em-programacao"
titulo_relatorio = "RELATORIO DOS PROCESSOS EM PROGRAMACAO"
planilha_relatorio = "Processos em Programacao"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(consulta), ods)
# COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'CPF Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Imovel' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Domicilio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(12, 6).setAlignHorizontal('center').stringValue( 'Programacao' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("3in")
sheet.getColumn(1).setWidth("2.5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("2in")
sheet.getColumn(4).setWidth("4in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
sheet.getColumn(12).setWidth("2in")
#QUERY DATA
x = 5
for obj in consulta:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nminteressado)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfinteressado)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.nmimovel)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
if obj.tbprocessobase.tbmunicipiodomicilio is not None:
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipiodomicilio.nome_mun)
else:
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue('')
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
if obj.tbprocessobase.tbetapaatual is not None:
sheet.getCell(12, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbetapaatual.titulo)
else:
sheet.getCell(12, x+2).setAlignHorizontal('center').stringValue('')
x += 1
    #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def prazos_notificacoes_p80(request):
#if request.method == "POST":
    #QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
prazos = []
consulta = []
checksprazos = Tbchecklistprocessobase.objects.filter( tbchecklist__bl_data_prazo = True, blnao_obrigatorio = False, blsanado = False ).order_by('tbprocessobase')
for obj in checksprazos:
if obj.dtcustom is not None:
if obj.tbchecklist.nrprazo is not None:
dias = obj.tbchecklist.nrprazo - (datetime.datetime.now() - obj.dtcustom).days
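                # Worked example with illustrative numbers: for a checklist item whose
                # deadline is 30 days (nrprazo = 30) and whose dtcustom date was 20 days
                # ago, dias = 30 - 20 = 10, which falls inside the 0..15 window checked
                # below and is therefore reported as an expiring notification.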
if dias >= 0 and dias <= 15:
prazos.append( dict({'obj':obj,'dias':dias}) )
if prazos:
for op in prazos:
proc = Tbprocessoclausula.objects.filter( tbprocessobase__id = op['obj'].tbprocessobase.id )
consulta.append( dict({'proc':proc[0],'check':op['obj'].tbchecklist.nmchecklist,'etapa':op['obj'].tbchecklist.tbetapa.nmfase,'dias':op['dias']}) )
    #GENERATION
nome_relatorio = "relatorio-todos-processos-prazo-notificacao"
titulo_relatorio = "RELATORIO DOS PROCESSOS COM PRAZOS DE NOTIFICACAO"
planilha_relatorio = "Processos com prazos de notificacao"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(consulta), ods)
    # COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'CPF Titulado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF Interessado' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Etapa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Checklist' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Restante (dias)' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("3in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("2in")
sheet.getColumn(4).setWidth("4in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("3.5in")
sheet.getColumn(8).setWidth("5in")
sheet.getColumn(9).setWidth("1.5in")
    #QUERY DATA
x = 5
for obj in consulta:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj['proc'].tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj['proc'].tbprocessobase.nrprocesso)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj['proc'].nmrequerente)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj['proc'].nrcpfrequerente)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj['proc'].nminteressado)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj['proc'].nrcpfinteressado)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj['proc'].tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj['etapa'])
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj['check'])
        sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue(str(obj['dias']))
x += 1
    #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
#PROCESSES THAT HAVE PARCEL(S)
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_parcela(request):
if request.method == "POST":
p_rural = []
parcelas = []
        #QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_com_parcela = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
for r in p_rural:
parcelas = TbparcelaGeo.objects.filter( cpf_detent = r.nrcpfrequerente.replace('.','').replace('-','') ) or Tbpecastecnicas.objects.filter( nrcpfrequerente = r.nrcpfrequerente.replace('.','').replace('-','') )
if parcelas:
p_rural_com_parcela.append( r )
        #GENERATION
nome_relatorio = "relatorio-processos-com-parcela"
titulo_relatorio = "RELATORIO DOS PROCESSOS COM PARCELA(S)"
planilha_relatorio = "Processos com parcela"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_com_parcela), ods)
        # COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(12, 6).setAlignHorizontal('center').stringValue( 'Area (ha)' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
        sheet.getColumn(9).setWidth("2in")
        sheet.getColumn(10).setWidth("2in")
        sheet.getColumn(11).setWidth("2in")
        sheet.getColumn(12).setWidth("2in")
        #QUERY DATA
x = 5
for obj in p_rural_com_parcela:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
            # fetch all of the process's pending issues that have not been resolved
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
            sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( str(len(pendencias_pendente) + len(pendencias_notificado)) )
            # collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
            # collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
area_total = 0
for p in TbparcelaGeo.objects.filter( cpf_detent = obj.nrcpfrequerente.replace('.','').replace('-','') ):
area_total += p.area_ha_ut
sheet.getCell(12, x+2).setAlignHorizontal('center').stringValue( str(area_total) )
x += 1
        #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_parcela.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_sem_parcela(request):
if request.method == "POST":
p_rural = []
        #QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbclassificacaoprocesso__id = 1, tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_sem_parcela = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
x = 0
for rr in p_rural:
if not TbparcelaGeo.objects.filter( cpf_detent = rr.nrcpfrequerente ) and not Tbpecastecnicas.objects.filter( nrcpfrequerente = rr.nrcpfrequerente ):
if rr.nrcpfrequerente != '99999999999' and rr.nrcpfrequerente != '00000000000':
try:
response = urllib2.urlopen('https://sigef.incra.gov.br/api/destinacao/parcelas/?cpf='+rr.nrcpfrequerente,timeout=1)
retorno = json.loads(response.read())
if not retorno['parcelas']:
p_rural_sem_parcela.append(rr)
except:
p_rural_sem_parcela.append(rr)
        #GENERATION
nome_relatorio = "relatorio-processos-sem-parcela"
titulo_relatorio = "RELATORIO DOS PROCESSOS SEM PARCELA(S)"
planilha_relatorio = "Processos sem parcela"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_sem_parcela), ods)
        # COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("1.5in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
        #QUERY DATA
x = 5
for obj in p_rural_sem_parcela:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
            # fetch all of the process's pending issues that have not been resolved
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
            sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( str(len(pendencias_pendente) + len(pendencias_notificado)) )
            # collecting the descriptions of the pending issues
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
            # collecting the descriptions of the notified issues
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
        #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_sem_parcela.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def parcela_sem_processo(request):
if request.method == "POST":
pecas = []
        #QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
parcelas = TbparcelaGeo.objects.all()#filter(gleba__icontains = 'CIGANA')
        #search both the local base and SIGEF
#parcelas = TbparcelaGeo.objects.filter( cpf_detent = r.nrcpfrequerente.replace('.','').replace('-','') ) or Tbpecastecnicas.objects.filter( nrcpfrequerente = r.nrcpfrequerente.replace('.','').replace('-','') )
pecas_sem_proc = []
#pecas = consulta.order_by( request.POST['ordenacao'] )
for p in parcelas:
            if len(Tbprocessorural.objects.filter( nrcpfrequerente = p.cpf_detent )) == 0 and len(Tbprocessoclausula.objects.filter( nrcpfrequerente = p.cpf_detent )) == 0 and len(Tbprocessoclausula.objects.filter( nrcpfinteressado = p.cpf_detent )) == 0:
pecas_sem_proc.append(p)
        #GENERATION
nome_relatorio = "relatorio-parcelas-sem-processo"
titulo_relatorio = "RELATORIO DAS PARCELAS SEM PROCESSO"
planilha_relatorio = "Parcelas sem processo"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas_sem_proc), ods)
        # COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Imovel' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2.5in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("2in")
sheet.getColumn(3).setWidth("2in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
        #QUERY DATA
x = 5
for obj in pecas_sem_proc:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.nome_deten)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.cpf_detent)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.area_ha_ut)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nome)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.municipio)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.gleba)
x += 1
        #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/peca_sem_processo.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def parcela_processo(request):
if request.method == "POST":
pecas = []
        #QUERY ORDERED AND/OR BASED ON SEARCH FILTERS
parcelas = TbparcelaGeo.objects.all()
        #search both the local base and SIGEF
#parcelas = TbparcelaGeo.objects.filter( cpf_detent = r.nrcpfrequerente.replace('.','').replace('-','') ) or Tbpecastecnicas.objects.filter( nrcpfrequerente = r.nrcpfrequerente.replace('.','').replace('-','') )
pecas_com_proc = []
#pecas = consulta.order_by( request.POST['ordenacao'] )
for p in parcelas:
if len(Tbprocessorural.objects.filter( nrcpfrequerente = p.cpf_detent )) > 0:
pecas_com_proc.append(p)
        #GENERATION
nome_relatorio = "relatorio-parcelas-sem-processo"
titulo_relatorio = "RELATORIO DAS PARCELAS COM PROCESSO(S) P23"
planilha_relatorio = "Parcelas com processo(s) P23"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas_com_proc), ods)
        # COLUMN TITLES
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Area' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Imovel' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2.5in")
sheet.getColumn(1).setWidth("2in")
sheet.getColumn(2).setWidth("2in")
sheet.getColumn(3).setWidth("2in")
sheet.getColumn(4).setWidth("2in")
sheet.getColumn(5).setWidth("2in")
        #QUERY DATA
x = 5
for obj in pecas_com_proc:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.nome_deten)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.cpf_detent)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.area_ha_ut)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.nome)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.municipio)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.gleba)
x += 1
        #DOCUMENT GENERATION
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/parcela_processo.html',{}, context_instance = RequestContext(request))
|
waldenilson/TerraLegal
|
project/tramitacao/restrito/relatorio.py
|
Python
|
gpl-2.0
| 111,754 | 0.016833 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class PantsDaemonStats(object):
"""Tracks various stats about the daemon."""
def __init__(self):
self.target_root_size = 0
self.affected_targets_size = 0
self.affected_targets_file_count = 0
self.scheduler_metrics = {}
def set_scheduler_metrics(self, scheduler_metrics):
self.scheduler_metrics = scheduler_metrics
def set_target_root_size(self, size):
self.target_root_size = size
def set_affected_targets_size(self, size):
self.affected_targets_size = size
def get_all(self):
res = dict(self.scheduler_metrics)
res.update({
'target_root_size': self.target_root_size,
'affected_targets_size': self.affected_targets_size,
})
return res
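# Minimal usage sketch (illustrative values; the scheduler metric name below is
# an assumption, not something this module defines):
if __name__ == '__main__':
  stats = PantsDaemonStats()
  stats.set_target_root_size(3)
  stats.set_affected_targets_size(12)
  stats.set_scheduler_metrics({'resulting_graph_size': 42})
  # get_all() merges the scheduler metrics with the target-root and affected-target sizes.
  print(stats.get_all())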
|
baroquebobcat/pants
|
src/python/pants/goal/pantsd_stats.py
|
Python
|
apache-2.0
| 1,009 | 0.006938 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains tests related to MLPerf.
Note this test only passes if the MLPerf compliance library is installed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import logging
import re
import six
import tensorflow.compat.v1 as tf
import benchmark_cnn
import datasets
import mlperf
import test_util
from models import model
from mlperf_compliance import mlperf_log
class _MlPerfTestModel(model.CNNModel):
"""A model to test the MLPerf compliance logging on."""
def __init__(self):
super(_MlPerfTestModel, self).__init__(
'mlperf_test_model', image_size=224, batch_size=2, learning_rate=1)
def add_inference(self, cnn):
assert cnn.top_layer.shape[1:] == (3, 224, 224)
cnn.conv(1, 1, 1, 1, 1, use_batch_norm=True)
cnn.mpool(1, 1, 1, 1, num_channels_in=1)
cnn.reshape([-1, 224 * 224])
cnn.affine(1, activation=None)
# Assert that the batch norm variables are filtered out for L2 loss.
variables = tf.global_variables() + tf.local_variables()
assert len(variables) > len(self.filter_l2_loss_vars(variables))
class MlPerfComplianceTest(tf.test.TestCase):
"""Tests the MLPerf compliance logs.
This serves as a quick check that we probably didn't break the compliance
  logging. It is not meant to be as comprehensive as the official MLPerf
compliance checker will be.
"""
def setUp(self):
super(MlPerfComplianceTest, self).setUp()
benchmark_cnn.setup(benchmark_cnn.make_params())
# Map between regex and the number of times we expect to see that regex in the
  # logs. Entries commented out with a FIXME comment indicate that
# tf_cnn_benchmarks currently fails compliance in that regard, and needs to be
# fixed to be MLPerf compliant.
EXPECTED_LOG_REGEXES = {
# Preprocessing tags
mlperf.tags.INPUT_ORDER: 2, # 1 for training, 1 for eval
# We pass --tf_random_seed=9876 in the test.
r'%s: 9876' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
# The Numpy random seed is hardcoded to 4321.
r'%s: 4321' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_TRAIN_EXAMPLES,
datasets.IMAGENET_NUM_TRAIN_IMAGES): 1,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES,
datasets.IMAGENET_NUM_VAL_IMAGES): 1,
mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MIN_OBJ_COV + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_RATIO_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_AREA_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MAX_ATTEMPTS + '.*': 1,
mlperf.tags.INPUT_RANDOM_FLIP + '.*': 1,
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_CENTRAL_CROP: 1,
r'%s: \[123.68, 116.78, 103.94\].*' % mlperf.tags.INPUT_MEAN_SUBTRACTION:
2,
r'%s: {"min": 256}.*' % mlperf.tags.INPUT_RESIZE_ASPECT_PRESERVING: 1,
# 1 for training, 1 for eval
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_RESIZE: 2,
# Resnet model tags
mlperf.tags.MODEL_HP_BATCH_NORM + '.*': 2,
# 2 for training, 2 for eval. Although there's only 1 conv2d, each conv2d
# produces 2 logs.
mlperf.tags.MODEL_HP_CONV2D_FIXED_PADDING + '.*': 4,
mlperf.tags.MODEL_HP_RELU + '.*': 2,
mlperf.tags.MODEL_HP_INITIAL_MAX_POOL + '.*': 2,
      mlperf.tags.MODEL_HP_DENSE + '.*': 4,
# Note that tags our test model does not emit, like MODEL_HP_SHORTCUT_ADD,
# are omitted here.
r'%s: "categorical_cross_entropy".*' % mlperf.tags.MODEL_HP_LOSS_FN: 1,
# 1 for training, 2 because the _MlPerfTestModel calls this when building
# the model for both training and eval
r'%s: true' % mlperf.tags.MODEL_EXCLUDE_BN_FROM_L2: 3,
r'%s: 0.5.*' % mlperf.tags.MODEL_L2_REGULARIZATION: 1,
# Note we do not handle OPT_LR, since that is printed to stderr using
# tf.Print, which we cannot easily intercept.
# Other tags
'%s: "%s"' % (mlperf.tags.OPT_NAME, mlperf.tags.SGD_WITH_MOMENTUM): 1,
'%s: 0.5' % mlperf.tags.OPT_MOMENTUM: 1,
mlperf.tags.RUN_START: 1,
'%s: 2' % mlperf.tags.INPUT_BATCH_SIZE: 1,
mlperf.tags.TRAIN_LOOP: 1,
mlperf.tags.TRAIN_EPOCH + '.*': 1,
'%s: 2' % mlperf.tags.INPUT_SIZE: 2,
mlperf.tags.EVAL_START: 2,
mlperf.tags.EVAL_STOP: 2,
'%s: 6' % mlperf.tags.EVAL_SIZE: 2,
mlperf.tags.EVAL_ACCURACY + '.*': 2,
'%s: 2.0' % mlperf.tags.EVAL_TARGET: 2,
mlperf.tags.RUN_STOP + '.*': 1,
mlperf.tags.RUN_FINAL: 1
}
EXPECTED_LOG_REGEXES = Counter({re.compile(k): v for
k, v in EXPECTED_LOG_REGEXES.items()})
def testMlPerfCompliance(self):
string_io = six.StringIO()
handler = logging.StreamHandler(string_io)
data_dir = test_util.create_black_and_white_images()
try:
mlperf_log.LOGGER.addHandler(handler)
params = benchmark_cnn.make_params(data_dir=data_dir,
data_name='imagenet',
batch_size=2,
num_warmup_batches=0,
num_batches=2,
num_eval_batches=3,
eval_during_training_every_n_steps=1,
distortions=False,
weight_decay=0.5,
optimizer='momentum',
momentum=0.5,
stop_at_top_1_accuracy=2.0,
tf_random_seed=9876,
ml_perf=True)
with mlperf.mlperf_logger(use_mlperf_logger=True, model='resnet50_v1.5'):
bench_cnn = benchmark_cnn.BenchmarkCNN(params, model=_MlPerfTestModel())
bench_cnn.run()
logs = string_io.getvalue().splitlines()
log_regexes = Counter()
for log in logs:
for regex in self.EXPECTED_LOG_REGEXES:
if regex.search(log):
log_regexes[regex] += 1
if log_regexes != self.EXPECTED_LOG_REGEXES:
diff_counter = Counter(log_regexes)
diff_counter.subtract(self.EXPECTED_LOG_REGEXES)
differences = []
for regex in (k for k in diff_counter.keys() if diff_counter[k]):
found_count = log_regexes[regex]
expected_count = self.EXPECTED_LOG_REGEXES[regex]
differences.append(' For regex %s: Found %d lines matching but '
'expected to find %d' %
(regex.pattern, found_count, expected_count))
raise AssertionError('Logs did not match expected logs. Differences:\n'
'%s' % '\n'.join(differences))
finally:
mlperf_log.LOGGER.removeHandler(handler)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/mlperf_test.py
|
Python
|
apache-2.0
| 7,794 | 0.003977 |
# vim: set fileencoding=utf-8 ts=4 sw=4 expandtab fdm=marker:
"""
Small wrapper around the python ConfigParser module.
"""
import logging
import ConfigParser
LOG = logging.getLogger(__name__)
CONFIG = ConfigParser.ConfigParser()
DEFAULTS = {
'patterns': {
'path' : '(?P<artist>\w+) - (?P<year>\d+) - (?P<album>\w+)'
}
}
def get_param(section, name):
try:
param = CONFIG.get(section, name)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
param = None
if not param:
# Do a default lookup
try:
param = DEFAULTS[section][name]
except KeyError:
# Parameter is not in defaults
LOG.error("Error: Parameter [%s][%s] does not exist", section, name)
param = ""
return param
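# Illustrative usage sketch: with no configuration file read into CONFIG, the
# lookup below falls back to the DEFAULTS table defined above.
if __name__ == '__main__':
    print get_param('patterns', 'path')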
|
turbofish/mcverify
|
config.py
|
Python
|
bsd-2-clause
| 772 | 0.009067 |
"""
Validate the dependencies are installed.
"""
from __future__ import print_function
__all__ = [
'installRequiredToolbox',
'downloadDependency',
'addExtensionPath',
'loadExtensions',
'installPackage',
'tokenRefresher',
'validateToken',
'checkInstall',
'updateToken',
'TokenError',
'userToken',
]
# python imports
import re
import os
import sys
import imp
import json
import urllib
import shutil
import inspect
import zipfile
import threading
import traceback
import subprocess
# Tools imports
import tools
from tools import HFX_PATH
TOKEN = None
PIP_CHECKED = False
INSTALL_CHECKED = False
INSTALLED = []
class TokenError(Exception):
def __init__(self, *args):
tools.TOOLS_LOGGER.error(args[0])
# install handling
def downloadDependency(url, saveAs=None):
"""
Download a file required for FT.
:param url:
:param saveAs:
"""
localPath = os.path.join(tools.PACKAGES, os.path.basename(url) if not saveAs else saveAs)
return urllib.urlretrieve(url, localPath)[0]
def installPackage(package, pipName=None, test=True):
"""
Install packages into FT from pip.
:param package: the name to import the package
:param pipName: in-case the pip install name is different from the module name.
"""
global PIP_CHECKED
# check if pip is installed. This is installed at the Python installs site-packages. Everything else is installed in
# the FloatingTools/packages directory.
executable = None
args = []
if tools.activeWrapper():
if tools.activeWrapper().ARGS:
args = tools.activeWrapper().ARGS
if tools.activeWrapper().EXECUTABLE:
executable = tools.activeWrapper().EXECUTABLE
if not executable:
# determine executable from the application wrapper
executable = os.path.abspath(sys.executable)
prefix = os.listdir(sys.exec_prefix)
prefixLower = [f.lower() for f in prefix]
for possible in ['python', 'python.exe']:
if possible in prefixLower:
executable = os.path.abspath(os.path.join(sys.exec_prefix, prefix[prefixLower.index(possible)]))
break
try:
import pip
except ImportError:
tools.TOOLS_LOGGER.info("Python executable (+args) for pip install: " + executable)
# install pip
downloadPath = downloadDependency("https://raw.githubusercontent.com/aldmbmtl/tools/master/get-pip.py")
with open(downloadPath, 'r') as pipDL:
code = pipDL.read()
code = code.replace('sys.exit(pip.main(["install", "--upgrade"] + args))',
'sys.exit(pip.main(["install", "pip", "-t", "%s"]))' % tools.PACKAGES)
with open(downloadPath, 'w') as pipDL:
pipDL.write(code)
command = [os.path.abspath(executable)] + args + [downloadPath]
# execute the python pip install call
subprocess.call(command)
# delete get-pip.py
os.unlink(downloadPath)
try:
import pip
except ImportError:
        raise Exception('Pip is required for install: %s' %
                        os.path.abspath(tools.__file__ + '/../../'))
# Verify the the target package exists
try:
__import__(package)
except ImportError:
if not pipName:
pipName = package
command = [os.path.abspath(executable), os.path.dirname(pip.__file__), 'install', pipName, '-t', tools.PACKAGES, '--no-cache-dir']
tools.TOOLS_LOGGER.info('Installing: ' + pipName)
tools.TOOLS_LOGGER.info(command)
subprocess.call(command)
if test:
# verify install
__import__(package)
# handle token system
def updateToken(token):
"""
For internal use.
"""
with open(tools.HFX_TOKEN, 'w') as tokenFile:
tokenFile.write(token)
validateToken()
# relaunch the initialize process with the new token
tools.initialize()
def validateToken():
"""
Checks the token created.
"""
global TOKEN
import requests
response = requests.get(tools.buildCall('shed'), headers={'Authorization': str(TOKEN)}, verify=False)
if not TOKEN or response.status_code == 401:
if not os.path.exists(tools.HFX_TOKEN):
with open(tools.HFX_TOKEN, 'w') as tokenFile:
tokenFile.write('')
with open(tools.HFX_TOKEN, 'r') as tokenFile:
refreshToken = tokenFile.read()
tools.TOOLS_LOGGER.info('Validating Access...')
data = 'grant_type=refresh_token&client_id=2okiehdqupvt6icqil6nl255pg&refresh_token=' + refreshToken
refreshResponse = requests.post(
'https://dev-floating-tools.auth.us-west-2.amazoncognito.com/oauth2/token',
data=data,
headers={'Content-Type': 'application/x-www-form-urlencoded'}
)
if 'id_token' in refreshResponse.json():
tools.TOOLS_LOGGER.info('Access Granted...')
TOKEN = refreshResponse.json()['id_token']
return
raise TokenError('%s' % response.json()['message'])
def tokenRefresher():
try:
validateToken()
except TokenError:
if tools.activeWrapper():
tools.activeWrapper().updateToken()
else:
tools.Wrapper.updateToken()
def userToken():
"""
Grab the users token.
"""
global TOKEN
# check the token saved in memory
if not TOKEN:
tokenRefresher()
return TOKEN
# extension handling
def addExtensionPath(path):
"""
Add a custom extensions path for your scripts and modifications to FloatingTools.
:param path: str to a place on disk.
"""
if not os.path.exists(path):
tools.TOOLS_LOGGER.warning('Extension path passed does not exist: ' + path)
return
for f in os.listdir(path):
if f == 'hfx_init.py':
try:
imp.load_source('hfx_init', os.path.join(path, f))
except ImportError:
traceback.print_exc()
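# Illustrative example of an extension file that addExtensionPath() would pick up;
# the path and contents below are assumptions, not shipped with FloatingTools:
#
#   /studio/hfx_extensions/hfx_init.py
#       import tools
#       tools.TOOLS_LOGGER.info('studio extensions loaded')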
def loadExtensions():
if 'HFX_PATH' in os.environ:
path = os.environ['HFX_PATH']
addExtensionPath(path)
# pipeline installers
def checkInstall():
"""
Updates the existing install of the HFX pipeline.
"""
global INSTALL_CHECKED
    if INSTALL_CHECKED or 'HFX_DEV' in os.environ:
return
tools.TOOLS_LOGGER.info('Running version check...')
targetVersion = os.environ['HFX_INSTALL_VERSION']
if targetVersion == 'latest':
targetVersion = max(eval(urllib.urlopen('https://api.hatfieldfx.com/ft/releases').read()))
INSTALL_CHECKED = True
currentVersion = 'unknown'
if os.path.exists(os.path.expanduser('~/.hfx/version')):
with open(os.path.expanduser('~/.hfx/version'), 'r') as versionFile:
currentVersion = versionFile.read()
tools.TOOLS_LOGGER.info('Installed version: ' + currentVersion)
if targetVersion != currentVersion:
tools.TOOLS_LOGGER.info('Updating install: %s => %s' % (currentVersion, targetVersion))
os.environ['HFX_INSTALL_VERSION'] = targetVersion
os.environ['HFX_UPDATE'] = '1'
exec urllib.urlopen('https://raw.githubusercontent.com/aldmbmtl/tools/master/installer.py').read()
# force reload FT
imp.reload(tools)
tools.TOOLS_LOGGER.info('Upgraded to: %s' % targetVersion)
INSTALL_CHECKED = True
def installRequiredToolbox(uid, service='Local_Path', **kwargs):
"""
Install a toolbox programmatically.
:param uid: name of the toolbox or uid number. MUST BE UNIQUE!!
:param service: name of the service
:param kwargs: fields required to install the toolbox
"""
global INSTALLED
service = str(service)
envVar = 'HFX-ENV-' + str(uid)
devMode = False
serviceName = 'Local_Path'
box = None
try:
toolboxConstruct = dict(service=service, **kwargs)
if toolboxConstruct in INSTALLED:
return
INSTALLED.append(toolboxConstruct)
if envVar in os.environ:
envValue = os.environ[envVar]
paths = envValue.split(';')
tools.TOOLS_LOGGER.info('Development path(s) detected in ENV: %s = %s' % (uid, ', '.join(paths)))
for path in paths:
cleaned = path.strip()
if os.path.exists(cleaned):
service = tools.Service.get('Local_Path')
box = service(source_tag=uid, Path=cleaned)
box.setName(box.name())
devMode = True
tools.TOOLS_LOGGER.info('Development Path Set: %s => %s' % (uid, cleaned))
break
if not devMode:
tools.TOOLS_LOGGER.warning('Development Path(s) Not Found: %s' % uid)
if not devMode:
# pull service
serviceName = 'Local_Path'
for loadedService in tools.shed()['services']:
if loadedService['name'] == service:
serviceName = loadedService['name']
break
if service.isdigit() and loadedService['id'] == int(service):
serviceName = loadedService['name']
break
if serviceName == 'Local_Path':
kwargs = dict(Path=service)
if not serviceName:
                raise Exception('Service passed for install does not exist! Cannot install toolbox.\n\tPassed Service: %s' % serviceName)
service = tools.Service.get(serviceName)
box = service(source_tag=uid, **kwargs)
# only install if it is required
if not os.path.exists(box.installDirectory()):
tools.TOOLS_LOGGER.info(
'%s => %s %s.\n\t+=> %s\n' % (
serviceName,
'Referencing' if serviceName == 'Local_Path' else 'Installing',
box.name(),
box.installDirectory()
))
box.install()
tools.TOOLS_LOGGER.info(
'Loading %s (CACHED)\n\t+=> %s' % (
box.name(),
box.installDirectory()
))
box.loadTools()
except:
traceback.print_exc()
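# Illustrative usage sketch (the package, toolbox name, service and path below are made up):
#   installPackage('yaml', pipName='PyYAML')
#   installRequiredToolbox('studio_rigging', service='Local_Path', Path='/studio/tools/rigging')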
|
aldmbmtl/FloatingTools
|
tools/utilities.py
|
Python
|
mit
| 10,347 | 0.002996 |
import numpy as np
from scipy.sparse import csr_matrix
class AliasArray(np.ndarray):
"""An ndarray with a mapping of values to user-friendly names -- see example
This ndarray subclass enables comparing sub_id and hop_id arrays directly with
their friendly string identifiers. The mapping parameter translates sublattice
or hopping names into their number IDs.
Only the `==` and `!=` operators are overloaded to handle the aliases.
Examples
--------
>>> a = AliasArray([0, 1, 0], mapping={"A": 0, "B": 1})
>>> list(a == 0)
[True, False, True]
>>> list(a == "A")
[True, False, True]
>>> list(a != "A")
[False, True, False]
>>> a = AliasArray([0, 1, 0, 2], mapping={"A|1": 0, "B": 1, "A|2": 2})
>>> list(a == "A")
[True, False, True, True]
>>> list(a != "A")
[False, True, False, False]
"""
def __new__(cls, array, mapping):
obj = np.asarray(array).view(cls)
obj.mapping = {SplitName(k): v for k, v in mapping.items()}
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.mapping = getattr(obj, "mapping", None)
def _mapped_eq(self, other):
if other in self.mapping:
return super().__eq__(self.mapping[other])
else:
result = np.zeros(len(self), dtype=np.bool)
for k, v in self.mapping.items():
if k == other:
result = np.logical_or(result, super().__eq__(v))
return result
def __eq__(self, other):
if isinstance(other, str):
return self._mapped_eq(other)
else:
return super().__eq__(other)
def __ne__(self, other):
if isinstance(other, str):
return np.logical_not(self._mapped_eq(other))
else:
return super().__ne__(other)
# noinspection PyAbstractClass
class AliasCSRMatrix(csr_matrix):
"""Same as :class:`AliasArray` but for a CSR matrix
Examples
--------
>>> from scipy.sparse import spdiags
>>> m = AliasCSRMatrix(spdiags([1, 2, 1], [0], 3, 3), mapping={'A': 1, 'B': 2})
>>> list(m.data == 'A')
[True, False, True]
>>> list(m.tocoo().data == 'A')
[True, False, True]
>>> list(m[:2].data == 'A')
[True, False]
"""
def __init__(self, *args, **kwargs):
mapping = kwargs.pop('mapping', {})
if not mapping:
mapping = getattr(args[0], 'mapping', {})
super().__init__(*args, **kwargs)
self.data = AliasArray(self.data, mapping)
@property
def format(self):
return 'csr'
@format.setter
def format(self, _):
pass
@property
def mapping(self):
return self.data.mapping
def tocoo(self, *args, **kwargs):
coo = super().tocoo(*args, **kwargs)
coo.data = AliasArray(coo.data, mapping=self.mapping)
return coo
def __getitem__(self, item):
result = super().__getitem__(item)
if getattr(result, 'format', '') == 'csr':
return AliasCSRMatrix(result, mapping=self.mapping)
else:
return result
class AliasIndex:
"""An all-or-nothing array index based on equality with a specific value
The `==` and `!=` operators are overloaded to return a lazy array which is either
all `True` or all `False`. See the examples below. This is useful for modifiers
where the each call gets arrays with the same sub_id/hop_id for all elements.
Instead of passing an `AliasArray` with `.size` identical element, `AliasIndex`
does the same all-or-nothing indexing.
Examples
--------
>>> l = np.array([1, 2, 3])
>>> ai = AliasIndex("A", len(l))
>>> list(l[ai == "A"])
[1, 2, 3]
>>> list(l[ai == "B"])
[]
>>> list(l[ai != "A"])
[]
>>> list(l[ai != "B"])
[1, 2, 3]
>>> np.logical_and([True, False, True], ai == "A")
array([ True, False, True], dtype=bool)
>>> np.logical_and([True, False, True], ai != "A")
array([False, False, False], dtype=bool)
>>> bool(ai == "A")
True
>>> bool(ai != "A")
False
>>> str(ai)
'A'
>>> hash(ai) == hash("A")
True
>>> int(ai.eye)
1
>>> np.allclose(AliasIndex("A", 1, (2, 2)).eye, np.eye(2))
True
"""
class LazyArray:
def __init__(self, value, shape):
self.value = value
self.shape = shape
def __bool__(self):
return bool(self.value)
def __array__(self):
return np.full(self.shape, self.value)
def __init__(self, name, shape, orbs=(1, 1)):
self.name = name
self.shape = shape
self.orbs = orbs
def __str__(self):
return self.name
def __eq__(self, other):
return self.LazyArray(self.name == other, self.shape)
def __ne__(self, other):
return self.LazyArray(self.name != other, self.shape)
def __hash__(self):
return hash(self.name)
@property
def eye(self):
return np.eye(*self.orbs)
class SplitName(str):
"""String subclass with special support for strings of the form "first|second"
Operators `==` and `!=` are overloaded to return `True` even if only the first part matches.
Examples
--------
>>> s = SplitName("first|second")
>>> s == "first|second"
True
>>> s != "first|second"
False
>>> s == "first"
True
>>> s != "first"
False
>>> s == "second"
False
>>> s != "second"
True
"""
@property
def first(self):
return self.split("|")[0]
def __eq__(self, other):
return super().__eq__(other) or self.first == other
def __ne__(self, other):
return super().__ne__(other) and self.first != other
def __hash__(self):
return super().__hash__()
|
MAndelkovic/pybinding
|
pybinding/support/alias.py
|
Python
|
bsd-2-clause
| 5,869 | 0.001704 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
class BaseConverter(object):
@staticmethod
def to_int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
@staticmethod
def to_bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
@staticmethod
def to_float_feature(values):
"""Returns a TF-Feature of floats.
Args:
            values: A scalar or list of float values.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
@classmethod
def to_feature(cls, value, value_type):
if value_type == 'int':
return cls.to_int64_feature(value)
if value_type == 'float':
return cls.to_float_feature(value)
if value_type == 'bytes':
return cls.to_bytes_feature(value)
raise TypeError("value type: `{}` is not supported.".format(value_type))
@classmethod
def to_sequence_feature(cls, sequence, sequence_type):
"""Returns a FeatureList based on a list fo features of type sequence_type
Args:
sequence: list of values
sequence_type: type of the sequence.
Returns:
list of TF-FeatureList
"""
if sequence_type == 'int':
feature_list = [cls.to_int64_feature(i) for i in sequence]
elif sequence_type == 'float':
feature_list = [cls.to_float_feature(i) for i in sequence]
elif sequence_type == 'bytes':
feature_list = [cls.to_bytes_feature(i) for i in sequence]
else:
raise TypeError("sequence type: `{}` is not supported.".format(sequence_type))
return tf.train.FeatureList(feature=feature_list)
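# Minimal usage sketch (assumes TensorFlow 1.x; the feature names are illustrative):
if __name__ == '__main__':
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': BaseConverter.to_feature(1, 'int'),
        'scores': BaseConverter.to_feature([0.5, 0.25], 'float'),
        'name': BaseConverter.to_feature(b'sample', 'bytes'),
    }))
    print(example)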
|
polyaxon/polyaxon-api
|
polyaxon_lib/datasets/converters/base.py
|
Python
|
mit
| 2,397 | 0.001252 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Base class for postprocessing of RC files.
'''
from __future__ import print_function
class PostProcessor(object):
''' Base class for postprocessing of the RC file data before being
output through the RC2GRD tool. You should implement this class if
you want GRIT to do specific things to the RC files after it has
converted the data into GRD format, i.e. change the content of the
RC file, and put it into a P4 changelist, etc.'''
def Process(self, rctext, rcpath, grdnode):
''' Processes the data in rctext and grdnode.
Args:
rctext: string containing the contents of the RC file being processed.
rcpath: the path used to access the file.
      grdnode: the root node of the grd xml data generated by
        the rc2grd tool.
Return:
The root node of the processed GRD tree.
'''
raise NotImplementedError()
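# Illustrative sketch of a concrete implementation; it only demonstrates the
# expected signature and return value and leaves the GRD tree untouched.
class NoOpPostProcessor(PostProcessor):
  ''' Example postprocessor that returns the GRD tree unchanged. '''
  def Process(self, rctext, rcpath, grdnode):
    # A real implementation could inspect rctext and rcpath here and modify
    # nodes of the grd tree before returning its root node.
    return grdnode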
|
scheib/chromium
|
tools/grit/grit/tool/postprocess_interface.py
|
Python
|
bsd-3-clause
| 1,031 | 0.00388 |
from math import pi
import pandas as pd
from bokeh.io import output_file, show
from bokeh.palettes import Category20c
from bokeh.plotting import figure
from bokeh.transform import cumsum
output_file("pie.html")
x = {
'United States': 157,
'United Kingdom': 93,
'Japan': 89,
'China': 63,
'Germany': 44,
'India': 42,
'Italy': 40,
'Australia': 35,
'Brazil': 32,
'France': 31,
'Taiwan': 31,
'Spain': 29
}
data = pd.Series(x).reset_index(name='value').rename(columns={'index':'country'})
data['angle'] = data['value']/data['value'].sum() * 2*pi
data['color'] = Category20c[len(x)]
p = figure(plot_height=350, title="Pie Chart", toolbar_location=None,
tools="hover", tooltips="@country: @value", x_range=(-0.5, 1.0))
p.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='country', source=data)
p.axis.axis_label=None
p.axis.visible=False
p.grid.grid_line_color = None
show(p)
|
mindriot101/bokeh
|
examples/plotting/file/pie.py
|
Python
|
bsd-3-clause
| 1,053 | 0.004748 |
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, modify_settings, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import (
SimpleArrayField, SplitArrayField, SplitArrayWidget,
)
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.all().values_list('field', flat=True)
),
self.objs[2:3]
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]),
self.objs[:2]
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]),
self.objs[:4]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
@unittest.expectedFailure
def test_contained_by_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=['FoO'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains='foo'),
[instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0),
[obj]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]]
)
def test_unsupported_lookup(self):
msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
class TestDateTimeExactQuerying(PostgreSQLTestCase):
def setUp(self):
now = timezone.now()
self.datetimes = [now]
self.dates = [now.date()]
self.times = [now.time()]
self.objs = [
DateTimeArrayModel.objects.create(
datetimes=self.datetimes,
dates=self.dates,
times=self.times,
)
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates),
self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times),
self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
def setUp(self):
self.ips = ['192.168.0.1', '::1']
self.uuids = [uuid.uuid4()]
self.decimals = [decimal.Decimal(1.25), 1.75]
self.tags = [Tag(1), Tag(2), Tag(3)]
self.objs = [
OtherTypesArrayModel.objects.create(
ips=self.ips,
uuids=self.uuids,
decimals=self.decimals,
tags=self.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips),
self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids),
self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals),
self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags),
self.objs
)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=[])
model = MyModel()
self.assertEqual(model.check(), [
checks.Warning(
msg=(
"ArrayField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint='Use a callable instead, e.g., use `list` instead of `[]`.',
obj=MyModel._meta.get_field('field'),
id='postgres.E003',
)
])
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=list)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=None)
model = MyModel()
self.assertEqual(model.check(), [])
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
})
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = 'postgres_tests_chartextarrayindexmodel'
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v['columns']
for k, v in list(connection.introspection.get_constraints(cursor, table_name).items())
if k.endswith('_like')
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [['char2']])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table_name).values()
if c['index'] and len(c['columns']) == 1
]
self.assertIn('char', indexes)
self.assertIn('char2', indexes)
self.assertIn('text', indexes)
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
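    # The serializer stores the ArrayField value as a JSON-encoded string inside the "fields" mapping.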
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
'Item 1 in the array did not validate: This field cannot be null.'
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc'], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('abc,c,defg')
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(first_error.code, 'item_invalid')
self.assertEqual(first_error.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
second_error = errors[1]
self.assertEqual(
second_error.message,
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
)
self.assertEqual(second_error.code, 'item_invalid')
self.assertEqual(second_error.params, {'nth': 2, 'value': 'defg', 'limit_value': 2, 'show_value': 4})
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_model_field_choices(self):
model_field = ArrayField(models.IntegerField(choices=((1, 'A'), (2, 'B'))))
form_field = model_field.formfield()
self.assertEqual(form_field.clean('1,2'), [1, 2])
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ['a', 'b', 'c']
self.assertEqual(field.clean(vals), vals)
class TestSplitFormField(PostgreSQLTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {'array_0': '', 'array_1': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_invalid_integer(self):
msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" required />
<input id="id_array_1" name="array_1" type="text" required />
<input id="id_array_2" name="array_2" type="text" required />
</td>
</tr>
''')
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc', 'c', 'defg'])
self.assertEqual(cm.exception.messages, [
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
])
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ('field',)
form = Form({'field_0': '1', 'field_1': '2'})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context('name', ['val1', 'val2']),
{
'widget': {
'name': 'name',
'is_hidden': False,
'required': False,
'value': "['val1', 'val2']",
'attrs': {},
'template_name': 'postgres/widgets/split_array.html',
'subwidgets': [
{
'name': 'name_0',
'is_hidden': False,
'required': False,
'value': 'val1',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
{
'name': 'name_1',
'is_hidden': False,
'required': False,
'value': 'val2',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
]
}
}
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2), 'array', None,
"""
<input name="array_0" type="text" />
<input name="array_1" type="text" />
"""
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
'array', ['val1', 'val2'], attrs={'id': 'foo'},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1" />
<input id="foo_1" name="array_1" type="text" value="val2" />
"""
)
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
|
piquadrat/django
|
tests/postgres_tests/test_array.py
|
Python
|
bsd-3-clause
| 34,363 | 0.001659 |
# -*- coding: utf-8 -*-
import unittest
import datetime
from cwr.parser.encoder.dictionary import ComponentDictionaryEncoder
from cwr.work import ComponentRecord
"""
ComponentRecord to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestComponentRecordDictionaryEncoding(unittest.TestCase):
def setUp(self):
self._encoder = ComponentDictionaryEncoder()
def test_encoded(self):
data = ComponentRecord(record_type='COM',
transaction_sequence_n=3,
record_sequence_n=15,
title='TITLE',
writer_1_last_name='LAST NAME 1',
submitter_work_n='ABCD123',
writer_1_first_name='FIRST NAME 1',
writer_2_first_name='FIRST NAME 2',
writer_2_last_name='LAST NAME 2',
writer_1_ipi_base_n='I-000000229-7',
writer_1_ipi_name_n=14107338,
writer_2_ipi_base_n='I-000000339-7',
writer_2_ipi_name_n=14107400,
iswc='T0123456789',
duration=datetime.datetime.strptime('011200',
'%H%M%S').time())
encoded = self._encoder.encode(data)
self.assertEqual('COM', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual('TITLE', encoded['title'])
self.assertEqual('LAST NAME 1', encoded['writer_1_last_name'])
self.assertEqual('ABCD123', encoded['submitter_work_n'])
self.assertEqual('FIRST NAME 1', encoded['writer_1_first_name'])
self.assertEqual('FIRST NAME 2', encoded['writer_2_first_name'])
        self.assertEqual('LAST NAME 2', encoded['writer_2_last_name'])
self.assertEqual(14107338, encoded['writer_1_ipi_name_n'])
self.assertEqual(14107400, encoded['writer_2_ipi_name_n'])
self.assertEqual(datetime.datetime.strptime('011200', '%H%M%S').time(),
encoded['duration'])
self.assertEqual('I-000000229-7', encoded['writer_1_ipi_base_n'])
self.assertEqual('I-000000339-7', encoded['writer_2_ipi_base_n'])
self.assertEqual('T0123456789', encoded['iswc'])
|
weso/CWR-DataApi
|
tests/parser/dictionary/encoder/record/test_component.py
|
Python
|
mit
| 2,680 | 0.000373 |
# pylint: disable-msg=too-many-lines
"""OPP Hardware interface.
Contains the hardware interface and drivers for the Open Pinball Project
platform hardware, including the solenoid, input, incandescent, and neopixel
boards.
"""
import asyncio
from collections import defaultdict
from typing import Dict, List, Set, Union, Tuple, Optional # pylint: disable-msg=cyclic-import,unused-import
from mpf.core.platform_batch_light_system import PlatformBatchLightSystem
from mpf.core.utility_functions import Util
from mpf.platforms.base_serial_communicator import HEX_FORMAT
from mpf.platforms.interfaces.driver_platform_interface import PulseSettings, HoldSettings
from mpf.platforms.opp.opp_coil import OPPSolenoidCard
from mpf.platforms.opp.opp_incand import OPPIncandCard
from mpf.platforms.opp.opp_modern_lights import OPPModernLightChannel, OPPNeopixelCard, OPPModernMatrixLightsCard
from mpf.platforms.opp.opp_serial_communicator import OPPSerialCommunicator, BAD_FW_VERSION
from mpf.platforms.opp.opp_switch import OPPInputCard
from mpf.platforms.opp.opp_switch import OPPMatrixCard
from mpf.platforms.opp.opp_rs232_intf import OppRs232Intf
from mpf.core.platform import SwitchPlatform, DriverPlatform, LightsPlatform, SwitchSettings, DriverSettings, \
DriverConfig, SwitchConfig, RepulseSettings
MYPY = False
if MYPY: # pragma: no cover
from mpf.platforms.opp.opp_coil import OPPSolenoid # pylint: disable-msg=cyclic-import,unused-import
from mpf.platforms.opp.opp_incand import OPPIncand # pylint: disable-msg=cyclic-import,unused-import
from mpf.platforms.opp.opp_switch import OPPSwitch # pylint: disable-msg=cyclic-import,unused-import
# pylint: disable-msg=too-many-instance-attributes
class OppHardwarePlatform(LightsPlatform, SwitchPlatform, DriverPlatform):
"""Platform class for the OPP hardware.
Args:
----
machine: The main ``MachineController`` instance.
"""
__slots__ = ["opp_connection", "serial_connections", "opp_incands", "opp_solenoid", "sol_dict",
"opp_inputs", "inp_dict", "inp_addr_dict", "matrix_inp_addr_dict", "read_input_msg",
"neo_card_dict", "num_gen2_brd", "gen2_addr_arr", "bad_crc", "min_version", "_poll_task",
"config", "_poll_response_received", "machine_type", "opp_commands", "_incand_task", "_light_system",
"matrix_light_cards"]
def __init__(self, machine) -> None:
"""Initialise OPP platform."""
super().__init__(machine)
self.opp_connection = {} # type: Dict[str, OPPSerialCommunicator]
self.serial_connections = set() # type: Set[OPPSerialCommunicator]
self.opp_incands = dict() # type: Dict[str, OPPIncandCard]
self.opp_solenoid = [] # type: List[OPPSolenoidCard]
self.sol_dict = dict() # type: Dict[str, OPPSolenoid]
self.opp_inputs = [] # type: List[Union[OPPInputCard, OPPMatrixCard]]
self.inp_dict = dict() # type: Dict[str, OPPSwitch]
self.inp_addr_dict = dict() # type: Dict[str, OPPInputCard]
self.matrix_inp_addr_dict = dict() # type: Dict[str, OPPMatrixCard]
self.read_input_msg = {} # type: Dict[str, bytes]
self.neo_card_dict = dict() # type: Dict[str, OPPNeopixelCard]
self.matrix_light_cards = dict() # type: Dict[str, OPPModernMatrixLightsCard]
self.num_gen2_brd = 0
self.gen2_addr_arr = {} # type: Dict[str, Dict[int, Optional[int]]]
self.bad_crc = defaultdict(lambda: 0)
self.min_version = defaultdict(lambda: 0xffffffff) # type: Dict[str, int]
self._poll_task = {} # type: Dict[str, asyncio.Task]
self._incand_task = None # type: Optional[asyncio.Task]
self._light_system = None # type: Optional[PlatformBatchLightSystem]
self.features['tickless'] = True
self.config = self.machine.config_validator.validate_config("opp", self.machine.config.get('opp', {}))
self._configure_device_logging_and_debug("OPP", self.config)
self._poll_response_received = {} # type: Dict[str, asyncio.Event]
assert self.log is not None
if self.config['driverboards']:
self.machine_type = self.config['driverboards']
else:
self.machine_type = self.machine.config['hardware']['driverboards'].lower()
if self.machine_type == 'gen1':
raise AssertionError("Original OPP boards not currently supported.")
if self.machine_type == 'gen2':
self.debug_log("Configuring the OPP Gen2 boards")
else:
self.raise_config_error('Invalid driverboards type: {}'.format(self.machine_type), 15)
# Only including responses that should be received
self.opp_commands = {
ord(OppRs232Intf.INV_CMD): self.inv_resp,
ord(OppRs232Intf.EOM_CMD): self.eom_resp,
ord(OppRs232Intf.GET_GEN2_CFG): self.get_gen2_cfg_resp,
ord(OppRs232Intf.READ_GEN2_INP_CMD): self.read_gen2_inp_resp_initial,
ord(OppRs232Intf.GET_VERS_CMD): self.vers_resp,
ord(OppRs232Intf.READ_MATRIX_INP): self.read_matrix_inp_resp_initial,
}
async def initialize(self):
"""Initialise connections to OPP hardware."""
await self._connect_to_hardware()
self.opp_commands[ord(OppRs232Intf.READ_GEN2_INP_CMD)] = self.read_gen2_inp_resp
self.opp_commands[ord(OppRs232Intf.READ_MATRIX_INP)] = self.read_matrix_inp_resp
self._light_system = PlatformBatchLightSystem(self.machine.clock, self._send_multiple_light_update,
self.machine.config['mpf']['default_light_hw_update_hz'],
128)
async def _send_multiple_light_update(self, sequential_brightness_list: List[Tuple[OPPModernLightChannel,
float, int]]):
first_light, _, common_fade_ms = sequential_brightness_list[0]
number_leds = len(sequential_brightness_list)
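        # Serial LED fade message layout built below: [card address][FADE cmd][first pixel hi/lo]
        # [LED count hi/lo][common fade ms hi/lo][one brightness byte per LED][CRC8 over the whole message].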
msg = bytearray()
msg.append(int(ord(OppRs232Intf.CARD_ID_GEN2_CARD) + first_light.addr))
msg.append(OppRs232Intf.SERIAL_LED_CMD_FADE)
msg.append(int(first_light.pixel_num / 256))
msg.append(int(first_light.pixel_num % 256))
msg.append(int(number_leds / 256))
msg.append(int(number_leds % 256))
msg.append(int(common_fade_ms / 256))
msg.append(int(common_fade_ms % 256))
for _, brightness, _ in sequential_brightness_list:
msg.append(int(brightness * 255))
msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
cmd = bytes(msg)
if self.debug:
self.debug_log("Set color on %s: %s", first_light.chain_serial, "".join(HEX_FORMAT % b for b in cmd))
self.send_to_processor(first_light.chain_serial, cmd)
async def start(self):
"""Start polling and listening for commands."""
# start polling
for chain_serial in self.read_input_msg:
self._poll_task[chain_serial] = self.machine.clock.loop.create_task(self._poll_sender(chain_serial))
self._poll_task[chain_serial].add_done_callback(Util.raise_exceptions)
# start listening for commands
for connection in self.serial_connections:
await connection.start_read_loop()
if [version for version in self.min_version.values() if version < 0x02010000]:
# if we run any CPUs with firmware prior to 2.1.0 start incands updater
self._incand_task = self.machine.clock.schedule_interval(self.update_incand,
1 / self.config['incand_update_hz'])
self._light_system.start()
def stop(self):
"""Stop hardware and close connections."""
if self._light_system:
self._light_system.stop()
for task in self._poll_task.values():
task.cancel()
self._poll_task = {}
if self._incand_task:
self._incand_task.cancel()
self._incand_task = None
for connections in self.serial_connections:
connections.stop()
self.serial_connections = []
def __repr__(self):
"""Return string representation."""
return '<Platform.OPP>'
def process_received_message(self, chain_serial, msg):
"""Send an incoming message from the OPP hardware to the proper method for servicing.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
if len(msg) >= 1:
# Verify valid Gen2 address
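            # (Gen2 card addresses occupy 0x20-0x3f, so the top three address bits must be 001.)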
if (msg[0] & 0xe0) == 0x20:
if len(msg) >= 2:
cmd = msg[1]
else:
cmd = OppRs232Intf.ILLEGAL_CMD
# Look for EOM or INV commands
elif msg[0] == ord(OppRs232Intf.INV_CMD) or msg[0] == ord(OppRs232Intf.EOM_CMD):
cmd = msg[0]
else:
cmd = OppRs232Intf.ILLEGAL_CMD
else:
# No messages received, fake an EOM
cmd = OppRs232Intf.EOM_CMD
# Can't use try since it swallows too many errors for now
if cmd in self.opp_commands:
self.opp_commands[cmd](chain_serial, msg)
else:
self.log.warning("Received unknown serial command?%s. (This is "
"very worrisome.)", "".join(HEX_FORMAT % b for b in msg))
# TODO: This means synchronization is lost. Send EOM characters
# until they come back
self.opp_connection[chain_serial].lost_synch()
@staticmethod
def _get_numbers(mask):
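        """Return the positions of the set bits in mask, e.g. 0x0b -> [0, 1, 3]."""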
number = 0
ref = 1
result = []
        while mask >= ref:  # >= so the highest set bit is included when mask is a power of two
if mask & ref:
result.append(number)
number += 1
ref = ref << 1
return result
def get_info_string(self):
"""Dump infos about boards."""
if not self.serial_connections:
return "No connection to any CPU board."
infos = "Connected CPUs:\n"
for connection in sorted(self.serial_connections, key=lambda x: x.chain_serial):
infos += " - Port: {} at {} baud. Chain Serial: {}\n".format(connection.port, connection.baud,
connection.chain_serial)
for board_id, board_firmware in self.gen2_addr_arr[connection.chain_serial].items():
if board_firmware is None:
infos += " -> Board: 0x{:02x} Firmware: broken\n".format(board_id)
else:
infos += " -> Board: 0x{:02x} Firmware: 0x{:02x}\n".format(board_id, board_firmware)
infos += "\nIncand cards:\n" if self.opp_incands else ""
card_format_string = " - Chain: {} Board: 0x{:02x} Card: {} Numbers: {}\n"
for incand in self.opp_incands.values():
infos += card_format_string.format(incand.chain_serial, incand.addr,
incand.card_num,
self._get_numbers(incand.mask))
infos += "\nInput cards:\n"
for inputs in self.opp_inputs:
infos += card_format_string.format(inputs.chain_serial, inputs.addr,
inputs.card_num,
self._get_numbers(inputs.mask))
infos += "\nSolenoid cards:\n"
for outputs in self.opp_solenoid:
infos += card_format_string.format(outputs.chain_serial, outputs.addr,
outputs.card_num,
self._get_numbers(outputs.mask))
infos += "\nLEDs:\n" if self.neo_card_dict else ""
for leds in self.neo_card_dict.values():
infos += " - Chain: {} Board: 0x{:02x} Card: {}\n".format(leds.chain_serial, leds.addr, leds.card_num)
infos += "\nMatrix lights:\n" if self.matrix_light_cards else ''
for matrix_light in self.matrix_light_cards.values():
infos += " - Chain: {} Board: 0x{:02x} Card: {} Numbers: 0 - 63\n".format(
matrix_light.chain_serial, matrix_light.addr, matrix_light.card_num)
return infos
async def _connect_to_hardware(self):
"""Connect to each port from the config.
This process will cause the OPPSerialCommunicator to figure out which chains they've connected to
and to register themselves.
"""
port_chain_serial_map = {v: k for k, v in self.config['chains'].items()}
for port in self.config['ports']:
# overwrite serial if defined for port
overwrite_chain_serial = port_chain_serial_map.get(port, None)
if overwrite_chain_serial is None and len(self.config['ports']) == 1:
overwrite_chain_serial = port
comm = OPPSerialCommunicator(platform=self, port=port, baud=self.config['baud'],
overwrite_serial=overwrite_chain_serial)
await comm.connect()
self.serial_connections.add(comm)
for chain_serial, versions in self.gen2_addr_arr.items():
for chain_id, version in versions.items():
if not version:
self.raise_config_error("Could not read version for board {}-{}.".format(chain_serial, chain_id),
16)
if self.min_version[chain_serial] != version:
self.raise_config_error("Version mismatch. Board {}-{} has version {:d}.{:d}.{:d}.{:d} which is not"
" the minimal version "
"{:d}.{:d}.{:d}.{:d}".format(chain_serial, chain_id, (version >> 24) & 0xFF,
(version >> 16) & 0xFF, (version >> 8) & 0xFF,
version & 0xFF,
(self.min_version[chain_serial] >> 24) & 0xFF,
(self.min_version[chain_serial] >> 16) & 0xFF,
(self.min_version[chain_serial] >> 8) & 0xFF,
self.min_version[chain_serial] & 0xFF), 1)
def register_processor_connection(self, serial_number, communicator):
"""Register the processors to the platform.
Args:
----
serial_number: Serial number of chain.
communicator: Instance of OPPSerialCommunicator
"""
self.opp_connection[serial_number] = communicator
def send_to_processor(self, chain_serial, msg):
"""Send message to processor with specific serial number.
Args:
----
chain_serial: Serial of the processor.
msg: Message to send.
"""
self.opp_connection[chain_serial].send(msg)
def update_incand(self):
"""Update all the incandescents connected to OPP hardware.
This is done once per game loop if changes have been made.
It is currently assumed that the UART oversampling will guarantee proper
communication with the boards. If this does not end up being the case,
this will be changed to update all the incandescents each loop.
This is used for board with firmware < 2.1.0
"""
for incand in self.opp_incands.values():
if self.min_version[incand.chain_serial] >= 0x02010000:
continue
whole_msg = bytearray()
# Check if any changes have been made
if incand.old_state is None or (incand.old_state ^ incand.new_state) != 0:
# Update card
incand.old_state = incand.new_state
msg = bytearray()
msg.append(incand.addr)
msg.extend(OppRs232Intf.INCAND_CMD)
msg.extend(OppRs232Intf.INCAND_SET_ON_OFF)
msg.append((incand.new_state >> 24) & 0xff)
msg.append((incand.new_state >> 16) & 0xff)
msg.append((incand.new_state >> 8) & 0xff)
msg.append(incand.new_state & 0xff)
msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
whole_msg.extend(msg)
if whole_msg:
# Note: No need to send EOM at end of cmds
send_cmd = bytes(whole_msg)
if self.debug:
self.debug_log("Update incand on %s cmd:%s", incand.chain_serial,
"".join(HEX_FORMAT % b for b in send_cmd))
self.send_to_processor(incand.chain_serial, send_cmd)
@classmethod
def get_coil_config_section(cls):
"""Return coil config section."""
return "opp_coils"
async def get_hw_switch_states(self):
"""Get initial hardware switch states.
This changes switches from active low to active high
"""
hw_states = dict()
for opp_inp in self.opp_inputs:
if not opp_inp.is_matrix:
curr_bit = 1
for index in range(0, 32):
if (curr_bit & opp_inp.mask) != 0:
if (curr_bit & opp_inp.old_state) == 0:
hw_states[opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index)] = 1
else:
hw_states[opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index)] = 0
curr_bit <<= 1
else:
for index in range(0, 64):
if ((1 << index) & opp_inp.old_state) == 0:
hw_states[opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index + 32)] = 1
else:
hw_states[opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index + 32)] = 0
return hw_states
def inv_resp(self, chain_serial, msg):
"""Parse inventory response.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
self.debug_log("Received Inventory Response: %s for %s", "".join(HEX_FORMAT % b for b in msg), chain_serial)
index = 1
self.gen2_addr_arr[chain_serial] = {}
while msg[index] != ord(OppRs232Intf.EOM_CMD):
if (msg[index] & ord(OppRs232Intf.CARD_ID_TYPE_MASK)) == ord(OppRs232Intf.CARD_ID_GEN2_CARD):
self.num_gen2_brd += 1
self.gen2_addr_arr[chain_serial][msg[index]] = None
else:
self.log.warning("Invalid inventory response %s for %s.", msg[index], chain_serial)
index += 1
self.debug_log("Found %d Gen2 OPP boards on %s.", self.num_gen2_brd, chain_serial)
# pylint: disable-msg=too-many-statements
@staticmethod
def eom_resp(chain_serial, msg):
"""Process an EOM.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# An EOM command can be used to resynchronize communications if message synch is lost
def _parse_gen2_board(self, chain_serial, msg, read_input_msg):
has_neo = False
has_sw_matrix = False
has_lamp_matrix = False
wing_index = 0
sol_mask = 0
inp_mask = 0
incand_mask = 0
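        # Build the card's solenoid/input/incandescent masks from the four wing descriptors;
        # each wing position occupies a 4-bit solenoid slot and an 8-bit input/incandescent slot.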
while wing_index < OppRs232Intf.NUM_G2_WING_PER_BRD:
if msg[2 + wing_index] == ord(OppRs232Intf.WING_SOL):
sol_mask |= (0x0f << (4 * wing_index))
inp_mask |= (0x0f << (8 * wing_index))
elif msg[2 + wing_index] == ord(OppRs232Intf.WING_INP):
inp_mask |= (0xff << (8 * wing_index))
elif msg[2 + wing_index] == ord(OppRs232Intf.WING_INCAND):
incand_mask |= (0xff << (8 * wing_index))
elif msg[2 + wing_index] in (ord(OppRs232Intf.WING_SW_MATRIX_OUT),
ord(OppRs232Intf.WING_SW_MATRIX_OUT_LOW_WING)):
has_sw_matrix = True
elif msg[2 + wing_index] == ord(OppRs232Intf.WING_NEO):
has_neo = True
inp_mask |= (0xef << (8 * wing_index))
elif msg[2 + wing_index] == ord(OppRs232Intf.WING_HI_SIDE_INCAND):
incand_mask |= (0xff << (8 * wing_index))
elif msg[2 + wing_index] == ord(OppRs232Intf.WING_NEO_SOL):
inp_mask |= (0x0e << (8 * wing_index))
sol_mask |= (0x0f << (4 * wing_index))
has_neo = True
elif msg[2 + wing_index] in (ord(OppRs232Intf.WING_LAMP_MATRIX_COL_WING),
ord(OppRs232Intf.WING_LAMP_MATRIX_ROW_WING)):
has_lamp_matrix = True
wing_index += 1
if incand_mask != 0:
card = OPPIncandCard(chain_serial, msg[0], incand_mask, self.machine)
self.opp_incands["{}-{}".format(chain_serial, card.card_num)] = card
if sol_mask != 0:
self.opp_solenoid.append(
OPPSolenoidCard(chain_serial, msg[0], sol_mask, self.sol_dict, self))
if inp_mask != 0:
# Create the input object, and add to the command to read all inputs
self.opp_inputs.append(OPPInputCard(chain_serial, msg[0], inp_mask, self.inp_dict,
self.inp_addr_dict, self))
# Add command to read all inputs to read input message
inp_msg = bytearray()
inp_msg.append(msg[0])
inp_msg.extend(OppRs232Intf.READ_GEN2_INP_CMD)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.extend(OppRs232Intf.calc_crc8_whole_msg(inp_msg))
read_input_msg.extend(inp_msg)
if has_sw_matrix:
# Create the matrix object, and add to the command to read all matrix inputs
self.opp_inputs.append(OPPMatrixCard(chain_serial, msg[0], self.inp_dict,
self.matrix_inp_addr_dict, self))
# Add command to read all matrix inputs to read input message
inp_msg = bytearray()
inp_msg.append(msg[0])
inp_msg.extend(OppRs232Intf.READ_MATRIX_INP)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.append(0)
inp_msg.extend(OppRs232Intf.calc_crc8_whole_msg(inp_msg))
read_input_msg.extend(inp_msg)
if has_neo:
card = OPPNeopixelCard(chain_serial, msg[0], self)
self.neo_card_dict[chain_serial + '-' + card.card_num] = card
if has_lamp_matrix:
card = OPPModernMatrixLightsCard(chain_serial, msg[0], self)
self.matrix_light_cards[chain_serial + '-' + card.card_num] = card
def _bad_crc(self, chain_serial, msg):
"""Show warning and increase counter."""
self.bad_crc[chain_serial] += 1
self.log.warning("Chain: %sMsg contains bad CRC: %s.", chain_serial, "".join(HEX_FORMAT % b for b in msg))
def get_gen2_cfg_resp(self, chain_serial, msg):
"""Process cfg response.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Multiple get gen2 cfg responses can be received at once
self.debug_log("Received Gen2 Cfg Response:%s", "".join(HEX_FORMAT % b for b in msg))
curr_index = 0
read_input_msg = bytearray()
while True:
# check that message is long enough, must include crc8
if len(msg) < curr_index + 7:
self.log.warning("Msg is too short: %s.", "".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
break
# Verify the CRC8 is correct
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, curr_index, 6)
if msg[curr_index + 6] != ord(crc8):
self._bad_crc(chain_serial, msg)
break
self._parse_gen2_board(chain_serial, msg[curr_index:curr_index + 6], read_input_msg)
if (len(msg) > curr_index + 7) and (msg[curr_index + 7] == ord(OppRs232Intf.EOM_CMD)):
break
if (len(msg) > curr_index + 8) and (msg[curr_index + 8] == ord(OppRs232Intf.GET_GEN2_CFG)):
curr_index += 7
else:
self.log.warning("Malformed GET_GEN2_CFG response:%s.",
"".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
break
read_input_msg.extend(OppRs232Intf.EOM_CMD)
self.read_input_msg[chain_serial] = bytes(read_input_msg)
self._poll_response_received[chain_serial] = asyncio.Event()
self._poll_response_received[chain_serial].set()
def vers_resp(self, chain_serial, msg):
"""Process version response.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Multiple get version responses can be received at once
self.debug_log("Received Version Response (Chain: %s): %s", chain_serial, "".join(HEX_FORMAT % b for b in msg))
curr_index = 0
while True:
# check that message is long enough, must include crc8
if len(msg) < curr_index + 7:
self.log.warning("Msg is too short (Chain: %s): %s.", chain_serial,
"".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
break
# Verify the CRC8 is correct
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, curr_index, 6)
if msg[curr_index + 6] != ord(crc8):
self._bad_crc(chain_serial, msg)
break
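            # Firmware version is packed big-endian into 32 bits, one byte per field
            # (e.g. 0x02010000 corresponds to version 2.1.0.0).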
version = (msg[curr_index + 2] << 24) | \
(msg[curr_index + 3] << 16) | \
(msg[curr_index + 4] << 8) | \
msg[curr_index + 5]
self.debug_log("Firmware version of board 0x%02x (Chain: %s): %d.%d.%d.%d", msg[curr_index], chain_serial,
msg[curr_index + 2], msg[curr_index + 3], msg[curr_index + 4], msg[curr_index + 5])
if msg[curr_index] not in self.gen2_addr_arr[chain_serial]:
self.log.warning("Got firmware response for %s but not in inventory at %s", msg[curr_index],
chain_serial)
else:
self.gen2_addr_arr[chain_serial][msg[curr_index]] = version
if version < self.min_version[chain_serial]:
self.min_version[chain_serial] = version
if version == BAD_FW_VERSION:
raise AssertionError("Original firmware sent only to Brian before adding "
"real version numbers. The firmware must be updated before "
"MPF will work.")
if (len(msg) > curr_index + 7) and (msg[curr_index + 7] == ord(OppRs232Intf.EOM_CMD)):
break
if (len(msg) > curr_index + 8) and (msg[curr_index + 8] == ord(OppRs232Intf.GET_VERS_CMD)):
curr_index += 7
else:
self.log.warning("Malformed GET_VERS_CMD response (Chain %s): %s.", chain_serial,
"".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
break
def read_gen2_inp_resp_initial(self, chain_serial, msg):
"""Read initial switch states.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Verify the CRC8 is correct
if len(msg) < 7:
raise AssertionError("Received too short initial input response: " + "".join(HEX_FORMAT % b for b in msg))
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 6)
if msg[6] != ord(crc8):
self._bad_crc(chain_serial, msg)
else:
if chain_serial + '-' + str(msg[0]) not in self.inp_addr_dict:
self.log.warning("Got input response for invalid card at initial request: %s. Msg: %s.", msg[0],
"".join(HEX_FORMAT % b for b in msg))
return
opp_inp = self.inp_addr_dict[chain_serial + '-' + str(msg[0])]
new_state = (msg[2] << 24) | \
(msg[3] << 16) | \
(msg[4] << 8) | \
msg[5]
opp_inp.old_state = new_state
def read_gen2_inp_resp(self, chain_serial, msg):
"""Read switch changes.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Single read gen2 input response. Receive function breaks them down
# Verify the CRC8 is correct
if len(msg) < 7:
self.log.warning("Msg too short: %s.", "".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
return
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 6)
if msg[6] != ord(crc8):
self._bad_crc(chain_serial, msg)
else:
if chain_serial + '-' + str(msg[0]) not in self.inp_addr_dict:
self.log.warning("Got input response for invalid card: %s. Msg: %s.", msg[0],
"".join(HEX_FORMAT % b for b in msg))
return
opp_inp = self.inp_addr_dict[chain_serial + '-' + str(msg[0])]
new_state = (msg[2] << 24) | \
(msg[3] << 16) | \
(msg[4] << 8) | \
msg[5]
# Update the state which holds inputs that are active
changes = opp_inp.old_state ^ new_state
if changes != 0:
curr_bit = 1
for index in range(0, 32):
if (curr_bit & changes) != 0:
if (curr_bit & new_state) == 0:
self.machine.switch_controller.process_switch_by_num(
state=1,
num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index),
platform=self)
else:
self.machine.switch_controller.process_switch_by_num(
state=0,
num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index),
platform=self)
curr_bit <<= 1
opp_inp.old_state = new_state
# we can continue to poll
self._poll_response_received[chain_serial].set()
def read_matrix_inp_resp_initial(self, chain_serial, msg):
"""Read initial matrix switch states.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Verify the CRC8 is correct
if len(msg) < 11:
raise AssertionError("Received too short initial input response: " + "".join(HEX_FORMAT % b for b in msg))
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 10)
if msg[10] != ord(crc8):
self._bad_crc(chain_serial, msg)
else:
if chain_serial + '-' + str(msg[0]) not in self.matrix_inp_addr_dict:
self.log.warning("Got input response for invalid matrix card at initial request: %s. Msg: %s.", msg[0],
"".join(HEX_FORMAT % b for b in msg))
return
opp_inp = self.matrix_inp_addr_dict[chain_serial + '-' + str(msg[0])]
opp_inp.old_state = ((msg[2] << 56) | (msg[3] << 48) | (msg[4] << 40) | (msg[5] << 32) |
(msg[6] << 24) | (msg[7] << 16) | (msg[8] << 8) | msg[9])
# pylint: disable-msg=too-many-nested-blocks
def read_matrix_inp_resp(self, chain_serial, msg):
"""Read matrix switch changes.
Args:
----
chain_serial: Serial of the chain which received the message.
msg: Message to parse.
"""
# Single read gen2 input response. Receive function breaks them down
# Verify the CRC8 is correct
if len(msg) < 11:
self.log.warning("Msg too short: %s.", "".join(HEX_FORMAT % b for b in msg))
self.opp_connection[chain_serial].lost_synch()
return
crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 10)
if msg[10] != ord(crc8):
self._bad_crc(chain_serial, msg)
else:
if chain_serial + '-' + str(msg[0]) not in self.matrix_inp_addr_dict:
self.log.warning("Got input response for invalid matrix card: %s. Msg: %s.", msg[0],
"".join(HEX_FORMAT % b for b in msg))
return
opp_inp = self.matrix_inp_addr_dict[chain_serial + '-' + str(msg[0])]
new_state = ((msg[2] << 56) | (msg[3] << 48) | (msg[4] << 40) | (msg[5] << 32) |
(msg[6] << 24) | (msg[7] << 16) | (msg[8] << 8) | msg[9])
changes = opp_inp.old_state ^ new_state
if changes != 0:
curr_bit = 1
for index in range(32, 96):
if (curr_bit & changes) != 0:
if (curr_bit & new_state) == 0:
self.machine.switch_controller.process_switch_by_num(
state=1,
num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index),
platform=self)
else:
self.machine.switch_controller.process_switch_by_num(
state=0,
num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index),
platform=self)
curr_bit <<= 1
opp_inp.old_state = new_state
# we can continue to poll
self._poll_response_received[chain_serial].set()
def _get_dict_index(self, input_str):
if not isinstance(input_str, str):
self.raise_config_error("Invalid number format for OPP. Number should be card-number or chain-card-number "
"(e.g. 0-1)", 2)
try:
chain_str, card_str, number_str = input_str.split("-")
except ValueError:
if len(self.serial_connections) > 1:
self.raise_config_error("You need to specify a chain as chain-card-number in: {}".format(input_str), 17)
else:
chain_str = list(self.serial_connections)[0].chain_serial
try:
card_str, number_str = input_str.split("-")
except ValueError:
card_str = '0'
number_str = input_str
if chain_str not in self.opp_connection:
self.raise_config_error("Chain {} does not exist. Existing chains: {}".format(
chain_str, list(self.opp_connection.keys())), 3)
return chain_str + "-" + card_str + "-" + number_str
def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict):
"""Configure a driver.
Args:
----
config: Config dict.
number: Number of this driver.
platform_settings: Platform specific settings.
"""
if not self.opp_connection:
self.raise_config_error("A request was made to configure an OPP solenoid, "
"but no OPP connection is available", 4)
number = self._get_dict_index(number)
if number not in self.sol_dict:
self.raise_config_error("A request was made to configure an OPP solenoid "
"with number {} which doesn't exist".format(number), 5)
# Use new update individual solenoid command
opp_sol = self.sol_dict[number]
opp_sol.config = config
opp_sol.platform_settings = platform_settings
if self.debug:
self.debug_log("Configure driver %s", number)
default_pulse = PulseSettings(config.default_pulse_power, config.default_pulse_ms)
default_hold = HoldSettings(config.default_hold_power)
opp_sol.reconfigure_driver(default_pulse, default_hold)
# Removing the default input is not necessary since the
# CFG_SOL_USE_SWITCH is not being set
return opp_sol
def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict):
"""Configure a switch.
Args:
----
number: Number of this switch.
config: Config dict.
platform_config: Platform specific settings.
"""
del platform_config
del config
# A switch is termed as an input to OPP
if not self.opp_connection:
self.raise_config_error("A request was made to configure an OPP switch, "
"but no OPP connection is available", 6)
number = self._get_dict_index(number)
if number not in self.inp_dict:
self.raise_config_error("A request was made to configure an OPP switch "
"with number {} which doesn't exist".format(number), 7)
return self.inp_dict[number]
def parse_light_number_to_channels(self, number: str, subtype: str):
"""Parse number and subtype to channel."""
if subtype in ("matrix", "incand"):
return [
{
"number": self._get_dict_index(number)
}
]
if not subtype or subtype == "led":
full_index = self._get_dict_index(number)
chain_serial, card, index = full_index.split('-')
number_format = "{}-{}-{}"
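            # Each LED number expands to three consecutive hardware channels
            # (one per colour component, typically R, G and B).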
return [
{
"number": number_format.format(chain_serial, card, int(index) * 3)
},
{
"number": number_format.format(chain_serial, card, int(index) * 3 + 1)
},
{
"number": number_format.format(chain_serial, card, int(index) * 3 + 2)
},
]
self.raise_config_error("Unknown subtype {}".format(subtype), 8)
return []
def configure_light(self, number, subtype, config, platform_settings):
"""Configure a led or matrix light."""
del config
if not self.opp_connection:
self.raise_config_error("A request was made to configure an OPP light, "
"but no OPP connection is available", 9)
chain_serial, card, light_num = number.split('-')
index = chain_serial + '-' + card
if not subtype or subtype == "led":
if index not in self.neo_card_dict:
self.raise_config_error("A request was made to configure an OPP neopixel "
"with card number {} which doesn't exist".format(card), 10)
if not self.neo_card_dict[index].is_valid_light_number(light_num):
self.raise_config_error("A request was made to configure an OPP neopixel "
"with card number {} but number '{}' is "
"invalid".format(card, light_num), 22)
light = OPPModernLightChannel(chain_serial, int(card), int(light_num), self._light_system)
self._light_system.mark_dirty(light)
return light
if subtype == "matrix" and self.min_version[chain_serial] >= 0x02010000:
# modern matrix lights
if index not in self.matrix_light_cards:
self.raise_config_error("A request was made to configure an OPP matrix light "
"with card number {} which doesn't exist".format(card), 18)
if not self.matrix_light_cards[index].is_valid_light_number(light_num):
self.raise_config_error("A request was made to configure an OPP matrix light "
"with card number {} but number '{}' is "
"invalid".format(card, light_num), 19)
light = OPPModernLightChannel(chain_serial, int(card), int(light_num) + 0x2000, self._light_system)
self._light_system.mark_dirty(light)
return light
if subtype in ("incand", "matrix"):
if index not in self.opp_incands:
self.raise_config_error("A request was made to configure an OPP incand light "
"with card number {} which doesn't exist".format(card), 20)
if not self.opp_incands[index].is_valid_light_number(light_num):
self.raise_config_error("A request was made to configure an OPP incand light "
"with card number {} but number '{}' is "
"invalid".format(card, light_num), 21)
if self.min_version[chain_serial] >= 0x02010000:
light = self.opp_incands[index].configure_modern_fade_incand(light_num, self._light_system)
self._light_system.mark_dirty(light)
return light
# legacy incands with new or old subtype
return self.opp_incands[index].configure_software_fade_incand(light_num)
self.raise_config_error("Unknown subtype {}".format(subtype), 12)
return None
async def _poll_sender(self, chain_serial):
"""Poll switches."""
if len(self.read_input_msg[chain_serial]) <= 1:
# there is no point in polling without switches
return
while True:
# wait for previous poll response
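            # (allow up to 25 poll intervals before warning that a response is overdue)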
timeout = 1 / self.config['poll_hz'] * 25
try:
await asyncio.wait_for(self._poll_response_received[chain_serial].wait(), timeout)
except asyncio.TimeoutError:
self.log.warning("Poll took more than %sms for %s", timeout * 1000, chain_serial)
else:
self._poll_response_received[chain_serial].clear()
# send poll
self.send_to_processor(chain_serial, self.read_input_msg[chain_serial])
await self.opp_connection[chain_serial].writer.drain()
            # the line above saturates the link and seems to overwhelm the hardware, so throttle to the configured poll_hz
await asyncio.sleep(1 / self.config['poll_hz'])
def _verify_coil_and_switch_fit(self, switch, coil):
chain_serial, card, solenoid = coil.hw_driver.number.split('-')
sw_chain_serial, sw_card, sw_num = switch.hw_switch.number.split('-')
if self.min_version[chain_serial] >= 0x20000:
if chain_serial != sw_chain_serial or card != sw_card:
self.raise_config_error('Invalid switch being configured for driver. Driver = {} '
'Switch = {}. Driver and switch have to be on the same '
'board.'.format(coil.hw_driver.number, switch.hw_switch.number), 13)
else:
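            # Pre-0.2.0 firmware pairs each solenoid with a fixed switch number:
            # e.g. solenoid 5 (0b0101) maps to ((0b0100) << 1) | 0b01 = switch 9.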
matching_sw = ((int(solenoid) & 0x0c) << 1) | (int(solenoid) & 0x03)
if chain_serial != sw_chain_serial or card != sw_card or matching_sw != int(sw_num):
self.raise_config_error('Invalid switch being configured for driver. Driver = {} '
'Switch = {}. For Firmware < 0.2.0 they have to be on the same board and '
'have the same number'.format(coil.hw_driver.number, switch.hw_switch.number),
14)
def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
"""Set pulse on hit rule on driver.
Pulses a driver when a switch is hit. When the switch is released the pulse continues. Typically used for
autofire coils such as pop bumpers.
"""
self._write_hw_rule(enable_switch, coil, use_hold=False, can_cancel=False)
def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):
"""Set pulse on hit and release rule to driver.
When a switch is hit and a certain delay passed it pulses a driver.
When the switch is released the pulse continues.
Typically used for kickbacks.
"""
if delay_ms <= 0:
raise AssertionError("set_delayed_pulse_on_hit_rule should be used with a positive delay "
"not {}".format(delay_ms))
if delay_ms > 255:
raise AssertionError("set_delayed_pulse_on_hit_rule is limited to max 255ms "
"(was {})".format(delay_ms))
self._write_hw_rule(enable_switch, coil, use_hold=False, can_cancel=False, delay_ms=int(delay_ms))
def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
"""Set pulse on hit and release rule to driver.
Pulses a driver when a switch is hit. When the switch is released the pulse is canceled. Typically used on
the main coil for dual coil flippers without eos switch.
"""
self._write_hw_rule(enable_switch, coil, use_hold=False, can_cancel=True)
def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
"""Set pulse on hit and enable and relase rule on driver.
Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released
the pulse is canceled and the driver gets disabled. Typically used for single coil flippers.
"""
self._write_hw_rule(enable_switch, coil, use_hold=True, can_cancel=True)
def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,
eos_switch: SwitchSettings, coil: DriverSettings,
repulse_settings: Optional[RepulseSettings]):
"""Set pulse on hit and release and disable rule on driver.
Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released
        the pulse is canceled and the driver gets disabled. When the eos_switch is hit the pulse is canceled
and the driver gets disabled. Typically used on the main coil for dual coil flippers with eos switch.
"""
raise AssertionError("Not implemented in OPP currently")
def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,
eos_switch: SwitchSettings, coil: DriverSettings,
repulse_settings: Optional[RepulseSettings]):
"""Set pulse on hit and enable and release and disable rule on driver.
Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released
the pulse is canceled and the driver becomes disabled. When the eos_switch is hit the pulse is canceled
and the driver becomes enabled (likely with PWM).
Typically used on the coil for single-wound coil flippers with eos switch.
"""
raise AssertionError("Not implemented in OPP currently")
# pylint: disable-msg=too-many-arguments
def _write_hw_rule(self, switch_obj: SwitchSettings, driver_obj: DriverSettings, use_hold, can_cancel,
delay_ms=None):
if switch_obj.invert:
raise AssertionError("Cannot handle inverted switches")
if driver_obj.hold_settings and not use_hold:
raise AssertionError("Invalid call")
self._verify_coil_and_switch_fit(switch_obj, driver_obj)
self.debug_log("Setting HW Rule. Driver: %s", driver_obj.hw_driver.number)
driver_obj.hw_driver.switches.append(switch_obj.hw_switch.number)
driver_obj.hw_driver.set_switch_rule(driver_obj.pulse_settings, driver_obj.hold_settings, driver_obj.recycle,
can_cancel, delay_ms)
_, _, switch_num = switch_obj.hw_switch.number.split("-")
switch_num = int(switch_num)
self._add_switch_coil_mapping(switch_num, driver_obj.hw_driver)
def _remove_switch_coil_mapping(self, switch_num, driver: "OPPSolenoid"):
"""Remove mapping between switch and coil."""
if self.min_version[driver.sol_card.chain_serial] < 0x20000:
return
_, _, coil_num = driver.number.split('-')
# mirror switch matrix columns to handle the fact that OPP matrix is in reverse column order
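        # e.g. matrix switch 33 becomes 8 * (15 - 33 // 8) + 33 % 8 = 88 + 1 = 89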
if switch_num >= 32:
switch_num = 8 * (15 - (switch_num // 8)) + switch_num % 8
msg = bytearray()
msg.append(driver.sol_card.addr)
msg.extend(OppRs232Intf.SET_SOL_INP_CMD)
msg.append(int(switch_num))
msg.append(int(coil_num) + ord(OppRs232Intf.CFG_SOL_INP_REMOVE))
msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
msg.extend(OppRs232Intf.EOM_CMD)
final_cmd = bytes(msg)
if self.debug:
self.debug_log("Unmapping input %s and coil %s on %s", switch_num, coil_num, driver.sol_card.chain_serial)
self.send_to_processor(driver.sol_card.chain_serial, final_cmd)
def _add_switch_coil_mapping(self, switch_num, driver: "OPPSolenoid"):
"""Add mapping between switch and coil."""
if self.min_version[driver.sol_card.chain_serial] < 0x20000:
return
_, _, coil_num = driver.number.split('-')
# mirror switch matrix columns to handle the fact that OPP matrix is in reverse column order
if switch_num >= 32:
switch_num = 8 * (15 - (switch_num // 8)) + switch_num % 8
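        # SET_SOL_INP_CMD message layout, as assembled below:
        # [card address][SET_SOL_INP_CMD][switch number][coil number][CRC8][EOM]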
msg = bytearray()
msg.append(driver.sol_card.addr)
msg.extend(OppRs232Intf.SET_SOL_INP_CMD)
msg.append(int(switch_num))
msg.append(int(coil_num))
msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
msg.extend(OppRs232Intf.EOM_CMD)
final_cmd = bytes(msg)
if self.debug:
self.debug_log("Mapping input %s and coil %s on %s", switch_num, coil_num, driver.sol_card.chain_serial)
self.send_to_processor(driver.sol_card.chain_serial, final_cmd)
def clear_hw_rule(self, switch: SwitchSettings, coil: DriverSettings):
"""Clear a hardware rule.
This is used if you want to remove the linkage between a switch and
some driver activity. For example, if you wanted to disable your
flippers (so that a player pushing the flipper buttons wouldn't cause
the flippers to flip), you'd call this method with your flipper button
as the *sw_num*.
"""
if switch.hw_switch.number in coil.hw_driver.switches:
if self.debug:
self.debug_log("Clearing HW Rule for switch: %s, coils: %s", switch.hw_switch.number,
coil.hw_driver.number)
coil.hw_driver.switches.remove(switch.hw_switch.number)
_, _, switch_num = switch.hw_switch.number.split("-")
switch_num = int(switch_num)
self._remove_switch_coil_mapping(switch_num, coil.hw_driver)
# disable rule if there are no more switches
# Technically not necessary unless the solenoid parameters are
# changing. MPF may not know when initial kick and hold values
# are changed, so this might need to be called each time.
if not coil.hw_driver.switches:
coil.hw_driver.remove_switch_rule()
|
missionpinball/mpf
|
mpf/platforms/opp/opp.py
|
Python
|
mit
| 53,276 | 0.003942 |
#!/usr/bin/env python
import sys
sys.path.append("../build/")
import phisSchema
import pyxb
import warnings
# Strategy:
# Perhaps cleanest would be to build a separate interface for data that may vary from VFB.
# This also allows separation of Jython code
# OTOH - this gives another layer of mappings to maintain.
# Sketch of interface:
# minimal vars to set (for now):
# image_id, image URL, source links; expressed feature (+ its type - gene or transgene); classification of struc & overlapped region
# Generator functions live outside the classes. They generate objects that must then be bound.
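# Rough usage sketch (illustrative only; source name, URL and image details below are
# hypothetical). `ont` must be an id -> label dict covering every ontology term the
# classes reference (e.g. FBdv_00005369, FBbi_00000251, FBbi_00000024, FBbi_00000002,
# FBbt_00003624):
#   ds = imageDataSet(ont)
#   ds.set_source("SomeSource", "http://example.org")
#   img = VfbWtAdultBrainImage(ont, ds, "00001", "http://example.org/images/00001.png")
#   img.set_sex("Female")
#   img.set_is_expression_pattern(True)
#   img.add_depicted_anatomy_for_expressed_feature("FBbt_00003624", "depicted structure", "Manual")
#   print ds.doc.toxml()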
def gen_OntologyTerm(id_name, ID):
"""Takes id_name lookup dict for ontology terms and an ID
Returns a phisSchema.OntologyTerm object"""
ot = phisSchema.OntologyTerm()
ot.termId = ID
ot.termLabel = id_name[ID]
return ot
def gen_Link(display_name, url):
"""Takes display_name and URI as args and returns a phisSchema.Link object"""
    link = phisSchema.Link()
    link.display_name = display_name
    link.url = url
    return link
def gen_Annotation(ot, text, mode):
"""Generate a phisSchema.Annotation object based on specified:
ot: ontology term
text: free text
mode: Manual/Automated"""
annotation = phisSchema.Annotation()
annotation.annotation_freetext = text
annotation.ontology_term = ot
annotation.annotationMode = mode
return annotation
def gen_roi_Coordinates(x, y, z):
"""Generate a phisSchema.Coordinates object for an roi
Each arg specifies a range in the form of a list or tuple
with 2 elements
"""
try:
assert len(x) == 2
assert len(y) == 2
assert len(z) == 2
    except Exception:
warnings.warn("Percent arrays should have only 2 members - specifying a range.")
coord = phisSchema.Coordinates()
coord.x_coordinates = _gen_PercentArray(*x)
coord.y_coordinates = _gen_PercentArray(*y)
coord.z_coordinates = _gen_PercentArray(*z)
return coord
def _gen_PercentArray(a, b):
AB = (a, b)
pa = phisSchema.PercentArray()
pa.extend(AB)
return pa
def gen_GenotypeComponent(gf_symbol=False, gf_id=False, gene_symbol=False, gene_id=False, gf_ensembl_id=False):
## How to specify channel. Use defaults? ###
"""Generate a phisSchema.GenotypeComponent object.
All args are strings. Please specify each arg with a keyword
"""
gc = phisSchema.GenotypeComponent()
if gene_id:
gc.gene_id = gene_id
if gene_symbol:
gc.gene_symbol = gene_symbol
if gf_symbol:
gc.genetic_feature_symbol = gf_symbol
if gf_id:
gc.genetic_feature_id = gf_id
if gf_ensembl_id:
gc.genetic_feature_ensembl_id = gf_ensembl_id
return gc
class imageDataSet():
"""Class to use for generating sets of images from a common source.
Assumes all datasets have common source name and URL.
And that they share a background channel marker and visualization methods
for background and signal channels. All of these are set by methods rather than KWARGS.
"""
# May not be worth bothering with a class here
def __init__(self, ont_dict):
### Do we have a way to distinguish general source from specific source links?
self.doc = phisSchema.Doc()
self.source = ''
self.background_channel_marker = ''
self.signal_channel_visualisation_methods = []
self.background_channel_visualisation_methods = []
self.ont_dict = ont_dict
def set_source(self, source_name, source_url):
"""source_name and source_url are strings"""
self.source = gen_Link(source_name, source_url)
def set_background_channel_marker(self, genotype_component):
"""Takes a phisSchema.genotypeComponent object as an arg"""
self.background_channel_marker = genotype_component
def add_signal_channel_visualisation_method(self, sfid):
"""sfid is the shortFormId of and FBbi visualisation method"""
self.signal_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))
def add_background_channel_visualisation_method(self, sfid):
"""sfid is the shortFormId of and FBbi visualisation method"""
self.background_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))
class VfbImage():
"""Interface class for loading VFB data.
Assumes 3D confocal image with 2 channels -
a background stain channel and a signal channel
depicting some interesting expression/anatomy"""
# Define constants here: Or should this just jump straight to populating model?
host = gen_Link("Virtual Fly Brain", "http://www.virtualflybrain.org") # for image_description.host
def __init__(self, ont, image_dataset):
"""ont: an ID:name dict of ontology terms used in XML to be produced
d: A image_dataset object
"""
self.ont = ont
self._initialise_image()
self._unpack_image_dataset(image_dataset)
self.image.image_description.host = self.host
def _unpack_image_dataset(self, image_dataset):
self.set_source(image_dataset.source)
# self.set_signal_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
# self.set_background_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
self.set_expressed_feature_for_background_channel(image_dataset.background_channel_marker)
def set_organism(self, stage, sex):
"""stage must be a phisSchema.ontologyTerm object; sex must be the string 'Male' or 'Female'"""
organism = phisSchema.Organism()
organism.taxon = "Drosophila melanogaster"
organism.sex = sex
organism.ncbi_taxon_id = "NCBItaxon_7227"
organism.stage=stage
self.image.organism = organism
def _initialise_image(self):
"""Assume 2 channels each with an associated ROI at 100%.
All objects generated by multiple iterations appended to common doc.
Generate IDs for two channels and corresponding ROIs according to the scheme:
image_id-a/b roi_id-a/b; channel_id-a/b - where id = self.vfb_image_id.
channel1/roi1 = background. channel2/roi2 = signal."""
# Generate Root objects
self.image = phisSchema.Image()
self.channel1 = phisSchema.Channel()
self.channel2 = phisSchema.Channel()
self.roi1 = phisSchema.Roi()
self.roi2 = phisSchema.Roi()
# bind root objects to doc
# Which pattern??
# This doesn't work for multiple images rois: self.doc.append(image)
# Need to work on checking the more obvious self.doc.image.append(self.image)
self.doc.image.append(self.image)
self.doc.channel.append(self.channel1)
self.doc.channel.append(self.channel2)
self.doc.roi.append(self.roi1)
self.doc.roi.append(self.roi2)
# Populate IDs
self.image.id = "image_" + self.vfb_image_id
self.channel1.id = "channel_" + self.vfb_image_id + "-a"
self.channel2.id = "channel_" + self.vfb_image_id + "-b"
self.roi1.id = "roi_" + self.vfb_image_id + "-a"
self.roi2.id = "roi_" + self.vfb_image_id + "-b"
self.image.associated_roi = pyxb.BIND() # Special magic
self.image.associated_roi.el.append(self.roi1.id) # Is this correct, or should I be populating a string array and appending that?
self.image.associated_roi.el.append(self.roi2.id)
self.image.associated_channel = pyxb.BIND()
self.image.associated_channel.el.append(self.channel1.id)
self.image.associated_channel.el.append(self.channel2.id)
self.channel1.associated_image = self.image.id
self.channel2.associated_image = self.image.id
self.roi1.associated_image = self.image.id
self.roi2.associated_image = self.image.id
self.roi1.associated_channel = pyxb.BIND()
self.roi1.associated_channel.el.append(self.channel1.id)
self.roi2.associated_channel = pyxb.BIND()
self.roi2.associated_channel.el.append(self.channel2.id)
self.channel1.associated_roi = pyxb.BIND()
self.channel1.associated_roi.el.append(self.roi1.id)
self.channel2.associated_roi = pyxb.BIND()
self.channel2.associated_roi.el.append(self.roi2.id)
# both ROIs cover whole image:
self.roi1.coordinates = gen_roi_Coordinates((0,100), (0,100), (0,100))
self.roi2.coordinates = gen_roi_Coordinates((0,100), (0,100), (0,100))
self.depicted_anatomy_background = phisSchema.AnnotationArray()
self.roi1.depicted_anatomical_structure = self.depicted_anatomy_background
self.depicted_anatomy_exp_channel = phisSchema.AnnotationArray()
self.roi2.depicted_anatomical_structure = self.depicted_anatomy_exp_channel
# Expansions. Add more here as needed.
self.image_description = phisSchema.ImageDescription()
self.image.image_description = self.image_description
self.image.image_description.sample_preparation = pyxb.BIND()
self.image.image_description.imaging_method = pyxb.BIND()
# Method 1 - intermediate node and directly bind
imaging_methods = phisSchema.OntologyTermArray()
self.image.image_description.imaging_method = imaging_methods # But remember - this is only possible because of an earlier pyxB expansion
imaging_methods.append(gen_OntologyTerm(self.ont, "FBbi_00000251"))
# Method 2 - pyxB.BIND() expansion
self.image.image_description.sample_preparation = pyxb.BIND()
self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000024")) # whole mount tissue
self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000002")) # chemically fixed
# Set methods generate the relevant object and bind it.
def set_dimensions(self, x, y, z=0):
"""x, y and z are dimensions in pixels. Z is optional (default 0)"""
dimensions = phisSchema.Dimensions()
dimensions.image_width = x
dimensions.image_height = y
dimensions.image_depth = z
self.image_description.image_dimensions = dimensions
def set_image_and_sample_type(self, wt_or_mut, exp_anat_phen):
self.image.image_description.sample_type = "wild type"
ita = phisSchema.ImageTypeArray()
ita.append("expression") # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't this there is any case for using both.
self.image.image_description.image_type = ita
def set_source(self, source):
"""source must be a phisSchema.Link object.
Assumes source of image and organism are the same."""
self.image.image_description.image_generated_by = source
self.image.image_description.organism_generated_by = source
def set_background_channel_visualisation_method(self, sfid):
self.channel2.visualisation_method = pyxb.BIND()
self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))
def set_signal_channel_visualisation_method(self, sfid):
self.channel2.visualisation_method = pyxb.BIND()
self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))
def add_background_depicted_entity(self, sfid, text, mode):
# By convention, background channel is always roi1
annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
self.depicted_anatomy_background.append(annotation)
def add_depicted_anatomy_for_expressed_feature(self, sfid, text, mode):
# By convention, background channel is always roi1
annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
self.depicted_anatomy_exp_channel.append(annotation)
def set_is_expression_pattern(self, s = True):
"""By convention channel2 is signal channel."""
# Should really just be a boolean.
if s:
self.channel2.is_expression_pattern = "Yes"
else:
self.channel2.is_expression_pattern = "No"
def set_expressed_feature_for_signal_channel(self, genotype_component):
"""genotype_component: a phisSchema.GenotypeComponent object."""
self.channel2.depicts_expression_of = genotype_component
def set_expressed_feature_for_background_channel(self, genotype_component):
"""genotype_component: a phisSchema.GenotypeComponent object."""
self.channel1.depicts_expression_of = genotype_component
def set_image_context_url(self, url):
self.image.image_description.image_context_url = url
class VfbWtAdultBrainImage(VfbImage):
"""Args:
- ont is a name_id dict lookup for ontology terms.
- image_dataset is an imageDataSet object
- vfb_image_id is an id string for the image
- image_url is also a string
Compulsory fields to set in order to generate XML:
- set_sex("Male/Female")
- set_is_expression_pattern(True/False)
- add_depicted_anatomy_for_expressed_feature(ont_term)
Other necessary fields to set for usable XML:
- set_expressed_feature
- set_visualisation_method
Set by default:
- sample prep: chemically fixed; whole mount tissue
- imaging methods: confocal microscopy
- image has 2 channels - one background, and one signal.
- organism: Dmel
- stage: adult
- Background channel anatomy: adult brain
- Dimensions = 512,512,512
"""
    # Consider ditching this subclass if we don't find a bunch of more specific things to say. Might be better to have subclasses for neuron, clone and expression pattern
# One doc for all images.
def __init__(self, ont, image_dataset, vfb_image_id, image_url):
self.ont = ont
self.doc = image_dataset.doc
self.vfb_image_id = vfb_image_id
self._initialise_image()
self.image.image_description.image_url = image_url
self.set_source(image_dataset.source)
self.stage = gen_OntologyTerm(ont, "FBdv_00005369") # Hmmmm - global!
self.image.image_description.host = self.host
self.set_dimensions(512, 512, 512)
self.add_background_depicted_entity("FBbt_00003624", "background channel", "Manual")
ita = phisSchema.ImageTypeArray()
ita.append("expression") # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't this there is any case for using both.
self.image.image_description.image_type = ita
self.image.image_description.sample_type = "wild type"
def set_sex(self, sex):
"""sex = string "Male"/"Femle". Automatically sets doc.image.organism"""
self.set_organism(self.stage, sex)
# Test
# For testing purposes. Will be autogenerated from ontology files in full run)
# Notes
# Assignment is simple - once you get all the way out to a node.
#depicted.termId = "FBbi_1234567"
#depicted.termLabel = "fubar"
# Append and instance of depicted to the list (el)
#image.depicted_anatomical_structure = pyxb.BIND()
#image.depicted_anatomical_structure.append(depicted)
# Testing
#print image.depicted_anatomical_structure.toxml()
# '<?xml version="1.0" ?><depicted_anatomical_structure><el><anatomy_ontology_id>FBbi_1234567</anatomy_ontology_id><anatomy_ontology_term>fubar</anatomy_ontology_term></el></depicted_anatomical_structure>'
# But all this feels quite verbose - can I make use of the Factory methods on some nodes to make this easier?
|
PhenoImageShare/PhenoImageShare
|
VFB_import/src/VFB2PhisXML.py
|
Python
|
apache-2.0
| 15,991 | 0.00863 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
import datetime
class TestAutoBlacklist(common.TransactionCase):
def test_mail_bounced_auto_blacklist(self):
mass_mailing_contacts = self.env['mail.mass_mailing.contact']
mass_mailing = self.env['mail.mass_mailing']
mail_blacklist = self.env['mail.blacklist']
mail_statistics = self.env['mail.mail.statistics']
mail_thread = self.env['mail.thread']
# create mailing contact record
self.mailing_contact_1 = mass_mailing_contacts.create({'name': 'test email 1', 'email': 'Test1@email.com'})
# create bounced history
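        # record several historical bounces first so that the live bounce at the end of
        # this test pushes the contact over the auto-blacklist threshold (the threshold
        # itself lives in the mass_mailing bounce handling, not in this test)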
        for weeks_ago in (2, 3, 4, 5):
            mail_statistics.create({
                'model': 'mail.mass_mailing.contact',
                'res_id': self.mailing_contact_1.id,
                'bounced': datetime.datetime.now() - datetime.timedelta(weeks=weeks_ago),
                'email': self.mailing_contact_1.email
            })
            self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
# create mass mailing record
self.mass_mailing = mass_mailing.create({
'name': 'test',
'subject': 'Booooounce!',
'mailing_domain': [('id', 'in',
[self.mailing_contact_1.id])],
'body_html': 'This is a bounced mail for auto blacklist demo'})
self.mass_mailing.put_in_queue()
res_ids = self.mass_mailing.get_remaining_recipients()
composer_values = {
'body': self.mass_mailing.convert_links()[self.mass_mailing.id],
'subject': self.mass_mailing.name,
'model': self.mass_mailing.mailing_model_real,
'email_from': self.mass_mailing.email_from,
'composition_mode': 'mass_mail',
'mass_mailing_id': self.mass_mailing.id,
'mailing_list_ids': [(4, l.id) for l in self.mass_mailing.contact_list_ids],
}
composer = self.env['mail.compose.message'].with_context(
active_ids=res_ids,
mass_mailing_seen_list=self.mass_mailing._get_seen_list()
).create(composer_values)
composer.send_mail()
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now(),
'email': self.mailing_contact_1.email
})
# call bounced
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
# check blacklist
blacklist_record = mail_blacklist.search([('email', '=', self.mailing_contact_1.email)])
self.assertEqual(len(blacklist_record), 1,
'The email %s must be blacklisted' % self.mailing_contact_1.email)
|
t3dev/odoo
|
addons/test_mass_mailing/tests/test_mail_auto_blacklist.py
|
Python
|
gpl-3.0
| 4,008 | 0.002994 |
# -*- coding: utf-8 -*-
"""
Wikibase time representation (WbTime).
"""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
import re
import json
try:
long
except NameError:
long = int
class WbTime(object):
"""A Wikibase time representation."""
PRECISION = {'1000000000': 0,
'100000000': 1,
'10000000': 2,
'1000000': 3,
'100000': 4,
'10000': 5,
'millenia': 6,
'century': 7,
'decade': 8,
'year': 9,
'month': 10,
'day': 11,
'hour': 12,
'minute': 13,
'second': 14
}
FORMATSTR = '{0:+04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
def __init__(self, year=None, month=None, day=None,
hour=None, minute=None, second=None,
precision=None, before=0, after=0,
timezone=0, calendarmodel=None):
"""
Create a new WbTime object.
The precision can be set by the Wikibase int value (0-14) or by a human
readable string, e.g., 'hour'. If no precision is given, it is set
according to the given time units.
"""
if year is None:
raise ValueError('no year given')
self.precision = self.PRECISION['second']
if second is None:
self.precision = self.PRECISION['minute']
second = 0
if minute is None:
self.precision = self.PRECISION['hour']
minute = 0
if hour is None:
self.precision = self.PRECISION['day']
hour = 0
if day is None:
self.precision = self.PRECISION['month']
day = 1
if month is None:
self.precision = self.PRECISION['year']
month = 1
self.year = long(year)
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.after = after
self.before = before
self.timezone = timezone
self.calendarmodel = calendarmodel
# if precision is given it overwrites the autodetection above
if precision is not None:
if (isinstance(precision, int) and
precision in self.PRECISION.values()):
self.precision = precision
elif precision in self.PRECISION:
self.precision = self.PRECISION[precision]
else:
raise ValueError('Invalid precision: "%s"' % precision)
@classmethod
def fromTimestr(cls, datetimestr, precision=14, before=0, after=0,
timezone=0, calendarmodel=None):
match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
datetimestr)
if not match:
raise ValueError(u"Invalid format: '%s'" % datetimestr)
t = match.groups()
return cls(long(t[0]), int(t[1]), int(t[2]),
int(t[3]), int(t[4]), int(t[5]),
precision, before, after, timezone, calendarmodel)
def toTimestr(self):
"""
Convert the data to a UTC date/time string.
@return: str
"""
return self.FORMATSTR.format(self.year, self.month, self.day,
self.hour, self.minute, self.second)
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: dict
"""
json = {'time': self.toTimestr(),
'precision': self.precision,
'after': self.after,
'before': self.before,
'timezone': self.timezone,
'calendarmodel': self.calendarmodel
}
return json
@classmethod
def fromWikibase(cls, ts):
return cls.fromTimestr(ts[u'time'], ts[u'precision'],
ts[u'before'], ts[u'after'],
ts[u'timezone'], ts[u'calendarmodel'])
def __str__(self):
return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
separators=(',', ': '))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return u"WbTime(year=%(year)d, month=%(month)d, day=%(day)d, " \
u"hour=%(hour)d, minute=%(minute)d, second=%(second)d, " \
u"precision=%(precision)d, before=%(before)d, after=%(after)d, " \
u"timezone=%(timezone)d, calendarmodel='%(calendarmodel)s')" \
% self.__dict__
|
wikimedia/pywikibot-wikibase
|
pywikibase/wbtime.py
|
Python
|
mit
| 4,755 | 0 |
class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
num1, cnt1 = 0, 0
num2, cnt2 = 1, 0
for num in nums:
if num == num1:
cnt1 += 1
elif num == num2:
cnt2 += 1
else:
if cnt1 == 0:
num1, cnt1 = num, 1
elif cnt2 == 0:
num2, cnt2 = num, 1
else:
cnt1, cnt2 = cnt1 - 1, cnt2 - 1
return [num for num in (num1, num2) if nums.count(num) > len(nums) // 3]
|
YiqunPeng/Leetcode-pyq
|
solutions/229MajorityElementII.py
|
Python
|
gpl-3.0
| 671 | 0.007452 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
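# Note: generate_random_pass() samples 10 distinct positions from this string, so the
# repeated letters ('aaaa', 'EEEE', ...) simply bias the draw towards the most readable
# characters.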
def generate_random_pass():
return ''.join(random.sample(RANDOM_PASS_CHARACTERS,10))
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='base', group_xml_id='group_no_one', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', size=512, type='char', readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool[model].read(cr, uid, [res_id], context=context)[0]
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, {'name': share_group_name, 'share': True}, {'noadmin': True})
def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
"""Create one new res.users record for each email address provided in
wizard_data.new_users, ignoring already existing users.
Populates wizard_data.result_line_ids with one new line for
each user (existing or not). New users will also have a value
for the password field, so they can receive it by email.
Returns the ids of the created users, and the ids of the
ignored, existing ones."""
context = dict(context or {})
user_obj = self.pool.get('res.users')
current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
# modify context to disable shortcuts when creating share users
context['noshortcut'] = True
context['no_reset_password'] = True
created_ids = []
existing_ids = []
if wizard_data.user_type == 'emails':
# get new user list from email data
new_users = (wizard_data.new_users or '').split('\n')
new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
for new_user in new_users:
# Ignore blank lines
new_user = new_user.strip()
if not new_user: continue
# Ignore the user if it already exists.
if not wizard_data.invite:
existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
else:
existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
existing_ids.extend(existing)
if existing:
new_line = { 'user_id': existing[0],
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
continue
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_user,
'password': new_pass,
'name': new_user,
'email': new_user,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
elif wizard_data.user_type == 'embedded':
new_login = 'embedded-%s' % (uuid.uuid4().hex,)
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_login,
'password': new_pass,
'name': new_login,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
return created_ids, existing_ids
def _create_action(self, cr, uid, values, context=None):
if context is None:
context = {}
new_context = context.copy()
for key in context:
if key.startswith('default_'):
del new_context[key]
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
def _shared_action_def(self, cr, uid, wizard_data, context=None):
copied_action = wizard_data.action_id
if wizard_data.access_mode == 'readonly':
view_mode = wizard_data.view_type
view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
else:
view_mode = copied_action.view_mode
view_id = copied_action.view_id.id
action_def = {
'name': wizard_data.name,
'domain': copied_action.domain,
'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
'res_model': copied_action.res_model,
'view_mode': view_mode,
'view_type': copied_action.view_type,
'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
'view_id': view_id,
'auto_search': True,
}
if copied_action.view_ids:
action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
'view_mode': x.view_mode,
'view_id': x.view_id.id })
for x in copied_action.view_ids
if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
]
return action_def
def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
"""Create a shortcut to reach the shared data, as well as the corresponding action, for
each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.
Meant to be overridden for special cases.
"""
values = self._shared_action_def(cr, uid, wizard_data, context=None)
user_obj = self.pool.get('res.users')
for user_id in user_ids:
action_id = self._create_action(cr, user_id, values)
if make_home:
# We do this only for new share users, as existing ones already have their initial home
# action. Resetting to the default menu does not work well as the menu is rather empty
# and does not contain the shortcuts in most cases.
user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
"""Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
model with ID ``model_id``.
:param model: browsable model to start loading relationships from
:param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
:param relation_fields: list of previously followed relationship tuples - to avoid duplicates
during recursion
:param suffix: optional suffix to append to the field path to reach the main object
"""
if relation_fields is None:
relation_fields = []
local_rel_fields = []
models = [x[1].model for x in relation_fields]
model_obj = self.pool.get('ir.model')
model_osv = self.pool[model.model]
for field in model_osv._fields.itervalues():
ftype = field.type
relation_field = None
if ftype in ttypes and field.comodel_name not in models:
relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',field.comodel_name)])[0]
relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
relation_osv = self.pool[field.comodel_name]
#skip virtual one2many fields (related, ...) as there is no reverse relationship
if ftype == 'one2many' and field.inverse_name:
# don't record reverse path if it's not a real m2o (that happens, but rarely)
dest_fields = relation_osv._fields
reverse_rel = field.inverse_name
if reverse_rel in dest_fields and dest_fields[reverse_rel].type == 'many2one':
relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
local_rel_fields.append((relation_field, relation_model_browse))
for parent in relation_osv._inherits:
if parent not in models:
parent_model = self.pool[parent]
parent_fields = parent_model._fields
parent_model_browse = model_obj.browse(cr, UID_ROOT,
model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
if relation_field and field.inverse_name in parent_fields:
# inverse relationship is available in the parent
local_rel_fields.append((relation_field, parent_model_browse))
else:
# TODO: can we setup a proper rule to restrict inherited models
# in case the parent does not contain the reverse m2o?
local_rel_fields.append((None, parent_model_browse))
if relation_model_id != model.id and ftype in ['one2many', 'many2many']:
local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
[ftype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
return local_rel_fields
def _get_relationship_classes(self, cr, uid, model, context=None):
"""Computes the *relationship classes* reachable from the given
model. The 4 relationship classes are:
- [obj0]: the given model itself (and its parents via _inherits, if any)
- [obj1]: obj0 and all other models recursively accessible from
obj0 via one2many relationships
- [obj2]: obj0 and all other models recursively accessible from
obj0 via one2many and many2many relationships
- [obj3]: all models recursively accessible from obj1 via many2one
relationships
Each class is returned as a list of pairs [(field,model_browse)], where
``model`` is the browse_record of a reachable ir.model, and ``field`` is
the dot-notation reverse relationship path coming from that model to obj0,
or None if there is no reverse path.
:return: ([obj0], [obj1], [obj2], [obj3])
"""
# obj0 class and its parents
obj0 = [(None, model)]
model_obj = self.pool[model.model]
ir_model_obj = self.pool.get('ir.model')
for parent in model_obj._inherits:
parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
obj0 += [(None, parent_model_browse)]
obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
for dummy, model in obj1:
obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
return obj0, obj1, obj2, obj3
def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
"""Adds access rights to group_id on object models referenced in ``fields_relations``,
        intersecting with the access rights of the current user to avoid granting too many rights
"""
model_access_obj = self.pool.get('ir.model.access')
user_obj = self.pool.get('res.users')
target_model_ids = [x[1].id for x in fields_relations]
perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
current_user = user_obj.browse(cr, uid, uid, context=context)
current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[x.id for x in current_user.groups_id], target_model_ids, context=context)
group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[group_id], target_model_ids, context=context)
_logger.debug("Current user access matrix: %r", current_user_access_map)
_logger.debug("New group current access matrix: %r", group_access_map)
# Create required rights if allowed by current user rights and not
# already granted
for dummy, model in fields_relations:
            # mail.message is transversal: it should not receive the access rights directly
if model.model in ['mail.message']: continue
values = {
'name': _('Copied access for sharing'),
'group_id': group_id,
'model_id': model.id,
}
current_user_access_line = current_user_access_map.get(model.model,set())
existing_group_access_line = group_access_map.get(model.model,set())
need_creation = False
for perm in perms_to_add:
if perm in current_user_access_line \
and perm not in existing_group_access_line:
values.update({perm:True})
group_access_map.setdefault(model.model, set()).add(perm)
need_creation = True
if need_creation:
model_access_obj.create(cr, UID_ROOT, values)
_logger.debug("Creating access right for model %s with values: %r", model.model, values)
def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
rule_obj = self.pool.get('ir.rule')
rules_done = set()
for group in current_user.groups_id:
for dummy, model in fields_relations:
for rule in group.rule_groups:
if rule.id in rules_done:
continue
rules_done.add(rule.id)
if rule.model_id.id == model.id:
if 'user.' in rule.domain_force:
# Above pattern means there is likely a condition
# specific to current user, so we must copy the rule using
# the evaluated version of the domain.
                            # And it's better to copy once too often than once too few
rule_obj.copy(cr, UID_ROOT, rule.id, default={
'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain, # evaluated version!
})
_logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
else:
# otherwise we can simply link the rule to keep it dynamic
rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
'groups': [(4,group_id)]
})
_logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
"""Verifies that the given rule only belongs to the given group_id, otherwise
        duplicates it for the current group and disconnects the original from the group (the original rule is not deleted).
The duplicated rule has the original domain copied verbatim, without
any evaluation.
Returns the final rule to use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
"""Add a new ir.rule entry for model_id and domain on the target group_id.
If ``restrict`` is True, instead of adding a rule, the domain is
combined with AND operator with all existing rules in the group, to implement
an additional restriction (as of 6.1, multiple rules in the same group are
OR'ed by default, so a restriction must alter all existing rules)
This is necessary because the personal rules of the user that is sharing
are first copied to the new share group. Afterwards the filters used for
sharing are applied as an additional layer of rules, which are likely to
apply to the same model. The default rule algorithm would OR them (as of 6.1),
which would result in a combined set of permission that could be larger
than those of the user that is sharing! Hence we must forcefully AND the
rules at this stage.
One possibly undesirable effect can appear when sharing with a
pre-existing group, in which case altering pre-existing rules would not
be desired. This is addressed in the portal module.
"""
if rule_name is None:
rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
rule_obj = self.pool.get('ir.rule')
rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
if rule_ids:
for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
if rule.domain_force == domain:
# don't create it twice!
if restrict:
continue
else:
_logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
return
if restrict:
# restricting existing rules is done by adding the clause
# with an AND, but we can't alter the rule if it belongs to
# other groups, so we duplicate if needed
rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
eval_ctx = rule_obj._eval_context_for_combinations()
org_domain = expression.normalize_domain(eval(rule.domain_force, eval_ctx))
new_clause = expression.normalize_domain(eval(domain, eval_ctx))
combined_domain = expression.AND([new_clause, org_domain])
rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
_logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
if not rule_ids or not restrict:
# Adding the new rule in the group is ok for normal cases, because rules
# in the same group and for the same model will be combined with OR
# (as of v6.1), so the desired effect is achieved.
rule_obj.create(cr, UID_ROOT, {
'name': rule_name,
'model_id': model_id,
'domain_force': domain,
'groups': [(4,group_id)]
})
_logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
try:
domain = safe_eval(wizard_data.domain)
if domain:
for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not receive the access rights directly
if model.model in ['mail.message']: continue
related_domain = []
if not rel_field: continue
for element in domain:
if expression.is_leaf(element):
left, operator, right = element
left = '%s.%s'%(rel_field, left)
element = left, operator, right
related_domain.append(element)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=str(related_domain),
rule_name=rule_name, restrict=True, context=context)
except Exception:
_logger.exception('Failed to create share access')
raise osv.except_osv(_('Sharing access cannot be created.'),
_('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Technical group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
"""Creates the appropriate share group and share users, and populates
result_line_ids of wizard_data with one line for each user.
:return: a tuple composed of the new group id (to which the shared access should be granted),
the ids of the new share users that have been created and the ids of the existing share users
"""
group_id = self._create_share_group(cr, uid, wizard_data, context=context)
# First create any missing user, based on the email addresses provided
new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
# Finally, setup the new action and shortcut for the users.
if existing_ids:
# existing users still need to join the new group
self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
'groups_id': [(4,group_id)],
})
            # existing users don't need their home action replaced, only a new shortcut
self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
if new_ids:
# new users need a new shortcut AND a home action
self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
return group_id, new_ids, existing_ids
def go_step_2(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr, uid, ids[0], context=context)
self._check_preconditions(cr, uid, wizard_data, context=context)
# Create shared group and users
group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
model_obj = self.pool.get('ir.model')
model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
model = model_obj.browse(cr, uid, model_id, context=context)
# ACCESS RIGHTS
# We have several classes of objects that should receive different access rights:
# Let:
# - [obj0] be the target model itself (and its parents via _inherits, if any)
# - [obj1] be the target model and all other models recursively accessible from
# obj0 via one2many relationships
# - [obj2] be the target model and all other models recursively accessible from
# obj0 via one2many and many2many relationships
# - [obj3] be all models recursively accessible from obj1 via many2one relationships
# (currently not used)
obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
mode = wizard_data.access_mode
# Add access to [obj0] and [obj1] according to chosen mode
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
# Add read-only access (always) to [obj2]
self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
# IR.RULES
# A. On [obj0], [obj1], [obj2]: add all rules from all groups of
# the user that is sharing
# Warning: rules must be copied instead of linked if they contain a reference
# to uid or if the rule is shared with other groups (and it must be replaced correctly)
# B. On [obj0]: 1 rule with domain of shared action
# C. For each model in [obj1]: 1 rule in the form:
# many2one_rel.domain_of_obj0
# where many2one_rel is the many2one used in the definition of the
# one2many, and domain_of_obj0 is the sharing domain
# For example if [obj0] is project.project with a domain of
# ['id', 'in', [1,2]]
# then we will have project.task in [obj1] and we need to create this
# ir.rule on project.task:
# ['project_id.id', 'in', [1,2]]
# A.
all_relations = obj0 + obj1 + obj2
self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
# B.
main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=main_domain,
restrict=True, context=context)
# C.
self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
# refresh wizard_data
wizard_data = self.browse(cr, uid, ids[0], context=context)
# EMAILS AND NOTIFICATIONS
# A. Not invite: as before
# -> send emails to destination users
# B. Invite (OpenSocial)
# -> subscribe all users (existing and new) to the record
# -> send a notification with a summary to the current record
# -> send a notification to all users; users allowing to receive
# emails in preferences will receive it
# new users by default receive all notifications by email
# A.
if not wizard_data.invite:
self.send_emails(cr, uid, wizard_data, context=context)
# B.
else:
# Invite (OpenSocial): automatically subscribe users to the record
res_id = 0
for cond in safe_eval(main_domain):
if cond[0] == 'id':
res_id = cond[2]
# Record id not found: issue
if res_id <= 0:
raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
self.pool[model.model].message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
# self.send_invite_email(cr, uid, wizard_data, context=context)
# self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
# CLOSE
# A. Not invite: as before
# B. Invite: skip summary screen, get back to the record
# A.
if not wizard_data.invite:
dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
return {
'name': _('Shared access created!'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'share.wizard',
'view_id': False,
'res_id': ids[0],
'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'target': 'new'
}
# B.
else:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': model.model,
'view_id': False,
'res_id': res_id,
'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
subject = _('Invitation')
body = 'has been <b>shared</b> with'
tmp_idx = 0
for result_line in wizard_data.result_line_ids:
body += ' @%s' % (result_line.user_id.login)
            if tmp_idx < len(wizard_data.result_line_ids)-2:
                body += ','
            elif tmp_idx == len(wizard_data.result_line_ids)-2:
                body += ' and'
            tmp_idx += 1
body += '.'
return self.pool[model_name].message_post(cr, uid, [res_id], body=body, context=context)
def send_invite_email(self, cr, uid, wizard_data, context=None):
# TDE Note: not updated because will disappear
message_obj = self.pool.get('mail.message')
notification_obj = self.pool.get('mail.notification')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
body = _("Hello,\n\n")
body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
if wizard_data.message:
body += "%s\n\n" % (wizard_data.message)
if result_line.newly_created:
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
body += "%s: %s" % (_("Password"), result_line.password) + "\n"
body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
body += _("The documents have been automatically added to your subscriptions.\n\n")
body += '%s\n\n' % ((user.signature or ''))
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
def send_emails(self, cr, uid, wizard_data, context=None):
_logger.info('Sending share notifications by email...')
mail_mail = self.pool.get('mail.mail')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
mail_ids = []
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = wizard_data.name
body = _("Hello,\n\n")
body += _("I've shared %s with you!\n\n") % wizard_data.name
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
if wizard_data.message:
body += '%s\n\n' % (wizard_data.message)
if result_line.newly_created:
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
body += "%s: %s\n" % (_("Password"), result_line.password)
body += "%s: %s\n" % (_("Database"), cr.dbname)
else:
body += _("The documents have been automatically added to your current Odoo documents.\n")
body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
body += "\n\n%s\n\n" % ( (user.signature or '') )
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
mail_ids.append(mail_mail.create(cr, uid, {
'email_from': user.email,
'email_to': email_to,
'subject': subject,
'body_html': '<pre>%s</pre>' % body}, context=context))
# force direct delivery, as users expect instant notification
mail_mail.send(cr, uid, mail_ids, context=context)
_logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
wizard = self.browse(cr, uid, ids[0], context)
options = dict(title=opt_title, search=opt_search)
return {'value': {'embed_code': self._generate_embedded_code(wizard, options)}}
class share_result_line(osv.osv_memory):
_name = 'share.wizard.result.line'
_rec_name = 'user_id'
def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
data = dict(dbname=cr.dbname, login=this.login, password=this.password)
if this.share_wizard_id and this.share_wizard_id.action_id:
data['action_id'] = this.share_wizard_id.action_id.id
this = this.with_context(share_url_template_hash_arguments=['action_id'])
result[this.id] = this.share_wizard_id.share_url_template() % data
return result
_columns = {
'user_id': fields.many2one('res.users', required=True, readonly=True),
'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
'password': fields.char('Password', size=64, readonly=True),
'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
'newly_created': fields.boolean('Newly created', readonly=True),
}
_defaults = {
'newly_created': True,
}
|
OpusVL/odoo
|
addons/share/wizard/share_wizard.py
|
Python
|
agpl-3.0
| 50,754 | 0.006147 |
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from flake_suppressor import data_types
class ExpectationUnittest(unittest.TestCase):
def testAppliesToResultNonResult(self):
"""Tests that AppliesToResult properly fails when given a non-Result."""
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
with self.assertRaises(AssertionError):
e.AppliesToResult(None)
def testAppliesToResultApplies(self):
"""Tests that AppliesToResult properly returns True on expected Results."""
# Exact match.
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
self.assertTrue(e.AppliesToResult(r))
# Tag subset
r = data_types.Result('suite', 'test', ('win', 'nvidia', 'release'), 'id')
self.assertTrue(e.AppliesToResult(r))
# Glob match
e = data_types.Expectation('t*', ['win', 'nvidia'], ['Failure'])
self.assertTrue(e.AppliesToResult(r))
def testAppliesToResultDoesNotApply(self):
"""Tests that AppliesToResult properly returns False on expected Results."""
# Name mismatch
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
r = data_types.Result('suite', 'notatest', ('win', 'nvidia'), 'id')
self.assertFalse(e.AppliesToResult(r))
# Tag superset
r = data_types.Result('suite', 'test', tuple(['win']), 'id')
self.assertFalse(e.AppliesToResult(r))
class ResultUnittest(unittest.TestCase):
def testTupleEnforced(self):
"""Tests that tags must be in a tuple."""
with self.assertRaises(AssertionError):
_ = data_types.Result('suite', 'test', ['win', 'nvidia'], 'id')
def testWildcardsDisallowed(self):
with self.assertRaises(AssertionError):
_ = data_types.Result('suite', 't*', ('win', 'nvidia'), 'id')
def testHashability(self):
"""Tests that Result objects are hashable."""
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
_ = set([r])
def testEquality(self):
"""Tests that equality is properly calculated."""
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
other = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
self.assertEqual(r, other)
other = data_types.Result('notsuite', 'test', ('win', 'nvidia'), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'nottest', ('win', 'nvidia'), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'test', tuple(['win']), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'test', ('win', 'nvidia'), 'notid')
self.assertNotEqual(r, other)
other = None
self.assertNotEqual(r, other)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nwjs/chromium.src
|
content/test/gpu/flake_suppressor/data_types_unittest.py
|
Python
|
bsd-3-clause
| 2,947 | 0.004411 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Window selection algorithm.
This module aims to provide a window selection algorithm suitable for
calculating phase misfits between two seismic waveforms.
The main function is the select_windows() function. The selection process is a
multi-stage process. Initially all time steps are considered to be valid in
the sense as being suitable for window selection. Then a number of selectors
is applied, progressively excluding more and more time steps.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import itertools
import math
import numpy as np
from obspy import geodetics
import obspy.signal.filter
from scipy.signal import argrelextrema
def flatnotmasked_contiguous(time_windows):
"""
    Helper function that makes it possible to loop even when there are no
    unmasked time windows left.
"""
fc = np.ma.flatnotmasked_contiguous(time_windows)
    # If nothing could be found, return an empty list so callers can still
    # iterate without special-casing a fully masked array.
if fc is None:
return []
else:
return fc
def find_local_extrema(data):
"""
    Function finding local extrema. It can also deal with flat extrema,
    e.g. a flat top or bottom. In that case the first index of each flat
    segment will be returned.
Returns a tuple of maxima and minima indices.
"""
length = len(data) - 1
diff = np.diff(data)
flats = np.argwhere(diff == 0)
# Discard neighbouring flat points.
new_flats = list(flats[0:1])
for i, j in zip(flats[:-1], flats[1:]):
if j - i == 1:
continue
new_flats.append(j)
flats = new_flats
maxima = []
minima = []
    # Go over each flat position and check whether it is a maximum or a minimum.
for idx in flats:
l_type = "left"
r_type = "right"
for i in itertools.count():
this_idx = idx - i - 1
if diff[this_idx] < 0:
l_type = "minima"
break
elif diff[this_idx] > 0:
l_type = "maxima"
break
for i in itertools.count():
this_idx = idx + i + 1
if this_idx >= len(diff):
break
if diff[this_idx] < 0:
r_type = "maxima"
break
elif diff[this_idx] > 0:
r_type = "minima"
break
if r_type != l_type:
continue
if r_type == "maxima":
maxima.append(int(idx))
else:
minima.append(int(idx))
maxs = set(list(argrelextrema(data, np.greater)[0]))
mins = set(list(argrelextrema(data, np.less)[0]))
peaks, troughs = (
sorted(list(maxs.union(set(maxima)))),
sorted(list(mins.union(set(minima)))))
# Special case handling for missing one or the other.
if not peaks and not troughs:
return np.array([], dtype=np.int32), np.array([], dtype=np.int32)
elif not peaks:
if 0 not in troughs:
peaks.insert(0, 0)
if length not in troughs:
peaks.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
elif not troughs:
if 0 not in peaks:
troughs.insert(0, 0)
if length not in peaks:
troughs.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
# Mark the first and last values as well to facilitate the peak and
# trough marching algorithm
if 0 not in peaks and 0 not in troughs:
if peaks[0] < troughs[0]:
troughs.insert(0, 0)
else:
peaks.insert(0, 0)
if length not in peaks and length not in troughs:
if peaks[-1] < troughs[-1]:
peaks.append(length)
else:
troughs.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
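# Illustrative, hand-checked example (values assumed for demonstration): for
# the flat-topped series np.array([0., 1., 1., 0., -1., 0.]) this returns
# peaks [1, 5] and troughs [0, 4] -- the first sample of the flat maximum plus
# the marked right edge, and the marked left edge plus the minimum at index 4.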
def find_closest(ref_array, target):
"""
For every value in target, find the index of ref_array to which
the value is closest.
from http://stackoverflow.com/a/8929827/1657047
:param ref_array: The reference array. Must be sorted!
:type ref_array: :class:`numpy.ndarray`
:param target: The target array.
:type target: :class:`numpy.ndarray`
>>> ref_array = np.arange(0, 20.)
>>> target = np.array([-2, 100., 2., 2.4, 2.5, 2.6])
>>> find_closest(ref_array, target)
array([ 0, 19, 2, 2, 3, 3])
"""
# A must be sorted
idx = ref_array.searchsorted(target)
idx = np.clip(idx, 1, len(ref_array) - 1)
left = ref_array[idx - 1]
right = ref_array[idx]
idx -= target - left < right - target
return idx
def _plot_mask(new_mask, old_mask, name=None):
"""
Helper function plotting the remaining time segments after an elimination
stage.
Useful to figure out which stage is responsible for a certain window
being picked/rejected.
:param new_mask: The mask after the elimination stage.
:param old_mask: The mask before the elimination stage.
:param name: The name of the elimination stage.
:return:
"""
# Lazy imports as not needed by default.
import matplotlib.pylab as plt # NOQA
import matplotlib.patheffects as PathEffects # NOQA
old_mask = old_mask.copy()
new_mask = new_mask.copy()
new_mask.mask = np.bitwise_xor(old_mask.mask, new_mask.mask)
old_mask.mask = np.invert(old_mask.mask)
for i in flatnotmasked_contiguous(old_mask):
plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
color="gray", alpha=0.3, lw=0)
new_mask.mask = np.invert(new_mask.mask)
for i in flatnotmasked_contiguous(new_mask):
plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
color="#fb9a99", lw=0)
if name:
plt.text(len(new_mask) - 1 - 20, 0.5, name, verticalalignment="center",
horizontalalignment="right",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")],
fontweight=500)
plt.xlim(0, len(new_mask) - 1)
plt.ylim(0, 1)
plt.yticks([])
plt.gca().xaxis.set_ticklabels([])
def _window_generator(data_length, window_width):
"""
Simple generator yielding start and stop indices for sliding windows.
:param data_length: The complete length of the data series over which to
slide the window.
:param window_width: The desired window width.
"""
window_start = 0
while True:
window_end = window_start + window_width
if window_end > data_length:
break
yield (window_start, window_end, window_start + window_width // 2)
window_start += 1
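# Illustrative behaviour (assumed values): for data_length=5 and
# window_width=3 the generator yields (0, 3, 1), (1, 4, 2) and (2, 5, 3),
# i.e. (start, stop, midpoint) index triples for every full-width window.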
def _log_window_selection(tr_id, msg):
"""
Helper function for consistent output during the window selection.
:param tr_id: The id of the current trace.
:param msg: The message to be printed.
"""
print "[Window selection for %s] %s" % (tr_id, msg)
# Dictionary to cache the TauPyModel so there is no need to reinitialize it
# each time which is a fairly expensive operation.
TAUPY_MODEL_CACHE = {}
def select_windows(data_trace, synthetic_trace, event_latitude,
event_longitude, event_depth_in_km,
station_latitude, station_longitude, minimum_period,
maximum_period,
min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
min_velocity=2.4, threshold_shift=0.30,
threshold_correlation=0.75, min_length_period=1.5,
min_peaks_troughs=2, max_energy_ratio=10.0,
min_envelope_similarity=0.2,
verbose=False, plot=False):
"""
Window selection algorithm for picking windows suitable for misfit
calculation based on phase differences.
Returns a list of windows which might be empty due to various reasons.
    This function is really long and does a lot of things. For a more detailed
description, please see the LASIF paper.
:param data_trace: The data trace.
:type data_trace: :class:`~obspy.core.trace.Trace`
:param synthetic_trace: The synthetic trace.
:type synthetic_trace: :class:`~obspy.core.trace.Trace`
:param event_latitude: The event latitude.
:type event_latitude: float
:param event_longitude: The event longitude.
:type event_longitude: float
:param event_depth_in_km: The event depth in km.
:type event_depth_in_km: float
:param station_latitude: The station latitude.
:type station_latitude: float
:param station_longitude: The station longitude.
:type station_longitude: float
:param minimum_period: The minimum period of the data in seconds.
:type minimum_period: float
:param maximum_period: The maximum period of the data in seconds.
:type maximum_period: float
:param min_cc: Minimum normalised correlation coefficient of the
complete traces.
:type min_cc: float
:param max_noise: Maximum relative noise level for the whole trace.
Measured from maximum amplitudes before and after the first arrival.
:type max_noise: float
:param max_noise_window: Maximum relative noise level for individual
windows.
:type max_noise_window: float
:param min_velocity: All arrivals later than those corresponding to the
threshold velocity [km/s] will be excluded.
:type min_velocity: float
:param threshold_shift: Maximum allowable time shift within a window,
as a fraction of the minimum period.
:type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
within a window.
:type threshold_correlation: float
:param min_length_period: Minimum length of the time windows relative to
the minimum period.
:type min_length_period: float
:param min_peaks_troughs: Minimum number of extrema in an individual
time window (excluding the edges).
:type min_peaks_troughs: float
:param max_energy_ratio: Maximum energy ratio between data and
synthetics within a time window. Don't make this too small!
:type max_energy_ratio: float
:param min_envelope_similarity: The minimum similarity of the envelopes of
both data and synthetics. This essentially assures that the
amplitudes of data and synthetics can not diverge too much within a
window. It is a bit like the inverse of the ratio of both envelopes
so a value of 0.2 makes sure neither amplitude can be more then 5
times larger than the other.
:type min_envelope_similarity: float
:param verbose: No output by default.
:type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
:type plot: bool
"""
# Shortcuts to frequently accessed variables.
data_starttime = data_trace.stats.starttime
data_delta = data_trace.stats.delta
dt = data_trace.stats.delta
npts = data_trace.stats.npts
synth = synthetic_trace.data
data = data_trace.data
times = data_trace.times()
# Fill cache if necessary.
if not TAUPY_MODEL_CACHE:
from obspy.taup import TauPyModel # NOQA
TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
model = TAUPY_MODEL_CACHE["model"]
# -------------------------------------------------------------------------
# Geographical calculations and the time of the first arrival.
# -------------------------------------------------------------------------
dist_in_deg = geodetics.locations2degrees(station_latitude,
station_longitude,
event_latitude, event_longitude)
dist_in_km = geodetics.calc_vincenty_inverse(
station_latitude, station_longitude, event_latitude,
event_longitude)[0] / 1000.0
# Get only a couple of P phases which should be the first arrival
# for every epicentral distance. Its quite a bit faster than calculating
# the arrival times for every phase.
# Assumes the first sample is the centroid time of the event.
tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
distance_in_degree=dist_in_deg,
phase_list=["ttp"])
# Sort just as a safety measure.
tts = sorted(tts, key=lambda x: x.time)
first_tt_arrival = tts[0].time
# -------------------------------------------------------------------------
# Window settings
# -------------------------------------------------------------------------
# Number of samples in the sliding window. Currently, the length of the
# window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an odd number, just to have a trivial midpoint
    # definition; one sample does not matter much in any case.
window_length = int(round(float(2 * minimum_period) / dt))
if not window_length % 2:
window_length += 1
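    # Illustrative (assumed) numbers used in the comments below: a minimum
    # period of 40 s sampled at dt = 0.5 s gives round(2 * 40 / 0.5) = 160,
    # bumped to 161 samples to make it odd.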
# Use a Hanning window. No particular reason for it but its a well-behaved
# window and has nice spectral properties.
taper = np.hanning(window_length)
# =========================================================================
# check if whole seismograms are sufficiently correlated and estimate
# noise level
# =========================================================================
# Overall Correlation coefficient.
norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
cc = np.sum(data * synth) / norm
if verbose:
_log_window_selection(data_trace.id,
"Correlation Coefficient: %.4f" % cc)
# Estimate noise level from waveforms prior to the first arrival.
idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
idx_end = max(10, idx_end)
idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
idx_start = max(10, idx_start)
if idx_start >= idx_end:
idx_start = max(0, idx_end - 10)
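    # With the illustrative numbers above and an assumed first arrival at
    # 120 s, the noise is measured over samples ceil((120 - 100) / 0.5) = 40
    # to ceil((120 - 20) / 0.5) = 200, i.e. an 80 s (two-period) stretch
    # ending half a period before the first arrival.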
abs_data = np.abs(data)
noise_absolute = abs_data[idx_start:idx_end].max()
noise_relative = noise_absolute / abs_data.max()
if verbose:
_log_window_selection(data_trace.id,
"Absolute Noise Level: %e" % noise_absolute)
_log_window_selection(data_trace.id,
"Relative Noise Level: %e" % noise_relative)
# Basic global rejection criteria.
accept_traces = True
if (cc < min_cc) and (noise_relative > max_noise / 3.0):
msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
if verbose:
_log_window_selection(data_trace.id, msg)
accept_traces = msg
if noise_relative > max_noise:
msg = "Noise level %.3f is above threshold of %.3f" % (
noise_relative, max_noise)
if verbose:
_log_window_selection(
data_trace.id, msg)
accept_traces = msg
# Calculate the envelope of both data and synthetics. This is to make sure
# that the amplitude of both is not too different over time and is
# used as another selector. Only calculated if the trace is generally
# accepted as it is fairly slow.
if accept_traces is True:
data_env = obspy.signal.filter.envelope(data)
synth_env = obspy.signal.filter.envelope(synth)
# -------------------------------------------------------------------------
# Initial Plot setup.
# -------------------------------------------------------------------------
# All the plot calls are interleaved. I realize this is really ugly but
# the alternative would be to either have two functions (one with plots,
# one without) or split the plotting function in various subfunctions,
# neither of which are acceptable in my opinion. The impact on
# performance is minimal if plotting is turned off: all imports are lazy
# and a couple of conditionals are cheap.
if plot:
import matplotlib.pylab as plt # NOQA
import matplotlib.patheffects as PathEffects # NOQA
if accept_traces is True:
plt.figure(figsize=(18, 12))
plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.95,
wspace=None, hspace=0.0)
grid = (31, 1)
# Axes showing the data.
data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
else:
            # Only show one axis if the traces are not accepted.
plt.figure(figsize=(18, 3))
# Plot envelopes if needed.
if accept_traces is True:
plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
label="data envelope")
plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
alpha=0.4, lw=0.5, label="synthetics envelope")
plt.plot(times, data, color="black", label="data", lw=1.5)
plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
label="synthetics", lw=1.5)
# Symmetric around y axis.
middle = data.mean()
d_max, d_min = data.max(), data.min()
r = max(d_max - middle, middle - d_min) * 1.1
ylim = (middle - r, middle + r)
xlim = (times[0], times[-1])
plt.ylim(*ylim)
plt.xlim(*xlim)
offset = (xlim[1] - xlim[0]) * 0.005
plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
plt.text(first_tt_arrival + offset,
ylim[1] - (ylim[1] - ylim[0]) * 0.02,
"first arrival", verticalalignment="top",
horizontalalignment="left", color="#ee6e00",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0], ylim[1],
colors="#ff7f00", lw=2)
plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
ylim[0] + (ylim[1] - ylim[0]) * 0.02,
"first arrival - min period / 2", verticalalignment="bottom",
horizontalalignment="right", color="#ee6e00",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
for velocity in [6, 5, 4, 3, min_velocity]:
tt = dist_in_km / velocity
plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
if velocity == min_velocity:
hal = "right"
o_s = -1.0 * offset
else:
hal = "left"
o_s = offset
plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
str(velocity) + " km/s", verticalalignment="bottom",
horizontalalignment=hal, color="0.15")
plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
ylim[0], ylim[1], colors="gray", lw=2)
plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
ylim[1] - (ylim[1] - ylim[0]) * 0.02,
"min surface velocity + min period / 2",
verticalalignment="top",
horizontalalignment="right", color="0.15", path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
color="gray")
plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
color="gray")
plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
"noise level", verticalalignment="bottom",
horizontalalignment="left", color="0.15",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
fontsize="small")
plt.gca().xaxis.set_ticklabels([])
# Plot the basic global information.
ax = plt.gca()
txt = (
"Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
% (cc, noise_absolute, noise_relative))
ax.text(0.01, 0.95, txt, transform=ax.transAxes,
fontdict=dict(fontsize="small", ha='left', va='top'),
bbox=dict(boxstyle="round", fc="w", alpha=0.8))
plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")
# Show plot and return if not accepted.
if accept_traces is not True:
txt = "Rejected: %s" % (accept_traces)
ax.text(0.99, 0.95, txt, transform=ax.transAxes,
fontdict=dict(fontsize="small", ha='right', va='top'),
bbox=dict(boxstyle="round", fc="red", alpha=1.0))
plt.show()
if accept_traces is not True:
return []
# Initialise masked arrays. The mask will be set to True where no
# windows are chosen.
time_windows = np.ma.ones(npts)
time_windows.mask = False
if plot:
old_time_windows = time_windows.copy()
    # Elimination Stage 1: Eliminate everything earlier than half a period
    # before the first theoretical arrival and everything later than half a
    # period after the arrival corresponding to the minimum velocity.
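    # With the illustrative numbers (first arrival 120 s, 40 s period,
    # dt = 0.5 s) and an assumed 600 km epicentral distance with
    # min_velocity = 2.4 km/s, this gives min_idx = int((120 - 20) / 0.5) = 200
    # and max_idx = ceil((600 / 2.4 + 20) / 0.5) = 540, so only samples
    # strictly between them remain selectable.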
min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
max_idx = int(math.ceil((
dist_in_km / min_velocity + minimum_period / 2.0) / dt))
time_windows.mask[:min_idx + 1] = True
time_windows.mask[max_idx:] = True
if plot:
plt.subplot2grid(grid, (8, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="TRAVELTIME ELIMINATION")
old_time_windows = time_windows.copy()
# -------------------------------------------------------------------------
# Compute sliding time shifts and correlation coefficients for time
# frames that passed the traveltime elimination stage.
# -------------------------------------------------------------------------
# Allocate arrays to collect the time dependent values.
sliding_time_shift = np.ma.zeros(npts, dtype="float32")
sliding_time_shift.mask = True
max_cc_coeff = np.ma.zeros(npts, dtype="float32")
max_cc_coeff.mask = True
for start_idx, end_idx, midpoint_idx in _window_generator(npts,
window_length):
if not min_idx < midpoint_idx < max_idx:
continue
# Slice windows. Create a copy to be able to taper without affecting
# the original time series.
data_window = data[start_idx: end_idx].copy() * taper
synthetic_window = \
synth[start_idx: end_idx].copy() * taper
# Elimination Stage 2: Skip windows that have essentially no energy
# to avoid instabilities. No windows can be picked in these.
if synthetic_window.ptp() < synth.ptp() * 0.001:
time_windows.mask[midpoint_idx] = True
continue
# Calculate the time shift. Here this is defined as the shift of the
# synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
cc = np.correlate(data_window, synthetic_window, mode="full")
time_shift = cc.argmax() - window_length + 1
# Express the time shift in fraction of the minimum period.
sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period
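        # E.g. with the illustrative dt = 0.5 s and 40 s minimum period, a raw
        # shift of +16 samples corresponds to 16 * 0.5 / 40 = 0.2 of the
        # minimum period.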
# Normalized cross correlation.
max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
(data_window ** 2).sum())
max_cc_coeff[midpoint_idx] = max_cc_value
if plot:
plt.subplot2grid(grid, (9, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="NO ENERGY IN CC WINDOW")
# Axes with the CC coeffs
plt.subplot2grid(grid, (15, 0), rowspan=4)
plt.hlines(0, xlim[0], xlim[1], color="lightgray")
plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
linestyle="--")
plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
linestyle="--")
plt.text(5, -threshold_shift - (2) * 0.03,
"threshold", verticalalignment="top",
horizontalalignment="left", color="0.15",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.plot(times, sliding_time_shift, color="#377eb8",
label="Time shift in fraction of minimum period", lw=1.5)
ylim = plt.ylim()
plt.yticks([-0.75, 0, 0.75])
plt.xticks([300, 600, 900, 1200, 1500, 1800])
plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
plt.ylim(-1.0, 1.0)
plt.xlim(xlim)
plt.gca().xaxis.set_ticklabels([])
plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
fontsize="small")
plt.subplot2grid(grid, (10, 0), rowspan=4)
plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
linestyle="--")
plt.hlines(1, xlim[0], xlim[1], color="lightgray")
plt.hlines(0, xlim[0], xlim[1], color="lightgray")
plt.text(5, threshold_correlation + (1.4) * 0.01,
"threshold", verticalalignment="bottom",
horizontalalignment="left", color="0.15",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.plot(times, max_cc_coeff, color="#4daf4a",
label="Maximum CC coefficient", lw=1.5)
plt.ylim(-0.2, 1.2)
plt.yticks([0, 0.5, 1])
plt.xticks([300, 600, 900, 1200, 1500, 1800])
plt.xlim(xlim)
plt.gca().xaxis.set_ticklabels([])
plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
fontsize="small")
# Elimination Stage 3: Mark all areas where the normalized cross
# correlation coefficient is under threshold_correlation as negative
if plot:
old_time_windows = time_windows.copy()
time_windows.mask[max_cc_coeff < threshold_correlation] = True
if plot:
plt.subplot2grid(grid, (14, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="CORRELATION COEFF THRESHOLD ELIMINATION")
    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than threshold_shift times the dominant period as
    # negative
if plot:
old_time_windows = time_windows.copy()
time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
if plot:
plt.subplot2grid(grid, (19, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="TIME SHIFT THRESHOLD ELIMINATION")
# Elimination Stage 5: Mark the area around every "travel time shift
# jump" (based on the traveltime time difference) negative. The width of
# the area is currently chosen to be a tenth of a dominant period to
# each side.
if plot:
old_time_windows = time_windows.copy()
sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
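    # With the illustrative numbers this is ceil(40 / 0.5 * 0.1) = 8 samples
    # masked on each side of a detected jump.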
indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
for index in indices:
time_windows.mask[index - sample_buffer: index + sample_buffer] = True
if plot:
plt.subplot2grid(grid, (20, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="TIME SHIFT JUMPS ELIMINATION")
# Clip both to avoid large numbers by division.
stacked = np.vstack([
np.ma.clip(synth_env, synth_env.max() * min_envelope_similarity * 0.5,
synth_env.max()),
np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
data_env.max())])
# Ratio.
ratio = stacked.min(axis=0) / stacked.max(axis=0)
# Elimination Stage 6: Make sure the amplitudes of both don't vary too
# much.
if plot:
old_time_windows = time_windows.copy()
time_windows.mask[ratio < min_envelope_similarity] = True
if plot:
plt.subplot2grid(grid, (25, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")
if plot:
plt.subplot2grid(grid, (21, 0), rowspan=4)
plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
linestyle="--")
plt.text(5, min_envelope_similarity + (2) * 0.03,
"threshold", verticalalignment="bottom",
horizontalalignment="left", color="0.15",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")])
plt.plot(times, ratio, color="#9B59B6",
label="Envelope amplitude similarity", lw=1.5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
plt.ylim(0.05, 1.05)
plt.xticks([300, 600, 900, 1200, 1500, 1800])
plt.xlim(xlim)
plt.gca().xaxis.set_ticklabels([])
plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
fontsize="small")
# First minimum window length elimination stage. This is cheap and if
# not done it can easily destabilize the peak-and-trough marching stage
# which would then have to deal with way more edge cases.
if plot:
old_time_windows = time_windows.copy()
min_length = \
min(minimum_period / dt * min_length_period, maximum_period / dt)
for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the minimum period.
if (i.stop - i.start) < min_length:
time_windows.mask[i.start: i.stop] = True
if plot:
plt.subplot2grid(grid, (26, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="MINIMUM WINDOW LENGTH ELIMINATION 1")
# -------------------------------------------------------------------------
# Peak and trough marching algorithm
# -------------------------------------------------------------------------
final_windows = []
for i in flatnotmasked_contiguous(time_windows):
# Cut respective windows.
window_npts = i.stop - i.start
synthetic_window = synth[i.start: i.stop]
data_window = data[i.start: i.stop]
# Find extrema in the data and the synthetics.
data_p, data_t = find_local_extrema(data_window)
synth_p, synth_t = find_local_extrema(synthetic_window)
window_mask = np.ones(window_npts, dtype="bool")
closest_peaks = find_closest(data_p, synth_p)
diffs = np.diff(closest_peaks)
for idx in np.where(diffs == 1)[0]:
if idx > 0:
start = synth_p[idx - 1]
else:
start = 0
if idx < (len(synth_p) - 1):
end = synth_p[idx + 1]
else:
end = -1
window_mask[start: end] = False
closest_troughs = find_closest(data_t, synth_t)
diffs = np.diff(closest_troughs)
for idx in np.where(diffs == 1)[0]:
if idx > 0:
start = synth_t[idx - 1]
else:
start = 0
if idx < (len(synth_t) - 1):
end = synth_t[idx + 1]
else:
end = -1
window_mask[start: end] = False
window_mask = np.ma.masked_array(window_mask,
mask=window_mask)
if window_mask.mask.all():
continue
for j in flatnotmasked_contiguous(window_mask):
final_windows.append((i.start + j.start, i.start + j.stop))
if plot:
old_time_windows = time_windows.copy()
time_windows.mask[:] = True
for start, stop in final_windows:
time_windows.mask[start:stop] = False
if plot:
plt.subplot2grid(grid, (27, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="PEAK AND TROUGH MARCHING ELIMINATION")
# Loop through all the time windows, remove windows not satisfying the
# minimum number of peaks and troughs per window. Acts mainly as a
# safety guard.
old_time_windows = time_windows.copy()
for i in flatnotmasked_contiguous(old_time_windows):
synthetic_window = synth[i.start: i.stop]
data_window = data[i.start: i.stop]
data_p, data_t = find_local_extrema(data_window)
synth_p, synth_t = find_local_extrema(synthetic_window)
if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
min_peaks_troughs:
time_windows.mask[i.start: i.stop] = True
if plot:
plt.subplot2grid(grid, (28, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="PEAK/TROUGH COUNT ELIMINATION")
# Second minimum window length elimination stage.
if plot:
old_time_windows = time_windows.copy()
min_length = \
min(minimum_period / dt * min_length_period, maximum_period / dt)
for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the minimum period.
if (i.stop - i.start) < min_length:
time_windows.mask[i.start: i.stop] = True
if plot:
plt.subplot2grid(grid, (29, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="MINIMUM WINDOW LENGTH ELIMINATION 2")
# Final step, eliminating windows with little energy.
final_windows = []
for j in flatnotmasked_contiguous(time_windows):
# Again assert a certain minimal length.
if (j.stop - j.start) < min_length:
continue
# Compare the energy in the data window and the synthetic window.
data_energy = (data[j.start: j.stop] ** 2).sum()
synth_energy = (synth[j.start: j.stop] ** 2).sum()
energies = sorted([data_energy, synth_energy])
if energies[1] > max_energy_ratio * energies[0]:
if verbose:
_log_window_selection(
data_trace.id,
"Deselecting window due to energy ratio between "
"data and synthetics.")
continue
# Check that amplitudes in the data are above the noise
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above the "
                    "signal to noise ratio.")
            continue
        final_windows.append((j.start, j.stop))
if plot:
old_time_windows = time_windows.copy()
time_windows.mask[:] = True
for start, stop in final_windows:
time_windows.mask[start:stop] = False
if plot:
plt.subplot2grid(grid, (30, 0), rowspan=1)
_plot_mask(time_windows, old_time_windows,
name="LITTLE ENERGY ELIMINATION")
if verbose:
_log_window_selection(
data_trace.id,
"Done, Selected %i window(s)" % len(final_windows))
# Final step is to convert the index value windows to actual times.
windows = []
for start, stop in final_windows:
start = data_starttime + start * data_delta
stop = data_starttime + stop * data_delta
windows.append((start, stop))
if plot:
# Plot the final windows to the data axes.
import matplotlib.transforms as mtransforms # NOQA
ax = data_plot
trans = mtransforms.blended_transform_factory(ax.transData,
ax.transAxes)
for start, stop in final_windows:
ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
facecolor="#CDDC39", alpha=0.5, transform=trans)
plt.show()
return windows
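# Minimal usage sketch (hypothetical trace names and coordinates; both traces
# are assumed to be pre-processed and to share the same sampling):
#     windows = select_windows(
#         obs_tr, syn_tr,
#         event_latitude=10.0, event_longitude=20.0, event_depth_in_km=15.0,
#         station_latitude=30.0, station_longitude=40.0,
#         minimum_period=40.0, maximum_period=100.0,
#         verbose=True, plot=False)
#     # -> list of (start, end) UTCDateTime tuples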
if __name__ == '__main__':
import doctest
doctest.testmod()
|
krischer/LASIF
|
lasif/window_selection.py
|
Python
|
gpl-3.0
| 36,635 | 0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, date
from flask import session
from wtforms.ext.dateutil.fields import DateTimeField, DateField
from wtforms.fields.core import SelectMultipleField, StringField, BooleanField, RadioField, IntegerField
from wtforms.validators import DataRequired, InputRequired, NumberRange, ValidationError
from wtforms_components import TimeField
from wtforms.widgets.core import HiddenInput
from wtforms.fields.simple import TextAreaField, SubmitField
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import IndicoQuerySelectMultipleCheckboxField, PrincipalField
from indico.web.forms.validators import IndicoEmail, UsedIf
from indico.modules.rb.models.reservations import RepeatMapping, RepeatFrequency
from indico.util.i18n import _
class BookingSearchForm(IndicoForm):
room_ids = SelectMultipleField('Rooms', [DataRequired()], coerce=int)
start_date = DateField('Start Date', [InputRequired()], parse_kwargs={'dayfirst': True})
start_time = TimeField('Start Time', [InputRequired()])
end_date = DateField('End Date', [InputRequired()], parse_kwargs={'dayfirst': True})
end_time = TimeField('End Time', [InputRequired()])
booked_for_name = StringField('Booked For Name')
reason = StringField('Reason')
is_only_mine = BooleanField('Only Mine')
is_only_my_rooms = BooleanField('Only My Rooms')
is_only_confirmed_bookings = BooleanField('Only Confirmed Bookings')
is_only_pending_bookings = BooleanField('Only Prebookings')
is_rejected = BooleanField('Is Rejected')
is_cancelled = BooleanField('Is Cancelled')
is_archived = BooleanField('Is Archived')
uses_vc = BooleanField(_('Uses Videoconference'))
needs_vc_assistance = BooleanField(_('Videoconference Setup Assistance'))
needs_assistance = BooleanField('General Assistance')
@generated_data
def start_dt(self):
return datetime.combine(self.start_date.data, self.start_time.data)
@generated_data
def end_dt(self):
return datetime.combine(self.end_date.data, self.end_time.data)
class NewBookingFormBase(IndicoForm):
start_dt = DateTimeField('Start date', validators=[InputRequired()], parse_kwargs={'dayfirst': True},
display_format='%d/%m/%Y %H:%M')
end_dt = DateTimeField('End date', validators=[InputRequired()], parse_kwargs={'dayfirst': True},
display_format='%d/%m/%Y %H:%M')
repeat_frequency = RadioField('Repeat frequency', coerce=int, default=0, validators=[InputRequired()],
choices=[(0, _(u'Once')), (1, _(u'Daily')), (2, _(u'Weekly')), (3, _(u'Monthly'))])
repeat_interval = IntegerField('Repeat interval', validators=[NumberRange(0, 3)], default=0)
def validate_repeat_interval(self, field):
if (self.repeat_frequency.data, self.repeat_interval.data) not in RepeatMapping.mapping:
raise ValidationError('Invalid repeat step')
def validate_start_dt(self, field):
if field.data != field.object_data and field.data.date() < date.today() and not session.user.is_admin:
raise ValidationError(_(u'The start time cannot be in the past.'))
def validate_end_dt(self, field):
start_dt = self.start_dt.data
end_dt = self.end_dt.data
if start_dt.time() >= end_dt.time():
raise ValidationError('Invalid times')
if self.repeat_frequency.data == RepeatFrequency.NEVER:
field.data = datetime.combine(start_dt.date(), field.data.time())
elif start_dt.date() >= end_dt.date():
raise ValidationError('Invalid period')
class NewBookingCriteriaForm(NewBookingFormBase):
room_ids = SelectMultipleField('Rooms', [DataRequired()], coerce=int)
flexible_dates_range = RadioField('Flexible days', coerce=int, default=0,
choices=[(0, _(u'Exact')),
(1, '±{}'.format(_(u'1 day'))),
(2, '±{}'.format(_(u'2 days'))),
(3, '±{}'.format(_(u'3 days')))])
def validate_flexible_dates_range(self, field):
if self.repeat_frequency.data == RepeatFrequency.DAY:
field.data = 0
class NewBookingPeriodForm(NewBookingFormBase):
room_id = IntegerField('Room', [DataRequired()], widget=HiddenInput())
class NewBookingConfirmForm(NewBookingPeriodForm):
booked_for_user = PrincipalField(_(u'User'), [DataRequired()], allow_external=True)
contact_email = StringField(_(u'Email'), [InputRequired(), IndicoEmail(multi=True)])
contact_phone = StringField(_(u'Telephone'))
booking_reason = TextAreaField(_(u'Reason'), [DataRequired()])
uses_vc = BooleanField(_(u'I will use videoconference equipment'))
used_equipment = IndicoQuerySelectMultipleCheckboxField(_(u'VC equipment'), get_label=lambda x: x.name)
needs_vc_assistance = BooleanField(_(u'Request assistance for the startup of the videoconference session. '
u'This support is usually performed remotely.'))
needs_assistance = BooleanField(_(u'Request personal assistance for meeting startup'))
submit_book = SubmitField(_(u'Create booking'))
submit_prebook = SubmitField(_(u'Create pre-booking'))
def validate_used_equipment(self, field):
if field.data and not self.uses_vc.data:
raise ValidationError(_(u'Videoconference equipment is not used.'))
elif not field.data and self.uses_vc.data:
raise ValidationError(_(u'You need to select some Videoconference equipment'))
def validate_needs_vc_assistance(self, field):
if field.data and not self.uses_vc.data:
raise ValidationError(_(u'Videoconference equipment is not used.'))
class NewBookingSimpleForm(NewBookingConfirmForm):
submit_check = SubmitField(_(u'Check conflicts'))
booking_reason = TextAreaField(_(u'Reason'), [UsedIf(lambda form, field: not form.submit_check.data),
DataRequired()])
class ModifyBookingForm(NewBookingSimpleForm):
submit_update = SubmitField(_(u'Update booking'))
def __init__(self, *args, **kwargs):
self._old_start_dt = kwargs.pop('old_start_dt')
self._old_end_dt = kwargs.pop('old_end_dt')
super(ModifyBookingForm, self).__init__(*args, **kwargs)
del self.room_id
del self.submit_book
del self.submit_prebook
def validate_start_dt(self, field):
super(NewBookingSimpleForm, self).validate_start_dt(field)
new_start_dt = field.data
now = datetime.now()
if self._old_start_dt < now and new_start_dt != self._old_start_dt and not session.user.is_admin:
raise ValidationError(_(u"The start time is in the past and cannot be modified."))
if self._old_start_dt >= now and new_start_dt < now and not session.user.is_admin:
raise ValidationError(_(u'The start time cannot be moved into the past.'))
def validate_end_dt(self, field):
super(NewBookingSimpleForm, self).validate_end_dt(field)
new_end_dt = field.data
now = datetime.now()
if self._old_end_dt < now and new_end_dt != self._old_end_dt and not session.user.is_admin:
raise ValidationError(_(u"The end time is in the past and cannot be modified."))
if self._old_end_dt >= now and new_end_dt < now and not session.user.is_admin:
raise ValidationError(_(u'The end time cannot be moved into the past.'))
|
belokop/indico_bare
|
indico/modules/rb/forms/reservations.py
|
Python
|
gpl-3.0
| 8,391 | 0.003814 |
class RecordingException(Exception):
pass
class Recording(object):
def __init__(self, stream, lenght):
import command
self.name = None
self.time = None
self.lenght = lenght
self.path = None
self._temp_path = None
self._processing_list = []
if isinstance(stream, str):
self._stream = stream
else:
raise RecordingException("Wrong stream type: " + str(type(stream)))
self._set_attrs()
self._processing_list.append(command.Command(self._process))
def cut(self, start, stop):
import command
if start > stop:
raise RecordingException("Invalid start and stop args: " + str(start) + " " + str(stop))
self._processing_list.append(command.Command(self._cut, (start, stop)))
def remove(self):
import os
if os.path.isfile(self.path):
os.unlink(self.path)
def save(self, path):
import os
if not os.path.isdir(path):
raise RecordingException("Input path does not exist or is not a folder: " + path)
self.path = os.path.join(path, self.name)
for command in self._processing_list:
command.execute()
self._processing_list = []
def _set_attrs(self):
import tempfile
import datetime
import timezone
import os
self.time = datetime.datetime.utcnow()
self.time = self.time.replace(tzinfo=timezone.utc)
self.name = self.time.strftime("%Y%m%d%H%M%S%f.mp4")
name_h264 = self.time.strftime("%Y%m%d%H%M%S%f.h264")
self.path = os.path.join(tempfile.gettempdir(), self.name)
self._temp_path = os.path.join(tempfile.gettempdir(), name_h264)
def _process(self):
import os
try:
with open(self._temp_path, 'wb') as out:
out.write(self._stream)
except IOError as error:
raise RecordingException(str(error))
try:
self._convert(self._temp_path, self.path)
except RecordingException as error:
raise
finally:
os.unlink(self._temp_path)
def _convert(self, src, dst):
import subprocess
cmd = "MP4Box -fps 30 -add " + src + " " + dst
try:
ret = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as error:
raise RecordingException(str(error))
if ret != 0:
raise RecordingException("Convertion to mp4 failed on " + src)
def _cut(self, start, stop):
import subprocess
cmd = "MP4Box -splitx " + str(start) + ":" + str(stop) + " " + self.path + " -out " + self.path
try:
ret = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as error:
raise RecordingException(str(error))
if ret != 0:
raise RecordingException("Cannot cut recording: " + self.path)
self.lenght = stop - start
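# Hypothetical usage sketch (the stream bytes, length and target folder are
# placeholders):
#     rec = Recording(h264_bytes, 30)    # raw H.264 stream, 30 s long
#     rec.cut(5, 15)                     # queue a cut keeping seconds 5-15
#     rec.save('/tmp/recordings')        # convert with MP4Box, then cut
#     rec.remove()                       # delete the resulting .mp4 again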
|
rizen1892/SmartHomeSolutions-Web
|
app/recording.py
|
Python
|
gpl-2.0
| 3,094 | 0.001616 |
'''
Created by auto_sdk on 2015.11.10
'''
from top.api.base import RestApi
class TradePostageUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.post_fee = None
self.tid = None
def getapiname(self):
return 'taobao.trade.postage.update'
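# Hypothetical usage sketch, assuming the standard TOP SDK helpers
# (appkey/secret/session values are placeholders):
#     req = TradePostageUpdateRequest()
#     req.set_app_info(top.appinfo("your-appkey", "your-secret"))
#     req.tid = 123456789
#     req.post_fee = "10.00"
#     resp = req.getResponse("session-key")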
|
colaftc/webtool
|
top/api/rest/TradePostageUpdateRequest.py
|
Python
|
mit
| 327 | 0.030581 |
"""
A helper class for working with 2D bins of goodness-of-fit as a function of log(SNR).
"""
import numpy as np
class DeltaFSNRBins(object):
NUM_SNR_BINS = 50
NUM_DELTA_F_BINS = 50
LOG_SNR_RANGE = 6.
LOG_SNR_OFFSET = 2.
DELTA_F_RANGE = 1.
DELTA_F_OFFSET = 0.
def __init__(self):
pass
def snr_to_bin(self, snr):
if snr <= 0:
return 0
return self.log_snr_to_bin(np.log(snr))
def log_snr_to_bin(self, log_snr):
# type: (np.ndarray) -> np.ndarray
return (np.clip((log_snr + self.LOG_SNR_OFFSET) * self.NUM_SNR_BINS / self.LOG_SNR_RANGE,
0, self.NUM_SNR_BINS - 1)).astype(np.int)
def bin_to_log_snr(self, bin_num):
# type: (np.ndarray) -> np.ndarray
return bin_num * self.LOG_SNR_RANGE / self.NUM_SNR_BINS - self.LOG_SNR_OFFSET
def delta_f_to_bin(self, delta_f):
# type: (np.ndarray) -> np.ndarray
return (np.clip((delta_f + self.DELTA_F_OFFSET) * self.NUM_DELTA_F_BINS / self.DELTA_F_RANGE,
0, self.NUM_DELTA_F_BINS - 1)).astype(np.int)
def bin_to_delta_f(self, bin_num):
# type: (np.ndarray) -> np.ndarray
return bin_num * self.DELTA_F_RANGE / self.NUM_DELTA_F_BINS - self.DELTA_F_OFFSET
def get_empty_histogram_array(self):
return np.zeros(shape=(3, self.NUM_SNR_BINS, self.NUM_DELTA_F_BINS))
def get_log_snr_axis(self):
return self.bin_to_log_snr(np.arange(self.NUM_SNR_BINS))
def get_delta_f_axis(self):
return self.bin_to_delta_f(np.arange(self.NUM_DELTA_F_BINS))
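# Minimal usage sketch (illustrative values):
#     bins = DeltaFSNRBins()
#     ar = bins.get_empty_histogram_array()    # shape (3, 50, 50)
#     i = bins.snr_to_bin(2.5)                 # -> 24
#     j = bins.delta_f_to_bin(0.3)             # -> 15
#     ar[0, i, j] += 1                         # accumulate the 2D histogram
#     x = bins.get_log_snr_axis()              # log(SNR) value of each bin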
|
yishayv/lyacorr
|
physics_functions/delta_f_snr_bins.py
|
Python
|
mit
| 1,610 | 0.003106 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import json
from unittest.mock import patch
from apps.prepopulate.app_populate import AppPopulateCommand
from superdesk.tests import TestCase
from superdesk import get_resource_service
from superdesk.vocabularies import VocabulariesService
from superdesk.errors import SuperdeskApiError
class VocabulariesPopulateTest(TestCase):
def setUp(self):
self.filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'vocabularies.json')
self.json_data = [
{'_id': 'categories',
'unique_field': 'qcode',
'items': [
{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'T', 'is_active': False}
]},
{'_id': 'newsvalue',
'items': [
{'name': '1', 'value': '1', 'is_active': True},
{'name': '2', 'value': '2', 'is_active': True},
{'name': '3', 'value': '3', 'is_active': False}
]}
]
with open(self.filename, 'w+') as file:
json.dump(self.json_data, file)
def test_populate_vocabularies(self):
cmd = AppPopulateCommand()
cmd.run(self.filename)
service = get_resource_service('vocabularies')
for item in self.json_data:
data = service.find_one(_id=item['_id'], req=None)
self.assertEqual(data['_id'], item['_id'])
self.assertListEqual(data['items'], item['items'])
def test_check_uniqueness(self):
items = [{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'a', 'is_active': True}]
with self.assertRaises(SuperdeskApiError):
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_check_uniqueness_active_only(self):
items = [{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'A', 'is_active': False}]
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_check_value_of_unique_field(self):
items = [{'name': 'National', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'A', 'is_active': True}]
with self.assertRaises(SuperdeskApiError):
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_get_rightsinfo(self):
service = get_resource_service('vocabularies')
vocab = {
'_id': 'rightsinfo',
'items': [
{
'is_active': True,
'name': 'default',
'copyrightHolder': 'default holder',
'copyrightNotice': 'default notice',
'usageTerms': 'default terms'
},
{
'is_active': True,
'name': 'foo',
'copyrightHolder': 'foo holder',
'copyrightNotice': 'foo notice',
'usageTerms': 'foo terms'
},
]
}
with patch.object(service, 'find_one', return_value=vocab):
info = service.get_rightsinfo({})
self.assertEqual('default holder', info['copyrightholder'])
self.assertEqual('default notice', info['copyrightnotice'])
self.assertEqual('default terms', info['usageterms'])
info = service.get_rightsinfo({'source': 'foo'})
self.assertEqual('foo holder', info['copyrightholder'])
self.assertEqual('foo notice', info['copyrightnotice'])
self.assertEqual('foo terms', info['usageterms'])
def test_get_locale_vocabulary(self):
items = [
{'is_active': True, 'name': 'FIXME1', 'qcode': 'f', 'subject': '',
'translations': {'name': {'fr': 'FIXME1-fr', 'es': 'FIXME1-es'}}},
{'is_active': True, 'name': 'FIXME2', 'qcode': 'f', 'subject': '',
'translations': {'name': {'fr': 'FIXME2-fr', 'es': 'FIXME2-es'}}}
]
result = VocabulariesService().get_locale_vocabulary(items, 'fr')
self.assertEqual(result[0]['name'], 'FIXME1-fr')
self.assertEqual(result[1]['name'], 'FIXME2-fr')
def tearDown(self):
os.remove(self.filename)
|
mdhaman/superdesk-core
|
tests/vocabularies_tests.py
|
Python
|
agpl-3.0
| 4,647 | 0.000215 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Douglas S. Blank
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# $Id$
#
#
"""
Display references for any object
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.utils.alive import probably_alive
from gramps.gen.ggettext import gettext as _
from gramps.gen.datehandler import displayer
from gramps.gen.config import config
def run(database, document, date):
"""
Display people probably alive and their ages on a particular date.
"""
# setup the simple access functions
sdb = SimpleAccess(database)
sdoc = SimpleDoc(document)
stab = QuickTable(sdb)
if not date.get_valid():
sdoc.paragraph("Date is not a valid date.")
return
# display the title
if date.get_day_valid():
sdoc.title(_("People probably alive and their ages the %s") %
displayer.display(date))
else:
sdoc.title(_("People probably alive and their ages on %s") %
displayer.display(date))
stab.columns(_("Person"), _("Age")) # Actual Date makes column unicode
matches = 0
for person in sdb.all_people():
alive, birth, death, explain, relative = \
probably_alive(person, database, date, return_range=True)
        # Don't show people who are probably alive but whose age cannot be determined:
if alive and birth:
diff_span = (date - birth)
stab.row(person, str(diff_span))
stab.row_sort_val(1, int(diff_span))
matches += 1
document.has_data = matches > 0
sdoc.paragraph(_("\n%d matches.\n") % matches)
stab.write(sdoc)
sdoc.paragraph("")
def get_event_date_from_ref(database, ref):
date = None
if ref:
handle = ref.get_reference_handle()
if handle:
event = database.get_event_from_handle(handle)
if event:
date = event.get_date_object()
return date
|
arunkgupta/gramps
|
gramps/plugins/quickview/ageondate.py
|
Python
|
gpl-2.0
| 2,780 | 0.002518 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-25 00:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facebook_token', models.CharField(max_length=1000, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
ortutay/23andme-phenotypes-hackathon
|
my_app/my_app/migrations/0001_initial.py
|
Python
|
mit
| 829 | 0.003619 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**get_package_path.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Write given package path to stdout.
**Others:**
"""
from __future__ import unicode_literals
import argparse
import sys
import foundations.decorators
import foundations.verbose
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["LOGGER", "get_package_path", "get_command_line_arguments", "main"]
LOGGER = foundations.verbose.install_logger()
foundations.verbose.get_logging_console_handler()
foundations.verbose.set_verbosity_level(3)
def get_package_path(package):
"""
Writes given package path to stdout.
:param package: Package to retrieve the path.
:type package: unicode
:return: Definition success.
:rtype: bool
"""
package = __import__(package)
sys.stdout.write(package.__path__[0])
return True
def get_command_line_arguments():
"""
Retrieves command line arguments.
:return: Namespace.
:rtype: Namespace
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-h",
"--help",
action="help",
help="'Displays this help message and exit.'")
parser.add_argument("-p",
"--package",
type=unicode,
dest="package",
help="'Package to retrieve the path.'")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
@foundations.decorators.system_exit
def main():
"""
Starts the Application.
:return: Definition success.
:rtype: bool
"""
args = get_command_line_arguments()
return get_package_path(args.package)
if __name__ == "__main__":
main()
|
KelSolaar/sIBL_GUI
|
utilities/get_package_path.py
|
Python
|
gpl-3.0
| 2,062 | 0 |
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="fill", parent_name="volume.slices.z", **kwargs):
super(FillValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/volume/slices/z/_fill.py
|
Python
|
mit
| 517 | 0.001934 |
"""
Demonstrates how the bz2 module may be used to create a compressed object
which represents a bitarray.
"""
import bz2
from bitarray import bitarray
def compress(ba):
"""
Given a bitarray, return an object which represents all information
    within the bitarray in a compressed form.
    The function `decompress` can be used to restore the bitarray from the
    compressed object.
"""
assert isinstance(ba, bitarray)
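    # the bit length is returned alongside the compressed bytes so decompress() can strip the pad bits added by tobytes()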
return ba.length(), bz2.compress(ba.tobytes()), ba.endian()
def decompress(obj):
"""
    Given an object (created by `compress`), return a copy of the
    original bitarray.
"""
n, data, endian = obj
res = bitarray(endian=endian)
res.frombytes(bz2.decompress(data))
del res[n:]
return res
if __name__ == '__main__':
a = bitarray(12345)
a.setall(0)
a[::10] = True
c = compress(a)
print(c)
b = decompress(c)
    assert a == b and a.endian() == b.endian()
|
brianhelba/pylibtiff
|
libtiff/bitarray-a1646c0/examples/compress.py
|
Python
|
bsd-3-clause
| 953 | 0 |
import pingo
from time import sleep
rpi = pingo.rpi.RaspberryPi()
# A B C D E F G dp
led_locations = [11, 7, 21, 24, 26, 13, 15, 19]
pins = [rpi.pins[loc] for loc in led_locations[:6]]
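# only the first six segment pins (A-F) are driven in the chase animation below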
for pin in pins:
pin.mode = pingo.OUT
pin.low()
while True:
for pin in pins:
pin.high()
sleep(.04)
pin.low()
|
garoa/pingo
|
pingo/examples/rpi_examples/display7_anim.py
|
Python
|
mit
| 363 | 0 |
"""
This convenience module is to hard-code some example FASTA files for testing
and development.
"""
|
scholer/cadnano2.5
|
cadnano/extras/fasta/__init__.py
|
Python
|
mit
| 102 | 0 |
#!/usr/bin/env python
import re,os,glob,sys,gc,ctypes,time
import numpy as np
try:import ROOT
except:print "Error!! pyroot didn't compile! please recompile your root!"
from array import array
#from pylab import plot,show,subplot
from bcmconst import *
from runinfo import getpklpath,runinfo,zload,zdump
try:from scipy.signal import butter,freqz,lfilter
except Exception as err:
print err
print "sorry no scipy module found from your computer,it is needed to filter the bpm raw data infomation, please install it first"
#low pass filter added for raw ADC signal
def lowfilter(raw,cutfreq):
n=4
fs=960.015 #sample rate,here is helicity rate
fc=2*cutfreq/fs #Normalize LPF cutoff frequency to Nyquist frequency
if fc>=1:return raw
normok=False
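    # lower the filter order step by step until butter() returns matching-length coefficient arrays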
while not normok:
b,a=butter(n,fc)
if len(b)==len(a):
normok=True
break
n-=1
if n<0:
print "filter failed!you only have %i events for bpm, that's not enough for using filter!will use raw data instead!"%len(raw)
return raw
#w,h=freqz(b,a,n)
sf=lfilter(b,a,raw)
return np.float32(sf)
#similar as lowfilter, but use average instead
def signalave(raw,avefreq):
fs=960.015 #trigger rate,here is helicity rate
if 2*avefreq>=fs:return raw
aveevents=int(fs/avefreq)
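    # running-average window length in samples (helicity sample rate / requested frequency)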
Vave=avestack(aveevents)
rawlen=len(raw)
averaw=np.zeros(rawlen,dtype=np.float32)
for i in range(rawlen):
Vave.push(raw[i])
averaw[i]=Vave.ave()
del Vave
return averaw
#get the total ram of computer
def getmemory():
try:
for line in open("/proc/meminfo","r"):
if "MemTotal" in line:
return int(re.split("[:kB]","".join(re.split("\s",line)))[1])*1000
except:return 2054132000
#same usage as Cavestack, but implemented with numpy instead of the C class
class avestack:
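    """Fixed-size circular buffer that keeps a running average of the most recent samples."""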
def __init__(self,size):
self.size=size
        self.buf=np.zeros(size)
self.counter=0
self.point=0
def push(self,data):
self.buf[self.point]=data
self.point=(self.point+1)%self.size
self.counter+=1
def ave(self):
if self.counter<self.size:
            return np.mean(self.buf[:self.counter])
        else:
            return np.mean(self.buf)
#get raw data from the rootfile, save it to pkl files and return it as a dict; the bpm signal is passed through a low-pass filter
#filter1 is used to get the averaged position, filter2 to get a raw position in which the slow raster signal is still visible
class decode:
def __init__(self,runpath,treename="T",firstevent=-1,lastevent=-1,forceredecode=0,buildtree=0,forcefastbus=False):
self.info=runinfo()
self.bcmconst=bcmconst()
self.runpath=os.path.abspath(runpath)
self.treename=treename
self.firstevent=firstevent
self.lastevent=lastevent
self.forceredecode=forceredecode
self.redecode=True
self.buildtree=buildtree
self.forcefastbus=forcefastbus
self.rootfilepath,self.runfilename=os.path.split(self.runpath)
self.run=int(re.split("[_.]",self.runfilename)[1])
if not self.info.ifhapavail(self.run) or self.forcefastbus:self.fastbus=True
else:self.fastbus=False
self.arm="L" if self.run<20000 else "R"
self.pp=getpklpath(self.rootfilepath)
self.pklprefix="raw"
self.pklpathn=[["rbpm","curr","hapevent","sbpm","ssbpm","fbpm","bpmavail","sbpmavail","fbpmavail","hapraster"],["raster","clock","event","fbbpm"]]
self.pkldecoden=["rbpm","curr","hapevent","hapraster","raster","clock","event","fbbpm"]
#decide if decode
#self.manualset=False
self.pklon={}
self.setpklon(False)
def setpklon(self,value):
for m in self.pklpathn:
for n in m:
self.pklon[n]=value
def getrootfilefamily(self):
self.rootfiles=glob.glob(os.path.join(self.rootfilepath,self.runfilename.replace(".root","_*.root")))
self.rootfiles.append(self.runpath)
self.rootfiles.sort()
print "rootfile family",self.rootfiles
#check if needed redecode
def checkifredecode(self):
#check if decoded file is fastbus or not
fbbpmpkl=self.pp.getpath(self.pklprefix,"fbbpm",self.run)
fbbpmpkl2=self.pp.getpath(self.pklprefix,"fbbpm",self.run,1)
if self.forcefastbus:
if not os.path.exists(fbbpmpkl):
print "set forceredecode to 1 since forcefastbus"
self.forceredecode=1
elif not self.fastbus:
if os.path.exists(fbbpmpkl):
if os.path.exists(fbbpmpkl2):
print "set forceredecode to 1 since no fastbus info"
self.forceredecode=1
try:os.remove(fbbpmpkl2)
except:raise Exception("sorry can not remove file %s, please check if you have permission in this directory"%fbbpmpkl2)
elif not os.path.exists(self.pp.getpath(self.pklprefix,"rbpm",self.run,1)):
print "set forceredecode to 1 since no bpm info"
self.forceredecode=1
#check event
eventtolerate=100
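        # tolerated difference (in event number) between the requested range and the range stored in the cached pkls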
if not self.fastbus:
print "use happex, set fbbpm to False"
self.pklon["fbbpm"]=False
if not self.forceredecode:
hapeventpkl=self.pp.getpath(self.pklprefix,"hapevent",self.run)
pklonbak=self.pklon
if os.path.exists(hapeventpkl):
hapevent=zload(hapeventpkl)
print "rootfile event:%i-%i,pkl hapevent:%i-%i"%(self.firstevent,self.lastevent,hapevent.min(),hapevent.max())
if (self.firstevent<0 or hapevent.min()-self.firstevent<eventtolerate) and (self.lastevent<0 or self.lastevent-hapevent.max()<eventtolerate):
for key in self.pklpathn[0]:
pklpath=self.pp.getpath(self.pklprefix,key,self.run)
if os.path.exists(pklpath):
datas=zload(pklpath)
Ndatas=len(datas)
if Ndatas<10:Ndatas=len(datas[0])
if Ndatas!=len(hapevent):
print "not matched events, force replay"
self.forceredecode=1
self.pklon=pklonbak
del datas
break
print "file %s exists, set %s to False"%(pklpath,key)
self.pklon[key]=False
else:
print "file %s not exists, set %s to True"%(pklpath,key)
else:
print "events not enough in happex pkl files,will set all happex keys to true"
del hapevent
eventpkl=self.pp.getpath(self.pklprefix,"event",self.run)
if os.path.exists(eventpkl):
event=zload(eventpkl)
print "rootfile event:%i-%i,pkl event:%i-%i"%(self.firstevent,self.lastevent,event.min(),event.max())
if (self.firstevent<0 or event.min()-self.firstevent<eventtolerate) and (self.lastevent<0 or self.lastevent-event.max()<eventtolerate):
for key in self.pklpathn[1]:
pklpath=self.pp.getpath(self.pklprefix,key,self.run)
if os.path.exists(pklpath):
datas=zload(pklpath)
Ndatas=len(datas)
if Ndatas<10:Ndatas=len(datas[0])
if Ndatas!=len(event):
print "not matched events, force replay"
self.forceredecode=1
self.pklon=pklonbak
del datas
break
print "file %s exists, set %s to False"%(pklpath,key)
self.pklon[key]=False
else:
print "file %s not exists, set %s to True"%(pklpath,key)
else:
print "events not enough in normal daq pkl files,will set all normal daq keys to true"
self.redecode=any([self.pklon[n] for n in self.pkldecoden])
print self.pklon,self.redecode
#decode from rootfile,leaves should be in self.pklpathn
def decodefromrootfile(self):
if not any(self.pklon.values()):return True
ROOT.gROOT.SetBatch(True)
#raw leaves
print "decoding from rootfile now..."
eventleaf="fEvtHdr.fEvtNum" #event number
thappexprefix="happex.%s."%self.arm
numringleaf="%snumring"%thappexprefix
bpmrawleaf,fbbpmrawleaf,hapbcmrawleaf,scalerbcmrawleaf=[],[],[0,0],[0,0]
for i in range(8): #for bpm
whichbpm="A" if i<4 else "B"
if self.fastbus:
bpmrawleaf.append("%surb.BPM%s.rawcur.%i"%(self.arm,whichbpm,i%4+1))
else:bpmrawleaf.append("%sBPM%s.rawcur.%i"%(thappexprefix,whichbpm,i%4+1))
hapbcmrawleaf[0]="%sbcm_up"%thappexprefix #for bcm
hapbcmrawleaf[1]="%sbcm_down"%thappexprefix
scalerbcmrawleaf[0]="evleft_bcm_upr" if self.run<20000 else "evright_bcm_upr"
scalerbcmrawleaf[1]="evleft_bcm_downr" if self.run<20000 else "evright_bcm_downr"
#bcm const
hapbcmconst=[self.bcmconst.getconst(self.run,"happex",a) for a in ["up","down"]]
hapbcmavail=[True if a else False for a in hapbcmconst]
hapbcmavailall=any(hapbcmavail)
scalerbcmconst=[self.bcmconst.getconst(self.run,"sis3800",a,"slow") for a in ["up","down"]]
scalerbcmavail=[True if a else False for a in scalerbcmconst]
#raster
rasterrawleaf=["%srb.Raster.rawcur.x"%self.arm,\
"%srb.Raster.rawcur.y"%self.arm,\
"%srb.Raster.rawcurSL.x"%self.arm,\
"%srb.Raster.rawcurSL.y"%self.arm]
haprasterrawleaf=["%sRaster.rawcur.x"%thappexprefix,\
"%sRaster.rawcur.y"%thappexprefix,\
"%sRaster.rawcurSL.x"%thappexprefix,\
"%sRaster.rawcurSL.y"%thappexprefix]
clkrawleaf=self.arm+"clk.fastclk" #for clock
#get total events
print "getting total events in rootfiles"
rootfiles,trees,events,hapevents={},{},{},{}
ff=-1
for runpath in self.rootfiles:
rootfiles[runpath]=ROOT.TFile(runpath,"READ")
if rootfiles[runpath].IsZombie():
print "Error! file %s abnormal! please redecode it!"%runpath
if runpath==self.rootfiles[0]:return False
else:
rootfiles[runpath].Close()
rootfiles[runpath]=False
continue
try:
trees[runpath]=rootfiles[runpath].Get(self.treename)
events[runpath]=trees[runpath].GetEntries()
except:
print "Error! file %s abnormal! please redecode it!"%runpath
continue
#get happex total entries
ff+=1
if any([self.pklon[v] for v in ["rbpm","curr","hapevent","hapraster"]]):
try:
if self.pklon["curr"]:trees[runpath].Draw(hapbcmrawleaf[0]+">>h%i"%ff)
elif self.pklon["rbpm"] and not self.fastbus:trees[runpath].Draw(bpmrawleaf[0]+">>h%i"%ff)
elif self.pklon["hapraster"]:trees[runpath].Draw(haprasterrawleaf[0]+">>h%i"%ff)
h1=ROOT.gPad.GetPrimitive("h%i"%ff)
hapevents[runpath]=int(h1.GetEntries())
del h1
except:
print "Error!! no leaf %s in your rootfile!!"%bpmrawleaf[0]
for l in self.pkldecoden:
self.pklon[l]=False
if not os.path.exists(self.pp.getpath(self.pklprefix,"rbpm",self.run)):
raise Exception("Error!! no bpm information avail for both rootfile and pkls!!!")
return
totevents=sum(events.values())
tothapevents=sum(hapevents.values())
#init raw array
bpmraw,fbbpmraw,rasterraw,haprasterraw=[],[],[],[]
for i in range(8):
bpmraw.append(np.zeros(tothapevents,np.int32))
fbbpmraw.append(np.zeros(totevents,np.int32))
for i in range(4):
rasterraw.append(np.zeros(totevents,np.int32))
haprasterraw.append(np.zeros(tothapevents,np.int32))
hapevent=np.zeros(tothapevents,np.uint32)
curr=np.zeros(tothapevents,np.float32)
event=np.zeros(totevents,np.uint32)
clkraw=np.zeros(totevents,np.int32)
ehap,enorm=0,0
#decode
for runpath in self.rootfiles:
print "decoding raw data from rootfile %s..."%runpath
try:leventleaf=trees[runpath].GetLeaf(eventleaf)
except:
raise Exception("Error!! no leaf %s in your rootfile!!"%eventleaf)
if self.pklon["rbpm"]:
try:
if self.fastbus:
lbpmrawleaf=[trees[runpath].GetLeaf(l) for l in bpmrawleaf]
else:
bpmrawbranch=[trees[runpath].GetBranch(l) for l in bpmrawleaf]
lbpmrawleaf=[b.GetLeaf("data") for b in bpmrawbranch]
lbpmrawleaf[0].GetNdata() #check if leaf available
except:
print "Error!! no leaf %s in your rootfile!!"%bpmrawleaf[0]
for l in self.pkldecoden:
self.pklon[l]=False
if not os.path.exists(self.pp.getpath(self.pklprefix,"rbpm",self.run)):
raise Exception("Error!! no bpm information avail for both rootfile and pkls!!!")
return
if self.pklon["curr"]:
if hapbcmavailall:
try:
hapbcmrawbranch=[trees[runpath].GetBranch(l) for l in hapbcmrawleaf]
lhapbcmrawleaf=[b.GetLeaf("data") for b in hapbcmrawbranch]
lhapbcmrawleaf[0].GetNdata()
except:
print "Error!! no leaf %s in your rootfile!!"%hapbcmrawleaf[0]
print "will try to use scaler bcm info instead since you didn't replay happex bcm info"
hapbcmavailall=False
if not hapbcmavailall:
try:
lscalerbcmrawleaf=[trees[runpath].GetLeaf(scalerbcmrawleaf[i]) for i in range(2)]
lscalerbcmrawleaf[0].GetNdata()
except:
raise Exception("Error!! no leaf %s in your rootfile!!"%scalerbcmrawleaf[0])
hapavail=any([self.pklon[v] for v in ["rbpm","curr","hapevent","hapraster"]])
if hapavail:
try:
lnumringleaf=trees[runpath].GetLeaf(numringleaf)
lnumringleaf.GetNdata()
except:
raise Exception("Error!! no leaf %s in your rootfile!!"%numringleaf)
if self.pklon["clock"]:
try:
lclkrawleaf=trees[runpath].GetLeaf(clkrawleaf)
lclkrawleaf.GetNdata()
except:
print "Error!! no leaf %s in your rootfile!will leave it as empty!"%clkrawleaf
self.pklon["clock"]=False
if self.pklon["raster"]:
try:
lrasterrawleaf=[trees[runpath].GetLeaf(rasterrawleaf[i]) for i in range(4)]
lrasterrawleaf[0].GetNdata()
except:
print "Error!! no leaf %s in your rootfile!will leave it as empty!"%rasterrawleaf[0]
self.pklon["raster"]=False
if self.pklon["hapraster"]:
try:
haprasterrawbranch=[trees[runpath].GetBranch(haprasterrawleaf[i]) for i in range(4)]
lhaprasterrawleaf=[b.GetLeaf("data") for b in haprasterrawbranch]
lhaprasterrawleaf[0].GetNdata()
except:
print "Error!! no leaf %s in your rootfile!will leave it as empty!"%haprasterrawleaf[0]
self.pklon["hapraster"]=False
if not any(self.pklon.values()):return True
bcmraw=np.zeros(2,dtype=np.int32)
#decode from rootfile
for e in xrange(events[runpath]):
trees[runpath].GetEntry(e)
ee=leventleaf.GetValue()
if e%1000==0:
print "decoding %i events, %i left, %i"%(e,events[runpath]-e,ee)
if self.pklon["event"]:
event[enorm]=ee
if self.pklon["clock"]:
clkraw[enorm]=lclkrawleaf.GetValue()
if self.pklon["raster"]:
for i in range(4):
rasterraw[i][enorm]=lrasterrawleaf[i].GetValue()
if self.pklon["fbbpm"]:
for i in range(8):
fbbpmraw[i][enorm]=lbpmrawleaf[i].GetValue()
if self.pklon["curr"]:
bcmraw=[False,False]
if not hapbcmavailall:
for i in range(2):
if scalerbcmavail[i]:
bcmraw[i]=lscalerbcmrawleaf[i].GetValue()
bcmraw[i]=getcurr(bcmraw[i],scalerbcmconst[i],"sis3800","slow")
if hapavail:
numring=int(lnumringleaf.GetValue())
if numring<1:
enorm+=1
continue
for i in range(numring):
if self.pklon["rbpm"]:
if self.fastbus:
for j in range(8):
#sync fast bpm raw to happex
bpmraw[j][ehap]=fbbpmraw[j][enorm]
else:
for j in range(8):
bpmraw[j][ehap]=lbpmrawleaf[j].GetValue(i)
if self.pklon["curr"]:
for j in range(2):
if hapbcmavail[j]:
bcmraw[j]=lhapbcmrawleaf[j].GetValue(i)
bcmraw[j]=getcurr(bcmraw[j],hapbcmconst[j],"happex")
curr[ehap]=np.nanmean(bcmraw)
if self.pklon["hapevent"]:
hapevent[ehap]=ee
if self.pklon["hapraster"]:
for j in range(4):
haprasterraw[j][ehap]=lhaprasterrawleaf[j].GetValue()
ehap+=1
enorm+=1
rootfiles[runpath].Close()
try:
if self.pklon["curr"] and len(curr)>100:
zdump(curr,self.pp.getpath(self.pklprefix,"curr",self.run,1))
if self.pklon["rbpm"] and len(bpmraw[0])>100:
zdump(bpmraw,self.pp.getpath(self.pklprefix,"rbpm",self.run,1))
if self.pklon["fbbpm"] and len(fbbpmraw[0])>100:
zdump(fbbpmraw,self.pp.getpath(self.pklprefix,"fbbpm",self.run,1))
if self.pklon["hapevent"] and len(hapevent)>100:
zdump(hapevent,self.pp.getpath(self.pklprefix,"hapevent",self.run,1))
if self.pklon["event"] and len(event)>100:
zdump(event,self.pp.getpath(self.pklprefix,"event",self.run,1))
if self.pklon["clock"] and len(clkraw)>100:
zdump(clkraw,self.pp.getpath(self.pklprefix,"clock",self.run,1))
if self.pklon["raster"] and len(rasterraw[0])>100:
zdump(rasterraw,self.pp.getpath(self.pklprefix,"raster",self.run,1))
if self.pklon["hapraster"] and len(haprasterraw[0])>100:
zdump(haprasterraw,self.pp.getpath(self.pklprefix,"hapraster",self.run,1))
except:
raise Exception("\n\n\n\nError!failed to dump for bpm data,do you have write permission in dir %s?"%self.rootfilepath)
del curr,bpmraw,fbbpmraw,rasterraw,clkraw,hapevent,event,haprasterraw
gc.collect()
def bpmdatafilt(self):
if any(self.pklon[x] for x in ["sbpm","ssbpm","fbpm"]):
bpmraw=zload(self.pp.getpath(self.pklprefix,"rbpm",self.run))
filteredbpmraw=[[0]*8,[0]*8,[0]*8]
filtertype=[self.info.filter1type,self.info.filter2type,self.info.filter3type]
filterfreq=[self.info.filter1,self.info.filter2,self.info.filter3]
filtername=["sbpm","fbpm","ssbpm"]
print filterfreq
if len(bpmraw[0])<100:return True
for i in range(7,-1,-1):
print "filtering for bpm channel %i"%i
for j in range(3):
if self.pklon[filtername[j]]:
if "ave" in filtertype[j] or self.fastbus:
filteredbpmraw[j][i]=signalave(bpmraw[-1],filterfreq[j])
else:
filteredbpmraw[j][i]=lowfilter(bpmraw[-1],filterfreq[j])
del bpmraw[-1]
gc.collect() #recycle memory
try:
for j in range(3):
if self.pklon[filtername[j]]:
zdump(filteredbpmraw[j],self.pp.getpath(self.pklprefix,filtername[j],self.run,1))
except:
raise Exception("\n\n\n\nError!failed to dump for bpm data,do you have write permission in dir %s?"%self.rootfilepath)
del filteredbpmraw
gc.collect()
availname=["bpmavail","fbpmavail","sbpmavail"]
if any(self.pklon[x] for x in availname):
#build bpm avail variable
curravail=0.02
#at least cut 2000 events(2s)
minshift=2000
filterfreq=[self.info.filter1,self.info.filter2,self.info.filter3]
currpkl=self.pp.getpath(self.pklprefix,"curr",self.run)
if os.path.exists(currpkl):
curr=zload(currpkl)
curr=curr>curravail
for i in range(3):
if self.pklon[availname[i]]:
currshift=int(1000/filterfreq[i]+0.9)
currshift=minshift if currshift<minshift else currshift
                        curr1=np.concatenate((np.zeros(currshift),curr[:-currshift]))
                        bpmavail=(curr*curr1).astype(np.bool_)
try:
zdump(bpmavail,self.pp.getpath(self.pklprefix,availname[i],self.run,1))
except:
raise Exception("\n\n\n\nError!failed to dump for bpm data,do you have write permission in dir %s?"%self.rootfilepath)
try:del curr,curr1,bpmavail
except:pass
gc.collect()
def autodecode(self):
self.setpklon(True)
for x in ["self.getrootfilefamily()","self.checkifredecode()","self.decodefromrootfile()","self.bpmdatafilt()"]:
t1=time.time()
exec(x)
t2=time.time()
print "use time for %s: "%x,t2-t1
if self.buildtree:fillbpmrawtree(self.run,self.rootfilepath)
return True
#automatically fill the tree from all of the bpm pkls
def fillbpmrawtree(run,rootpath,fileprefix="bpmraw"):
print "filling bpm raw trees for run %i"%run
##pkl file list
pp=getpklpath(rootpath)
pklfilen=[["rbpm","hapraster","sbpm","ssbpm","sabpm","fbpm","curr","hapevent","bpmavail","sbpmavail","fbpmavail"],["raster","clock","event","fbbpm"]]
datatypes=["bpm","raster"]
for p in range(len(pklfilen)):
for f in range(len(pklfilen[p])):pklfilen[p][f]="raw_"+pklfilen[p][f]
for a in ["a","b"]:
pklfilen[0].append("pos_sbpm%shall"%(a))
pklfilen[0].append("pos_ssbpm%sbpm"%(a))
for b in ["bpm","rot"]:
pklfilen[1].append("pos_fbbpm%s%s"%(a,b))
for c in ["s","f"]:
pklfilen[0].append("pos_%sbpm%s%s"%(c,a,b))
tgtpklfiles=glob.glob(os.path.join(pp.pkldir,"bpmpos_tgt*_%i.pkl"%run))
if len(tgtpklfiles)<1:
tgtpklfiles=glob.glob(os.path.join(pp.pklbak,"bpmpos_tgt*_%i.pkl"%run))
for p in tgtpklfiles:
if os.path.getsize(p)>1000:
fn=os.path.split(p)[1]
pklfilen[0].append("pos_"+fn.split("_")[1])
pklfilen[0].append("pos_rms")
#get pkl file size and group them as total size
insertgroup=[]
    reverseram=3e8 #reserved memory headroom, 300 MB
maxsetram=2e9 #2gb memory maximum
maxram=getmemory()
maxram=maxram if maxram<maxsetram else maxsetram
maxram=maxram-reverseram
for p in range(len(pklfilen)):
insertgroup.append([[]])
totalsize=0
for f in range(len(pklfilen[p])):
fn=pp.getpath("",pklfilen[p][f],run)
if not os.path.exists(fn):continue
totalsize+=os.path.getsize(fn)
if totalsize>=maxram:
insertgroup[p].append([])
totalsize=0
insertgroup[p][-1].append(pklfilen[p][f])
#insert
firstbranch=[True,True]
firstopen=True
for p in range(len(insertgroup)):
for f in range(len(insertgroup[p])):
Nfile=len(insertgroup[p][f])
if Nfile<1:continue
if firstopen:
bpmrootfile=ROOT.TFile(os.path.join(rootpath,"%s_%i.root"%(fileprefix,run)),"RECREATE")
else:
bpmrootfile=ROOT.TFile(os.path.join(rootpath,"%s_%i.root"%(fileprefix,run)),"UPDATE")
print "filling %i-%i group data for run %i"%(p,f,run)
#create tree
if firstbranch[p]:
tree=ROOT.TTree(datatypes[p],datatypes[p])
else:
tree=bpmrootfile.Get(datatypes[p])
#create branches
data,branch,Vdata=[0]*Nfile,[0]*Nfile,[0]*Nfile
numentry,numvar=[],[]
for pf in range(Nfile):
fn=pp.getpath("",insertgroup[p][f][pf],run)
data[pf]=zload(fn)
dataleaves=insertgroup[p][f][pf]
if "-" in dataleaves:dataleaves=dataleaves.replace("-","m")
branchname=dataleaves
numdata=len(data[pf])
if numdata<1:continue
elif numdata>10:nvalues=1 #check if have more than 1 variables in a pkl file
else:
nvalues=numdata
try:numdata=len(data[pf][0])
except Exception as err:
print pf,numdata,insertgroup[p][f]
raise Exception(err)
numentry.append(numdata)
numvar.append(nvalues)
if nvalues<1:continue
elif nvalues==1:dataleavessub=[""]
elif nvalues==2:dataleavessub=["x","y"]
elif nvalues==3:dataleavessub=["x","y","z"]
elif nvalues==5:dataleavessub=["x","y","z","theta","phi"]
else:dataleavessub=[str(i+1) for i in range(nvalues)]
dataleaves=[dataleaves+a for a in dataleavessub]
Vdata[pf]=array("f",[0.0]*nvalues)
Vdataleaves=""
for i in range(len(dataleaves)):
if i>0:Vdataleaves+=":%s/F"%dataleaves[i]
else:Vdataleaves+="%s/F"%dataleaves[i]
branch[pf]=tree.Branch(branchname,Vdata[pf],Vdataleaves)
#fill
numdata=min(numentry)
for i in range(numdata):
if not firstbranch[p]:tree.GetEntry(i)
if i%10000==0:
print "filling %i-%i %i events,%i left"%(p,f,i,numdata-i)
for pf in range(Nfile):
if numvar[pf]<2:
Vdata[pf][0]=float(data[pf][i])
else:
for j in range(numvar[pf]):
Vdata[pf][j]=float(data[pf][j][i])
if firstbranch[p]:tree.Fill()
else:
for pf in range(Nfile):branch[pf].Fill()
tree.Write("",ROOT.TObject.kOverwrite)
bpmrootfile.Close()
if firstbranch[p]:firstbranch[p]=False
if firstopen:firstopen=False
del data,Vdata,tree,branch
gc.collect() #recycle memory
#get the beam rms leaf, used to judge the sharp beam move
def getposrms(rms,rmspic="rms.png"):
#from pylab import plot,show
#plot(rms)
#show()
mineventsplit=1000
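    # minimum number of events between detected peaks/trips before a new split point is accepted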
ROOT.gROOT.SetBatch(True)
#fill rms tree
#rmsrootfile=ROOT.TFile("rms.root","RECREATE")
rmstree=ROOT.TTree("bpmrms","bpmrms")
leaves=["rms"]
Vleaves="rms/F"
Vrms=array("f",[0.0])
branch=rmstree.Branch("rms",Vrms,Vleaves)
entries=len(rms)
for i in range(entries):
Vrms[0]=rms[i]
rmstree.Fill()
#rmstree.Write("",ROOT.TObject.kOverwrite)
#rmsrootfile.Close()
#find rms peaks
s=ROOT.TSpectrum()
c1=ROOT.TCanvas("c1","BPM rms",1024,768)
rmstree.Draw("%s:Entry$"%leaves[0],"%s>0"%(leaves[0]))
graph=ROOT.gPad.GetPrimitive("Graph")
peakx=[]
try:
meanrms=graph.GetRMS(2)
arms=ROOT.TProfile("arms","arms",3000,0,entries)
rmstree.Draw("%s:Entry$>>arms"%leaves[0],"%s>0.08"%(leaves[0]),"same")
nfound=s.Search(arms,10,"same",0.1)
for j in range(nfound):
peakx.append(int(s.GetPositionX()[j]))
c1.Print(rmspic,"png")
except:
print "Warning!!! no valid rms data!!! please check if you have enough event!!!"
#find beam trip
trippeaks,tripbackpeaks=[],[]
oldtrip,oldtripback=0,0
for i in range(len(rms)):
if rms[i]<0:
if i-oldtrip>mineventsplit:trippeaks.append(i)
oldtrip=i
else:
if i-oldtripback>mineventsplit:tripbackpeaks.append(i)
oldtripback=i
#get rid of close trip
trips=peakx+trippeaks+tripbackpeaks
trips.append(0)
trips.append(entries-1)
trips.sort()
finaltrips=[]
for i in range(len(trips)):
if i>0 and trips[i]-trips[i-1]<mineventsplit:continue
else:finaltrips.append(trips[i])
#split beam move
splitentries=[]
for i in range(len(finaltrips)-1):
if i==0 and finaltrips[i+1] in tripbackpeaks:continue
elif i==len(finaltrips)-2 and finaltrips[i] in trippeaks:break
elif finaltrips[i] in trippeaks and finaltrips[i+1] in tripbackpeaks:
continue
splitentries.append(finaltrips[i:i+2])
return splitentries
#judge the slow beam move
def getposslowmove(pos,esplit):
splitentries=[]
mineventsmove=5000
aveevents=1000
tolepos=0.3 #min tolerant position
for s in esplit:
if s[1]-s[0]<mineventsmove*2:
splitentries.append(s)
continue
avepos=np.mean(pos[s[0]:s[0]+mineventsmove])
bpmave=Cavestack(aveevents)
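        # running average of the last aveevents samples; a new split point is added once it drifts more than tolepos from the segment's reference position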
splitpoint=s[0]
for e in range(s[0],s[1]):
if e>=len(pos):break
bpmave.push(pos[e])
if e<splitpoint+mineventsmove:continue
if abs(bpmave.ave()-avepos)>tolepos:
avepos=bpmave.ave()
splitentries.append([splitpoint,int(e-aveevents/2.)])
splitpoint=int(e-aveevents/2.)
splitentries.append([splitpoint,s[1]])
return splitentries
#calculate beam position from the raw BPM signals and the calibration constants
def getrealpos(raw,calconst,fitorder):
chan=[2,3,0,1]
ar,gx,gy=calconst[0]
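    # difference-over-sum of the opposing BPM antenna signals; gx/gy are relative
    # antenna gains and ar is the overall scale factor from the calibration constants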
xdiff_sum=(raw[chan[0]]-gx*raw[chan[1]])/(raw[chan[0]]+gx*raw[chan[1]])
ydiff_sum=(raw[chan[2]]-gy*raw[chan[3]])/(raw[chan[2]]+gy*raw[chan[3]])
xbyb2=xdiff_sum**2+ydiff_sum**2
xb2x=1./xbyb2-1./np.sqrt(xbyb2)*np.sqrt(1./xbyb2-1)
xdata=ar*xdiff_sum*xb2x
ydata=ar*ydiff_sum*xb2x
paranum=0 #total var number
calconst[1]=calconst[1]+[0]*(21-len(calconst[1]))
calconst[2]=calconst[2]+[0]*(21-len(calconst[2]))
x,y=0,0
pnx,pny=0,0
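    # evaluate the 2-D polynomial mapping (xdata, ydata) -> (x, y) up to the requested fit orders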
for i in range(max(fitorder)+1):
for j in range(i+1):
if i-j<=fitorder[0] and j<=fitorder[1]:
x+=calconst[1][pnx]*pow(xdata,i-j)*pow(ydata,j)
pnx+=1
if i-j<=fitorder[1] and j<=fitorder[0]:
y+=calconst[2][pny]*pow(ydata,i-j)*pow(xdata,j)
pny+=1
return x,y
#get the beam current from rootfile
def getcurrfromraw(runpath,treename="T",forcefastbus=False):
rootpath,runfilename=os.path.split(runpath)
run=int(re.split("[_]",re.split("[.]",runfilename)[-2])[-1])
if run<100:run=int(re.split("[_]",re.split("[.]",os.path.split(runpath)[1])[-2])[-2])
period=runinfo()
currepics=period.current(run)
if currepics:return currepics
print "sorry no current info for run %i in database,will try to find it from rawdata first"%run
#get curr from raw
d=decode(runpath,treename,forcefastbus=forcefastbus)
d.autodecode()
nocurr=0.002 #below this current will deal as no current
pp=getpklpath(rootpath)
rawdata=zload(pp.getpath("raw","curr",run))
curr=rawdata[rawdata>=nocurr]
if len(curr)<len(rawdata)/50.:
curr=rawdata[rawdata<nocurr] #if no current at 98% of time, will treat as no current
currepics=np.nanmean(curr)*1000
#save to database
dbdir=os.getenv("BEAMDBPATH")
if dbdir==None:
print "please define BEAMDBPATH in your env"
return False
pydb=os.path.join(dbdir,"pyDB")
currdb=os.path.join(pydb,"runcurr.pdt")
currinfo=zload(currdb)
currinfo[run]=currepics
print "checked run:%i, current:%f nA"%(run,currepics)
try:
zdump(currinfo,currdb)
print "updated currinfo database,please share with other people for this file %s or send it to pengjia so that other people don't need to run it again."%currdb
except:
print "sorry can not update currinfo database, please check if you have permission to write in %s."%pydb
return currepics
|
zhupengjia/beampackage
|
beampackage/signalfilter.py
|
Python
|
gpl-3.0
| 32,657 | 0.043268 |
from django.core.management.base import BaseCommand
from lizard_blockbox import import_helpers
class Command(BaseCommand):
help = "Parse the shapes for the blockbox data."
def handle(self, *args, **kwargs):
import_helpers.parse_shapes_blockbox(self.stdout)
|
lizardsystem/lizard-blockbox
|
lizard_blockbox/management/commands/parse_shapes_blockbox.py
|
Python
|
gpl-3.0
| 276 | 0 |
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
docker_base_image=None,
extra_docker_args=None,
verbose_success=False):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if docker_base_image is not None:
docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
verbose_success=verbose_success)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0,
verbose_success=False):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost,
verbose_success=verbose_success)
return jobspec
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_version):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
if 'manylinux' in platform:
self.labels.append('linux')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Raspberry Pi build
environ['PYTHON'] = '/usr/local/bin/python{}'.format(
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
# A QEMU bug causes submodule update to hang, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
# Parallel builds are counterproductive in emulated environment
environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'] = '1'
extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 5,
docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
extra_docker_args=extra_args)
elif 'manylinux' in self.platform:
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
# Platform autodetection for the manylinux1 image breaks so we set the
# defines ourselves.
# TODO(atash) get better platform-detection support in core so we don't
# need to do this manually...
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
return create_docker_jobspec(
self.name,
# NOTE(rbellevi): Do *not* update this without also ensuring the
# base_docker_image attribute is accurate.
'tools/dockerfile/grpc_artifact_python_%s_%s' %
(self.platform, self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60,
docker_base_image='quay.io/pypa/manylinux1_i686'
if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
elif self.platform == 'windows':
if 'Python27' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
# Ruby build uses docker internally and docker cannot be nested.
# We are using a custom workspace instead.
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
use_workspace=True,
timeout_seconds=45 * 60)
class CSharpExtArtifact:
"""Builds C# native extension library"""
def __init__(self, platform, arch, arch_abi=None):
self.name = 'csharp_ext_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.arch_abi = arch_abi
self.labels = ['artifact', 'csharp', platform, arch]
if arch_abi:
self.name += '_%s' % arch_abi
self.labels.append(arch_abi)
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.arch == 'android':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_android_ndk',
'tools/run_tests/artifacts/build_artifact_csharp_android.sh',
environ={'ANDROID_ABI': self.arch_abi})
elif self.arch == 'ios':
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp_ios.sh'],
use_workspace=True)
elif self.platform == 'windows':
return create_jobspec(self.name, [
'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
self.arch
],
use_workspace=True)
else:
if self.platform == 'linux':
cmake_arch_option = '' # x64 is the default architecture
if self.arch == 'x86':
# TODO(jtattermusch): more work needed to enable
# boringssl assembly optimizations for 32-bit linux.
# Problem: currently we are building the artifact under
# 32-bit docker image, but CMAKE_SYSTEM_PROCESSOR is still
# set to x86_64, so the resulting boringssl binary
# would have undefined symbols.
cmake_arch_option = '-DOPENSSL_NO_ASM=ON'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_centos6_%s' % self.arch,
'tools/run_tests/artifacts/build_artifact_csharp.sh',
environ={'CMAKE_ARCH_OPTION': cmake_arch_option})
else:
cmake_arch_option = '' # x64 is the default architecture
if self.arch == 'x86':
cmake_arch_option = '-DCMAKE_OSX_ARCHITECTURES=i386'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp.sh'],
environ={'CMAKE_ARCH_OPTION': cmake_arch_option},
use_workspace=True)
def __str__(self):
return self.name
class PHPArtifact:
"""Builds PHP PECL package"""
def __init__(self, platform, arch):
self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'php', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_centos6_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
class ProtocArtifact:
"""Builds protoc and protoc-plugin artifacts"""
def __init__(self, platform, arch):
self.name = 'protoc_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'protoc', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform != 'windows':
cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
if self.platform != 'macos':
ldflags += ' -static-libgcc -static-libstdc++ -s'
environ = {
'CONFIG': 'opt',
'CXXFLAGS': cxxflags,
'LDFLAGS': ldflags,
'PROTOBUF_LDFLAGS_EXTRA': ldflags
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_protoc',
'tools/run_tests/artifacts/build_artifact_protoc.sh',
environ=environ)
else:
environ[
'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_protoc.sh'],
environ=environ,
timeout_seconds=60 * 60,
use_workspace=True)
else:
generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
return create_jobspec(
self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
environ={'generator': generator},
use_workspace=True)
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return ([
Cls(platform, arch) for Cls in (CSharpExtArtifact, ProtocArtifact)
for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
] + [
CSharpExtArtifact('linux', 'android', arch_abi='arm64-v8a'),
CSharpExtArtifact('linux', 'android', arch_abi='armeabi-v7a'),
CSharpExtArtifact('linux', 'android', arch_abi='x86'),
CSharpExtArtifact('macos', 'ios'),
# TODO(https://github.com/grpc/grpc/issues/20283)
# Add manylinux2010_x86 targets once this issue is resolved.
PythonArtifact('manylinux2010', 'x86', 'cp27-cp27m'),
PythonArtifact('manylinux2010', 'x86', 'cp27-cp27mu'),
PythonArtifact('manylinux2010', 'x86', 'cp35-cp35m'),
PythonArtifact('manylinux2010', 'x86', 'cp36-cp36m'),
PythonArtifact('manylinux2010', 'x86', 'cp37-cp37m'),
PythonArtifact('manylinux2010', 'x86', 'cp38-cp38'),
PythonArtifact('linux_extra', 'armv7', '2.7'),
PythonArtifact('linux_extra', 'armv7', '3.5'),
PythonArtifact('linux_extra', 'armv7', '3.6'),
PythonArtifact('linux_extra', 'armv6', '2.7'),
PythonArtifact('linux_extra', 'armv6', '3.5'),
PythonArtifact('linux_extra', 'armv6', '3.6'),
PythonArtifact('manylinux2010', 'x64', 'cp27-cp27m'),
PythonArtifact('manylinux2010', 'x64', 'cp27-cp27mu'),
PythonArtifact('manylinux2010', 'x64', 'cp35-cp35m'),
PythonArtifact('manylinux2010', 'x64', 'cp36-cp36m'),
PythonArtifact('manylinux2010', 'x64', 'cp37-cp37m'),
PythonArtifact('manylinux2010', 'x64', 'cp38-cp38'),
PythonArtifact('macos', 'x64', 'python2.7'),
PythonArtifact('macos', 'x64', 'python3.5'),
PythonArtifact('macos', 'x64', 'python3.6'),
PythonArtifact('macos', 'x64', 'python3.7'),
PythonArtifact('macos', 'x64', 'python3.8'),
PythonArtifact('windows', 'x86', 'Python27_32bit'),
PythonArtifact('windows', 'x86', 'Python35_32bit'),
PythonArtifact('windows', 'x86', 'Python36_32bit'),
PythonArtifact('windows', 'x86', 'Python37_32bit'),
PythonArtifact('windows', 'x86', 'Python38_32bit'),
PythonArtifact('windows', 'x64', 'Python27'),
PythonArtifact('windows', 'x64', 'Python35'),
PythonArtifact('windows', 'x64', 'Python36'),
PythonArtifact('windows', 'x64', 'Python37'),
PythonArtifact('windows', 'x64', 'Python38'),
RubyArtifact('linux', 'x64'),
RubyArtifact('macos', 'x64'),
PHPArtifact('linux', 'x64')
])
|
firebase/grpc-SwiftPM
|
tools/run_tests/artifacts/artifact_targets.py
|
Python
|
apache-2.0
| 16,432 | 0.000609 |
"""pidaemon.py
Usage:
pidaemon.py [--brightness=<b>] [--sleep=<s>] [--interval=<s>] [--wait=<s>]
pidaemon.py (-h | --help)
pidaemon.py --version
Options:
-h --help Show this screen.
--version Show version
--brightness=<b> Default brightness level 1-255 [default: 2]
--interval=<s> Default interval in seconds between each frame in jobs [default: 0.1]
--sleep=<s> Default number of seconds to pause after each job [default: 0]
--wait=<s> Time between each iteration when polling for job on an empty queue. [default: 5]
"""
import sys
import signal
import time
from docopt import docopt
from collections import defaultdict
import settings
from piqueue import piqueue
class PiDaemon():
def __init__(self, opts):
self.running = None
self.options = self.parse_options(opts)
self.session = piqueue.Session()
self.setup_signal_handlers()
def parse_options(self, opts):
options = defaultdict(lambda: None, {
'brightness': int(opts['--brightness']),
'sleep': float(opts['--sleep']),
'interval': float(opts['--interval']),
'wait': float(opts['--wait']),
})
return options
def run(self):
while True:
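            # poll the queue: run the oldest pending job, re-enqueue it if marked keep, then delete it; otherwise wait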
job = self.next_job()
if job is not None:
self.run_job(job)
if job.options['keep'] == True:
self.add_job(job)
self.delete_job(job)
else:
time.sleep(self.options['wait'])
def run_job(self, job):
self.running = job.job_instance(self.options.copy())
self.running.run()
self.running.sleep()
self.running.cleanup()
self.running = None
def queue(self):
return self.session.query(piqueue.Job).order_by(piqueue.Job.date_created)
def next_job(self):
return self.queue().first()
def add_job(self, old_job):
new_job = piqueue.Job(old_job.job_name, old_job.options)
self.session.add(new_job)
self.session.commit()
def delete_job(self, job):
self.session.delete(job)
self.session.commit()
def setup_signal_handlers(self):
signal.signal(signal.SIGINT, self.cleanup)
signal.signal(signal.SIGTERM, self.cleanup)
def cleanup(self, signum, frame):
if self.running is not None:
self.running.cleanup()
sys.exit(-1)
if __name__ == '__main__':
opts = docopt(__doc__, version='PiDaemon v1.0')
PiDaemon(opts).run()
|
ollej/piapi
|
pidaemon.py
|
Python
|
mit
| 2,568 | 0.002336 |
"""
Utilities for instructor unit tests
"""
import datetime
import json
import random
import six
from pytz import UTC
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = six.text_type(email_id) # pylint: disable=invalid-name
        # Select a random date for the created field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=UTC)
self.targets = FakeTargetGroup()
class FakeTarget(object):
""" Corresponding fake target for a fake email """
target_type = "expected"
def long_display(self):
""" Mocks out a class method """
return self.target_type
class FakeTargetGroup(object):
""" Mocks out the M2M relationship between FakeEmail and FakeTarget """
def all(self):
""" Mocks out a django method """
return [FakeTarget()]
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
self.sent_to = [u'expected']
|
cpennington/edx-platform
|
lms/djangoapps/instructor/tests/utils.py
|
Python
|
agpl-3.0
| 3,206 | 0.000624 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2016 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_invoice
from . import product
from . import product_category
from . import product_supplierinfo
from . import purchase_order
from . import sale_order
|
iw3hxn/LibrERP
|
product_extended/models/__init__.py
|
Python
|
agpl-3.0
| 1,187 | 0 |
# -*- coding: utf-8 -*-
#Copyright (C) 2015 David Delgado Hernandez
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from ConfigParser import ConfigParser
import directory
import var
class Config(directory.Directorio):
"""Permite obtener toda clase de configuracion desde la linea
de comandos, el fichero de constantes y/o el archivo de
configuracion"""
def __init__(self):
super(Config, self).__init__(var.CONFIG_DIR)
if not os.path.exists(var.CONFIG_DIR):
os.makedirs(var.CONFIG_DIR)
if not os.path.exists(var.CACHE_DIR):
os.makedirs(var.CACHE_DIR)
        # Basic files
self.ConfDir = var.CONFIG_DIR
self.client_secret = var.CLIENT_SECRETS_FILE
self.storage_path = var.CODE_STORAGE
self.config_file = var.CONFIG_FILE
self.url_file = var.URL_FILE
        # if the user manually set a non-persistent configuration, it is tracked here
# CACHE_DIR,URL_FILE,FORMAT_DEFAULT,FINAL_DIR,Codec,kbps
self.reg = [False,False,False,False,False,False]
        # Options
self.format = var.FORMAT_DEFAULT
self.codec = var.CODEC_DEFAULT
        # check integrity before continuing
self._CheckDirectory()
self.cfgfile = ConfigParser()
self.cfgfile.read(self.config_file)
# Si todo esta bien requerir las configuraciones hechas por el
# usuario en el archivo de configuracion
# Directorios secundarios
        # TODO : if set manually, check that they do not overlap
self.CACHE_DIR = self.getCacheDir()
self.FINAL_DIR = self.getFinalDir()
def _CheckDirectory(self):
        # Record: (client_secret, configuration file, URL.conf)
check = [False,False,False]
for i in self.getListFiles():
if i == self.client_secret:
check[0] = True
if i == self.config_file:
check[1] = True
if i == self.url_file:
check[2] = True
if check[0] == False:
raise AttributeError("No se encontro el archivo con la clave API")
if check[1] == False:
self.createFile(self.config_file,var.CONFIG_DEFAULT,rw="w")
if check[2] == False:
self.createFile(self.url_file,rw="w")
def getCacheDir(self):
if self.reg[0]:
            # If the user modified it, do nothing and return the user's value
pass
elif self.cfgfile.has_option("DIRECTORIOS","CACHE_DIR"):
self.CACHE_DIR = self.cfgfile.get("DIRECTORIOS","CACHE_DIR")
else:
            # neither given by the user nor present in the configuration file
            self.cfgfile.set("DIRECTORIOS","CACHE_DIR",var.CACHE_DIR)
            self.CACHE_DIR = var.CACHE_DIR
return self.CACHE_DIR
def setCacheDir(self,path,flag=False):
self.reg[0] = True
if not os.path.exists(path):
os.mkdir(path)
        # if it should be stored persistently
self.CACHE_DIR = path
if flag:
self.cfgfile.set("DIRECTORIOS","CACHE_DIR",path)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.CACHE_DIR
def getFormat(self):
if self.reg[2]:
pass
elif self.cfgfile.has_option("OPCIONES","FORMAT_DEFAULT"):
self.format = self.cfgfile.getint("OPCIONES","FORMAT_DEFAULT")
else:
self.cfgfile.set("OPCIONES","FORMAT_DEFAULT",var.FORMAT_DEFAULT)
return self.format
def setFormat(self,nformat,flag=False):
self.reg[2] = True
self.format = nformat
if flag:
self.cfgfile.set("OPCIONES","FORMAT_DEFAULT",nformat)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.format
def setFinalDir(self,path,flag=False):
self.reg[3] = True
if not os.path.exists(path):
os.mkdir(path)
        # if it should be stored persistently
self.FINAL_DIR = path
if flag:
self.cfgfile.set("DIRECTORIOS","FINAL_DIR",path)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.FINAL_DIR
def getFinalDir(self):
if self.reg[3]:
            # If the user modified it, do nothing and return the user's value
pass
elif self.cfgfile.has_option("DIRECTORIOS","FINAL_DIR"):
self.FINAL_DIR = self.cfgfile.get("DIRECTORIOS","FINAL_DIR")
else:
            # Neither given by the user nor present in the configuration file
self.cfgfile.set("DIRECTORIOS","FINAL_DIR",var.FINAL_DIR)
self.FINAL_DIR = var.FINAL_DIR
return self.FINAL_DIR
def addURL(self,URL):
        # TODO: validate the URL before storing it
lst = self.getAllURL()
if URL in lst:
dup = True
else:
dup = False
with open(self.url_file,"a") as f:
if dup == False:
f.write(URL+"\n")
def getAllURL(self):
        # Return a list with the URLs
urllist = []
try:
with open(self.url_file,"r") as f:
while True:
url = f.readline()
if not url:
break
url = url.replace("\n","")
if len(url) > 0:
urllist.append(url)
return urllist
except Exception, e:
            # TODO: raise a warning and write to the log
print e
            # create the file
self.createFile(self.url_file,rw="w")
return []
    def getDelWrongList(self):
        if self.cfgfile.has_option("OPCIONES","DELETE_WRONG_LIST"):
            self.DELETE_WRONG_LIST = self.cfgfile.get("OPCIONES","DELETE_WRONG_LIST")
        else:
            # Not in the configuration file; fall back to the default value
            self.cfgfile.set("OPCIONES","DELETE_WRONG_LIST","YES")
            self.DELETE_WRONG_LIST = "YES"
        return self.DELETE_WRONG_LIST
def getLogFile(self):
return var.LOG_FILE
def getCodec(self):
if self.reg[4]:
pass
elif self.cfgfile.has_option("OPCIONES","CODEC_DEFAULT"):
self.codec = self.cfgfile.get("OPCIONES","CODEC_DEFAULT")
else:
self.cfgfile.set("OPCIONES","CODEC_DEFAULT",var.CODEC_DEFAULT)
self.codec = var.CODEC_DEFAULT
return self.codec
def setCodec(self,codec,flag=False):
self.reg[4] = True
self.codec = codec
if flag:
self.cfgfile.set("OPCIONES","CODEC_DEFAULT",codec)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.codec
def getKbps(self):
if self.reg[5]:
pass
elif self.cfgfile.has_option("OPCIONES","KBPS"):
self.kpbs = self.cfgfile.get("OPCIONES","KBPS")
else:
self.cfgfile.set("OPCIONES","KBPS",var.KBPS)
self.kpbs = var.KBPS
return self.kpbs
def setKbps(self,kpbs,flag=False):
self.reg[5] = True
self.kpbs = kpbs
if flag:
self.cfgfile.set("OPCIONES","KBPS",kpbs)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.kpbs
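

# Illustrative usage sketch (not part of the original module). It assumes the
# paths declared in ``var`` (CONFIG_DIR, CACHE_DIR, ...) are writable and that
# the API-key file named by var.CLIENT_SECRETS_FILE already exists, otherwise
# Config() raises.
if __name__ == "__main__":
    cfg = Config()
    # Remember a URL for later processing; addURL() skips duplicates.
    cfg.addURL("http://example.com/some-video")
    for url in cfg.getAllURL():
        print url
    # Persist a new download directory across runs (flag=True writes the file).
    cfg.setFinalDir("/tmp/ikol-downloads", flag=True)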
|
lokiteitor/ikol
|
ikol/config.py
|
Python
|
gpl-2.0
| 7,992 | 0.010636 |
from __future__ import annotations
from decimal import Decimal
from typing import (
Any,
Mapping,
Sequence,
)
import uuid
from pprint import pprint
import pytest
from ai.backend.common.docker import ImageRef
from ai.backend.common.types import (
AccessKey, AgentId, KernelId,
ResourceSlot, SessionTypes,
)
from ai.backend.manager.scheduler import PendingSession, ExistingSession, AgentContext
from ai.backend.manager.scheduler.dispatcher import load_scheduler
from ai.backend.manager.scheduler.fifo import FIFOSlotScheduler, LIFOSlotScheduler
from ai.backend.manager.scheduler.drf import DRFScheduler
from ai.backend.manager.scheduler.mof import MOFScheduler
def test_load_intrinsic():
assert isinstance(load_scheduler('fifo', {}), FIFOSlotScheduler)
assert isinstance(load_scheduler('lifo', {}), LIFOSlotScheduler)
assert isinstance(load_scheduler('drf', {}), DRFScheduler)
assert isinstance(load_scheduler('mof', {}), MOFScheduler)
example_group_id = uuid.uuid4()
example_total_capacity = ResourceSlot({'cpu': '4.0', 'mem': '4096'})
@pytest.fixture
def example_agents():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_mixed_agents():
return [
AgentContext(
agent_id=AgentId('i-gpu'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-cpu'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_first_one_assigned():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_no_valid():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
),
]
pending_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
existing_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
_common_dummy_for_pending_session: Mapping[str, Any] = dict(
image_ref=ImageRef('lablup/python:3.6-ubunt18.04'),
domain_name='default',
group_id=example_group_id,
resource_policy={},
resource_opts={},
mounts=[],
mount_map={},
environ={},
bootstrap_script=None,
startup_command=None,
internal_data=None,
preopen_ports=[],
)
_common_dummy_for_existing_session: Mapping[str, Any] = dict(
image_ref=ImageRef('lablup/python:3.6-ubunt18.04'),
domain_name='default',
group_id=example_group_id,
)
@pytest.fixture
def example_pending_sessions():
    # lower indices are enqueued first.
return [
PendingSession( # rocm
kernel_id=pending_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cuda
kernel_id=pending_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cpu-only
kernel_id=pending_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
]
@pytest.fixture
def example_existing_sessions():
return [
ExistingSession(
kernel_id=existing_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
]
def _find_and_pop_picked_session(pending_sessions, picked_session_id):
for picked_idx, pending_sess in enumerate(pending_sessions):
if pending_sess.kernel_id == picked_session_id:
break
else:
# no matching entry for picked session?
raise RuntimeError('should not reach here')
return pending_sessions.pop(picked_idx)
def test_fifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = FIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == AgentId('i-001')
def test_lifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = LIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[2].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_fifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
scheduler = FIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 0:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 2:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_lifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
# Check the reverse with the LIFO scheduler.
# The result must be same.
scheduler = LIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[-1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 2:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 0:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_drf_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = DRFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
pprint(example_pending_sessions)
assert picked_session_id == example_pending_sessions[1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_mof_scheduler_first_assign(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == 'i-001'
def test_mof_scheduler_second_assign(example_agents_first_one_assigned, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(
example_agents_first_one_assigned, picked_session)
assert agent_id == 'i-101'
def test_mof_scheduler_no_valid_agent(example_agents_no_valid, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents_no_valid, picked_session)
assert agent_id is None
# TODO: write tests for multiple agents and scaling groups
|
lablup/sorna-manager
|
tests/manager/test_scheduler.py
|
Python
|
lgpl-3.0
| 16,173 | 0.000618 |
from __future__ import absolute_import
from __future__ import print_function
import re
import itertools
from ..utils.py3compat import range
from .. import csstokens as tokens
class ScannerBase(object):
def __init__(self, data):
self._tokeniter = tokens.re_tokens.finditer(data)
self._lineno = 1
self._column = 1
self._eof_count = 0
self._next = [tokens.Token(tokens.START, u'', self._lineno, self._column)]
def __iter__(self):
return self
    def next(self):
        tok = self._next.pop(0)
        if not self._next:
            self._fill(10)
        return tok

    __next__ = next  # Python 3 iterator protocol
def _fill(self, n=1, force=False):
# n: The desired length for self._next.
ntoload = max(n - len(self._next), 0)
i = -1
try:
for i in range(ntoload):
self._next.append(self.get_next())
return len(self._next)
except StopIteration:
if not force and self._eof_count > 1:
raise
loaded = i+1
k = -1
for k in range(ntoload - loaded):
self._eof_count += 1
self._next.append(tokens.Token(tokens.EOF, u'', self._lineno, self._column))
return len(self._next)
def putback(self, *toks):
self._next[:0] = list(toks)
def peek(self, n=0):
try:
return self._next[n]
except IndexError:
pass
try:
sz = max(n+1, 10)
end = self._fill(n=sz, force=True)
assert end > 0
return self._next[min(end-1, n)]
except StopIteration:
return self._next[-1] # this will be EOF
def process_newlines(self, s):
lines = tokens.re_newline.split(s)
# returns: number_of_newlines, length_of_last_line
return len(lines) - 1, len(lines[-1])
embedded_newlines = set((tokens.STRING, tokens.COMMENT, tokens.WS,
tokens.URI, tokens.BADCOMMENT, tokens.BADSTRING,
tokens.BADURI, tokens.IDENT,
tokens.ATKEYWORD_OTHER, tokens.DIMENSION,
tokens.HASH, tokens.FUNCTION))
def advance_position(self, toktype, value):
if toktype in self.embedded_newlines:
nlines, nlast = self.process_newlines(value)
if nlines:
self._lineno += nlines
self._column = nlast + 1
else:
self._column += len(value)
else:
self._column += len(value)
def get_next(self):
        m = next(self._tokeniter)
toktype = tokens.tokens[m.lastgroup]
value = m.group()
tok = tokens.Token(toktype, value, self._lineno, self._column)
self.advance_position(toktype, value)
##print 'Token: {0}'.format(tok.typestr)
return tok
class Scanner(ScannerBase):
ignore_tokens = (tokens.COMMENT,)
def get_next(self):
tok = super(Scanner, self).get_next()
while tok.type in self.ignore_tokens:
tok = super(Scanner, self).get_next()
return tok
#==============================================================================#
def benchmark_iter(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
        for tok in tokens.re_tokens.finditer(src):
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark_iterlist(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
        ilist = list(tokens.re_tokens.finditer(src))
for tok in ilist:
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark_list(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
        for tok in tokens.re_tokens.findall(src):
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark(src, ntests=5): # pragma: no cover
times_list = benchmark_list(src, tests=ntests)
times_iterlist = benchmark_iterlist(src, tests=ntests)
times_iter = benchmark_iter(src, tests=ntests)
print('iter time: {0}'.format(min(times_iter)))
print('iterlist time: {0}'.format(min(times_iterlist)))
print('list time: {0}'.format(min(times_list)))
#==============================================================================#
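
# Illustrative usage sketch (not part of the original module). It assumes the
# cssypy package is importable; the Scanner subclass above silently drops
# COMMENT tokens, and peek() looks ahead without consuming.
#
#     from cssypy.scanners.scanners import Scanner
#     scanner = Scanner(u"a { color: red; }")
#     upcoming = scanner.peek()
#     for tok in scanner:
#         print(tok)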
|
colossalbit/cssypy
|
cssypy/scanners/scanners.py
|
Python
|
bsd-3-clause
| 4,679 | 0.005984 |
def sqr(x):
return x*x
def cube(x):
return x*x*x
def quad(x):
return cube(x)*x
a = 10
print sqr(a)
print cube(a)
print quad(a)
|
jishnuv/Toy-Python-Virtual-Machine
|
Testcases/Functions/f2.py
|
Python
|
gpl-2.0
| 131 | 0.045802 |
"""
Application urlconfig
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^(?P<uuid>[0-9a-f-]{36})/$",
views.RateView.as_view(),
name="rate"
),
url(
r"^2/(?P<uuid>[0-9a-f-]{36})/$",
views.Rate2View.as_view(),
name="rate2"
)
]
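
# Illustrative matching paths (not part of the original module). Given the
# 36-character UUID pattern above, requests shaped like these resolve to the
# two views (the application prefix depends on how this urlconf is included):
#
#   .../0c30b946-4a0b-43d4-8f2d-1a2b3c4d5e6f/    -> RateView   (name="rate")
#   .../2/0c30b946-4a0b-43d4-8f2d-1a2b3c4d5e6f/  -> Rate2View  (name="rate2")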
|
RightToResearch/OpenCon-Rating-App
|
project/rating/urls.py
|
Python
|
mit
| 365 | 0 |
from ..models import Post, Category, Tag
from django.db.models.aggregates import Count
from django import template
register = template.Library()
# Recent posts
@register.simple_tag
def get_recent_posts(num=9):
return Post.objects.all().order_by('-modified_time')[:num]
# Archive by month
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
# Archive by category
@register.simple_tag
def get_categories():
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
# Tag cloud
@register.simple_tag
def get_tags():
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
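
# Illustrative template usage (not part of the original module). Assuming the
# app's ``templatetags`` package exposes this file as ``blog_tags``, a template
# could consume the tags roughly like this (Django 1.9+ simple_tag syntax):
#
#     {% load blog_tags %}
#     {% get_recent_posts 5 as recent_posts %}
#     {% archives as month_list %}
#     {% get_categories as category_list %}
#     {% get_tags as tag_list %}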
|
RewrZ/RewrZ
|
rewrz/blog/templatetags/blog_tags.py
|
Python
|
agpl-3.0
| 698 | 0.010479 |
# -*- coding: utf-8 -*-
import httplib as http
import mock
from nose.tools import * # noqa
from boto.exception import S3ResponseError
from framework.auth import Auth
from tests.base import get_default_metaschema
from tests.factories import ProjectFactory, AuthUserFactory
from website.addons.base import testing
from website.addons.s3.tests.utils import S3AddonTestCase
from website.addons.s3.utils import validate_bucket_name, validate_bucket_location
from website.util import api_url_for
class TestS3Views(S3AddonTestCase, testing.views.OAuthAddonConfigViewsTestCaseMixin):
def setUp(self):
self.mock_can_list = mock.patch('website.addons.s3.views.utils.can_list')
self.mock_can_list.return_value = True
self.mock_can_list.start()
self.mock_uid = mock.patch('website.addons.s3.views.utils.get_user_info')
self.mock_uid.return_value = {'id': '1234567890', 'display_name': 's3.user'}
self.mock_uid.start()
self.mock_exists = mock.patch('website.addons.s3.views.utils.bucket_exists')
self.mock_exists.return_value = True
self.mock_exists.start()
super(TestS3Views, self).setUp()
def tearDown(self):
self.mock_can_list.stop()
self.mock_uid.stop()
self.mock_exists.stop()
super(TestS3Views, self).tearDown()
def test_s3_settings_input_empty_keys(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_access_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': 'Non-empty-secret-key'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_secret_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': 'Non-empty-access-key',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_set_bucket_no_settings(self):
user = AuthUserFactory()
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_s3_set_bucket_no_auth(self):
user = AuthUserFactory()
user.add_addon('s3')
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_s3_set_bucket_registered(self):
registration = self.project.register_node(
get_default_metaschema(), Auth(self.user), '', ''
)
url = registration.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.addons.s3.views.utils.can_list', return_value=False)
def test_user_settings_cant_list(self, mock_can_list):
url = api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': 'aldkjf',
'secret_key': 'las'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('Unable to list buckets.', rv.body)
def test_s3_remove_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=self.user.auth)
result = self.Serializer().serialize_settings(node_settings=self.node_settings, current_user=self.user)
assert_equal(result['nodeHasAuth'], False)
def test_s3_remove_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=None, expect_errors=True)
assert_equal(ret.status_code, 401)
def test_s3_get_node_settings_owner(self):
self.node_settings.set_auth(self.external_account, self.user)
self.node_settings.folder_id = 'bucket'
self.node_settings.save()
url = self.node_settings.owner.api_url_for('s3_get_config')
res = self.app.get(url, auth=self.user.auth)
result = res.json['result']
assert_equal(result['nodeHasAuth'], True)
assert_equal(result['userIsOwner'], True)
assert_equal(result['folder']['path'], self.node_settings.folder_id)
def test_s3_get_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_get_config')
unauthorized = AuthUserFactory()
ret = self.app.get(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(ret.status_code, 403)
## Overrides ##
@mock.patch('website.addons.s3.model.get_bucket_names')
def test_folder_list(self, mock_names):
mock_names.return_value = ['bucket1', 'bucket2']
super(TestS3Views, self).test_folder_list()
@mock.patch('website.addons.s3.model.bucket_exists')
@mock.patch('website.addons.s3.model.get_bucket_location_or_error')
def test_set_config(self, mock_location, mock_exists):
mock_exists.return_value = True
mock_location.return_value = ''
self.node_settings.set_auth(self.external_account, self.user)
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.put_json(url, {
'selected': self.folder
}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
self.project.reload()
self.node_settings.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_bucket_linked'.format(self.ADDON_SHORT_NAME)
)
assert_equal(res.json['result']['folder']['name'], self.node_settings.folder_name)
class TestCreateBucket(S3AddonTestCase):
def setUp(self):
super(TestCreateBucket, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = self.user.auth
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
def test_bad_names(self):
assert_false(validate_bucket_name(''))
assert_false(validate_bucket_name('no'))
assert_false(validate_bucket_name('a' * 64))
assert_false(validate_bucket_name(' leadingspace'))
assert_false(validate_bucket_name('trailingspace '))
assert_false(validate_bucket_name('bogus naMe'))
assert_false(validate_bucket_name('.cantstartwithp'))
assert_false(validate_bucket_name('or.endwith.'))
assert_false(validate_bucket_name('..nodoubles'))
assert_false(validate_bucket_name('no_unders_in'))
assert_false(validate_bucket_name('-leadinghyphen'))
assert_false(validate_bucket_name('trailinghyphen-'))
assert_false(validate_bucket_name('Mixedcase'))
assert_false(validate_bucket_name('empty..label'))
assert_false(validate_bucket_name('label-.trailinghyphen'))
assert_false(validate_bucket_name('label.-leadinghyphen'))
assert_false(validate_bucket_name('8.8.8.8'))
assert_false(validate_bucket_name('600.9000.0.28'))
assert_false(validate_bucket_name('no_underscore'))
assert_false(validate_bucket_name('_nounderscoreinfront'))
assert_false(validate_bucket_name('no-underscore-in-back_'))
assert_false(validate_bucket_name('no-underscore-in_the_middle_either'))
def test_names(self):
assert_true(validate_bucket_name('imagoodname'))
assert_true(validate_bucket_name('still.passing'))
assert_true(validate_bucket_name('can-have-dashes'))
assert_true(validate_bucket_name('kinda.name.spaced'))
assert_true(validate_bucket_name('a-o.valid'))
assert_true(validate_bucket_name('11.12.m'))
assert_true(validate_bucket_name('a--------a'))
assert_true(validate_bucket_name('a' * 63))
def test_bad_locations(self):
assert_false(validate_bucket_location('Venus'))
assert_false(validate_bucket_location('AlphaCentari'))
assert_false(validate_bucket_location('CostaRica'))
def test_locations(self):
assert_true(validate_bucket_location(''))
assert_true(validate_bucket_location('us-east-2'))
assert_true(validate_bucket_location('eu-central-1'))
assert_true(validate_bucket_location('us-west-1'))
assert_true(validate_bucket_location('us-west-2'))
assert_true(validate_bucket_location('ap-northeast-1'))
assert_true(validate_bucket_location('ap-northeast-2'))
assert_true(validate_bucket_location('ap-southeast-1'))
assert_true(validate_bucket_location('ap-southeast-2'))
assert_true(validate_bucket_location('ap-south-1'))
assert_true(validate_bucket_location('sa-east-1'))
assert_true(validate_bucket_location('eu-west-1'))
@mock.patch('website.addons.s3.views.utils.create_bucket')
@mock.patch('website.addons.s3.views.utils.get_bucket_names')
def test_create_bucket_pass(self, mock_names, mock_make):
mock_make.return_value = True
mock_names.return_value = [
'butintheend',
'it',
'doesntevenmatter'
]
url = self.project.api_url_for('create_bucket')
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': '',
},
auth=self.user.auth
)
assert_equal(ret.status_int, http.OK)
assert_equal(ret.json, {})
@mock.patch('website.addons.s3.views.utils.create_bucket')
def test_create_bucket_fail(self, mock_make):
error = S3ResponseError(418, 'because Im a test')
error.message = 'This should work'
mock_make.side_effect = error
url = "/api/v1/project/{0}/s3/newbucket/".format(self.project._id)
ret = self.app.post_json(url, {'bucket_name': 'doesntevenmatter'}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.body, '{"message": "This should work", "title": "Problem connecting to S3"}')
@mock.patch('website.addons.s3.views.utils.create_bucket')
def test_bad_location_fails(self, mock_make):
url = "/api/v1/project/{0}/s3/newbucket/".format(self.project._id)
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': 'not a real bucket location',
},
auth=self.user.auth,
expect_errors=True)
assert_equals(ret.body, '{"message": "That bucket location is not valid.", "title": "Invalid bucket location"}')
|
monikagrabowska/osf.io
|
website/addons/s3/tests/test_view.py
|
Python
|
apache-2.0
| 12,215 | 0.001474 |
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry.polygon import Polygon
from shapely.geometry import MultiPolygon
import cell_tree2d
# create a rotated Cartesian grid
xc, yc = np.mgrid[1:10:15j, 1:20:18j]
yc = yc**1.2 + xc**1.5
def rot2d(x, y, ang):
'''rotate vectors by geometric angle'''
xr = x*np.cos(ang) - y*np.sin(ang)
yr = x*np.sin(ang) + y*np.cos(ang)
return xr, yr
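
# Quick illustrative check (not in the original script): rotating the unit
# vector (1, 0) by pi/2 should give roughly (0, 1), i.e. the rotation is
# counter-clockwise.
#
#     rot2d(1.0, 0.0, np.pi / 2)   # -> (~0.0, ~1.0)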
x, y = rot2d(xc, yc, 0.2)
y /= 10.0
x -= x.mean()
y -= y.mean()
# Create nodes and faces from grid
nodes = np.ascontiguousarray(np.column_stack((x[:].reshape(-1),
y[:].reshape(-1)))).astype(np.float64)
y_size = x.shape[0]
x_size = y.shape[1]
faces = np.array([np.array([[xi, xi + 1, xi + x_size + 1, xi + x_size]
for xi in range(0, x_size - 1, 1)]) + yi * x_size for yi in range(0, y_size - 1)])
faces = np.ascontiguousarray(faces.reshape(-1, 4).astype(np.int32))
squares = [nodes[face] for face in faces]
## Convert to a bunch of shapely Polygon objects, for some unknown use.
# mesh = MultiPolygon([Polygon(p) for p in squares])
## Extra functions for plotting the grid
# for square in squares:
# x, y = square.T
# plt.fill(x, y)
#
# plt.gca().set_aspect(1.0)
# plt.show()
# Create some trial points and locate them using cell_tree
xyi = np.random.randn(10, 2)
ct = cell_tree2d.CellTree(nodes, faces)
idx = ct.locate(xyi)
|
pyoceans/gridded
|
examples/make_test_grid.py
|
Python
|
mit
| 1,437 | 0.004871 |
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from churchill.apps.core.models import BaseModel
from churchill.apps.currencies.services import get_default_currency_id
class StatsCalculationStrategy(models.TextChoices):
LAST_SHOT = "LAST_SHOT", _("From the last shot")
WEEKLY = "WEEKLY", _("Weekly")
MONTHLY = "MONTHLY", _("Monthly")
ALL_TIME = "ALL_TIME", _("For the all time")
class Profile(BaseModel):
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
related_name="profile",
)
image = models.FileField(
upload_to=settings.PROFILE_IMAGE_DIRECTORY, null=True, blank=True
)
language = models.CharField(
max_length=5,
blank=True,
default=settings.LANGUAGE_CODE,
choices=settings.LANGUAGES,
)
currency = models.ForeignKey(
"currencies.Currency",
related_name="profiles",
on_delete=models.DO_NOTHING,
blank=True,
default=get_default_currency_id,
)
next_day_offset = models.IntegerField(
blank=True,
default=settings.NEXT_DAY_OFFSET,
help_text=_("Offset in hours for the next day"),
)
avg_consumption = models.IntegerField(
blank=True,
default=settings.AVG_ALCOHOL_CONSUMPTION,
help_text=_("Average alcohol consumption in ml per year"),
)
avg_price = models.DecimalField(
max_digits=5,
decimal_places=2,
blank=True,
default=settings.AVG_ALCOHOL_PRICE,
help_text=_("Average alcohol price for 1000 ml"),
)
stats_calculation_strategy = models.CharField(
max_length=20,
choices=StatsCalculationStrategy.choices,
default=StatsCalculationStrategy.MONTHLY,
)
verification_token = models.CharField(max_length=16, null=True, blank=True)
def __str__(self):
return self.user.email
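
# Illustrative usage sketch (not part of the original module). The username and
# email below are invented; every other field falls back to the defaults
# declared above (language, currency, offsets, calculation strategy, ...).
#
#     user = User.objects.create_user("winston", email="winston@example.com")
#     profile = Profile.objects.create(user=user)
#     profile.stats_calculation_strategy = StatsCalculationStrategy.WEEKLY
#     profile.save(update_fields=["stats_calculation_strategy"])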
|
manti-by/Churchill
|
churchill/apps/profiles/models.py
|
Python
|
bsd-3-clause
| 2,037 | 0 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.targets.android_target import AndroidTarget
from pants.base.exceptions import TargetDefinitionException
class AndroidResources(AndroidTarget):
"""Processes android resources to generate R.java"""
def __init__(self,
resource_dir=None,
**kwargs):
#TODO(mateor) change resource_dir from string into list
"""
:param string resource_dir: path/to/directory containing Android resource files,
often named 'res'.
"""
super(AndroidResources, self).__init__(**kwargs)
address = kwargs['address']
try:
self.resource_dir = os.path.join(address.spec_path, resource_dir)
except AttributeError:
raise TargetDefinitionException(self, 'An android_resources target must specify a '
'\'resource_dir\' that contains the target\'s '
'resource files.')
def globs_relative_to_buildroot(self):
return {'globs' : os.path.join(self.resource_dir, '**')}
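
# Illustrative BUILD-file usage (not part of the original module). The target
# name and path are invented; ``resource_dir`` is the keyword this class
# requires, and any arguments demanded by the AndroidTarget base class would be
# passed alongside it.
#
#     android_resources(
#       name='res',
#       resource_dir='src/android/example/res',
#     )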
|
pgroudas/pants
|
src/python/pants/backend/android/targets/android_resources.py
|
Python
|
apache-2.0
| 1,346 | 0.008172 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from autobahn import wamp
from autobahn.wamp.uri import Pattern, RegisterOptions, SubscribeOptions
import unittest2 as unittest
class TestUris(unittest.TestCase):
def test_invalid_uris(self):
for u in [u"",
u"com.myapp.<product:foo>.update",
u"com.myapp.<123:int>.update",
u"com.myapp.<:product>.update",
u"com.myapp.<product:>.update",
u"com.myapp.<int:>.update",
]:
self.assertRaises(Exception, Pattern, u, Pattern.URI_TARGET_ENDPOINT)
def test_valid_uris(self):
for u in [u"com.myapp.proc1",
u"123",
u"com.myapp.<product:int>.update",
u"com.myapp.<category:string>.<subcategory>.list"
u"com.myapp.something..update"
]:
p = Pattern(u, Pattern.URI_TARGET_ENDPOINT)
self.assertIsInstance(p, Pattern)
def test_parse_uris(self):
tests = [
(u"com.myapp.<product:int>.update", [
(u"com.myapp.0.update", {u'product': 0}),
(u"com.myapp.123456.update", {u'product': 123456}),
(u"com.myapp.aaa.update", None),
(u"com.myapp..update", None),
(u"com.myapp.0.delete", None),
]
),
(u"com.myapp.<product:string>.update", [
(u"com.myapp.box.update", {u'product': u'box'}),
(u"com.myapp.123456.update", {u'product': u'123456'}),
(u"com.myapp..update", None),
]
),
(u"com.myapp.<product>.update", [
(u"com.myapp.0.update", {u'product': u'0'}),
(u"com.myapp.abc.update", {u'product': u'abc'}),
(u"com.myapp..update", None),
]
),
(u"com.myapp.<category:string>.<subcategory:string>.list", [
(u"com.myapp.cosmetic.shampoo.list", {u'category': u'cosmetic', u'subcategory': u'shampoo'}),
(u"com.myapp...list", None),
(u"com.myapp.cosmetic..list", None),
(u"com.myapp..shampoo.list", None),
]
)
]
for test in tests:
pat = Pattern(test[0], Pattern.URI_TARGET_ENDPOINT)
for ptest in test[1]:
uri = ptest[0]
kwargs_should = ptest[1]
if kwargs_should is not None:
args_is, kwargs_is = pat.match(uri)
self.assertEqual(kwargs_is, kwargs_should)
else:
self.assertRaises(Exception, pat.match, uri)
class TestDecorators(unittest.TestCase):
def test_decorate_endpoint(self):
@wamp.register(u"com.calculator.square")
def square(_):
"""Do nothing."""
self.assertTrue(hasattr(square, '_wampuris'))
self.assertTrue(type(square._wampuris) == list)
self.assertEqual(len(square._wampuris), 1)
self.assertIsInstance(square._wampuris[0], Pattern)
self.assertTrue(square._wampuris[0].is_endpoint())
self.assertFalse(square._wampuris[0].is_handler())
self.assertFalse(square._wampuris[0].is_exception())
self.assertEqual(square._wampuris[0].uri(), u"com.calculator.square")
self.assertEqual(square._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.register(u"com.myapp.product.<product:int>.update")
def update_product(product=None, label=None):
"""Do nothing."""
self.assertTrue(hasattr(update_product, '_wampuris'))
self.assertTrue(type(update_product._wampuris) == list)
self.assertEqual(len(update_product._wampuris), 1)
self.assertIsInstance(update_product._wampuris[0], Pattern)
self.assertTrue(update_product._wampuris[0].is_endpoint())
self.assertFalse(update_product._wampuris[0].is_handler())
self.assertFalse(update_product._wampuris[0].is_exception())
self.assertEqual(update_product._wampuris[0].uri(), u"com.myapp.product.<product:int>.update")
self.assertEqual(update_product._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.<category:string>.<cid:int>.update")
def update(category=None, cid=None):
"""Do nothing."""
self.assertTrue(hasattr(update, '_wampuris'))
self.assertTrue(type(update._wampuris) == list)
self.assertEqual(len(update._wampuris), 1)
self.assertIsInstance(update._wampuris[0], Pattern)
self.assertTrue(update._wampuris[0].is_endpoint())
self.assertFalse(update._wampuris[0].is_handler())
self.assertFalse(update._wampuris[0].is_exception())
self.assertEqual(update._wampuris[0].uri(), u"com.myapp.<category:string>.<cid:int>.update")
self.assertEqual(update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.circle.<name:string>",
RegisterOptions(match=u"wildcard", details_arg="details"))
def circle(name=None, details=None):
""" Do nothing. """
self.assertTrue(hasattr(circle, '_wampuris'))
self.assertTrue(type(circle._wampuris) == list)
self.assertEqual(len(circle._wampuris), 1)
self.assertIsInstance(circle._wampuris[0], Pattern)
self.assertIsInstance(circle._wampuris[0].options, RegisterOptions)
self.assertEqual(circle._wampuris[0].options.match, u"wildcard")
self.assertEqual(circle._wampuris[0].options.details_arg, "details")
self.assertTrue(circle._wampuris[0].is_endpoint())
self.assertFalse(circle._wampuris[0].is_handler())
self.assertFalse(circle._wampuris[0].is_exception())
self.assertEqual(circle._wampuris[0].uri(), u"com.myapp.circle.<name:string>")
self.assertEqual(circle._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.something..update",
RegisterOptions(match=u"wildcard", details_arg="details"))
def something(dynamic=None, details=None):
""" Do nothing. """
self.assertTrue(hasattr(something, '_wampuris'))
self.assertTrue(type(something._wampuris) == list)
self.assertEqual(len(something._wampuris), 1)
self.assertIsInstance(something._wampuris[0], Pattern)
self.assertIsInstance(something._wampuris[0].options, RegisterOptions)
self.assertEqual(something._wampuris[0].options.match, u"wildcard")
self.assertEqual(something._wampuris[0].options.details_arg, "details")
self.assertTrue(something._wampuris[0].is_endpoint())
self.assertFalse(something._wampuris[0].is_handler())
self.assertFalse(something._wampuris[0].is_exception())
self.assertEqual(something._wampuris[0].uri(), u"com.myapp.something..update")
self.assertEqual(something._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_decorate_handler(self):
@wamp.subscribe(u"com.myapp.on_shutdown")
def on_shutdown():
"""Do nothing."""
self.assertTrue(hasattr(on_shutdown, '_wampuris'))
self.assertTrue(type(on_shutdown._wampuris) == list)
self.assertEqual(len(on_shutdown._wampuris), 1)
self.assertIsInstance(on_shutdown._wampuris[0], Pattern)
self.assertFalse(on_shutdown._wampuris[0].is_endpoint())
self.assertTrue(on_shutdown._wampuris[0].is_handler())
self.assertFalse(on_shutdown._wampuris[0].is_exception())
self.assertEqual(on_shutdown._wampuris[0].uri(), u"com.myapp.on_shutdown")
self.assertEqual(on_shutdown._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.subscribe(u"com.myapp.product.<product:int>.on_update")
def on_product_update(product=None, label=None):
"""Do nothing."""
self.assertTrue(hasattr(on_product_update, '_wampuris'))
self.assertTrue(type(on_product_update._wampuris) == list)
self.assertEqual(len(on_product_update._wampuris), 1)
self.assertIsInstance(on_product_update._wampuris[0], Pattern)
self.assertFalse(on_product_update._wampuris[0].is_endpoint())
self.assertTrue(on_product_update._wampuris[0].is_handler())
self.assertFalse(on_product_update._wampuris[0].is_exception())
self.assertEqual(on_product_update._wampuris[0].uri(), u"com.myapp.product.<product:int>.on_update")
self.assertEqual(on_product_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.subscribe(u"com.myapp.<category:string>.<cid:int>.on_update")
def on_update(category=None, cid=None, label=None):
"""Do nothing."""
self.assertTrue(hasattr(on_update, '_wampuris'))
self.assertTrue(type(on_update._wampuris) == list)
self.assertEqual(len(on_update._wampuris), 1)
self.assertIsInstance(on_update._wampuris[0], Pattern)
self.assertFalse(on_update._wampuris[0].is_endpoint())
self.assertTrue(on_update._wampuris[0].is_handler())
self.assertFalse(on_update._wampuris[0].is_exception())
self.assertEqual(on_update._wampuris[0].uri(), u"com.myapp.<category:string>.<cid:int>.on_update")
self.assertEqual(on_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.subscribe(u"com.myapp.on.<event:string>",
SubscribeOptions(match=u"wildcard", details_arg="details"))
def on_event(event=None, details=None):
""" Do nothing. """
self.assertTrue(hasattr(on_event, '_wampuris'))
self.assertTrue(type(on_event._wampuris) == list)
self.assertEqual(len(on_event._wampuris), 1)
self.assertIsInstance(on_event._wampuris[0], Pattern)
self.assertIsInstance(on_event._wampuris[0].options, SubscribeOptions)
self.assertEqual(on_event._wampuris[0].options.match, u"wildcard")
self.assertEqual(on_event._wampuris[0].options.details_arg, "details")
self.assertFalse(on_event._wampuris[0].is_endpoint())
self.assertTrue(on_event._wampuris[0].is_handler())
self.assertFalse(on_event._wampuris[0].is_exception())
self.assertEqual(on_event._wampuris[0].uri(), u"com.myapp.on.<event:string>")
self.assertEqual(on_event._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_decorate_exception(self):
@wamp.error(u"com.myapp.error")
class AppError(Exception):
"""Do nothing."""
self.assertTrue(hasattr(AppError, '_wampuris'))
self.assertTrue(type(AppError._wampuris) == list)
self.assertEqual(len(AppError._wampuris), 1)
self.assertIsInstance(AppError._wampuris[0], Pattern)
self.assertFalse(AppError._wampuris[0].is_endpoint())
self.assertFalse(AppError._wampuris[0].is_handler())
self.assertTrue(AppError._wampuris[0].is_exception())
self.assertEqual(AppError._wampuris[0].uri(), u"com.myapp.error")
self.assertEqual(AppError._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.error(u"com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
"""Do nothing."""
self.assertTrue(hasattr(ProductInactiveError, '_wampuris'))
self.assertTrue(type(ProductInactiveError._wampuris) == list)
self.assertEqual(len(ProductInactiveError._wampuris), 1)
self.assertIsInstance(ProductInactiveError._wampuris[0], Pattern)
self.assertFalse(ProductInactiveError._wampuris[0].is_endpoint())
self.assertFalse(ProductInactiveError._wampuris[0].is_handler())
self.assertTrue(ProductInactiveError._wampuris[0].is_exception())
self.assertEqual(ProductInactiveError._wampuris[0].uri(), u"com.myapp.product.<product:int>.product_inactive")
self.assertEqual(ProductInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.error(u"com.myapp.<category:string>.<product:int>.inactive")
class ObjectInactiveError(Exception):
"""Do nothing."""
self.assertTrue(hasattr(ObjectInactiveError, '_wampuris'))
self.assertTrue(type(ObjectInactiveError._wampuris) == list)
self.assertEqual(len(ObjectInactiveError._wampuris), 1)
self.assertIsInstance(ObjectInactiveError._wampuris[0], Pattern)
self.assertFalse(ObjectInactiveError._wampuris[0].is_endpoint())
self.assertFalse(ObjectInactiveError._wampuris[0].is_handler())
self.assertTrue(ObjectInactiveError._wampuris[0].is_exception())
self.assertEqual(ObjectInactiveError._wampuris[0].uri(), u"com.myapp.<category:string>.<product:int>.inactive")
self.assertEqual(ObjectInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_match_decorated_endpoint(self):
@wamp.register(u"com.calculator.square")
def square(x):
return x
args, kwargs = square._wampuris[0].match(u"com.calculator.square")
self.assertEqual(square(666, **kwargs), 666)
@wamp.register(u"com.myapp.product.<product:int>.update")
def update_product(product=None, label=None):
return product, label
args, kwargs = update_product._wampuris[0].match(u"com.myapp.product.123456.update")
kwargs['label'] = "foobar"
self.assertEqual(update_product(**kwargs), (123456, "foobar"))
@wamp.register(u"com.myapp.<category:string>.<cid:int>.update")
def update(category=None, cid=None, label=None):
return category, cid, label
args, kwargs = update._wampuris[0].match(u"com.myapp.product.123456.update")
kwargs['label'] = "foobar"
self.assertEqual(update(**kwargs), ("product", 123456, "foobar"))
def test_match_decorated_handler(self):
@wamp.subscribe(u"com.myapp.on_shutdown")
def on_shutdown():
pass
args, kwargs = on_shutdown._wampuris[0].match(u"com.myapp.on_shutdown")
self.assertEqual(on_shutdown(**kwargs), None)
@wamp.subscribe(u"com.myapp.product.<product:int>.on_update")
def on_product_update(product=None, label=None):
return product, label
args, kwargs = on_product_update._wampuris[0].match(u"com.myapp.product.123456.on_update")
kwargs['label'] = "foobar"
self.assertEqual(on_product_update(**kwargs), (123456, "foobar"))
@wamp.subscribe(u"com.myapp.<category:string>.<cid:int>.on_update")
def on_update(category=None, cid=None, label=None):
return category, cid, label
args, kwargs = on_update._wampuris[0].match(u"com.myapp.product.123456.on_update")
kwargs['label'] = "foobar"
self.assertEqual(on_update(**kwargs), ("product", 123456, "foobar"))
def test_match_decorated_exception(self):
@wamp.error(u"com.myapp.error")
class AppError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args
args, kwargs = AppError._wampuris[0].match(u"com.myapp.error")
# noinspection PyArgumentList
self.assertEqual(AppError(u"fuck", **kwargs), AppError(u"fuck"))
@wamp.error(u"com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, msg, product=None):
Exception.__init__(self, msg)
self.product = product
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args and \
self.product == other.product
args, kwargs = ProductInactiveError._wampuris[0].match(u"com.myapp.product.123456.product_inactive")
self.assertEqual(ProductInactiveError("fuck", **kwargs), ProductInactiveError("fuck", 123456))
@wamp.error(u"com.myapp.<category:string>.<product:int>.inactive")
class ObjectInactiveError(Exception):
def __init__(self, msg, category=None, product=None):
Exception.__init__(self, msg)
self.category = category
self.product = product
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args and \
self.category == other.category and \
self.product == other.product
args, kwargs = ObjectInactiveError._wampuris[0].match(u"com.myapp.product.123456.inactive")
self.assertEqual(ObjectInactiveError("fuck", **kwargs), ObjectInactiveError("fuck", "product", 123456))
class KwException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
self.kwargs = kwargs
# what if the WAMP error message received
# contains args/kwargs that cannot be
# consumed by the constructor of the exception
# class defined for the WAMP error URI?
# 1. we can bail out (but we are already signaling an error)
# 2. we can require a generic constructor
# 3. we can map only unconsumed args/kwargs to generic attributes
# 4. we can silently drop unconsumed args/kwargs
class MockSession(object):
def __init__(self):
self._ecls_to_uri_pat = {}
self._uri_to_ecls = {}
def define(self, exception, error=None):
if error is None:
assert(hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = exception._wampuris
self._uri_to_ecls[exception._wampuris[0].uri()] = exception
else:
assert(not hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = [Pattern(error, Pattern.URI_TARGET_HANDLER)]
self._uri_to_ecls[error] = exception
def map_error(self, error, args=None, kwargs=None):
# FIXME:
# 1. map to ecls based on error URI wildcard/prefix
# 2. extract additional args/kwargs from error URI
if error in self._uri_to_ecls:
ecls = self._uri_to_ecls[error]
try:
# the following might fail, eg. TypeError when
# signature of exception constructor is incompatible
# with args/kwargs or when the exception constructor raises
if kwargs:
if args:
exc = ecls(*args, **kwargs)
else:
exc = ecls(**kwargs)
else:
if args:
exc = ecls(*args)
else:
exc = ecls()
except Exception:
# FIXME: log e
exc = KwException(error, *args, **kwargs)
else:
# this never fails
args = args or []
kwargs = kwargs or {}
exc = KwException(error, *args, **kwargs)
return exc
class TestDecoratorsAdvanced(unittest.TestCase):
def test_decorate_exception_non_exception(self):
def test():
# noinspection PyUnusedLocal
@wamp.error(u"com.test.error")
class Foo(object):
pass
self.assertRaises(Exception, test)
def test_decorate_endpoint_multiple(self):
# noinspection PyUnusedLocal
@wamp.register(u"com.oldapp.oldproc")
@wamp.register(u"com.calculator.square")
def square(x):
"""Do nothing."""
self.assertTrue(hasattr(square, '_wampuris'))
self.assertTrue(type(square._wampuris) == list)
self.assertEqual(len(square._wampuris), 2)
for i in range(2):
self.assertIsInstance(square._wampuris[i], Pattern)
self.assertTrue(square._wampuris[i].is_endpoint())
self.assertFalse(square._wampuris[i].is_handler())
self.assertFalse(square._wampuris[i].is_exception())
self.assertEqual(square._wampuris[i]._type, Pattern.URI_TYPE_EXACT)
self.assertEqual(square._wampuris[0].uri(), u"com.calculator.square")
self.assertEqual(square._wampuris[1].uri(), u"com.oldapp.oldproc")
def test_marshal_decorated_exception(self):
@wamp.error(u"com.myapp.error")
class AppError(Exception):
pass
try:
raise AppError("fuck")
except Exception as e:
self.assertEqual(e._wampuris[0].uri(), u"com.myapp.error")
@wamp.error(u"com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, msg, product=None):
Exception.__init__(self, msg)
self.product = product
try:
raise ProductInactiveError("fuck", 123456)
except Exception as e:
self.assertEqual(e._wampuris[0].uri(), u"com.myapp.product.<product:int>.product_inactive")
session = MockSession()
session.define(AppError)
def test_define_exception_undecorated(self):
session = MockSession()
class AppError(Exception):
pass
# defining an undecorated exception requires
# an URI to be provided
self.assertRaises(Exception, session.define, AppError)
session.define(AppError, u"com.myapp.error")
exc = session.map_error(u"com.myapp.error")
self.assertIsInstance(exc, AppError)
def test_define_exception_decorated(self):
session = MockSession()
@wamp.error(u"com.myapp.error")
class AppError(Exception):
pass
# when defining a decorated exception
# an URI must not be provided
self.assertRaises(Exception, session.define, AppError, u"com.myapp.error")
session.define(AppError)
exc = session.map_error(u"com.myapp.error")
self.assertIsInstance(exc, AppError)
def test_map_exception_undefined(self):
session = MockSession()
exc = session.map_error(u"com.myapp.error")
self.assertIsInstance(exc, Exception)
def test_map_exception_args(self):
session = MockSession()
@wamp.error(u"com.myapp.error")
class AppError(Exception):
pass
@wamp.error(u"com.myapp.error.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, product=None):
self.product = product
# define exceptions in mock session
session.define(AppError)
session.define(ProductInactiveError)
for test in [
# (u"com.myapp.foo.error", [], {}, KwException),
(u"com.myapp.error", [], {}, AppError),
(u"com.myapp.error", ["you are doing it wrong"], {}, AppError),
(u"com.myapp.error", ["you are doing it wrong", 1, 2, 3], {}, AppError),
(u"com.myapp.error.product_inactive", [], {}, ProductInactiveError),
(u"com.myapp.error.product_inactive", [], {"product": 123456}, ProductInactiveError),
]:
error, args, kwargs, ecls = test
exc = session.map_error(error, args, kwargs)
self.assertIsInstance(exc, ecls)
self.assertEqual(list(exc.args), args)
|
technologiescollege/Blockly-rduino-communication
|
scripts_XP/Lib/site-packages/autobahn/wamp/test/test_uri_pattern.py
|
Python
|
gpl-3.0
| 24,741 | 0.001495 |
#!/usr/bin/env python
# crc16.py by Bryan G. Olson, 2005
# This module is free software and may be used and
# distributed under the same terms as Python itself.
"""
CRC-16 in Python, as standard as possible. This is
the 'reflected' version, which is usually what people
want. See Ross N. Williams' /A Painless Guide to
CRC error detection algorithms/.
Re-factored by bapril@gmail.com to pass pylint
"""
#from array import array
def crc16(string, value=0):
""" Single-function interface, like gzip module's crc32
"""
    # Note: the incoming ``value`` argument is ignored; the register is reset
    # to 0xffff on every call, so CRC16.update() does not accumulate across calls.
    value = 0xffff
for char in string:
#value = TABLE[ord(char) ^ (value & 0xff)] ^ (value >> 8)
value = value >> 8 ^ TABLE[ ( value ^ ord(char) ) & 0xff ]
value = ~value
value = (value & 0xffff)
return value
class CRC16(object):
""" Class interface, like the Python library's cryptographic
hash functions (which CRC's are definitely not.)
"""
def __init__(self, string=''):
self.val = 0
if string:
self.update(string)
def update(self, string):
""" Append string to CRC
"""
self.val = crc16(string, self.val)
def checksum(self):
""" Returns the current CRC
"""
return chr(self.val >> 8) + chr(self.val & 0xff)
def hexchecksum(self):
""" Returns the current CRC in hex
"""
return '%04x' % self.val
def copy(self):
""" Copy the CRC object
"""
clone = CRC16()
clone.val = self.val
return clone
def get_value(self):
""" Return the raw CRC value
"""
return self.val
# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
# top bit implicit, reflected
TABLE = [ 0x00000, 0x01189, 0x02312, 0x0329B, 0x04624, 0x057AD, 0x06536, \
0x074BF, 0x08C48, 0x09DC1, 0x0AF5A, 0x0BED3, 0x0CA6C, 0x0DBE5, 0x0E97E, \
0x0F8F7, 0x01081, 0x00108, 0x03393, 0x0221A, 0x056A5, 0x0472C, 0x075B7, \
0x0643E, 0x09CC9, 0x08D40, 0x0BFDB, 0x0AE52, 0x0DAED, 0x0CB64, 0x0F9FF, \
0x0E876, 0x02102, 0x0308B, 0x00210, 0x01399, 0x06726, 0x076AF, 0x04434, \
0x055BD, 0x0AD4A, 0x0BCC3, 0x08E58, 0x09FD1, 0x0EB6E, 0x0FAE7, 0x0C87C, \
0x0D9F5, 0x03183, 0x0200A, 0x01291, 0x00318, 0x077A7, 0x0662E, 0x054B5, \
0x0453C, 0x0BDCB, 0x0AC42, 0x09ED9, 0x08F50, 0x0FBEF, 0x0EA66, 0x0D8FD, \
0x0C974, 0x04204, 0x0538D, 0x06116, 0x0709F, 0x00420, 0x015A9, 0x02732, \
0x036BB, 0x0CE4C, 0x0DFC5, 0x0ED5E, 0x0FCD7, 0x08868, 0x099E1, 0x0AB7A, \
0x0BAF3, 0x05285, 0x0430C, 0x07197, 0x0601E, 0x014A1, 0x00528, 0x037B3, \
0x0263A, 0x0DECD, 0x0CF44, 0x0FDDF, 0x0EC56, 0x098E9, 0x08960, 0x0BBFB, \
0x0AA72, 0x06306, 0x0728F, 0x04014, 0x0519D, 0x02522, 0x034AB, 0x00630, \
0x017B9, 0x0EF4E, 0x0FEC7, 0x0CC5C, 0x0DDD5, 0x0A96A, 0x0B8E3, 0x08A78, \
0x09BF1, 0x07387, 0x0620E, 0x05095, 0x0411C, 0x035A3, 0x0242A, 0x016B1, \
0x00738, 0x0FFCF, 0x0EE46, 0x0DCDD, 0x0CD54, 0x0B9EB, 0x0A862, 0x09AF9, \
0x08B70, 0x08408, 0x09581, 0x0A71A, 0x0B693, 0x0C22C, 0x0D3A5, 0x0E13E, \
0x0F0B7, 0x00840, 0x019C9, 0x02B52, 0x03ADB, 0x04E64, 0x05FED, 0x06D76, \
0x07CFF, 0x09489, 0x08500, 0x0B79B, 0x0A612, 0x0D2AD, 0x0C324, 0x0F1BF, \
0x0E036, 0x018C1, 0x00948, 0x03BD3, 0x02A5A, 0x05EE5, 0x04F6C, 0x07DF7, \
0x06C7E, 0x0A50A, 0x0B483, 0x08618, 0x09791, 0x0E32E, 0x0F2A7, 0x0C03C, \
0x0D1B5, 0x02942, 0x038CB, 0x00A50, 0x01BD9, 0x06F66, 0x07EEF, 0x04C74, \
0x05DFD, 0x0B58B, 0x0A402, 0x09699, 0x08710, 0x0F3AF, 0x0E226, 0x0D0BD, \
0x0C134, 0x039C3, 0x0284A, 0x01AD1, 0x00B58, 0x07FE7, 0x06E6E, 0x05CF5, \
0x04D7C, 0x0C60C, 0x0D785, 0x0E51E, 0x0F497, 0x08028, 0x091A1, 0x0A33A, \
0x0B2B3, 0x04A44, 0x05BCD, 0x06956, 0x078DF, 0x00C60, 0x01DE9, 0x02F72, \
0x03EFB, 0x0D68D, 0x0C704, 0x0F59F, 0x0E416, 0x090A9, 0x08120, 0x0B3BB, \
0x0A232, 0x05AC5, 0x04B4C, 0x079D7, 0x0685E, 0x01CE1, 0x00D68, 0x03FF3, \
0x02E7A, 0x0E70E, 0x0F687, 0x0C41C, 0x0D595, 0x0A12A, 0x0B0A3, 0x08238, \
0x093B1, 0x06B46, 0x07ACF, 0x04854, 0x059DD, 0x02D62, 0x03CEB, 0x00E70, \
0x01FF9, 0x0F78F, 0x0E606, 0x0D49D, 0x0C514, 0x0B1AB, 0x0A022, 0x092B9, \
0x08330, 0x07BC7, 0x06A4E, 0x058D5, 0x0495C, 0x03DE3, 0x02C6A, 0x01EF1, \
0x00F78]
CRC = CRC16()
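# Illustrative usage (editor's addition, not part of the original module):
# the one-shot function and the class interface yield the same checksum for a
# single buffer, since crc16() restarts from the 0xffff preset on every call.
if __name__ == '__main__':
    SAMPLE = '123456789'
    print('function interface: %04x' % crc16(SAMPLE))
    DIGEST = CRC16(SAMPLE)
    print('class interface:    %s' % DIGEST.hexchecksum())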
|
bapril/cfa_635
|
cfa635/crc16.py
|
Python
|
apache-2.0
| 4,254 | 0.019041 |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from datetime import date
from ggrc import db
from ggrc import builder
from ggrc_workflows.models import (Workflow, TaskGroup, TaskGroupTask,
TaskGroupObject, Cycle)
from tests.ggrc.generator import Generator
import random
import copy
class WorkflowsGenerator(Generator):
def generate_workflow(self, data={}):
""" create a workflow with dict data
return: wf if it was created, or response otherwise
"""
obj_name = "workflow"
data = copy.deepcopy(data)
tgs = data.pop("task_groups", [])
wf = Workflow(title="wf " + self.random_str())
obj_dict = self.obj_to_dict(wf, obj_name)
obj_dict[obj_name].update(data)
response, workflow = self.generate(Workflow, obj_name, obj_dict)
for tg in tgs:
self.generate_task_group(workflow, tg)
return response, workflow
def generate_task_group(self, workflow=None, data={}):
if not workflow:
_, workflow = self.generate_workflow()
data = copy.deepcopy(data)
tgts = data.pop("task_group_tasks", [])
tgos = data.pop("task_group_objects", [])
obj_name = "task_group"
workflow = self._session_add(workflow)
tg = TaskGroup(
title="tg " + self.random_str(),
workflow_id=workflow.id,
context_id=workflow.context.id,
contact_id=1
)
obj_dict = self.obj_to_dict(tg, obj_name)
obj_dict[obj_name].update(data)
response, task_group = self.generate(TaskGroup, obj_name, obj_dict)
for tgt in tgts:
self.generate_task_group_task(task_group, tgt)
for tgo in tgos:
self.generate_task_group_object(task_group, tgo)
return response, task_group
def generate_task_group_task(self, task_group=None, data={}):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
default_start = self.random_date()
default_end = self.random_date(default_start, date.today())
day_range = 5 if task_group.workflow.frequency == "weekly" else 31
obj_name = "task_group_task"
tgt = TaskGroupTask(
task_group_id=task_group.id,
context_id=task_group.context.id,
title="tgt " + self.random_str(),
start_date=default_start,
end_date=default_end,
relative_start_day=random.randrange(1, day_range),
relative_start_month=random.randrange(1, 12),
relative_end_day=random.randrange(1, day_range),
relative_end_month=random.randrange(1, 12),
contact_id=1
)
obj_dict = self.obj_to_dict(tgt, obj_name)
obj_dict[obj_name].update(data)
return self.generate(TaskGroupTask, obj_name, obj_dict)
def generate_task_group_object(self, task_group=None, obj=None):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
obj = self._session_add(obj)
obj_name = "task_group_object"
tgo = TaskGroupObject(
object_id=obj.id,
object=obj,
task_group_id=task_group.id,
context_id=task_group.context.id
)
obj_dict = self.obj_to_dict(tgo, obj_name)
return self.generate(TaskGroupObject, obj_name, obj_dict)
def generate_cycle(self, workflow=None):
if not workflow:
_, workflow = self.generate_workflow()
workflow = self._session_add(workflow) # this should be nicer
obj_name = "cycle"
obj_dict = {
obj_name: {
"workflow": {
"id": workflow.id,
"type": workflow.__class__.__name__,
"href": "/api/workflows/%d" % workflow.id
},
"context": {
"id": workflow.context.id,
"type": workflow.context.__class__.__name__,
"href": "/api/workflows/%d" % workflow.context.id
},
"autogenerate": "true"
}
}
return self.generate(Cycle, obj_name, obj_dict)
def activate_workflow(self, workflow):
workflow = self._session_add(workflow)
return self.modify_workflow(workflow, {
"status": "Active",
"recurrences": workflow.frequency != "one_time"
})
def modify_workflow(self, wf=None, data={}):
if not wf:
_, wf = self.generate_workflow()
wf = self._session_add(wf)
obj_name = "workflow"
obj_dict = builder.json.publish(wf)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
default = {obj_name: obj_dict}
response, workflow = self.modify(wf, obj_name, default)
return response, workflow
def modify_object(self, obj, data={}):
obj = self._session_add(obj)
obj_name = obj._inflector.table_singular
obj_dict = builder.json.publish(obj)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
obj_data = {obj_name: obj_dict}
response, generated_object = self.modify(obj, obj_name, obj_data)
return response, generated_object
def _session_add(self, obj):
""" Sometimes tests throw conflicting state present error."""
try:
db.session.add(obj)
return obj
except:
return obj.__class__.query.get(obj.id)
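# Editor's illustrative sketch (not part of the original module). It shows the
# shape of the nested dicts consumed by generate_workflow above; the field names
# ("frequency", "task_groups", "task_group_tasks", "title") all appear in the
# generator code, but this helper itself is hypothetical and assumes an active
# ggrc app/db context, as in the existing test suite.
def _example_generate_weekly_workflow():
  generator = WorkflowsGenerator()
  _, workflow = generator.generate_workflow(data={
      "frequency": "weekly",
      "task_groups": [{
          "title": "example task group",
          "task_group_tasks": [{"title": "example task"}],
      }],
  })
  return generator.activate_workflow(workflow)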
|
hyperNURb/ggrc-core
|
src/tests/ggrc_workflows/generator.py
|
Python
|
apache-2.0
| 5,290 | 0.00397 |
import paho.mqtt.client as mqtt
import time
import json
import sys
import usbtmc
#should be unique for each Ioc
clientId = "itsPowerMeter01Ioc"
subscribeTopic = "itsPowerMeter01/set/#"
publishtopic = "itsPowerMeter01/get"
periodicPollPeriodSecs = 1
# Power meter initialization
usbInst = usbtmc.Instrument(2733, 27)
usbCommand = "SYST:PRES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
time.sleep(2)
usbCommand = "*RCL 1"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "INIT:ALL:CONT ON"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
# usually leave this alone
subscribeQos = 0
publishQos = 0
brokerAddress = "broker.shiftr.io"
brokerPort = 1883
brokertimeout = 60
def getDataFromDevice():
    # executed on each periodic poll: read both power channels from the
    # instrument and return the corrected readings as a JSON string
usbCommand = "SENS1:AVER:RES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "FETC1?"
print "Sending " + usbCommand + " to device"
power1 = usbInst.ask(usbCommand)
print "Received " + power1 + " from device"
power1f = float(power1) + 43.9
power1 = str(power1f)
usbCommand = "SENS2:AVER:RES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "FETC2?"
print "Sending " + usbCommand + " to device"
power2 = usbInst.ask(usbCommand)
print "Received " + power2 + " from device"
power2f = float(power2) + 59.5 + 7.2
power2 = str(power2f)
data = {"power1": power1, "power2": power2}
return json.dumps(data)
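# Editor's note (illustrative, not part of the original script): the string
# returned above is published verbatim to the "itsPowerMeter01/get" topic and
# has the form '{"power1": "<reading + offset>", "power2": "<reading + offsets>"}',
# where the readings are presumably in dBm, corrected by the fixed offsets applied above.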
def handleIncomingMessage(topic, message):
# handle messages from broker
# if "/set/init" in topic:
return
userName = sys.argv[1]
userKey = sys.argv[2]
incomingMessageTopic = ""
incomingMessage = None
newIncomingMessage = True
def on_connect(client, userdata, rc):
global brokerAddress
global subscribeTopic
print("Connected to: " + brokerAddress + " with result code "+str(rc))
client.subscribe(subscribeTopic)
print("Subscribing to: " + subscribeTopic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print clientId + " received message on topic: " + msg.topic
global incomingMessageTopic
global incomingMessage
global newIncomingMessage
incomingMessageTopic = msg.topic
incomingMessage = msg.payload
newIncomingMessage = True
client = mqtt.Client(client_id=clientId, clean_session=False, userdata=None)
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(userName, userKey)
client.connect(brokerAddress, brokerPort, brokertimeout)
client.loop_start()
while True:
time.sleep(periodicPollPeriodSecs)
dataFromDevice = getDataFromDevice()
if len(dataFromDevice) > 0:
client.publish(publishtopic, dataFromDevice, publishQos, True)
if newIncomingMessage:
handleIncomingMessage(incomingMessageTopic, incomingMessage)
newIncomingMessage = False
|
se-esss-litterbox/ess-its
|
IceCubeIocPython/OldItsPowerMeterIoc.py
|
Python
|
gpl-3.0
| 3,050 | 0.003279 |
import datetime
'''Boxing Day is celebrated on the 26th of December each year.
If Boxing Day falls on a weekend it is observed on the 28th instead, i.e. it is
moved to the following Monday (after a Saturday) or Tuesday (after a Sunday).
'''
def get_holiday(year):
'''
Calculate the observed date of Boxing day for the given year
:param year: int
:return: datetime object set for the observed date
'''
if year < 1:
raise ValueError("Year must be > 1")
DECEMBER = 12
if datetime.date(year, DECEMBER, 26).weekday() in (5,6):
# Christmas_and_New_Year falls on the weekend
return datetime.date(year, DECEMBER, 28)
else:
return datetime.date(year, DECEMBER, 26)
def get_actual(year):
'''
Boxing Day is always celebrated on the 26th of December
:param year: int
    :return: datetime object set for the actual date
    '''
    if year < 1:
        raise ValueError("Year must be >= 1")
DECEMBER = 12
return datetime.date(year, DECEMBER, 26)
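# Illustrative usage (editor's addition, not part of the original module):
if __name__ == '__main__':
    # 26 December 2015 fell on a Saturday, so the observed holiday shifts to the 28th.
    print(get_holiday(2015))   # 2015-12-28
    print(get_actual(2015))    # 2015-12-26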
|
shaneHowearth/Statutory_Holidays
|
Christmas_and_New_Year/Boxing_day.py
|
Python
|
gpl-2.0
| 1,049 | 0.003813 |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import filelib
import os
from genomicode import jmath
in_data = antecedents
matrix = [x for x in filelib.read_cols(in_data.identifier)]
matrix = [x[1:] for x in matrix]
matrix = jmath.transpose(matrix)
sample = matrix[0][1:]
data = matrix[1:]
if not os.path.exists(outfile):
os.mkdir(outfile)
for one_data in data:
value = one_data[1:]
value = [float(i) for i in value]
pair = [(value[i], sample[i]) for i in range(len(value))]
pair.sort()
gene_value = [i[0] for i in pair]
label = [i[1] for i in pair]
ylabel = one_data[0]
from genomicode import mplgraph
fig = mplgraph.barplot(gene_value,
box_label=label,
xtick_rotation=90,
xlabel='sample',
ylabel=ylabel)
output = os.path.join(outfile, ylabel)
fig.savefig(output + '.png')
assert filelib.exists_nz(outfile), (
'the output file %s for plot_geneset_score_bar fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'geneset_plot_' + original_file + '.png'
return filename
|
jefftc/changlab
|
Betsy/Betsy/modules/plot_geneset_score_bar.py
|
Python
|
mit
| 1,784 | 0.006726 |
test = {
'name': 'Question 4',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> rows7 = {('Joey', 7), ('Henry', 7)}
>>> rows6 = {('Ian', 6), ('Joyce', 6)}
>>> q4_answer[0] == ("John", 8)
True
>>> all([tuple(row) in rows7 for row in q4_answer[1:3]])
True
>>> all([tuple(row) in rows6 for row in q4_answer[3:5]])
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
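# Editor's note (illustrative, not part of the original test): an answer that
# satisfies the cases above would look like
#     q4_answer = [("John", 8), ("Joey", 7), ("Henry", 7), ("Ian", 6), ("Joyce", 6)]
# where the two rows within each score group may appear in either order.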
|
DS-100/sp17-materials
|
sp17/labs/lab06/ok_tests/q4.py
|
Python
|
gpl-3.0
| 642 | 0.003115 |
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from zerver.models import Message, Realm, Recipient, Stream, Subscription, get_realm
class Command(BaseCommand):
help = "Generate statistics on the streams for a realm."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('realms', metavar='<realm>', nargs='*',
help="realm to generate statistics for")
def handle(self, *args: Any, **options: str) -> None:
if options['realms']:
try:
realms = [get_realm(string_id) for string_id in options['realms']]
except Realm.DoesNotExist as e:
raise CommandError(e)
else:
realms = Realm.objects.all()
for realm in realms:
streams = Stream.objects.filter(realm=realm).exclude(Q(name__istartswith="tutorial-"))
# private stream count
private_count = 0
# public stream count
public_count = 0
for stream in streams:
if stream.invite_only:
private_count += 1
else:
public_count += 1
print("------------")
print(realm.string_id, end=' ')
print("{:>10} {} public streams and".format("(", public_count), end=' ')
print(f"{private_count} private streams )")
print("------------")
print("{:>25} {:>15} {:>10} {:>12}".format("stream", "subscribers", "messages", "type"))
for stream in streams:
if stream.invite_only:
stream_type = 'private'
else:
stream_type = 'public'
print(f"{stream.name:>25}", end=' ')
recipient = Recipient.objects.filter(type=Recipient.STREAM, type_id=stream.id)
print("{:10}".format(len(Subscription.objects.filter(recipient=recipient,
active=True))), end=' ')
num_messages = len(Message.objects.filter(recipient=recipient))
print(f"{num_messages:12}", end=' ')
print(f"{stream_type:>15}")
print("")
|
showell/zulip
|
analytics/management/commands/stream_stats.py
|
Python
|
apache-2.0
| 2,358 | 0.003393 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint
from frappe import throw, _
from frappe.model.document import Document
class RootNotEditable(frappe.ValidationError): pass
class Account(Document):
nsm_parent_field = 'parent_account'
def onload(self):
frozen_accounts_modifier = frappe.db.get_value("Accounts Settings", "Accounts Settings",
"frozen_accounts_modifier")
if not frozen_accounts_modifier or frozen_accounts_modifier in frappe.get_roles():
self.get("__onload").can_freeze_account = True
def autoname(self):
self.name = self.account_name.strip() + ' - ' + \
frappe.db.get_value("Company", self.company, "abbr")
def validate(self):
self.validate_parent()
self.validate_root_details()
self.set_root_and_report_type()
self.validate_mandatory()
self.validate_warehouse_account()
self.validate_frozen_accounts_modifier()
self.validate_balance_must_be_debit_or_credit()
self.validate_account_currency()
def validate_parent(self):
"""Fetch Parent Details and validate parent account"""
if self.parent_account:
par = frappe.db.get_value("Account", self.parent_account,
["name", "is_group", "company"], as_dict=1)
if not par:
throw(_("Account {0}: Parent account {1} does not exist").format(self.name, self.parent_account))
elif par.name == self.name:
throw(_("Account {0}: You can not assign itself as parent account").format(self.name))
elif not par.is_group:
throw(_("Account {0}: Parent account {1} can not be a ledger").format(self.name, self.parent_account))
elif par.company != self.company:
throw(_("Account {0}: Parent account {1} does not belong to company: {2}")
.format(self.name, self.parent_account, self.company))
def set_root_and_report_type(self):
if self.parent_account:
par = frappe.db.get_value("Account", self.parent_account, ["report_type", "root_type"], as_dict=1)
if par.report_type:
self.report_type = par.report_type
if par.root_type:
self.root_type = par.root_type
if self.is_group:
db_value = frappe.db.get_value("Account", self.name, ["report_type", "root_type"], as_dict=1)
if db_value:
if self.report_type != db_value.report_type:
frappe.db.sql("update `tabAccount` set report_type=%s where lft > %s and rgt < %s",
(self.report_type, self.lft, self.rgt))
if self.root_type != db_value.root_type:
frappe.db.sql("update `tabAccount` set root_type=%s where lft > %s and rgt < %s",
(self.root_type, self.lft, self.rgt))
def validate_root_details(self):
        # an existing account without a parent is a root account and must not be edited
if frappe.db.exists("Account", self.name):
if not frappe.db.get_value("Account", self.name, "parent_account"):
throw(_("Root cannot be edited."), RootNotEditable)
def validate_frozen_accounts_modifier(self):
old_value = frappe.db.get_value("Account", self.name, "freeze_account")
if old_value and old_value != self.freeze_account:
frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None, 'frozen_accounts_modifier')
if not frozen_accounts_modifier or \
frozen_accounts_modifier not in frappe.get_roles():
throw(_("You are not authorized to set Frozen value"))
def validate_balance_must_be_debit_or_credit(self):
from erpnext.accounts.utils import get_balance_on
if not self.get("__islocal") and self.balance_must_be:
account_balance = get_balance_on(self.name)
if account_balance > 0 and self.balance_must_be == "Credit":
frappe.throw(_("Account balance already in Debit, you are not allowed to set 'Balance Must Be' as 'Credit'"))
elif account_balance < 0 and self.balance_must_be == "Debit":
frappe.throw(_("Account balance already in Credit, you are not allowed to set 'Balance Must Be' as 'Debit'"))
def validate_account_currency(self):
if not self.account_currency:
self.account_currency = frappe.db.get_value("Company", self.company, "default_currency")
elif self.account_currency != frappe.db.get_value("Account", self.name, "account_currency"):
if frappe.db.get_value("GL Entry", {"account": self.name}):
frappe.throw(_("Currency can not be changed after making entries using some other currency"))
def convert_group_to_ledger(self):
if self.check_if_child_exists():
throw(_("Account with child nodes cannot be converted to ledger"))
elif self.check_gle_exists():
throw(_("Account with existing transaction cannot be converted to ledger"))
else:
self.is_group = 0
self.save()
return 1
def convert_ledger_to_group(self):
if self.check_gle_exists():
throw(_("Account with existing transaction can not be converted to group."))
elif self.account_type:
throw(_("Cannot covert to Group because Account Type is selected."))
else:
self.is_group = 1
self.save()
return 1
	# Check if any GL entries (i.e. posted transactions) exist against this account
def check_gle_exists(self):
return frappe.db.get_value("GL Entry", {"account": self.name})
def check_if_child_exists(self):
return frappe.db.sql("""select name from `tabAccount` where parent_account = %s
and docstatus != 2""", self.name)
def validate_mandatory(self):
if not self.report_type:
throw(_("Report Type is mandatory"))
if not self.root_type:
throw(_("Root Type is mandatory"))
def validate_warehouse_account(self):
if not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
return
if self.account_type == "Warehouse":
if not self.warehouse:
throw(_("Warehouse is mandatory if account type is Warehouse"))
old_warehouse = cstr(frappe.db.get_value("Account", self.name, "warehouse"))
if old_warehouse != cstr(self.warehouse):
if old_warehouse:
self.validate_warehouse(old_warehouse)
if self.warehouse:
self.validate_warehouse(self.warehouse)
elif self.warehouse:
self.warehouse = None
def validate_warehouse(self, warehouse):
if frappe.db.get_value("Stock Ledger Entry", {"warehouse": warehouse}):
throw(_("Stock entries exist against warehouse {0}, hence you cannot re-assign or modify Warehouse").format(warehouse))
def update_nsm_model(self):
"""update lft, rgt indices for nested set model"""
import frappe
import frappe.utils.nestedset
frappe.utils.nestedset.update_nsm(self)
def on_update(self):
self.update_nsm_model()
def validate_trash(self):
"""checks gl entries and if child exists"""
if not self.parent_account:
throw(_("Root account can not be deleted"))
if self.check_gle_exists():
throw(_("Account with existing transaction can not be deleted"))
if self.check_if_child_exists():
throw(_("Child account exists for this account. You can not delete this account."))
def on_trash(self):
self.validate_trash()
self.update_nsm_model()
def before_rename(self, old, new, merge=False):
# Add company abbr if not provided
from erpnext.setup.doctype.company.company import get_name_with_abbr
new_account = get_name_with_abbr(new, self.company)
# Validate properties before merging
if merge:
if not frappe.db.exists("Account", new):
throw(_("Account {0} does not exist").format(new))
val = list(frappe.db.get_value("Account", new_account,
["is_group", "root_type", "company"]))
if val != [self.is_group, self.root_type, self.company]:
throw(_("""Merging is only possible if following properties are same in both records. Is Group, Root Type, Company"""))
return new_account
def after_rename(self, old, new, merge=False):
if not merge:
frappe.db.set_value("Account", new, "account_name",
" - ".join(new.split(" - ")[:-1]))
else:
from frappe.utils.nestedset import rebuild_tree
rebuild_tree("Account", "parent_account")
def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name from tabAccount
where is_group = 1 and docstatus != 2 and company = %s
and %s like %s order by name limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, start, page_len), as_list=1)
def get_account_currency(account):
"""Helper function to get account currency"""
if not account:
return
def generator():
account_currency, company = frappe.db.get_value("Account", account, ["account_currency", "company"])
if not account_currency:
account_currency = frappe.db.get_value("Company", company, "default_currency")
return account_currency
return frappe.local_cache("account_currency", account, generator)
|
hatwar/buyback-erpnext
|
erpnext/accounts/doctype/account/account.py
|
Python
|
agpl-3.0
| 8,576 | 0.024254 |
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
#-----------------------------GEOM----------------------------------------
#----------Vertexes------------
p1 = geompy.MakeVertex(20.0,30.0,40.0)
p2 = geompy.MakeVertex(90.0,80.0,0.0)
p3 = geompy.MakeVertex(30.0,80.0,200.0)
#----------Edges---------------
e1 = geompy.MakeEdge(p1,p2)
e2 = geompy.MakeEdge(p2,p3)
e3 = geompy.MakeEdge(p3,p1)
#----------Wire----------------
ListOfEdges = []
ListOfEdges.append(e3)
ListOfEdges.append(e2)
ListOfEdges.append(e1)
wire1 = geompy.MakeWire(ListOfEdges)
#----------Face----------------
WantPlanarFace = 1
face1 = geompy.MakeFace(wire1,WantPlanarFace)
Id_face1 = geompy.addToStudy(face1,"Face1")
#-----------------------------SMESH-------------------------------------------
# -- Init mesh --
plane_mesh = salome.IDToObject( Id_face1)
mesh = smesh.Mesh(plane_mesh, "Mesh_1")
print"---------------------Hypothesis and Algorithms"
#---------------- NumberOfSegments
numberOfSegment = 9
algoWireDes = mesh.Segment()
listHyp = algoWireDes.GetCompatibleHypothesis()
print algoWireDes.GetName()
algoWireDes.SetName("Ware descritisation")
hypNbSeg = algoWireDes.NumberOfSegments(numberOfSegment)
print hypNbSeg.GetName()
print hypNbSeg.GetNumberOfSegments()
smesh.SetName(hypNbSeg, "Nb. Segments")
#--------------------------Max. Element Area
maxElementArea = 200
algoMef = mesh.Triangle()
listHyp = algoMef.GetCompatibleHypothesis()
print algoMef.GetName()
algoMef.SetName("Triangle (Mefisto)")
hypArea200 = algoMef.MaxElementArea(maxElementArea)
print hypArea200.GetName()
print hypArea200.GetMaxElementArea()
smesh.SetName(hypArea200, "Max. Element Area")
print "---------------------Compute the mesh"
ret = mesh.Compute()
print ret
salome.sg.updateObjBrowser(1)
|
FedoraScientific/salome-smesh
|
src/SMESH_SWIG/PAL_MESH_041_mesh.py
|
Python
|
lgpl-2.1
| 2,975 | 0.009748 |
# codecademy course answer
parrot = "Norwegian Blue"
print len(parrot)
|
nurhandipa/python
|
codecademy/string_methods.py
|
Python
|
gpl-3.0
| 72 | 0 |
#!/usr/bin/python2
# Client code for Update Agent
# Copyright (c) 1999--2018 Red Hat, Inc. Distributed under GPLv2.
#
# Author: Adrian Likins <alikins@redhat.com>
#
import os
__rhnexport__ = [
'reboot']
from up2date_client import up2dateLog
from up2date_client import config
cfg = config.initUp2dateConfig()
log = up2dateLog.initLog()
# action version we understand
ACTION_VERSION = 2
def reboot(test=None, cache_only=None):
if cache_only:
return (0, "no-ops for caching", {})
if cfg['noReboot']:
return (38, "Up2date is configured not to allow reboots", {})
pid = os.fork()
data = {'version': '0'}
reboot_message = 'Reboot of system "' + os.uname()[1] + '" initiated by Spacewalk reboot action.'
if not pid:
try:
if test:
os.execvp("/sbin/shutdown", ['/sbin/shutdown','-r','-k', '+3', reboot_message])
else:
os.execvp("/sbin/shutdown", ['/sbin/shutdown','-r', '+3', reboot_message])
except OSError:
data['name'] = "reboot.reboot.shutdown_failed"
return (34, "Could not execute /sbin/shutdown", data)
log.log_me("Rebooting the system now")
# no point in waiting around
return (0, "Reboot sucessfully started", data)
def main():
print(reboot(test=1))
if __name__ == "__main__":
main()
|
mcalmer/spacewalk
|
client/rhel/rhn-client-tools/src/actions/reboot.py
|
Python
|
gpl-2.0
| 1,358 | 0.005891 |
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
__all__ = ['config']
|
helix84/activae
|
deployment/__init__.py
|
Python
|
bsd-3-clause
| 3,590 | 0 |
import logging
import sys
from argparse2tool import load_argparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Arg2GxmlParser:
def __init__(self):
ap = load_argparse() # avoid circular imports
help_text = (
"argparse2tool forms Galaxy XML and CWL tools from Python scripts.\n"
"You are currently using the Galaxy XML invocation which may have different options from the CWL invocation."
)
arg2tool_parser = ap.ArgumentParser(
prog=sys.argv[0], description=help_text,
formatter_class=ap.RawDescriptionHelpFormatter, add_help=False
)
arg2tool_parser.add_argument('--help', help='Show this help message and exit', action='help')
self.parser = arg2tool_parser
def process_arguments(self):
self.parser.add_argument('--generate_galaxy_xml', action='store_true')
self.parser.add_argument('--command', action='store', default="")
return vars(self.parser.parse_args())
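# Illustrative usage (editor's sketch, not part of the original module): the
# parser wraps the loaded argparse module and returns plain dict options, e.g.
#     opts = Arg2GxmlParser().process_arguments()
#     # invoked with "--generate_galaxy_xml" on the command line this yields
#     # {'generate_galaxy_xml': True, 'command': ''}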
|
erasche/argparse2tool
|
argparse2tool/cmdline2gxml/__init__.py
|
Python
|
apache-2.0
| 1,040 | 0.002885 |
from test import support
syslog = support.import_module("syslog") #skip if not supported
import unittest
# XXX(nnorwitz): This test sucks. I don't know of a platform independent way
# to verify that the messages were really logged.
# The only purpose of this test is to verify the code doesn't crash or leak.
class Test(unittest.TestCase):
def test_openlog(self):
syslog.openlog('python')
# Issue #6697.
self.assertRaises(UnicodeEncodeError, syslog.openlog, '\uD800')
def test_syslog(self):
syslog.openlog('python')
syslog.syslog('test message from python test_syslog')
syslog.syslog(syslog.LOG_ERR, 'test error from python test_syslog')
def test_closelog(self):
syslog.openlog('python')
syslog.closelog()
def test_setlogmask(self):
syslog.setlogmask(syslog.LOG_DEBUG)
def test_log_mask(self):
syslog.LOG_MASK(syslog.LOG_INFO)
def test_log_upto(self):
syslog.LOG_UPTO(syslog.LOG_INFO)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
firmlyjin/brython
|
www/tests/unittests/test/test_syslog.py
|
Python
|
bsd-3-clause
| 1,104 | 0.005435 |
# coding: utf-8
from __future__ import unicode_literals
import base64
import functools
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
dict_get,
ExtractorError,
js_to_json,
int_or_none,
merge_dicts,
OnDemandPagedList,
parse_filesize,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
str_or_none,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
urljoin,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
username, password = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
webpage = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = {
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
}
self._set_vimeo_cookie('vuid', vuid)
try:
self._download_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self._LOGIN_URL,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
raise ExtractorError(
'Unable to log in: bad username or password',
expected=True)
raise ExtractorError('Unable to log in')
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
})
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
vimeo_config = self._search_regex(
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
if vimeo_config:
return self._parse_json(vimeo_config, video_id)
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
def _vimeo_sort_formats(self, formats):
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
        # at the same time without actual units specified. This leads to wrong sorting.
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
def _parse_config(self, config, video_id):
video_data = config['video']
video_title = video_data['title']
live_event = video_data.get('live_event') or {}
is_live = live_event.get('status') == 'started'
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': 'http-%s' % f.get('quality'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'fps': int_or_none(f.get('fps')),
'tbr': int_or_none(f.get('bitrate')),
})
# TODO: fix handling of 308 status code returned for live archive manifest requests
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
if not manifest_url:
continue
format_id = '%s-%s' % (files_type, cdn_name)
if files_type == 'hls':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % cdn_name,
fatal=False))
elif files_type == 'dash':
mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
mpd_manifest_urls = []
if re.search(mpd_pattern, manifest_url):
for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
mpd_manifest_urls.append((format_id + suffix, re.sub(
mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
else:
mpd_manifest_urls = [(format_id, manifest_url)]
for f_id, m_url in mpd_manifest_urls:
if 'json=1' in m_url:
real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
if real_m_url:
m_url = real_m_url
mpd_formats = self._extract_mpd_formats(
m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
'Downloading %s MPD information' % cdn_name,
fatal=False)
for f in mpd_formats:
if f.get('vcodec') == 'none':
f['preference'] = -50
elif f.get('acodec') == 'none':
f['preference'] = -40
formats.extend(mpd_formats)
live_archive = live_event.get('archive') or {}
live_archive_source_url = live_archive.get('source_url')
if live_archive_source_url and live_archive.get('status') == 'done':
formats.append({
'format_id': 'live-archive-source',
'url': live_archive_source_url,
'preference': 1,
})
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
for tt in text_tracks:
subtitles[tt['lang']] = [{
'ext': 'vtt',
'url': urljoin('https://vimeo.com', tt['url']),
}]
thumbnails = []
if not is_live:
for key, thumb in video_data.get('thumbs', {}).items():
thumbnails.append({
'id': key,
'width': int_or_none(key),
'url': thumb,
})
thumbnail = video_data.get('thumbnail')
if thumbnail:
thumbnails.append({
'url': thumbnail,
})
owner = video_data.get('owner') or {}
video_uploader_url = owner.get('url')
return {
'id': str_or_none(video_data.get('id')) or video_id,
'title': self._live_title(video_title) if is_live else video_title,
'uploader': owner.get('name'),
'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
'uploader_url': video_uploader_url,
'thumbnails': thumbnails,
'duration': int_or_none(video_data.get('duration')),
'formats': formats,
'subtitles': subtitles,
'is_live': is_live,
}
def _extract_original_format(self, url, video_id):
download_data = self._download_json(
url, video_id, fatal=False,
query={'action': 'load_download_config'},
headers={'X-Requested-With': 'XMLHttpRequest'})
if download_data:
source_file = download_data.get('source_file')
if isinstance(source_file, dict):
download_url = source_file.get('download_url')
if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
source_name = source_file.get('public_name', 'Original')
if self._is_valid_url(download_url, video_id, '%s video' % source_name):
ext = (try_get(
source_file, lambda x: x['extension'],
compat_str) or determine_ext(
download_url, None) or 'mp4').lower()
return {
'url': download_url,
'ext': ext,
'width': int_or_none(source_file.get('width')),
'height': int_or_none(source_file.get('height')),
'filesize': parse_filesize(source_file.get('size')),
'format_id': source_name,
'preference': 1,
}
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
player
)
\.
)?
vimeo(?:pro)?\.com/
(?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:.*?/)?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
(?:/[\da-f]+)?
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'timestamp': 1355990239,
'upload_date': '20121220',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
'license': 'by-sa',
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:2c362968038d4499f4d79f88458590c1',
'duration': 1595,
'upload_date': '20130610',
'timestamp': 1370893156,
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'timestamp': 1371200155,
'upload_date': '20130614',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'channel_id': 'keypeele',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/keypeele',
'timestamp': 1380339469,
'upload_date': '20130928',
'duration': 187,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'timestamp': 1381846109,
'upload_date': '20131015',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id': '33951933',
'ext': 'mp4',
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
'uploader': 'The DMCI',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
'uploader_id': 'dmci',
'timestamp': 1324343742,
'upload_date': '20111220',
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
},
},
{
# only available via https://vimeo.com/channels/tributes/6213729 and
# not via https://vimeo.com/6213729
'url': 'https://vimeo.com/channels/tributes/6213729',
'info_dict': {
'id': '6213729',
'ext': 'mp4',
'title': 'Vimeo Tribute: The Shining',
'uploader': 'Casey Donahue',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
'uploader_id': 'caseydonahue',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/tributes',
'channel_id': 'tributes',
'timestamp': 1250886430,
'upload_date': '20090821',
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
# redirects to ondemand extractor and should be passed through it
# for successful extraction
'url': 'https://vimeo.com/73445910',
'info_dict': {
'id': '73445910',
'ext': 'mp4',
'title': 'The Reluctant Revolutionary',
'uploader': '10Ft Films',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
'uploader_id': 'tenfootfilms',
'description': 'md5:0fa704e05b04f91f40b7f3ca2e801384',
'upload_date': '20130830',
'timestamp': 1377853339,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://player.vimeo.com/video/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
'only_matching': True,
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
{
'url': 'https://vimeo.com/album/2632481/video/79010983',
'only_matching': True,
},
{
# source file returns 403: Forbidden
'url': 'https://vimeo.com/7809605',
'only_matching': True,
},
{
'url': 'https://vimeo.com/160743502/abd0e13fb4',
'only_matching': True,
}
# https://gettingthingsdone.com/workflowmap/
# vimeo embed with check-password page protected by Referer header
]
@staticmethod
def _smuggle_referrer(url, referrer_url):
return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
@staticmethod
def _extract_urls(url, webpage):
urls = []
# Look for embedded (iframe) Vimeo player
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1',
webpage):
urls.append(VimeoIE._smuggle_referrer(unescapeHTML(mobj.group('url')), url))
PLAIN_EMBED_RE = (
# Look for embedded (swf embed) Vimeo player
r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
# Look more for non-standard embedded Vimeo player
r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
)
for embed_re in PLAIN_EMBED_RE:
for mobj in re.finditer(embed_re, webpage):
urls.append(mobj.group('url'))
return urls
@staticmethod
def _extract_url(url, webpage):
urls = VimeoIE._extract_urls(url, webpage)
return urls[0] if urls else None
def _verify_player_video_password(self, url, video_id, headers):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
data = urlencode_postdata({
'password': base64.b64encode(password.encode()),
})
headers = merge_dicts(headers, {
'Content-Type': 'application/x-www-form-urlencoded',
})
checked = self._download_json(
url + '/check-password', video_id,
'Verifying the password', data=data, headers=headers)
if checked is False:
raise ExtractorError('Wrong video password', expected=True)
return checked
def _real_initialize(self):
self._login()
def _real_extract(self, url):
url, data = unsmuggle_url(url, {})
headers = std_headers.copy()
if 'http_headers' in data:
headers.update(data['http_headers'])
if 'Referer' not in headers:
headers['Referer'] = url
channel_id = self._search_regex(
r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
# Extract ID from URL
video_id = self._match_id(url)
orig_url = url
is_pro = 'vimeopro.com/' in url
is_player = '://player.vimeo.com/video/' in url
if is_pro:
# some videos require portfolio_id to be present in player url
# https://github.com/ytdl-org/youtube-dl/issues/20070
url = self._extract_url(url, self._download_webpage(url, video_id))
if not url:
url = 'https://vimeo.com/' + video_id
elif is_player:
url = 'https://player.vimeo.com/video/' + video_id
elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
url = 'https://vimeo.com/' + video_id
try:
# Retrieve video webpage to extract further information
webpage, urlh = self._download_webpage_handle(
url, video_id, headers=headers)
redirect_url = urlh.geturl()
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
errmsg = ee.cause.read()
if b'Because of its privacy settings, this video cannot be played here' in errmsg:
raise ExtractorError(
'Cannot download embed-only video without embedding '
'URL. Please call youtube-dl with the URL of the page '
'that embeds this video.',
expected=True)
raise
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
# and latter we extract those that are Vimeo specific.
self.report_extraction(video_id)
vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
if vimeo_config:
seed_status = vimeo_config.get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, seed_status['title']),
expected=True)
cc_license = None
timestamp = None
video_description = None
# Extract the config JSON
try:
try:
config_url = self._html_search_regex(
r' data-config-url="(.+?)"', webpage,
'config URL', default=None)
if not config_url:
# Sometimes new react-based page is served instead of old one that require
# different config URL extraction approach (see
# https://github.com/ytdl-org/youtube-dl/pull/7209)
page_config = self._parse_json(self._search_regex(
r'vimeo\.(?:clip|vod_title)_page_config\s*=\s*({.+?});',
webpage, 'page config'), video_id)
config_url = page_config['player']['config_url']
cc_license = page_config.get('cc_license')
timestamp = try_get(
page_config, lambda x: x['clip']['uploaded_on'],
compat_str)
video_description = clean_html(dict_get(
page_config, ('description', 'description_html_escaped')))
config = self._download_json(config_url, video_id)
except RegexNotFoundError:
# For pro videos or player.vimeo.com urls
# We try to find out to which variable is assigned the config dic
m_variable_name = re.search(r'(\w)\.video\.id', webpage)
if m_variable_name is not None:
config_re = [r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))]
else:
config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
config_re.append(r'\bvar\s+r\s*=\s*({.+?})\s*;')
config_re.append(r'\bconfig\s*=\s*({.+?})\s*;')
config = self._search_regex(config_re, webpage, 'info section',
flags=re.DOTALL)
config = json.loads(config)
except Exception as e:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
if '_video_password_verified' in data:
raise ExtractorError('video password verification failed!')
self._verify_video_password(redirect_url, video_id, webpage)
return self._real_extract(
smuggle_url(redirect_url, {'_video_password_verified': 'verified'}))
else:
raise ExtractorError('Unable to extract info section',
cause=e)
else:
if config.get('view') == 4:
config = self._verify_player_video_password(redirect_url, video_id, headers)
vod = config.get('video', {}).get('vod', {})
def is_rented():
if '>You rented this title.<' in webpage:
return True
if config.get('user', {}).get('purchased'):
return True
for purchase_option in vod.get('purchase_options', []):
if purchase_option.get('purchased'):
return True
label = purchase_option.get('label_string')
if label and (label.startswith('You rented this') or label.endswith(' remaining')):
return True
return False
if is_rented() and vod.get('is_trailer'):
feature_id = vod.get('feature_id')
if feature_id and not data.get('force_feature_id', False):
return self.url_result(smuggle_url(
'https://player.vimeo.com/player/%s' % feature_id,
{'force_feature_id': True}), 'Vimeo')
# Extract video description
if not video_description:
video_description = self._html_search_regex(
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
webpage, 'description', default=None)
if not video_description:
video_description = self._html_search_meta(
'description', webpage, default=None)
if not video_description and is_pro:
orig_webpage = self._download_webpage(
orig_url, video_id,
note='Downloading webpage for description',
fatal=False)
if orig_webpage:
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not is_player:
self._downloader.report_warning('Cannot find video description')
# Extract upload date
if not timestamp:
timestamp = self._search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage,
'timestamp', default=None)
try:
view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
except RegexNotFoundError:
# This info is only available in vimeo.com/{id} urls
view_count = None
like_count = None
comment_count = None
formats = []
source_format = self._extract_original_format(
'https://vimeo.com/' + video_id, video_id)
if source_format:
formats.append(source_format)
info_dict_config = self._parse_config(config, video_id)
formats.extend(info_dict_config['formats'])
self._vimeo_sort_formats(formats)
json_ld = self._search_json_ld(webpage, video_id, default={})
if not cc_license:
cc_license = self._search_regex(
r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
webpage, 'license', default=None, group='license')
channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None
info_dict = {
'formats': formats,
'timestamp': unified_timestamp(timestamp),
'description': video_description,
'webpage_url': url,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'license': cc_license,
'channel_id': channel_id,
'channel_url': channel_url,
}
info_dict = merge_dicts(info_dict, info_dict_config, json_ld)
return info_dict
class VimeoOndemandIE(VimeoIE):
IE_NAME = 'vimeo:ondemand'
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/([^/]+/)?(?P<id>[^/?#&]+)'
_TESTS = [{
# ondemand video not available via https://vimeo.com/id
'url': 'https://vimeo.com/ondemand/20704',
'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
'info_dict': {
'id': '105442900',
'ext': 'mp4',
'title': 'המעבדה - במאי יותם פלדמן',
'uploader': 'גם סרטים',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
'uploader_id': 'gumfilms',
'description': 'md5:4c027c965e439de4baab621e48b60791',
'upload_date': '20140906',
'timestamp': 1410032453,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
# requires Referer to be passed along with og:video:url
'url': 'https://vimeo.com/ondemand/36938/126682985',
'info_dict': {
'id': '126584684',
'ext': 'mp4',
'title': 'Rävlock, rätt läte på rätt plats',
'uploader': 'Lindroth & Norin',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/lindrothnorin',
'uploader_id': 'lindrothnorin',
'description': 'md5:c3c46a90529612c8279fb6af803fc0df',
'upload_date': '20150502',
'timestamp': 1430586422,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'url': 'https://vimeo.com/ondemand/nazmaalik',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/141692381',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
'only_matching': True,
}]
class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
_TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
'info_dict': {
'id': 'tributes',
'title': 'Vimeo Tributes',
},
'playlist_mincount': 25,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/channels/%s'
def _page_url(self, base_url, pagenum):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
return self._TITLE or self._html_search_regex(
self._TITLE_RE, webpage, 'list title', fatal=False)
def _title_and_entries(self, list_id, base_url):
for pagenum in itertools.count(1):
page_url = self._page_url(base_url, pagenum)
webpage = self._download_webpage(
page_url, list_id,
'Downloading page %s' % pagenum)
if pagenum == 1:
yield self._extract_list_title(webpage)
# Try extracting href first since not all videos are available via
# short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
clips = re.findall(
r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', webpage)
if clips:
for video_id, video_url, video_title in clips:
yield self.url_result(
compat_urlparse.urljoin(base_url, video_url),
VimeoIE.ie_key(), video_id=video_id, video_title=video_title)
# More relaxed fallback
else:
for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
yield self.url_result(
'https://vimeo.com/%s' % video_id,
VimeoIE.ie_key(), video_id=video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
def _extract_videos(self, list_id, base_url):
title_and_entries = self._title_and_entries(list_id, base_url)
list_title = next(title_and_entries)
return self.playlist_result(title_and_entries, list_id, list_title)
def _real_extract(self, url):
channel_id = self._match_id(url)
return self._extract_videos(channel_id, self._BASE_URL_TEMPL % channel_id)
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
_VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<id>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
'info_dict': {
'title': 'Nki',
'id': 'nkistudio',
},
'playlist_mincount': 66,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/%s'
class VimeoAlbumIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:album'
_VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
_TESTS = [{
'url': 'https://vimeo.com/album/2632481',
'info_dict': {
'id': '2632481',
'title': 'Staff Favorites: November 2013',
},
'playlist_mincount': 13,
}, {
'note': 'Password-protected album',
'url': 'https://vimeo.com/album/3253534',
'info_dict': {
'title': 'test',
'id': '3253534',
},
'playlist_count': 1,
'params': {
'videopassword': 'youtube-dl',
}
}]
_PAGE_SIZE = 100
    def _fetch_page(self, album_id, authorization, hashed_pass, page):
api_page = page + 1
query = {
'fields': 'link,uri',
'page': api_page,
'per_page': self._PAGE_SIZE,
}
if hashed_pass:
query['_hashed_pass'] = hashed_pass
videos = self._download_json(
'https://api.vimeo.com/albums/%s/videos' % album_id,
album_id, 'Downloading page %d' % api_page, query=query, headers={
                'Authorization': 'jwt ' + authorization,
})['data']
for video in videos:
link = video.get('link')
if not link:
continue
uri = video.get('uri')
video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
yield self.url_result(link, VimeoIE.ie_key(), video_id)
def _real_extract(self, url):
album_id = self._match_id(url)
webpage = self._download_webpage(url, album_id)
viewer = self._parse_json(self._search_regex(
r'bootstrap_data\s*=\s*({.+?})</script>',
webpage, 'bootstrap data'), album_id)['viewer']
jwt = viewer['jwt']
album = self._download_json(
'https://api.vimeo.com/albums/' + album_id,
album_id, headers={'Authorization': 'jwt ' + jwt},
query={'fields': 'description,name,privacy'})
hashed_pass = None
if try_get(album, lambda x: x['privacy']['view']) == 'password':
password = self._downloader.params.get('videopassword')
if not password:
raise ExtractorError(
'This album is protected by a password, use the --video-password option',
expected=True)
self._set_vimeo_cookie('vuid', viewer['vuid'])
try:
hashed_pass = self._download_json(
'https://vimeo.com/showcase/%s/auth' % album_id,
album_id, 'Verifying the password', data=urlencode_postdata({
'password': password,
'token': viewer['xsrft'],
}), headers={
'X-Requested-With': 'XMLHttpRequest',
})['hashed_pass']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
raise ExtractorError('Wrong password', expected=True)
raise
entries = OnDemandPagedList(functools.partial(
self._fetch_page, album_id, jwt, hashed_pass), self._PAGE_SIZE)
return self.playlist_result(
entries, album_id, album.get('name'), album.get('description'))
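# Editor's note (illustrative, not used by the extractor): VimeoAlbumIE pages through an
# album lazily. functools.partial pre-binds album_id, jwt and hashed_pass, and
# OnDemandPagedList only calls the resulting function (with a 0-based page index) when a
# page is actually requested, roughly:
#   fetch = functools.partial(self._fetch_page, album_id, jwt, hashed_pass)
#   entries = OnDemandPagedList(fetch, self._PAGE_SIZE)  # fetch(0), fetch(1), ... on demand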
class VimeoGroupsIE(VimeoChannelIE):
IE_NAME = 'vimeo:group'
_VALID_URL = r'https://vimeo\.com/groups/(?P<id>[^/]+)(?:/(?!videos?/\d+)|$)'
_TESTS = [{
'url': 'https://vimeo.com/groups/kattykay',
'info_dict': {
'id': 'kattykay',
'title': 'Katty Kay',
},
'playlist_mincount': 27,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/groups/%s'
class VimeoReviewIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:review'
IE_DESC = 'Review pages on vimeo'
_VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
_TESTS = [{
'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
'md5': 'c507a72f780cacc12b2248bb4006d253',
'info_dict': {
'id': '75524534',
'ext': 'mp4',
'title': "DICK HARDWICK 'Comedian'",
'uploader': 'Richard Hardwick',
'uploader_id': 'user21297594',
'description': "Comedian Dick Hardwick's five minute demo filmed in front of a live theater audience.\nEdit by Doug Mattocks",
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'note': 'video player needs Referer',
'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
'md5': '6295fdab8f4bf6a002d058b2c6dce276',
'info_dict': {
'id': '91613211',
'ext': 'mp4',
'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
'uploader': 'DevWeek Events',
'duration': 2773,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader_id': 'user22258446',
},
'skip': 'video gone',
}, {
'note': 'Password protected',
'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
'info_dict': {
'id': '138823582',
'ext': 'mp4',
'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
'uploader': 'TMB',
'uploader_id': 'user37284429',
},
'params': {
'videopassword': 'holygrail',
},
'skip': 'video gone',
}]
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page_url, video_id = re.match(self._VALID_URL, url).groups()
clip_data = self._download_json(
page_url.replace('/review/', '/review/data/'),
video_id)['clipData']
config_url = clip_data['configUrl']
config = self._download_json(config_url, video_id)
info_dict = self._parse_config(config, video_id)
source_format = self._extract_original_format(
page_url + '/action', video_id)
if source_format:
info_dict['formats'].append(source_format)
self._vimeo_sort_formats(info_dict['formats'])
info_dict['description'] = clean_html(clip_data.get('description'))
return info_dict
class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
_VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
_TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
request = sanitized_Request(url)
# Set the header to get a partial html page with the ids,
# the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
return request
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(VimeoChannelIE):
_VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
_TESTS = [{
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
'info_dict': {
'id': 'user755559',
'title': 'urza’s Likes',
},
}, {
'url': 'https://vimeo.com/stormlapse/likes',
'only_matching': True,
}]
def _page_url(self, base_url, pagenum):
return '%s/page:%d/' % (base_url, pagenum)
def _real_extract(self, url):
user_id = self._match_id(url)
return self._extract_videos(user_id, 'https://vimeo.com/%s/likes' % user_id)
class VHXEmbedIE(VimeoBaseInfoExtractor):
IE_NAME = 'vhx:embed'
_VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
config_url = self._parse_json(self._search_regex(
r'window\.OTTData\s*=\s*({.+})', webpage,
'ott data'), video_id, js_to_json)['config_url']
config = self._download_json(config_url, video_id)
info = self._parse_config(config, video_id)
self._vimeo_sort_formats(info['formats'])
return info
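# Editor's note: the playlist extractors above (VimeoChannelIE and its subclasses) use a
# generator that yields the playlist title first and the video entries afterwards;
# _extract_videos() pops the title with next() and hands the rest to playlist_result().
# A minimal self-contained sketch of that pattern (hypothetical names, never called by
# the extractors themselves):
def _sketch_title_then_entries(pages, title='Example playlist'):
    # The first yielded item is the playlist title, every later item is an entry.
    yield title
    for page in pages:
        for entry in page:
            yield entry
# Usage sketch:
#   gen = _sketch_title_then_entries([[1, 2], [3]])
#   next(gen)  # -> 'Example playlist'
#   list(gen)  # -> [1, 2, 3]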
|
remitamine/youtube-dl
|
youtube_dl/extractor/vimeo.py
|
Python
|
unlicense
| 46,520 | 0.001721 |
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import os
import sys
# List of directories to not apply presubmit project checks, relative
# to the NaCl top directory
EXCLUDE_PROJECT_CHECKS_DIRS = [
# The following contain test data (including automatically generated),
# and do not follow our conventions.
'src/trusted/validator_ragel/testdata/32',
'src/trusted/validator_ragel/testdata/64',
'src/trusted/validator_x86/testdata/32',
'src/trusted/validator_x86/testdata/64',
'src/trusted/validator/x86/decoder/generator/testdata/32',
'src/trusted/validator/x86/decoder/generator/testdata/64',
# The following directories contains automatically generated source,
# which may not follow our conventions.
'src/trusted/validator_x86/gen',
'src/trusted/validator/x86/decoder/gen',
'src/trusted/validator/x86/decoder/generator/gen',
'src/trusted/validator/x86/ncval_seg_sfi/gen',
'src/trusted/validator_arm/gen',
'src/trusted/validator_ragel/gen',
]
NACL_TOP_DIR = os.getcwd()
while not os.path.isfile(os.path.join(NACL_TOP_DIR, 'PRESUBMIT.py')):
NACL_TOP_DIR = os.path.dirname(NACL_TOP_DIR)
assert len(NACL_TOP_DIR) >= 3, "Could not find NaClTopDir"
def _CommonChecks(input_api, output_api):
"""Checks for both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, project_name='Native Client',
excluded_paths=tuple(EXCLUDE_PROJECT_CHECKS_DIRS)))
return results
def IsFileInDirectories(f, dirs):
""" Returns true if f is in list of directories"""
for d in dirs:
    if d == os.path.commonprefix([f, d]):
return True
return False
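# Editor's note (illustrative): os.path.commonprefix compares character prefixes rather
# than path components, so directory strings passed to IsFileInDirectories should end
# with a path separator when exact component matching matters. With hypothetical paths:
#   IsFileInDirectories('/nacl/src/foo/gen/x.py', ['/nacl/src/foo/gen/'])   -> True
#   IsFileInDirectories('/nacl/src/foo/gen2/x.py', ['/nacl/src/foo/gen/'])  -> False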
def CheckChangeOnUpload(input_api, output_api):
"""Verifies all changes in all files.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(_CommonChecks(input_api, output_api))
# The commit queue assumes PRESUBMIT.py is standalone.
# TODO(bradnelson): Migrate code_hygiene to a common location so that
# it can be used by the commit queue.
old_sys_path = list(sys.path)
try:
sys.path.append(os.path.join(NACL_TOP_DIR, 'tools'))
sys.path.append(os.path.join(NACL_TOP_DIR, 'build'))
import code_hygiene
finally:
sys.path = old_sys_path
del old_sys_path
affected_files = input_api.AffectedFiles(include_deletes=False)
exclude_dirs = [ NACL_TOP_DIR + '/' + x + '/'
for x in EXCLUDE_PROJECT_CHECKS_DIRS ]
for filename in affected_files:
filename = filename.AbsoluteLocalPath()
if not IsFileInDirectories(filename, exclude_dirs):
errors, warnings = code_hygiene.CheckFile(filename, False)
for e in errors:
report.append(output_api.PresubmitError(e, items=errors[e]))
for w in warnings:
report.append(output_api.PresubmitPromptWarning(w, items=warnings[w]))
return report
def CheckChangeOnCommit(input_api, output_api):
"""Verifies all changes in all files and verifies that the
tree is open and can accept a commit.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(CheckChangeOnUpload(input_api, output_api))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://nativeclient-status.appspot.com/current?format=json'))
return report
# Note that this list is duplicated in the Commit Queue. If you
# change this list, you should also update the CQ's list here:
# https://chrome-internal.googlesource.com/infra/infra_internal/+/master/commit_queue/projects.py
# (see https://crbug.com/399059).
DEFAULT_TRYBOTS = [
'nacl-precise32_newlib_dbg',
'nacl-precise32_newlib_opt',
'nacl-precise32_glibc_opt',
'nacl-precise64_newlib_dbg',
'nacl-precise64_newlib_opt',
'nacl-precise64_glibc_opt',
'nacl-mac10.6_newlib_opt',
'nacl-mac10.6_glibc_opt',
'nacl-mac10.6_64_newlib_dbg',
'nacl-mac10.6_64_glibc_opt',
'nacl-mac10.7_newlib_opt',
'nacl-mac10.7_glibc_opt',
'nacl-mac10.7_64_newlib_dbg',
'nacl-mac10.7_64_glibc_opt',
'nacl-mac10.8_32_newlib_dbg',
'nacl-mac10.8_32_glibc_opt',
'nacl-mac10.8_64_newlib_dbg',
'nacl-mac10.8_64_glibc_opt',
'nacl-win32_newlib_opt',
'nacl-win32_glibc_opt',
'nacl-win64_newlib_dbg',
'nacl-win64_newlib_opt',
'nacl-win64_glibc_opt',
'nacl-win8-64_newlib_dbg',
'nacl-win8-64_newlib_opt',
'nacl-arm_opt_panda',
# arm-nacl-gcc bots
'nacl-win7_64_arm_newlib_opt',
'nacl-mac10.7_arm_newlib_opt',
'nacl-precise64_arm_newlib_opt',
# Clang bots
'nacl-precise_64-newlib-dbg-clang',
'nacl-mac10.6-newlib-dbg-clang',
# pnacl scons bots
'nacl-precise_64-newlib-arm_qemu-pnacl',
'nacl-precise_64-newlib-x86_32-pnacl',
'nacl-precise_64-newlib-x86_64-pnacl',
'nacl-mac10.8_newlib_opt_pnacl',
'nacl-win7_64_newlib_opt_pnacl',
# pnacl spec2k bots
'nacl-arm_perf_panda',
'nacl-precise_64-newlib-x86_32-pnacl-spec',
'nacl-precise_64-newlib-x86_64-pnacl-spec',
]
PNACL_TOOLCHAIN_TRYBOTS = [
'nacl-toolchain-linux-pnacl-x86_64',
'nacl-toolchain-linux-pnacl-x86_32',
'nacl-toolchain-mac-pnacl-x86_32',
'nacl-toolchain-win7-pnacl-x86_64',
]
TOOLCHAIN_BUILD_TRYBOTS = [
'nacl-toolchain-precise64-newlib-arm',
'nacl-toolchain-mac-newlib-arm',
]
def GetPreferredTryMasters(_, change):
has_pnacl = False
has_toolchain_build = False
has_others = False
for file in change.AffectedFiles(include_dirs=True):
if IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'build'),
os.path.join(NACL_TOP_DIR, 'buildbot'),
os.path.join(NACL_TOP_DIR, 'pynacl')]):
# Buildbot and infrastructure changes should trigger all the try bots.
has_pnacl = True
has_toolchain_build = True
has_others = True
break
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'pnacl')]):
has_pnacl = True
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'toolchain_build')]):
has_toolchain_build = True
else:
has_others = True
trybots = []
if has_pnacl:
trybots += PNACL_TOOLCHAIN_TRYBOTS
if has_toolchain_build:
trybots += TOOLCHAIN_BUILD_TRYBOTS
if has_others:
trybots += DEFAULT_TRYBOTS
return {
'tryserver.nacl': { t: set(['defaulttests']) for t in trybots },
}
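# Editor's note (illustrative): for a change touching only files under pnacl/, the
# function above returns a structure shaped like
#   {'tryserver.nacl': {'nacl-toolchain-linux-pnacl-x86_64': set(['defaulttests']),
#                       'nacl-toolchain-linux-pnacl-x86_32': set(['defaulttests']),
#                       ...}}
# i.e. one entry per selected trybot, each mapped to the set of test filters to run.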
|
mxOBS/deb-pkg_trusty_chromium-browser
|
native_client/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 7,031 | 0.008107 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Xibo - Digital Signage - http://www.xibo.org.uk
# Copyright (C) 2010-11 Alex Harrington
#
# This file is part of Xibo.
#
# Xibo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Xibo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Xibo. If not, see <http://www.gnu.org/licenses/>.
#
from XiboMedia import XiboMedia
from threading import Thread, Semaphore
import sys, os, time, codecs
import simplejson
import urllib2
import urllib
import cPickle
import inspect
# Define costants to represent each service
TWITTER = 0
IDENTICA = 1
class MicroblogMedia(XiboMedia):
def add(self):
self.running = True
self.tmpPath = os.path.join(self.libraryDir,self.mediaNodeName + "-tmp.html")
self.opener = urllib2.build_opener()
# Semaphore to lock reading/updating the global posts array
self.__lock = Semaphore()
# Options:
# <searchTerm>oggcamp</searchTerm><fadeInterval>1</fadeInterval><speedInterval>5</speedInterval><updateInterval>10</updateInterval><historySize>15</historySize><twitter>1</twitter><identica>1</identica></options>
self.options['historySize'] = int(self.options['historySize'])
self.options['twitter'] = bool(int(self.options['twitter']))
self.options['identica'] = bool(int(self.options['identica']))
self.options['speedInterval'] = int(self.options['speedInterval'])
self.options['fadeInterval'] = int(self.options['fadeInterval'])
# Create an empty array for the posts to sit in
# Each element will be a dictionary in the following format:
# {'xibo_src': 0, u'iso_language_code': u'en_GB', u'text': u"@bobobex @corenominal If you have an android device, give c:geo a look for !geocaching, or nudge me at oggcamp and I'll show you. 'sgood", u'created_at': u'Thu, 08 Apr 2010 08:03:38 +0000', u'profile_image_url': u'http://avatar.identi.ca/13737-48-20080711132350.png', u'to_user': None, u'source': u'web', u'from_user': u'jontheniceguy', u'from_user_id': u'13737', u'to_user_id': None, u'id': u'27725072'}
self.__posts = []
# Parse out the template element from the raw tag.
try:
for t in self.rawNode.getElementsByTagName('template'):
self.templateNode = t
for node in self.templateNode.childNodes:
if node.nodeType == node.CDATA_SECTION_NODE:
self.template = node.data.encode('UTF-8')
self.log.log(5,'audit','Template is: ' + self.template)
except:
self.log.log(2,'error','%s Error parsing out the template from the xlf' % self.mediaNodeName)
self.template = ""
# Parse out the nocontent element from the raw tag
try:
for t in self.rawNode.getElementsByTagName('nocontent'):
self.nocontentNode = t
for node in self.nocontentNode.childNodes:
if node.nodeType == node.CDATA_SECTION_NODE:
self.nocontent = node.data.encode('UTF-8')
self.log.log(5,'audit','No Content is: ' + self.nocontent)
except:
self.log.log(2,'error','%s Error parsing out the nocontent from the xlf' % self.mediaNodeName)
self.nocontent = ""
def run(self):
# Kickoff the display output thread
self.displayThread = MicroblogMediaDisplayThread(self.log,self.p,self)
self.displayThread.start()
# Start the region timer so the media dies at the right time.
self.p.enqueue('timer',(int(self.duration) * 1000,self.timerElapsed))
tmpXML = '<browser id="' + self.mediaNodeName + '" opacity="0" width="' + str(self.width) + '" height="' + str(self.height) + '"/>'
self.p.enqueue('add',(tmpXML,self.regionNodeName))
self.startStats()
# Pointer to the currently displayed post:
self.__pointer = -1
# Open previous cache file (if exists) and begin playing out posts
# Lock the semaphore as we write to __posts to avoid changing the array as the display thread reads it.
try:
try:
self.log.log(9,'info','%s acquiring lock to read pickled file.' % self.mediaId)
self.__lock.acquire()
self.log.log(9,'info','%s acquired lock to read pickled file.' % self.mediaId)
tmpFile = open(os.path.join(self.libraryDir,self.mediaId + ".pickled"), 'rb')
self.__posts = cPickle.load(tmpFile)
tmpFile.close()
finally:
self.__lock.release()
self.log.log(9,'info','%s releasing lock after reading pickled file.' % self.mediaId)
except:
# Erase any pickle file that may be existing but corrupted
try:
os.remove(os.path.join(self.libraryDir,self.mediaId + ".pickled"))
self.log.log(9,'info','%s erasing corrupt pickled file.' % self.mediaId)
except:
self.log.log(9,'info','%s unable to erase corrupt pickled file.' % self.mediaId)
self.log.log(5,"audit","Unable to read serialised representation of the posts array or this media has never run before.")
self.__lock.release()
self.displayThread.nextPost()
# Check that the updateInterval we've been given is sane
try:
self.options['updateInterval'] = int(self.options['updateInterval'])
except:
self.options['updateInterval'] = 5
while self.running:
self.log.log(0,"audit","%s: Waking up" % self.mediaId)
try:
mtime = os.path.getmtime(os.path.join(self.libraryDir,self.mediaId + '.pickled'))
except:
# File probably doesn't exist.
# Pretend the file was last updated more than updateInterval ago
self.log.log(0,"audit","%s: Post cache does not exist.")
mtime = time.time() - (self.options['updateInterval'] * 60) - 10
if time.time() > (mtime + (self.options['updateInterval'] * 60)):
# Download new posts and add them to the rotation
self.log.log(0,"audit","%s: Getting new posts from Microblogs" % self.mediaId)
tmpTwitter = self.updateTwitter()
tmpIdentica = self.updateIdentica()
tmpPosts = []
# Deduplicate the posts we've pulled in from Twitter against Identica and __posts
for post in tmpTwitter:
inIdentica = False
inPosts = False
# See if the post is in the tmpIdentica array
for cmpPost in tmpIdentica:
if post['text'] == cmpPost['text'] and post['from_user'] == cmpPost['from_user']:
inIdentica = True
# See if the post is in the __posts array
for cmpPost in self.__posts:
if post['text'] == cmpPost['text'] and post['from_user'] == cmpPost['from_user']:
inPosts = True
# Update self.__posts with the new content as required
# Lock the semaphore as we write to __posts to avoid changing the array as the display thread reads it.
if inIdentica or inPosts:
# The post already exists or is in Identica too
# Ignore the twitter version
pass
else:
tmpPosts.append(post)
# Deduplicate the posts we've pulled in from Identica against __posts
# (They're already deduplicated against Twitter
for post in tmpIdentica:
inPosts = False
for cmpPost in self.__posts:
if post['text'] == cmpPost['text'] and post['from_user'] == cmpPost['from_user']:
inPosts = True
if inPosts:
# The post already exists in __posts.
# Ignore the identica version
pass
else:
tmpPosts.append(post)
# Remove enough old posts to ensure we maintain at least self.options['historySize'] posts
# but allow an overflow if there are more new posts than we can handle
# Lock the __posts list while we work on it.
self.log.log(0,"audit","%s: Got %s new posts" % (self.mediaId,len(tmpPosts)))
self.log.log(9,'info','%s acquiring lock to process posts.' % self.mediaId)
self.__lock.acquire()
self.log.log(9,'info','%s acquired lock to process posts.' % self.mediaId)
if len(tmpPosts) >= self.options['historySize']:
# There are more new posts than length.
# Wipe the whole existing __posts array out
self.__posts = []
else:
# If there are more items in __posts than we're allowed to show
# trim it down to max now
if len(self.__posts) > self.options['historySize']:
self.__posts = self.__posts[0:self.options['historySize'] - 1]
# Now remove len(tmpPosts) items from __posts
self.__posts = self.__posts[0:(self.options['historySize'] - len(tmpPosts) - 1)]
# Reverse the __posts array as we can't prepend to an array
self.__posts.reverse()
# Reverse the tmpPosts array so we get newest items first
tmpPosts.reverse()
# Finally add the new items to the list
for post in tmpPosts:
self.__posts.append(post)
# And finally switch the array back around again to compensate for reversing it earlier
self.__posts.reverse()
# Unlock the list now we've finished writing to it
self.__lock.release()
self.log.log(9,'info','%s releasing lock after processing posts.' % self.mediaId)
# Serialize self.__posts for next time
try:
try:
self.log.log(9,'info','%s acquiring lock to write pickled file.' % self.mediaId)
self.__lock.acquire()
self.log.log(9,'info','%s acquired lock to write pickled file.' % self.mediaId)
f = open(os.path.join(self.libraryDir,self.mediaId + ".pickled"),mode='wb')
cPickle.dump(self.__posts, f, True)
finally:
f.close()
self.__lock.release()
self.log.log(9,'info','%s releasing lock to write pickled file.' % self.mediaId)
except IOError:
self.log.log(0,"error","Unable to write serialised representation of the posts array")
except:
self.log.log(0,"error","Unexpected exception trying to write serialised representation of the posts array")
# End If (If we should update on this run
else:
self.log.log(0,"audit","%s: Posts are still fresh." % self.mediaId)
self.log.log(0,"audit","%s: Sleeping 60 seconds" % self.mediaId)
# Sleep for 1 minute
time.sleep(60)
# End While loop
self.log.log(0,"audit","%s: Media has completed. Stopping updating." % self.mediaId)
self.__lock.release()
def dispose(self):
# Remember that we've finished running
self.displayThread.dispose()
self.running = False
self.__lock.release()
self.p.enqueue('del', self.mediaNodeName)
self.returnStats()
# Clean up any temporary files left
try:
os.remove(self.tmpPath)
except:
self.log.log(0,"error","Unable to delete file %s" % (self.tmpPath))
self.parent.tNext()
def getLock(self):
self.__lock.acquire()
def releaseLock(self):
self.__lock.release()
def posts(self):
return self.__posts
def timerElapsed(self):
# TODO: This function should not be necessary.
# As soon as dispose() is properly called this can be removed
# Remember that we've finished running
self.running = False
self.__lock.release()
self.returnStats()
self.displayThread.dispose()
self.p.enqueue('del', self.mediaNodeName)
# Clean up any temporary files left
try:
os.remove(self.tmpPath)
except:
self.log.log(0,"error","Unable to delete file %s" % (self.tmpPath))
# Tell our parent we're finished
self.parent.next()
def updateTwitter(self):
""" Pull new posts from Twitter and return new posts in a list """
if not self.options['twitter']:
return []
# Find the highest number twitter post we have already
# No need to lock the Semaphore as we're the only thread that will
# be doing any writing.
last_id = None
for post in self.__posts:
if post['xibo_src'] == TWITTER and long(post['id']) > last_id:
last_id = long(post['id'])
# Call twitter API and get new matches
try:
results = self.searchMicroblog("http://search.twitter.com/search.json", self.options['searchTerm'], since_id=last_id)
except:
results = []
tmpTwitter = []
for post in results["results"]:
post['xibo_src'] = TWITTER
tmpTwitter.append(post)
return tmpTwitter
def updateIdentica(self):
""" Pull new posts from Identi.ca and return new posts in a list """
if not self.options['identica']:
return []
# Find the highest number identi.ca post we have already
# No need to lock the Semaphore as we're the only thread that will
# be doing any writing.
last_id = None
for post in self.__posts:
if post['xibo_src'] == IDENTICA and long(post['id']) > last_id:
last_id = long(post['id'])
# Call identica API and get new matches
try:
results = self.searchMicroblog("http://identi.ca/api/search.json", self.options['searchTerm'], since_id=last_id)
except:
results = []
tmpIdentica = []
for post in results["results"]:
post['xibo_src'] = IDENTICA
tmpIdentica.append(post)
return tmpIdentica
# This method taken from Twython as it does not support connecting to identi.ca yet
# The MIT License
#
# Copyright (c) 2009 Ryan McGrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def constructApiURL(self, base_url, params):
return base_url + "?" + "&".join(["%s=%s" %(key, value) for (key, value) in params.iteritems()])
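    # Editor's note (illustrative values): constructApiURL just joins keyword arguments
    # into a query string, e.g.
    #   self.constructApiURL("http://search.twitter.com/search.json",
    #                        {"rpp": 20, "since_id": 12345})
    # returns "http://search.twitter.com/search.json?rpp=20&since_id=12345"
    # (the parameter order depends on dict iteration order and is not guaranteed).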
# This method taken from Twython as it does not support connecting to identi.ca yet
# Modified from "searchTwitter" to take an api_base to allow switching between services.
# The MIT License
#
# Copyright (c) 2009 Ryan McGrath
# Portions (c) 2010 Alex Harrington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def searchMicroblog(self, api_base, search_query, **kwargs):
"""searchMicroblog(search_query, **kwargs)
Returns tweets that match a specified query.
Parameters:
callback - Optional. Only available for JSON format. If supplied, the response will use the JSONP format with a callback of the given name.
lang - Optional. Restricts tweets to the given language, given by an ISO 639-1 code.
locale - Optional. Language of the query you're sending (only ja is currently effective). Intended for language-specific clients; default should work in most cases.
rpp - Optional. The number of tweets to return per page, up to a max of 100.
page - Optional. The page number (starting at 1) to return, up to a max of roughly 1500 results (based on rpp * page. Note: there are pagination limits.)
since_id - Optional. Returns tweets with status ids greater than the given id.
            geocode - Optional. Returns tweets by users located within a given radius of the given latitude/longitude, where the user's location is taken from their Twitter profile. The parameter value is specified by "latitude,longitude,radius", where radius units must be specified as either "mi" (miles) or "km" (kilometers). Note that you cannot use the near operator via the API to geocode arbitrary locations; however you can use this geocode parameter to search near geocodes directly.
show_user - Optional. When true, prepends "<user>:" to the beginning of the tweet. This is useful for readers that do not display Atom's author field. The default is false.
Usage Notes:
            Queries are limited to 140 URL-encoded characters.
Some users may be absent from search results.
The since_id parameter will be removed from the next_page element as it is not supported for pagination. If since_id is removed a warning will be added to alert you.
This method will return an HTTP 404 error if since_id is used and is too old to be in the search index.
Applications must have a meaningful and unique User Agent when using this method.
An HTTP Referrer is expected but not required. Search traffic that does not include a User Agent will be rate limited to fewer API calls per hour than
applications including a User Agent string. You can set your custom UA headers by passing it as a respective argument to the setup() method.
"""
searchURL = self.constructApiURL(api_base, kwargs) + "&" + urllib.urlencode({"q": self.unicode2utf8(search_query)})
return simplejson.load(self.opener.open(searchURL))
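    # Editor's note (illustrative): a typical call from updateTwitter()/updateIdentica()
    # above is
    #   self.searchMicroblog("http://search.twitter.com/search.json",
    #                        self.options['searchTerm'], since_id=last_id)
    # which returns the decoded JSON dict; its "results" list is what gets tagged with
    # xibo_src and merged into the post history.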
# This method taken from twython
# The MIT License
#
# Copyright (c) 2009 Ryan McGrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def unicode2utf8(self, text):
try:
if isinstance(text, unicode):
text = text.encode('utf-8')
except:
pass
return text
class MicroblogMediaDisplayThread(Thread):
def __init__(self,log,player,parent):
Thread.__init__(self)
self.parent = parent
self.p = player
self.log = log
self.__lock = Semaphore()
self.__running = True
self.__pointer = 0
def run(self):
tmpPost = None
while self.__running:
self.__lock.acquire()
self.log.log(9,'info', 'MicroblogMediaDisplayThread: Sleeping')
self.__lock.acquire()
self.log.log(9,'info', 'MicroblogMediaDisplayThread: Wake Up')
if self.__running:
# Do stuff
self.parent.getLock()
if len(self.parent.posts()) > 0:
self.__pointer = (self.__pointer + 1) % len(self.parent.posts())
tmpPost = self.parent.posts()[self.__pointer]
self.parent.releaseLock()
# Get the template we get from the server and insert appropriate fields
# If there's no posts then show the no content template, otherwise show the content template
if tmpPost == None:
# TODO: Get no content template
tmpHtml = self.parent.nocontent
else:
service = ''
if tmpPost['xibo_src'] == TWITTER:
tmpPost['service'] = "Twitter"
elif tmpPost['xibo_src'] == IDENTICA:
tmpPost['service'] = "Identica"
tmpHtml = self.parent.template
# Replace [tag] values with data
for key, value in tmpPost.items():
tmpHtml = tmpHtml.replace("[%s]" % key, "%s" % value)
try:
try:
f = codecs.open(self.parent.tmpPath,mode='w',encoding="utf-8")
f.write(tmpHtml)
tmpHtml = None
finally:
f.close()
except:
self.log.log(0,"error","Unable to write " + self.parent.tmpPath)
self.parent.parent.next()
return
# self.p.enqueue('del', self.parent.mediaNodeName)
# tmpXML = '<browser id="' + self.parent.mediaNodeName + '" opacity="0" width="' + str(self.parent.width) + '" height="' + str(self.parent.height) + '"/>'
# self.p.enqueue('add',(tmpXML,self.parent.regionNodeName))
self.p.enqueue('browserNavigate',(self.parent.mediaNodeName,"file://" + os.path.abspath(self.parent.tmpPath),self.fadeIn))
self.log.log(9,'info','MicroblogMediaDisplayThread: Finished Loop')
self.log.log(9,'info', 'MicroblogMediaDisplayThread: Exit')
self.__lock.release()
def nextPost(self):
# Release the lock so next can run
self.log.log(9,'info', 'MicroblogMediaDisplayThread: nextPost called by ' + inspect.getframeinfo(inspect.currentframe().f_back)[2] + '.' + str(inspect.getframeinfo(inspect.currentframe().f_back)[1]))
self.__lock.release()
def dispose(self):
self.__running = False
self.__lock.release()
def fadeIn(self):
self.log.log(9,'info','Starting fadeIn')
self.log.log(9,'info', 'MicroblogMediaDisplayThread: fadeIn called by ' + inspect.getframeinfo(inspect.currentframe().f_back)[2] + '.' + str(inspect.getframeinfo(inspect.currentframe().f_back)[1]))
# Once the next post has finished rendering, fade it in
self.p.enqueue('browserOptions',(self.parent.mediaNodeName, True, False))
self.p.enqueue('anim',('fadeIn',self.parent.mediaNodeName, self.parent.options['fadeInterval'] * 1000, None))
# Set a timer to force the post to change
self.p.enqueue('timer',((self.parent.options['speedInterval'] + self.parent.options['fadeInterval']) * 1000,self.fadeOut))
self.log.log(9,'info','Finished fadeIn')
def fadeOut(self):
self.log.log(9,'info','Starting fadeOut')
self.log.log(9,'info', 'MicroblogMediaDisplayThread: fadeOut called by ' + inspect.getframeinfo(inspect.currentframe().f_back)[2] + '.' + str(inspect.getframeinfo(inspect.currentframe().f_back)[1]))
# After the current post times out it calls this function which fades out the current node and then starts the next node
# fading in.
self.p.enqueue('anim',('fadeOut',self.parent.mediaNodeName, self.parent.options['fadeInterval'] * 1000, self.nextPost))
self.log.log(9,'info','Finished fadeOut')
|
xibosignage/xibo-pyclient
|
plugins/media/MicroblogMedia.py
|
Python
|
agpl-3.0
| 27,626 | 0.010715 |
import json
from django.http import HttpResponse
from django.shortcuts import render
from torrents.logic import active_torrents_info
def active(request):
if request.is_ajax():
content = {"torrents": active_torrents_info()}
return HttpResponse(json.dumps(content), content_type="application/json")
return render(request, "torrents/active.html")
|
onepesu/django_transmission
|
torrents/views.py
|
Python
|
mit
| 371 | 0.002695 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.logs.models.entries import EventLogEntry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
class RHEventLogs(RHManageEventBase):
"""Shows the modification/action log for the event"""
def _process(self):
entries = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc()).all()
realms = {e.realm for e in entries}
return WPEventLogs.render_template('logs.html', self.event, entries=entries, realms=realms)
|
eliasdesousa/indico
|
indico/modules/events/logs/controllers.py
|
Python
|
gpl-3.0
| 1,324 | 0.001511 |
from rhizome.tests.base_test_case import RhizomeApiTestCase
from rhizome.models.indicator_models import Indicator
from rhizome.models.document_models import SourceObjectMap, \
DocumentSourceObjectMap
from pandas import read_csv
from rhizome.tests.setup_helpers import TestSetupHelpers
class SourceObjectMapResourceTest(RhizomeApiTestCase):
def setUp(self):
## instantiate the test client and all other methods ##
super(SourceObjectMapResourceTest, self).setUp()
self.test_setup = TestSetupHelpers()
self.user = self.test_setup.user
self.lt = self.test_setup.create_arbitrary_location_type()
self.location = \
self.test_setup.create_arbitrary_location(self.lt.id)
        self.document = self.test_setup.create_arbitrary_document(
            id=22, file_type='campaign')
self.som_0 = SourceObjectMap.objects.create(
source_object_code='This is not mapped',
master_object_id = -1,
content_type = 'location'
)
DocumentSourceObjectMap.objects.create(
document_id = self.document.id,
source_object_map_id = self.som_0.id
)
self.som_1 = SourceObjectMap.objects.create(
source_object_code='This is mapped',
master_object_id = self.location.id,
content_type = 'location'
)
DocumentSourceObjectMap.objects.create(
document_id = self.document.id,
source_object_map_id = self.som_1.id
)
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
self.indicators = self.test_setup.model_df_to_data(
indicator_df, Indicator)
def test_som_patch(self):
# this is really a PUT that is i am updating values here in place
post_data = {
'source_object_code': 'Percent missed children_PCA',
'master_object_id': self.indicators[0].id,
'content_type': 'indicator',
'mapped_by_id': self.user.id
}
patch_url = '/api/v1/source_object_map/%s/' % self.som_0.id
patch_resp = self.test_setup.patch(self, patch_url, post_data)
self.assertHttpAccepted(patch_resp)
response_data = self.deserialize(patch_resp)
self.assertEqual(
response_data['master_object_id'], self.indicators[0].id)
def test_som_post_invalid_id(self):
'''
try to PATCH with an invalid id.
'''
post_data = {
'master_object_id': self.indicators[0].id,
'content_type': 'indicator',
'mapped_by_id': self.user.id
}
post_resp = self.test_setup.patch(
            self, '/api/v1/source_object_map/9090909090/', post_data)
self.assertHttpApplicationError(post_resp)
def test_som_get_id(self):
'''
get the som_obj by id for both the mapped and un mapped.
'''
## mapped ##
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/%s/' % self.som_1.id)
self.assertHttpOK(get_resp)
response_data = self.deserialize(get_resp)
self.assertEqual(response_data['master_object_id'], self.location.id)
## un mapped ##
get_resp_1 = self.test_setup.get(
self, '/api/v1/source_object_map/%s/' % self.som_0.id)
self.assertHttpOK(get_resp_1)
response_data_1 = self.deserialize(get_resp_1)
self.assertEqual(response_data_1['master_object_id'], -1)
def test_som_get_doc_id(self):
get_data = {'document_id': self.document.id, 'is_mapped': 1}
resp = self.test_setup.get(
self, '/api/v1/source_object_map/', get_data)
self.assertHttpOK(resp)
data = self.deserialize(resp)
self.assertEqual(data['objects'][0]['master_object_id']\
, self.location.id)
def test_som_get_no_doc_param(self):
'''
the document_id is a required parameter so we need to make sure
that when we pass a request without a document_id, that we get the
expected error message.
'''
resp = self.test_setup.get(self, '/api/v1/source_object_map/')
data = self.deserialize(resp)
self.assertHttpApplicationError(resp)
# expected_error_msg = 'Missing required parameter document_id'
expected_error_msg = "'document_id'"
self.assertEqual(data['error'], str(expected_error_msg))
def test_som_get_unmapped(self):
filter_params = {'document_id': self.document.id, 'is_mapped': 0}
resp = self.test_setup.get(self, '/api/v1/source_object_map/',\
data = filter_params)
self.assertHttpOK(resp)
data = self.deserialize(resp)
data_objects = data['objects']
self.assertEqual(len(data_objects), 1) # since we created one unmapped
self.assertEqual(data_objects[0]['master_object_id'], -1)
self.assertEqual(str(data_objects[0]['source_object_code']),\
'This is not mapped')
def test_som_get_doc_id_invalid(self):
get_data = {'document_id': 123456}
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/', get_data)
self.assertHttpOK(get_resp)
get_data = self.deserialize(get_resp)
def test_som_get_id_invalid(self):
get_data_id = 123456
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/%s/' % get_data_id)
self.assertHttpApplicationError(get_resp)
get_data = self.deserialize(get_resp)
|
unicef/rhizome
|
rhizome/tests/test_api_source_object_map.py
|
Python
|
agpl-3.0
| 5,601 | 0.005892 |
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
import bpy
import _cycles
def osl_compile(input_path, report):
"""compile .osl file with given filepath to temporary .oso file"""
import tempfile
output_file = tempfile.NamedTemporaryFile(mode='w', suffix=".oso", delete=False)
output_path = output_file.name
output_file.close()
ok = _cycles.osl_compile(input_path, output_path)
if ok:
report({'INFO'}, "OSL shader compilation succeeded")
return ok, output_path
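# Editor's note (illustrative): osl_compile() compiles a .osl source file into a
# temporary .oso bytecode file and returns (ok, path), e.g.
#   ok, oso_path = osl_compile("/tmp/my_shader.osl", report)  # hypothetical path
#   if ok:
#       ...read or copy the bytecode at oso_path...
# Callers such as update_script_node() below are responsible for removing the temporary
# .oso file when they are done with it.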
def update_script_node(node, report):
"""compile and update shader script node"""
import os
import shutil
import tempfile
if node.mode == 'EXTERNAL':
# compile external script file
script_path = bpy.path.abspath(node.filepath, library=node.id_data.library)
script_path_noext, script_ext = os.path.splitext(script_path)
if script_ext == ".oso":
# it's a .oso file, no need to compile
ok, oso_path = True, script_path
oso_file_remove = False
elif script_ext == ".osl":
# compile .osl file
ok, oso_path = osl_compile(script_path, report)
oso_file_remove = True
if ok:
# copy .oso from temporary path to .osl directory
dst_path = script_path_noext + ".oso"
try:
shutil.copy2(oso_path, dst_path)
except:
report({'ERROR'}, "Failed to write .oso file next to external .osl file at " + dst_path)
elif os.path.dirname(node.filepath) == "":
# module in search path
oso_path = node.filepath
oso_file_remove = False
ok = True
else:
# unknown
report({'ERROR'}, "External shader script must have .osl or .oso extension, or be a module name")
ok = False
if ok:
node.bytecode = ""
node.bytecode_hash = ""
elif node.mode == 'INTERNAL' and node.script:
# internal script, we will store bytecode in the node
script = node.script
osl_path = bpy.path.abspath(script.filepath, library=script.library)
if script.is_in_memory or script.is_dirty or script.is_modified or not os.path.exists(osl_path):
# write text datablock contents to temporary file
osl_file = tempfile.NamedTemporaryFile(mode='w', suffix=".osl", delete=False)
osl_file.write(script.as_string())
osl_file.close()
ok, oso_path = osl_compile(osl_file.name, report)
oso_file_remove = False
os.remove(osl_file.name)
else:
# compile text datablock from disk directly
ok, oso_path = osl_compile(osl_path, report)
oso_file_remove = False
if ok:
# read bytecode
try:
oso = open(oso_path, 'r')
node.bytecode = oso.read()
oso.close()
except:
import traceback
traceback.print_exc()
report({'ERROR'}, "Can't read OSO bytecode to store in node at %r" % oso_path)
ok = False
else:
report({'WARNING'}, "No text or file specified in node, nothing to compile")
return
if ok:
# now update node with new sockets
ok = _cycles.osl_update_node(node.id_data.as_pointer(), node.as_pointer(), oso_path)
if not ok:
report({'ERROR'}, "OSL query failed to open " + oso_path)
else:
report({'ERROR'}, "OSL script compilation failed, see console for errors")
# remove temporary oso file
if oso_file_remove:
try:
os.remove(oso_path)
except:
pass
return ok
|
pawkoz/dyplom
|
blender/intern/cycles/blender/addon/osl.py
|
Python
|
gpl-2.0
| 4,371 | 0.002974 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
return self.dfsHeight(root) != -1
def dfsHeight(self, root):
if not root:
return 0
left_height = self.dfsHeight(root.left)
right_height = self.dfsHeight(root.right)
if left_height == -1 or right_height == -1:
return -1
if abs(left_height - right_height) > 1:
return -1
return max(left_height, right_height) + 1
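# Editor's note: a minimal self-check sketch. LeetCode normally supplies TreeNode (it is
# only sketched in the comment above), so a small stand-in is defined here purely for
# running this file directly.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    # Balanced: root with two leaf children -> subtree heights differ by 0.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    print(Solution().isBalanced(root))   # True
    # Unbalanced: a left-leaning chain of three nodes -> height difference of 2 at the root.
    chain = TreeNode(1)
    chain.left = TreeNode(2)
    chain.left.left = TreeNode(3)
    print(Solution().isBalanced(chain))  # False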
|
liupangzi/codekata
|
leetcode/Algorithms/110.BalancedBinaryTree/Solution.py
|
Python
|
mit
| 712 | 0.001404 |
#! /usr/bin/env python
import sys, glob
from astropy.io import fits
try: ext = int(sys.argv[2])
except: ext = 0
print sys.argv[1]
ims = glob.glob(sys.argv[1])
for im in ims:
print repr(fits.getheader(im, ext))
|
Vb2341/image-junk
|
fhead.py
|
Python
|
mit
| 217 | 0.018433 |
from __future__ import absolute_import
from .telegram import TelegramService
|
brantje/telegram-github-bot
|
captain_hook/services/telegram/__init__.py
|
Python
|
apache-2.0
| 77 | 0 |
import sys
import subprocess
result = subprocess.Popen('sh test.sh', shell=True)
text = result.communicate()[0]
sys.exit(result.returncode)
|
marcindulak/accts
|
accts/asegpaw/3.6.0-0.9.0.8965/ase/test.py
|
Python
|
gpl-3.0
| 142 | 0.007042 |
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.www.authz import roles
class RolesFromGroups(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromGroups("buildbot-")
def test_noGroups(self):
ret = self.roles.getRolesFromUser(dict(
username="homer"))
self.assertEqual(ret, [])
def test_noBuildbotGroups(self):
ret = self.roles.getRolesFromUser(dict(
username="homer",
groups=["employee"]))
self.assertEqual(ret, [])
def test_someBuildbotGroups(self):
ret = self.roles.getRolesFromUser(dict(
username="homer",
groups=["employee", "buildbot-maintainer", "buildbot-admin"]))
self.assertEqual(ret, ["maintainer", "admin"])
class RolesFromEmails(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromEmails(
employee=["homer@plant.com", "burns@plant.com"], boss=["burns@plant.com"])
def test_noUser(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"))
self.assertEqual(ret, [])
def test_User1(self):
ret = self.roles.getRolesFromUser(dict(
username="homer", email="homer@plant.com"))
self.assertEqual(ret, ["employee"])
def test_User2(self):
ret = self.roles.getRolesFromUser(dict(
username="burns", email="burns@plant.com"))
self.assertEqual(sorted(ret), ["boss", "employee"])
class RolesFromOwner(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromOwner("ownerofbuild")
def test_noOwner(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"), None)
self.assertEqual(ret, [])
def test_notOwner(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"), "homer@plant.com")
self.assertEqual(ret, [])
def test_owner(self):
ret = self.roles.getRolesFromUser(dict(
username="homer", email="homer@plant.com"), "homer@plant.com")
self.assertEqual(ret, ["ownerofbuild"])
class RolesFromUsername(unittest.TestCase, ConfigErrorsMixin):
def setUp(self):
self.roles = roles.RolesFromUsername(roles=["admins"], usernames=["Admin"])
self.roles2 = roles.RolesFromUsername(
roles=["developers", "integrators"], usernames=["Alice", "Bob"])
def test_anonymous(self):
ret = self.roles.getRolesFromUser(dict(anonymous=True))
self.assertEqual(ret, [])
def test_normalUser(self):
ret = self.roles.getRolesFromUser(dict(username="Alice"))
self.assertEqual(ret, [])
def test_admin(self):
ret = self.roles.getRolesFromUser(dict(username="Admin"))
self.assertEqual(ret, ["admins"])
def test_multipleGroups(self):
ret = self.roles2.getRolesFromUser(dict(username="Bob"))
self.assertEqual(ret, ["developers", "integrators"])
def test_badUsernames(self):
with self.assertRaisesConfigError('Usernames cannot be None'):
roles.RolesFromUsername(roles=[], usernames=[None])
|
pmisik/buildbot
|
master/buildbot/test/unit/www/test_roles.py
|
Python
|
gpl-2.0
| 3,892 | 0.000514 |
import kivy
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.progressbar import ProgressBar
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
kivy.require("1.9.1")
class MyPopupProgressBar(Widget):
    progress_bar = ObjectProperty() # Kivy Properties such as ObjectProperty are declared at class level on an EventDispatcher subclass (Widget is one).
def __init__(self, **kwa):
        super(MyPopupProgressBar, self).__init__(**kwa) # super() initializes the Widget base class before the Popup and ProgressBar are created below.
self.progress_bar = ProgressBar() # instance of ProgressBar created.
self.popup = Popup(title='New Songs Detected: Updating Song Library', content=self.progress_bar) # progress bar assigned to popup
        self.popup.bind(on_open=self.puopen) # Binds puopen() to the popup's on_open event.
Clock.schedule_once(self.progress_bar_start) # Uses clock to call progress_bar_start() (callback) one time only
    def progress_bar_start(self, instance): # Provides initial value of progress bar and launches popup
self.progress_bar.value = 1 # Initial value of progress_bar
self.popup.open() # starts puopen()
    def next(self, dt): # Updates progress bar
if self.progress_bar.value >= 100: # Checks to see if progress_bar.value has met 100
            return False # Returning False cancels the schedule so it won't repeat
self.progress_bar.value += 1 # Updates progress_bar's progress
def puopen(self, instance): # Called from bind.
        Clock.schedule_interval(self.next, .0005) # Creates Clock event scheduling next() every 0.0005 seconds (0.5 ms).
class MyApp(App):
def build(self):
return MyPopupProgressBar()
if __name__ == "__main__":
MyApp().run()
|
bradfortner/Convergence-Jukebox-Experimental
|
working_popup_progress_bar rewrite.py
|
Python
|
gpl-3.0
| 1,763 | 0.016449 |
#!/usr/bin/env python
import string, copy
import sys
def read_fasta(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key of a given query ID only contains its ID, not the full header
@return {header: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
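# Editor's note (illustrative): given a file containing
#   >query_1 some description
#   ACGT
#   ACGT
#   >other_seq
#   TTTT
# read_fasta() returns {'query_1 some description': ['ACGTACGT'], 'other_seq': ['TTTT']};
# passing query_id='query_1' shortens the first key to just 'query_1'.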
def read_fasta_pdb(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key = PDB accession
@return {PDB-acc: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:].split()[0]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
if __name__ == "__main__":
afile = open(sys.argv[1], 'r')
if len(sys.argv) == 3:
query_id = sys.argv[2]
else:
query_id = ''
seq_dict = read_fasta(afile, query_id)
afile.close()
print 'There are %d entries with unique headers in your file.' % len(seq_dict)
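# --- Illustrative usage sketch (comment only, not part of the original module) ---
# The parser also works on an in-memory handle; following the module's
# Python 2 conventions:
#     from StringIO import StringIO
#     handle = StringIO('>seq1 demo\nACGT\nacgt\n>seq2\nTTTT\n')
#     read_fasta(handle)
#     # -> {'seq1 demo': ['ACGTacgt'], 'seq2': ['TTTT']}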
|
ElofssonLab/pcons-fold
|
pconsc/plotting/parse_fasta.py
|
Python
|
mit
| 3,026 | 0.004296 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_risk_grid(risk_grid, filename):
fig = plt.figure("Risk Map")
ax = fig.add_subplot(111)
ax.set_xlabel("X Location")
ax.set_ylabel("Y Location")
x_step = 1
y_step = 1
x_min = 0
y_min = 0
x_max = risk_grid.problem.width
y_max = risk_grid.problem.height
x = np.arange(x_min, x_max, x_step)
y = np.arange(y_min, y_max, y_step)
X, Y = np.meshgrid(x, y)
zs = np.array(
[
risk_grid.get_risk(x_i, y_i)
for x_i, y_i in zip(np.ravel(X), np.ravel(Y))
]
)
np.savetxt("sandbox/risk.out", zs)
Z = zs.reshape(X.shape)
ax.pcolormesh(X, Y, Z, cmap=cm.jet)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig(filename)
return plt
def plot_time_grid(time_grid, filename):
fig = plt.figure("TimeGrid Map")
ax = fig.add_subplot(111)
ax.set_xlabel("X Location")
ax.set_ylabel("Y Location")
x_step = 1
y_step = 1
x_min = 0
y_min = 0
x_max = time_grid.width - 1
y_max = time_grid.height - 1
x = np.arange(x_min, x_max, x_step)
y = np.arange(y_min, y_max, y_step)
X, Y = np.meshgrid(x, y)
zs = np.array(
[
time_grid.get_raw(x_i, y_max - y_i)
for x_i, y_i in zip(np.ravel(X), np.ravel(Y))
]
)
Z = zs.reshape(X.shape)
ax.pcolormesh(X, Y, Z, cmap=cm.jet)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig(filename)
return plt
class TimeGridPlotter(object):
def __init__(self, time_grid):
self.time_grid = time_grid
self.fig = plt.figure("TimeGrid Map")
self.ax = self.fig.add_subplot(111)
self.ax.set_xlabel("X Location")
self.ax.set_ylabel("Y Location")
self.x_step = 2
self.y_step = 2
self.x_min = 0
self.y_min = 0
self.x_max = time_grid.width - 1
self.y_max = time_grid.height - 1
self.x = np.arange(self.x_min, self.x_max, self.x_step)
self.y = np.arange(self.y_min, self.y_max, self.y_step)
self.X, self.Y = np.meshgrid(self.x, self.y)
plt.ion()
self.get_zs()
self.ax.set_xlim(self.x_min, self.x_max)
self.ax.set_ylim(self.y_min, self.y_max)
self.iteration = 0
def get_zs(self):
zs = np.array(
[
self.time_grid.get_raw(x_i, y_i)
for x_i, y_i in zip(np.ravel(self.X), np.ravel(self.Y))
]
)
return zs
def update(self):
try:
self.graph.remove()
except:
pass
zs = self.get_zs()
Z = zs.reshape(self.X.shape)
self.graph = self.ax.pcolormesh(self.X, self.Y, Z, cmap=cm.jet)
plt.draw()
plt.pause(0.0001)
filename = "sandbox/grids/{}.out".format(self.iteration)
self.iteration += 1
np.savetxt(filename, self.time_grid.grid)
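# --- Hedged usage sketch (not part of the original module; assumes a time_grid
# object exposing width, height, grid and get_raw(), as used above) ---
#     plotter = TimeGridPlotter(time_grid)
#     for _ in range(num_steps):
#         simulation.step()   # hypothetical simulation update
#         plotter.update()    # redraws the mesh and dumps the grid to sandbox/grids/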
|
wallarelvo/rover
|
rover/plot.py
|
Python
|
apache-2.0
| 3,026 | 0.00033 |
from .Peaks import Peaks
from .Amplitude import Amplitude
from .Combination import Combination
from .OutputFrame import OutputFrame
from .Track import Track
from .panels import Panel
import bpy
class SingleTrack(
bpy.types.PropertyGroup,
Panel,
Track,
Amplitude,
Combination,
OutputFrame,
Peaks
):
''' class containing all Curve to frame
Properties, methods and operators
for single track feature'''
def update_curves( self, context ):
'''update curve when settings have been changed'''
clip = self.id_data
# initialize animation data if required
if clip.animation_data is None:
clip.animation_data_create()
if clip.animation_data.action is None:
clip.animation_data.action = bpy.data.actions.new(
name= clip.name+'Action')
# check and get peaks shapes
peak_shapes = self.check_and_get_peaks_shapes()
if type(peak_shapes) is str:
return peak_shapes
# update amplitude net curve
amplitude_net_curve = self.update_net_amplitude_curve( clip, context )
# update peaks curve
peaks_curve = self.update_peaks_curve( context,
amplitude_net_curve, peak_shapes )
#update combination curve
combination_curve = self.update_combination_curve(
clip,
context,
amplitude_net_curve,
peaks_curve
)
# update output curve
self.update_output_curve(clip, context, combination_curve)
return True
|
CaptainDesAstres/Frames-Animated-By-Curve
|
single_track/SingleTrack.py
|
Python
|
gpl-3.0
| 1,451 | 0.052378 |
import json
import requests
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
FB_MESSENGER_ACCESS_TOKEN = "[TOKEN]"
def respond_FB(sender_id, text):
json_data = {
"recipient": {"id": sender_id},
"message": {"text": text + " to you!"}
}
params = {
"access_token": FB_MESSENGER_ACCESS_TOKEN
}
r = requests.post('https://graph.facebook.com/v2.6/me/messages', json=json_data, params=params)
print(r, r.status_code, r.text)
@csrf_exempt
def fb_webhook(request):
if request.method == "GET":
if (request.GET.get('hub.verify_token') == 'this_is_a_verify_token_created_by_sean'):
return HttpResponse(request.GET.get('hub.challenge'))
return HttpResponse('Error, wrong validation token')
if request.method == "POST":
body = request.body
print("BODY", body)
messaging_events = json.loads(body.decode("utf-8"))
print("JSON BODY", body)
sender_id = messaging_events["entry"][0]["messaging"][0]["sender"]["id"]
message = messaging_events["entry"][0]["messaging"][0]["message"]["text"]
respond_FB(sender_id, message)
return HttpResponse('Received.')
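# --- Hedged wiring sketch (assumption, not part of the original file) ---
# The view above would typically be exposed through the project's urls.py,
# e.g. on Django 2+:
#     from django.urls import path
#     urlpatterns = [path('fb/webhook/', fb_webhook)]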
|
voidabhi/python-scripts
|
webhook-fb-messenger.py
|
Python
|
mit
| 1,193 | 0.003353 |
#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : granule2map.py
# CREATED BY : hjkim @IIS.2015-07-13 11:56:07.989735
# MODIFED BY :
#
# USAGE : $ ./granule2map.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
import os,sys
from optparse import OptionParser
from numpy import zeros, ma
from alien.upscale import upscale
from alien.nearest_idx import nearest_idx
from alien.GridCoordinates import GridCoordinates
def granule2map(lat, lon, aSrc, BBox=None, res=0.1, verbose=True):
'''
    res : output resolution; only n-fold multiples of 0.01 deg are supported
'''
Grid = GridCoordinates('^001',BBox=BBox) # default mapCode:^001
aOut = zeros( (Grid.lat.size,Grid.lon.size), 'float32' )-9999.9
yIdx = nearest_idx(Grid.lat, lat.flatten())
xIdx = nearest_idx(Grid.lon, lon.flatten())
aOut[yIdx, xIdx] = aSrc.flatten()
nFold = int( res/Grid.res )
aOut = upscale(aOut, (Grid.lat.size/nFold, Grid.lon.size/nFold), mode='m', missing=-9999.9)
#aOut = upscale(aOut, (Grid.lat.size/nFold, Grid.lon.size/nFold), mode='s', missing=-9999.9)
if verbose:
print '\t[GRANULE2MAP] Domain:%s %s -> %s'%( BBox, aSrc.shape, aOut.shape)
return aOut
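# --- Illustrative call sketch (hypothetical values, not part of the original
# file; the BBox layout is an assumption about GridCoordinates) ---
#     import numpy as np
#     lat = np.linspace(34.0, 36.0, 50).reshape(5, 10)
#     lon = np.linspace(135.0, 137.0, 50).reshape(5, 10)
#     aSrc = np.random.rand(5, 10).astype('float32')
#     aOut = granule2map(lat, lon, aSrc, BBox=[[34., 135.], [36., 137.]], res=0.1)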
|
kimlab/GPyM
|
granule2map.py
|
Python
|
mit
| 1,360 | 0.025735 |
'''A collection of tasks to perform related to Piwik custom variables.'''
import logging
import re
import dbsources
import dbengine
class Populate(object):
'''Take existing data and populate custom variables.'''
def __init__(self):
self.CONFIG = None # tables and fields to use
self.CONNECTION = None # in this location
self.setup()
        regexp = '(.*)([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})(.*)'  # UUID pattern; a [0-F] range would also match punctuation
self.PATTERN_CHECK = re.compile(regexp, re.IGNORECASE)
# These two codes indicate what type of update has occurred
self.DCODE_IGNORE = 'n' # value to insert when we are not interested
self.DCODE_VIEW = 'v' # value to insert when it is a view
self.DCODE_DOWN = 'd' # value to insert when a download
# Control how the WHERE clause will be generated.
self.FIND_WHERE_METHOD = self.where_notdone
self.FIND_BATCH_SIZE = 10000 # takes < 1 minute
def setup(self):
'''Setup the connection to the system being populated.'''
source = dbsources.ReadWriteDB()
source.setup_source1()
host, username, password, database = source.get_settings()
self.CONFIG = dbengine.PiwikConfig()
#self.CONFIG.setup_custom_vars(1) # check count finds stuff
self.CONNECTION = dbengine.Connection()
self.CONNECTION.setup(host, username, password, database)
# Count existing data
def sql_count_customvar_scode(self):
count = self.CONFIG.FIELD_CUSTOM_VARS_SCODE
table = self.CONFIG.TABLE_CUSTOM_VARS_STORE
return "SELECT COUNT(%s) FROM %s"%(count, table)
def sql_count_customvar_dcode(self):
count = self.CONFIG.FIELD_CUSTOM_VARS_DCODE
table = self.CONFIG.TABLE_CUSTOM_VARS_STORE
return "SELECT COUNT(%s) FROM %s"%(count, table)
def count_existing(self):
'''Return the number of custom variables that exist.'''
scode = self.CONNECTION.fetchone(self.sql_count_customvar_scode())
logging.info('Count of custom variable: %s'%scode)
dcode = self.CONNECTION.fetchone(self.sql_count_customvar_dcode())
logging.info('Count of custom variable: %s'%dcode)
return scode, dcode
# Lookup custom variables
def sql_action_lookup(self, action):
table, key, check, down = self.CONFIG.get_action_look_config()
return "SELECT %s , %s , %s FROM %s WHERE %s='%s'"%(key, check, down, table, key, action)
def action_lookup(self, action):
'''Returns data from the key to use as scode and dcode'''
query = self.sql_action_lookup(action)
return self.CONNECTION.fetchone(query)
def get_action(self, action):
'''Return details about an action.'''
result = self.action_lookup(action)
if not result:
return False
code = self.action_extract_code(result[1])
if not code:
return False
checktype = result[2]
if checktype == self.CONFIG.ACTION_ISUSEFUL:
return code, 'view'
elif checktype == self.CONFIG.ACTION_ISDOWNLOAD:
return code, 'down'
else:
return code, 'none'
def action_extract_code(self, checkname):
found = re.search(self.PATTERN_CHECK, checkname)
if found:
code = 'uuid:%s'%str(found.group(2)).lower()
return code
else:
return False
# Find data that needs checking to see if custom variables are needed.
def sql_find_items(self):
table, key, action, site, when, visit, scode, dcode = self.CONFIG.get_store_look_config()
select = 'SELECT %s , %s , %s , %s , %s , %s , %s FROM %s'%(key,
action, site, when, visit, scode, dcode, table)
return '%s%s'%(select, self.FIND_WHERE_METHOD())
def setup_where(self, cat='test'):
'''Setup the where clause to use when finding items to update.'''
if cat not in ['test','notdone']:
raise ValueError
if cat == 'test':
self.FIND_WHERE_METHOD = self.where_test
elif cat == 'notdone':
self.FIND_WHERE_METHOD = self.where_notdone
def where_test(self):
return ' LIMIT 0, 5'
def where_notdone(self):
return " WHERE %s IS NULL LIMIT 0, %s"%(
self.CONFIG.FIELD_CUSTOM_VARS_DCODE, self.FIND_BATCH_SIZE)
def find_items_to_populate(self, how='test'):
query = self.sql_find_items()
return self.CONNECTION.fetchall(query)
# Update the store if necessary.
def sql_update(self, key, scode, dcode):
table, fieldkey = self.CONFIG.get_update_store_config()
update = "UPDATE %s SET "%table
scode = "%s = '%s' , "%(self.CONFIG.FIELD_CUSTOM_VARS_SCODE, scode)
dcode = "%s = '%s' "%(self.CONFIG.FIELD_CUSTOM_VARS_DCODE, dcode)
where = "WHERE %s = %s"%(fieldkey, key)
return '%s%s%s%s'%(update, scode, dcode, where)
def update_codes(self, key, scode, dcode):
'''Execute the update of key with scode and dcode.'''
query = self.sql_update(key, scode, dcode)
return self.CONNECTION.update(query)
def run_populate(self):
'''Check the store and update any custom variables needed.'''
views = 0
downloads = 0
others = 0
for item in self.find_items_to_populate():
key = item[0]
action = item[1]
existing_scode = item[5]
existing_dcode = item[6]
# dcode controls if this item is updated.
check = (self.DCODE_IGNORE, self.DCODE_VIEW, self.DCODE_DOWN)
if existing_dcode in check:
continue
# It needs updating, find out what type of update is needed
# and work out the scodes and dcodes to use.
useful = self.get_action(action)
if not useful: # we can ignore it,
others += 1
scode = self.DCODE_IGNORE
dcode = self.DCODE_IGNORE
else: # its either a view or download
new_code = useful[0]
category = useful[1]
if category == 'view':
views += 1
if existing_scode:
scode = existing_scode
else:
scode = new_code
dcode = self.DCODE_VIEW
if category == 'down':
downloads += 1
dcode = self.DCODE_DOWN
# Deal with archived data that starts off with no scode,
if existing_scode:
scode = existing_scode
else:
scode = new_code
self.update_codes(key, scode, dcode)
return views, downloads, others
if __name__ == '__main__':
'''Do nothing unless enabled.'''
testing = False
process = False
if process:
p = Populate()
p.FIND_BATCH_SIZE = 10000000 # override the default
p.run_populate()
if testing:
logging.basicConfig(level=logging.INFO)
p = Populate()
count = p.count_existing()
logging.critical(count)
logging.warn('The above should be empty for a new populate.')
logging.warn('If not you need to CHECK why!!')
result = p.action_lookup('50') # test the lookup works
if result:
if len(result) == 3:
logging.info(result)
else:
logging.warn('Lookup failed.')
print 'Expect to see uuid:15b86a5d-21f4-44a3-95bb-b8543d326658'
print p.get_action('33162') #type 4
print p.get_action('33257') #view
print p.get_action('33258') #down
p.setup_where('test')
views, downloads, ignores = p.run_populate()
print 'View: %s'%views
print 'Downloads: %s'%downloads
print 'Ignores: %s'%ignores
|
bodleian/stats-time-cache
|
collate/custom_variables.py
|
Python
|
gpl-3.0
| 8,288 | 0.009411 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LabelSelector(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, match_expressions=None, match_labels=None):
"""
V1LabelSelector - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'match_expressions': 'list[V1LabelSelectorRequirement]',
'match_labels': 'dict(str, str)'
}
self.attribute_map = {
'match_expressions': 'matchExpressions',
'match_labels': 'matchLabels'
}
self._match_expressions = match_expressions
self._match_labels = match_labels
@property
def match_expressions(self):
"""
Gets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:return: The match_expressions of this V1LabelSelector.
:rtype: list[V1LabelSelectorRequirement]
"""
return self._match_expressions
@match_expressions.setter
def match_expressions(self, match_expressions):
"""
Sets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param match_expressions: The match_expressions of this V1LabelSelector.
:type: list[V1LabelSelectorRequirement]
"""
self._match_expressions = match_expressions
@property
def match_labels(self):
"""
Gets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:return: The match_labels of this V1LabelSelector.
:rtype: dict(str, str)
"""
return self._match_labels
@match_labels.setter
def match_labels(self, match_labels):
"""
Sets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:param match_labels: The match_labels of this V1LabelSelector.
:type: dict(str, str)
"""
self._match_labels = match_labels
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
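# --- Usage sketch (comment only, not part of the generated client) ---
#     selector = V1LabelSelector(match_labels={'app': 'web'})
#     selector.to_dict()
#     # -> {'match_expressions': None, 'match_labels': {'app': 'web'}}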
|
skuda/client-python
|
kubernetes/client/models/v1_label_selector.py
|
Python
|
apache-2.0
| 4,559 | 0.001535 |
import pytest
from mock import Mock
from workbench.runtime import WorkbenchRuntime
from xblock.fields import ScopeIds
from xblock.runtime import DictKeyValueStore, KvsFieldData
from done.done import DoneXBlock
def generate_scope_ids(runtime, block_type):
""" helper to generate scope IDs for an XBlock """
def_id = runtime.id_generator.create_definition(block_type)
usage_id = runtime.id_generator.create_usage(def_id)
return ScopeIds('user', block_type, def_id, usage_id)
@pytest.fixture
def done_xblock():
"""Done XBlock pytest fixture."""
runtime = WorkbenchRuntime()
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
ids = generate_scope_ids(runtime, 'done')
done_xblock = DoneXBlock(runtime, db_model, scope_ids=ids)
done_xblock.usage_id = Mock()
return done_xblock
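# --- Example test sketch (comment only, not part of the original conftest) ---
#     def test_scope_ids(done_xblock):
#         assert done_xblock.scope_ids.block_type == 'done'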
|
pmitros/DoneXBlock
|
tests/conftest.py
|
Python
|
agpl-3.0
| 842 | 0 |
from evosnap import constants
class POSDevice:
def __init__(self,**kwargs):
self.__order = [
'posDeviceType', 'posDeviceConnection', 'posDeviceColour', 'posDeviceQuantity',
]
self.__lower_camelcase = constants.ALL_FIELDS
self.pos_device_type = kwargs.get('pos_device_type')
self.pos_device_connection = kwargs.get('pos_device_connection')
self.pos_device_colour = kwargs.get('pos_device_colour')
self.pos_device_quantity = kwargs.get('pos_device_quantity')
@property
def hash_str(self):
required = [
'pos_device_type', 'pos_device_connection', 'pos_device_colour', 'pos_device_quantity',
]
return ''.join([str(getattr(self,f)).strip() for f in required if getattr(self,f) is not None])
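# --- Usage sketch (comment only, hypothetical field values) ---
#     device = POSDevice(pos_device_type='Terminal', pos_device_connection='IP',
#                        pos_device_colour='Black', pos_device_quantity=2)
#     device.hash_str  # -> 'TerminalIPBlack2'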
|
Zertifica/evosnap
|
evosnap/merchant_applications/pos_device.py
|
Python
|
mit
| 806 | 0.007444 |
# Generated by Django 2.2.1 on 2019-05-08 23:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("private_sharing", "0022_auto_20190507_1843"),
("public_data", "0002_auto_20171213_1947"),
]
operations = [
migrations.AddField(
model_name="publicdataaccess",
name="project_membership",
field=models.OneToOneField(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="private_sharing.DataRequestProjectMember",
),
),
migrations.AlterField(
model_name="publicdataaccess",
name="data_source",
field=models.CharField(max_length=100, null=True),
),
]
|
OpenHumans/open-humans
|
public_data/migrations/0003_auto_20190508_2341.py
|
Python
|
mit
| 834 | 0 |
# -*- coding: utf-8 -*-
"""
ctf.py -- contrast transfer function in electron tomography
Copyright 2014 Holger Kohr
This file is part of tomok.
tomok is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tomok is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with tomok. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import numpy as np
class ContrTransFunc(object):
"""Callable Contrast Transfer Function class.
TODO: finish this properly."""
def __init__(self, emcfg):
self.osc_polycoeff = emcfg.osc_polycoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.exp(np.polyval(1j * self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
# TODO: display method
class ContrTransFuncACR(object):
"""Callable class for the constant acr CTF.
TODO: finish this."""
def __init__(self, emcfg, acr=0.1):
ocoeff = emcfg.osc_polycoeff
ocoeff[3] = np.arctan(acr)
self.osc_polycoeff = ocoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.sin(np.polyval(self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
def zeros(self, num=0, maxfreq2=None):
"""The zeros as an array.
TODO: finish"""
# The sine zeros are those of the polynomials a*x^2 + b*x + c_i,
# where a and b are the quadratic / linear coefficients of
# the sine argument and c_i = constant coeff. - (i+1)*pi
zeros = []
p_a = self.osc_polycoeff[1]
p_b = self.osc_polycoeff[2]
maxzeros = 1000
nmax = num if num else maxzeros
for i in range(nmax):
p_c = self.osc_polycoeff[3] - (i + 1) * np.pi
            zero = (-p_b + np.sqrt(p_b**2 - 4. * p_a * p_c)) / (2 * p_a)  # positive root of a*x^2 + b*x + c_i = 0
if maxfreq2 is not None and zero > maxfreq2:
break
zeros.append(zero)
return np.asarray(zeros)
# TODO: display method
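# --- Hedged usage sketch (comment only; the configuration attributes below are
# assumptions, inferred from what the classes above read off the emcfg object) ---
#     import numpy as np
#     class _FakeEMConfig(object):
#         osc_polycoeff = np.array([0.0, 1.0e-4, -0.5, 0.0])
#         env_polycoeff = np.array([0.0, 0.0, 1.0e-3, 0.0])
#         wavenum, aperture, focal_len, magnif = 100.0, 1.0, 1.0, 1.0
#     ctf = ContrTransFuncACR(_FakeEMConfig(), acr=0.1)
#     vals = ctf(np.linspace(0.0, 50.0, 200))   # CTF values on a freq^2 grid
#     first_zeros = ctf.zeros(num=3)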
|
kohr-h/tomok
|
ctf.py
|
Python
|
gpl-3.0
| 3,242 | 0.000925 |
# coding: utf-8
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division, unicode_literals
from tests.check_utils import api_get, api_post, api_delete, api_put, _dt
import json
import pytest
import jmespath
from navitiacommon import models
from tyr import app
@pytest.fixture
def create_autocomplete_parameter():
with app.app_context():
autocomplete_param = models.AutocompleteParameter('idf', 'OSM', 'BANO', 'FUSIO', 'OSM', [8, 9])
models.db.session.add(autocomplete_param)
models.db.session.commit()
# we also create 3 datasets, one for bano, 2 for osm
for i, dset_type in enumerate(['bano', 'osm', 'osm']):
job = models.Job()
dataset = models.DataSet()
dataset.type = dset_type
dataset.family_type = 'autocomplete_{}'.format(dataset.type)
dataset.name = '/path/to/dataset_{}'.format(i)
models.db.session.add(dataset)
job.autocomplete_params_id = autocomplete_param.id
job.data_sets.append(dataset)
job.state = 'done'
models.db.session.add(job)
models.db.session.commit()
@pytest.fixture
def create_two_autocomplete_parameters():
with app.app_context():
autocomplete_param1 = models.AutocompleteParameter('europe', 'OSM', 'BANO', 'OSM', 'OSM', [8, 9])
autocomplete_param2 = models.AutocompleteParameter('france', 'OSM', 'OSM', 'FUSIO', 'OSM', [8, 9])
models.db.session.add(autocomplete_param1)
models.db.session.add(autocomplete_param2)
models.db.session.commit()
@pytest.fixture
def autocomplete_parameter_json():
return {
"name": "peru",
"street": "OSM",
"address": "BANO",
"poi": "FUSIO",
"admin": "OSM",
"admin_level": [8],
}
def test_get_autocomplete_parameters_empty():
resp = api_get('/v0/autocomplete_parameters/')
assert resp == []
def test_get_all_autocomplete(create_autocomplete_parameter):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 1
assert resp[0]['name'] == 'idf'
assert resp[0]['street'] == 'OSM'
assert resp[0]['address'] == 'BANO'
assert resp[0]['poi'] == 'FUSIO'
assert resp[0]['admin'] == 'OSM'
assert resp[0]['admin_level'] == [8, 9]
def test_get_autocomplete_by_name(create_two_autocomplete_parameters):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 2
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
assert resp['street'] == 'OSM'
assert resp['address'] == 'OSM'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8, 9]
def test_post_autocomplete(autocomplete_parameter_json):
resp = api_post(
'/v0/autocomplete_parameters',
data=json.dumps(autocomplete_parameter_json),
content_type='application/json',
)
assert resp['name'] == 'peru'
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8]
def test_post_autocomplete_cosmo():
resp = api_post(
'/v0/autocomplete_parameters',
data=json.dumps({"name": "bobette", "admin": "COSMOGONY"}),
content_type='application/json',
)
assert resp['name'] == 'bobette'
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'OSM'
assert resp['admin'] == 'COSMOGONY'
assert resp['admin_level'] == []
def test_put_autocomplete(create_two_autocomplete_parameters, autocomplete_parameter_json):
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
assert resp['street'] == 'OSM'
assert resp['address'] == 'OSM'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8, 9]
resp = api_put(
'/v0/autocomplete_parameters/france',
data=json.dumps(autocomplete_parameter_json),
content_type='application/json',
)
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8]
def test_delete_autocomplete(create_two_autocomplete_parameters):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 2
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
_, status = api_delete('/v0/autocomplete_parameters/france', check=False, no_json=True)
assert status == 204
_, status = api_get('/v0/autocomplete_parameters/france', check=False)
assert status == 404
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 1
def test_get_last_datasets_autocomplete(create_autocomplete_parameter):
"""
we query the loaded datasets of idf
we loaded 3 datasets, but by default we should get one by family_type, so one for bano, one for osm
"""
resp = api_get('/v0/autocomplete_parameters/idf/last_datasets')
assert len(resp) == 2
bano = next((d for d in resp if d['type'] == 'bano'), None)
assert bano
assert bano['family_type'] == 'autocomplete_bano'
assert bano['name'] == '/path/to/dataset_0'
osm = next((d for d in resp if d['type'] == 'osm'), None)
assert osm
assert osm['family_type'] == 'autocomplete_osm'
assert osm['name'] == '/path/to/dataset_2' # we should have the last one
# if we ask for the 2 last datasets per type, we got all of them
resp = api_get('/v0/autocomplete_parameters/idf/last_datasets?count=2')
assert len(resp) == 3
@pytest.fixture
def minimal_poi_types_json():
return {
"poi_types": [
{"id": "amenity:bicycle_rental", "name": "Station VLS"},
{"id": "amenity:parking", "name": "Parking"},
],
"rules": [
{
"osm_tags_filters": [{"key": "amenity", "value": "bicycle_rental"}],
"poi_type_id": "amenity:bicycle_rental",
},
{"osm_tags_filters": [{"key": "amenity", "value": "parking"}], "poi_type_id": "amenity:parking"},
],
}
def test_autocomplete_poi_types(create_two_autocomplete_parameters, minimal_poi_types_json):
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
# POST a minimal conf
resp = api_post(
'/v0/autocomplete_parameters/france/poi_types',
data=json.dumps(minimal_poi_types_json),
content_type='application/json',
)
def test_minimal_conf(resp):
assert len(resp['poi_types']) == 2
assert len(resp['rules']) == 2
bss_type = jmespath.search("poi_types[?id=='amenity:bicycle_rental']", resp)
assert len(bss_type) == 1
assert bss_type[0]['name'] == 'Station VLS'
bss_rule = jmespath.search("rules[?poi_type_id=='amenity:bicycle_rental']", resp)
assert len(bss_rule) == 1
assert bss_rule[0]['osm_tags_filters'][0]['value'] == 'bicycle_rental'
# check that it's not the "default" conf
assert not jmespath.search("poi_types[?id=='amenity:townhall']", resp)
# check that the conf is correctly set on france
test_minimal_conf(resp)
# check that the conf on europe is still empty
resp = api_get('/v0/autocomplete_parameters/europe/poi_types')
assert not resp
# check GET of newly defined france conf
resp = api_get('/v0/autocomplete_parameters/france/poi_types')
test_minimal_conf(resp)
# check DELETE of france conf
resp, code = api_delete('/v0/autocomplete_parameters/france/poi_types', check=False, no_json=True)
assert not resp
assert code == 204
# check get of conf on france is now empty
resp = api_get('/v0/autocomplete_parameters/france/poi_types')
assert not resp
# check that tyr refuses incorrect conf
resp, code = api_post(
'/v0/autocomplete_parameters/france/poi_types',
data=json.dumps({'poi_types': [{'id': 'bob', 'name': 'Bob'}]}),
content_type='application/json',
check=False,
)
assert code == 400
assert resp['status'] == 'error'
assert 'rules' in resp['message']
|
xlqian/navitia
|
source/tyr/tests/integration/autocomplete_test.py
|
Python
|
agpl-3.0
| 9,528 | 0.001364 |
import collections
import warnings
from operator import add
import pytest
import numpy as np
import dask
import dask.array as da
from dask.highlevelgraph import HighLevelGraph
from dask.blockwise import Blockwise, rewrite_blockwise, optimize_blockwise, index_subs
from dask.array.utils import assert_eq
from dask.array.numpy_compat import _numpy_116
from dask.utils_test import inc, dec
a, b, c, d, e, f, g = "abcdefg"
_0, _1, _2, _3, _4, _5, _6, _7, _8, _9 = ["_%d" % i for i in range(10)]
i, j, k = "ijk"
@pytest.mark.parametrize(
"inputs,expected",
[
# output name, output index, task, input indices
[[(b, "i", {b: (inc, _0)}, [(a, "i")])], (b, "i", {b: (inc, _0)}, [(a, "i")])],
[
[
(b, "i", {b: (inc, _0)}, [(a, "i")]),
(c, "i", {c: (dec, _0)}, [(a, "i")]),
(d, "i", {d: (add, _0, _1, _2)}, [(a, "i"), (b, "i"), (c, "i")]),
],
(d, "i", {b: (inc, _0), c: (dec, _0), d: (add, _0, b, c)}, [(a, "i")]),
],
[
[
(b, "i", {b: (inc, _0)}, [(a, "i")]),
(c, "j", {c: (inc, _0)}, [(b, "j")]),
],
(c, "j", {b: (inc, _0), c: (inc, b)}, [(a, "j")]),
],
[
[
(b, "i", {b: (sum, _0)}, [(a, "ij")]),
(c, "k", {c: (inc, _0)}, [(b, "k")]),
],
(c, "k", {b: (sum, _0), c: (inc, b)}, [(a, "kA")]),
],
[
[
(c, "i", {c: (inc, _0)}, [(a, "i")]),
(d, "i", {d: (inc, _0)}, [(b, "i")]),
(g, "ij", {g: (add, _0, _1)}, [(c, "i"), (d, "j")]),
],
(
g,
"ij",
{g: (add, c, d), c: (inc, _0), d: (inc, _1)},
[(a, "i"), (b, "j")],
),
],
[
[
(b, "ji", {b: (np.transpose, _0)}, [(a, "ij")]),
(c, "ij", {c: (add, _0, _1)}, [(a, "ij"), (b, "ij")]),
],
(c, "ij", {c: (add, _0, b), b: (np.transpose, _1)}, [(a, "ij"), (a, "ji")]),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(d, "i", {d: (inc, _0)}, [(c, "i")]),
],
(d, "i", {d: (inc, c), c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
],
[
[
(b, "ij", {b: (np.transpose, _0)}, [(a, "ji")]),
(d, "ij", {d: (np.dot, _0, _1)}, [(b, "ik"), (c, "kj")]),
],
(
d,
"ij",
{d: (np.dot, b, _0), b: (np.transpose, _1)},
[(c, "kj"), (a, "ki")],
),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(f, "i", {f: (add, _0, _1)}, [(d, "i"), (e, "i")]),
(g, "i", {g: (add, _0, _1)}, [(c, "i"), (f, "i")]),
],
(
g,
"i",
{g: (add, c, f), f: (add, _2, _3), c: (add, _0, _1)},
[(a, i), (b, i), (d, i), (e, i)],
),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(f, "i", {f: (add, _0, _1)}, [(a, "i"), (e, "i")]),
(g, "i", {g: (add, _0, _1)}, [(c, "i"), (f, "i")]),
],
(
g,
"i",
{g: (add, c, f), f: (add, _0, _2), c: (add, _0, _1)},
[(a, "i"), (b, "i"), (e, "i")],
),
],
[
[
(b, "i", {b: (sum, _0)}, [(a, "ij")]),
(c, "i", {c: (inc, _0)}, [(b, "i")]),
],
(c, "i", {c: (inc, b), b: (sum, _0)}, [(a, "iA")]),
],
[
[
(c, "i", {c: (inc, _0)}, [(b, "i")]),
(d, "i", {d: (add, _0, _1, _2)}, [(a, "i"), (b, "i"), (c, "i")]),
],
(d, "i", {d: (add, _0, _1, c), c: (inc, _1)}, [(a, "i"), (b, "i")]),
],
# Include literals
[
[(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)])],
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
],
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (456, None)]),
],
(
c,
"j",
{b: (add, _1, _2), c: (add, b, _0)},
[(456, None), (a, "j"), (123, None)],
),
],
# Literals that compare equal (e.g. 0 and False) aren't deduplicated
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (0, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (False, None)]),
],
(
c,
"j",
{b: (add, _1, _2), c: (add, b, _0)},
[(False, None), (a, "j"), (0, None)],
),
],
# Literals are deduplicated
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (123, None)]),
],
(c, "j", {b: (add, _1, _0), c: (add, b, _0)}, [(123, None), (a, "j")]),
],
],
)
def test_rewrite(inputs, expected):
inputs = [
Blockwise(
*inp, numblocks={k: (1,) * len(v) for k, v in inp[-1] if v is not None}
)
for inp in inputs
]
result = rewrite_blockwise(inputs)
result2 = (
result.output,
"".join(result.output_indices),
result.dsk,
[
(name, "".join(ind) if ind is not None else ind)
for name, ind in result.indices
],
)
assert result2 == expected
def test_index_subs():
assert index_subs(tuple("ij"), {"i": "j", "j": "i"}) == tuple("ji")
def test_optimize_blockwise():
x = da.ones(10, chunks=(5,))
y = (((x + 1) + 2) + 3) + 4
dsk = da.optimization.optimize_blockwise(y.dask)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
def test_blockwise_diamond_fusion():
x = da.ones(10, chunks=(5,))
y = ((x + 1) + 2) + 3
a = y * 2
b = y * 3
c = a + b
d = ((c + 1) + 2) + 3
dsk = da.optimization.optimize_blockwise(d.dask)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
def test_blockwise_non_blockwise_output():
x = da.ones(10, chunks=(5,))
y = ((x + 1) + 2) + 3
w = y.sum()
z = ((y * 2) * 3) * 4
z_top_before = tuple(z.dask.dicts[z.name].indices)
(zz,) = dask.optimize(z)
z_top_after = tuple(z.dask.dicts[z.name].indices)
assert z_top_before == z_top_after, "z_top mutated"
dsk = optimize_blockwise(z.dask, keys=list(dask.core.flatten(z.__dask_keys__())))
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
dsk = optimize_blockwise(
HighLevelGraph.merge(w.dask, z.dask),
keys=list(dask.core.flatten([w.__dask_keys__(), z.__dask_keys__()])),
)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in z.dask.dicts.values() if isinstance(layer, Blockwise)])
>= 1
)
def test_top_len():
x = da.ones(10, chunks=(5,))
y = x[:, None] * x[None, :]
d = y.dask.dicts[y.name]
assert len(d) == 4
def test_inner_compute():
x = da.ones(10, chunks=(5,)) + 1 + 2 + 3
a = x.sum()
y = x * 2 * 3 * 4
b = y.sum()
z = x * 2 * 3
dask.compute(x, a, y, b, z)
@pytest.mark.parametrize("name", ["_", "_0", "_1", ".", ".0"])
def test_common_token_names_args(name):
x = np.array(["a", "bb", "ccc"], dtype=object)
d = da.from_array(x, chunks=2)
result = da.blockwise(add, "i", d, "i", name, None, dtype=object)
expected = x + name
assert_eq(result, expected)
@pytest.mark.parametrize("name", ["_0", "_1", ".", ".0", "_"])
def test_common_token_names_kwargs(name):
x = np.array(["a", "bb", "ccc"], dtype=object)
d = da.from_array(x, chunks=2)
result = da.blockwise(lambda x, y: x + y, "i", d, "i", y=name, dtype=object)
expected = x + name
assert_eq(result, expected)
def test_blockwise_names():
x = da.ones(5, chunks=(2,))
y = da.blockwise(add, "i", x, "i", dtype=x.dtype)
assert y.name.startswith("add")
def test_blockwise_new_axes():
def f(x):
return x[:, None] * np.ones((1, 7))
x = da.ones(5, chunks=2)
y = da.blockwise(
f, "aq", x, "a", new_axes={"q": 7}, concatenate=True, dtype=x.dtype
)
assert y.chunks == ((2, 2, 1), (7,))
assert_eq(y, np.ones((5, 7)))
def f(x):
return x[None, :] * np.ones((7, 1))
x = da.ones(5, chunks=2)
y = da.blockwise(
f, "qa", x, "a", new_axes={"q": 7}, concatenate=True, dtype=x.dtype
)
assert y.chunks == ((7,), (2, 2, 1))
assert_eq(y, np.ones((7, 5)))
def f(x):
y = x.sum(axis=1)
return y[:, None] * np.ones((1, 5))
x = da.ones((4, 6), chunks=(2, 2))
y = da.blockwise(
f, "aq", x, "ab", new_axes={"q": 5}, concatenate=True, dtype=x.dtype
)
assert y.chunks == ((2, 2), (5,))
assert_eq(y, np.ones((4, 5)) * 6)
def test_blockwise_new_axes_2():
x = da.ones((2, 2), chunks=(1, 1))
def func(x):
return np.stack([x, -x], axis=-1)
y = da.blockwise(
func,
("x", "y", "sign"),
x,
("x", "y"),
dtype=x.dtype,
concatenate=True,
new_axes={"sign": 2},
)
assert_eq(y, y)
@pytest.mark.parametrize("concatenate", [True, False])
def test_blockwise_stacked_new_axes(concatenate):
def f(x):
return x[..., None] * np.ones((1, 7))
x = da.ones(5, chunks=2)
y = da.blockwise(
f, "aq", x, "a", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
z = da.blockwise(
f, "abq", y, "ab", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
assert z.chunks == ((2, 2, 1), (7,), (7,))
assert_eq(z, np.ones((5, 7, 7)))
@pytest.mark.parametrize("concatenate", [True, False])
def test_blockwise_stacked_new_axes_front(concatenate):
def f(x):
if isinstance(x, list):
x = np.concatenate(x)
return x[None, ...] * np.ones(7)[(slice(None),) + (None,) * x.ndim]
x = da.ones(5, chunks=2)
y = da.blockwise(
f, "qa", x, "a", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
z = da.blockwise(
f, "qab", y, "ab", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
assert z.chunks == ((7,), (7,), (2, 2, 1))
assert_eq(z, np.ones((7, 7, 5)))
w = da.blockwise(
lambda x: x[:, 0, 0], "a", z, "abc", dtype=x.dtype, concatenate=True
)
assert w.chunks == ((7,),)
assert_eq(w, np.ones((7,)))
@pytest.mark.parametrize("concatenate", [True, False])
def test_blockwise_stacked_new_axes_same_dim(concatenate):
def f(x):
return x[..., None] * np.ones((1, 7))
x = da.ones(5, chunks=2)
y = da.zeros(5, chunks=2)
a = da.blockwise(
f, "aq", x, "a", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
b = da.blockwise(
f, "aq", y, "a", new_axes={"q": 7}, concatenate=concatenate, dtype=x.dtype
)
c = a + b
assert c.chunks == ((2, 2, 1), (7,))
assert_eq(c, np.ones((5, 7)))
def test_blockwise_new_axes_chunked():
def f(x):
return x[None, :] * 2
x = da.arange(0, 6, 1, chunks=2, dtype=np.int32)
y = da.blockwise(f, "qa", x, "a", new_axes={"q": (1, 1)}, dtype=x.dtype)
assert y.chunks == ((1, 1), (2, 2, 2))
assert_eq(y, np.array([[0, 2, 4, 6, 8, 10], [0, 2, 4, 6, 8, 10]], np.int32))
def test_blockwise_no_args():
def f():
return np.ones((2, 3), np.float32)
x = da.blockwise(f, "ab", new_axes={"a": 2, "b": (3, 3)}, dtype=np.float32)
assert x.chunks == ((2,), (3, 3))
assert_eq(x, np.ones((2, 6), np.float32))
def test_blockwise_no_array_args():
def f(dtype):
return np.ones((2, 3), dtype)
x = da.blockwise(
f, "ab", np.float32, None, new_axes={"a": 2, "b": (3, 3)}, dtype=np.float32
)
assert x.chunks == ((2,), (3, 3))
assert_eq(x, np.ones((2, 6), np.float32))
def test_blockwise_kwargs():
def f(a, b=0):
return a + b
x = da.ones(5, chunks=(2,))
y = da.blockwise(f, "i", x, "i", b=10, dtype=x.dtype)
assert_eq(y, np.ones(5) + 10)
def test_blockwise_chunks():
x = da.ones((5, 5), chunks=((2, 1, 2), (3, 2)))
def double(a, axis=0):
return np.concatenate([a, a], axis=axis)
y = da.blockwise(
double,
"ij",
x,
"ij",
adjust_chunks={"i": lambda n: 2 * n},
axis=0,
dtype=x.dtype,
)
assert y.chunks == ((4, 2, 4), (3, 2))
assert_eq(y, np.ones((10, 5)))
y = da.blockwise(
double,
"ij",
x,
"ij",
adjust_chunks={"j": lambda n: 2 * n},
axis=1,
dtype=x.dtype,
)
assert y.chunks == ((2, 1, 2), (6, 4))
assert_eq(y, np.ones((5, 10)))
x = da.ones((10, 10), chunks=(5, 5))
y = da.blockwise(
double, "ij", x, "ij", axis=0, adjust_chunks={"i": 10}, dtype=x.dtype
)
assert y.chunks == ((10, 10), (5, 5))
assert_eq(y, np.ones((20, 10)))
y = da.blockwise(
double, "ij", x, "ij", axis=0, adjust_chunks={"i": (10, 10)}, dtype=x.dtype
)
assert y.chunks == ((10, 10), (5, 5))
assert_eq(y, np.ones((20, 10)))
def test_blockwise_numpy_arg():
with warnings.catch_warnings():
if not _numpy_116:
# Not sure why, but this DeprecationWarning is no longer
# showing up for NumPy >=1.16. So we only filter here
# for 1.15 and earlier
warnings.simplefilter("ignore", DeprecationWarning)
x = da.arange(10, chunks=(5,))
y = np.arange(1000)
x = x.map_blocks(lambda x, y: x, 1.0)
x = x.map_blocks(lambda x, y: x, "abc")
x = x.map_blocks(lambda x, y: x, y)
x = x.map_blocks(lambda x, y: x, "abc")
x = x.map_blocks(lambda x, y: x, 1.0)
x = x.map_blocks(lambda x, y, z: x, "abc", np.array(["a", "b"], dtype=object))
assert_eq(x, np.arange(10))
def test_bag_array_conversion():
import dask.bag as db
b = db.range(10, npartitions=1)
(x,) = b.map_partitions(np.asarray).to_delayed()
(x,) = [da.from_delayed(a, shape=(10,), dtype=int) for a in [x]]
z = da.concatenate([x])
assert_eq(z, np.arange(10), check_graph=False)
def test_svd():
x = da.ones((1, 1), chunks=(1, 1))
y = x * 2
u, s, v = da.linalg.svd(y)
z = y + u
assert_eq(z, z)
def test_args_delayed():
x = da.arange(10, chunks=(5,))
y = dask.delayed(lambda: 100)()
z = da.blockwise(add, "i", x, "i", y, None, dtype=x.dtype)
assert_eq(z, np.arange(10) + 100)
z = da.blockwise(lambda x, y: x + y, "i", x, "i", y=y, dtype=x.dtype)
assert_eq(z, np.arange(10) + 100)
@pytest.mark.parametrize(
"tup", [(1, 2), collections.namedtuple("foo", ["a", "b"])(1, 2)]
)
def test_namedtuple(tup):
A = da.random.random((20, 20), chunks=(10, 10))
def f(data, x):
return data
B = da.blockwise(f, ("d1", "d2"), A, ("d1", "d2"), x=tup, dtype=A.dtype)
assert_eq(A, B)
def test_validate_top_inputs():
A = da.random.random((20, 20), chunks=(10, 10))
with pytest.raises(ValueError) as info:
da.blockwise(inc, "jk", A, "ij", dtype=A.dtype)
assert "unknown dimension" in str(info.value).lower()
assert "k" in str(info.value)
assert "j" not in str(info.value)
with pytest.raises(ValueError) as info:
da.blockwise(inc, "ii", A, "ij", dtype=A.dtype)
assert "repeated" in str(info.value).lower()
assert "i" in str(info.value)
def test_dont_merge_before_reductions():
x = da.ones(10, chunks=(5,))
y = da.blockwise(inc, "i", x, "i", dtype=x.dtype)
z = da.blockwise(sum, "", y, "i", dtype=y.dtype)
w = da.blockwise(sum, "", z, "", dtype=y.dtype)
dsk = optimize_blockwise(w.dask)
assert len([d for d in dsk.dicts.values() if isinstance(d, Blockwise)]) == 2
z.compute()
def test_atop_legacy():
x = da.ones(10, chunks=(5,))
with pytest.warns(None):
y = da.atop(inc, "i", x, "i", dtype=x.dtype)
z = da.blockwise(inc, "i", x, "i", dtype=x.dtype)
assert_eq(y, z)
assert y.name == z.name
def test_non_hlg():
# Regression test for https://github.com/dask/dask/issues/5850
a = da.from_array(np.ones(1, np.float64), chunks=(1,))
a.dask = dict(a.dask) # Convert from HighLevelGraph to plain dict
b = da.from_array(np.zeros(1, np.float64), chunks=(1,))
x = a + b
assert_eq(x, a)
|
ContinuumIO/dask
|
dask/array/tests/test_atop.py
|
Python
|
bsd-3-clause
| 17,215 | 0.00151 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : array_like, sparse matrices
sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
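# Illustrative check (comment only, not part of upstream scikit-learn): two
# groups with clearly separated means give a large F and a small p-value, e.g.
#     f_oneway(np.array([1., 2., 3.]), np.array([11., 12., 13.]))
#     # -> F of about 150 and p of about 2.6e-4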
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.feature_selection import SelectPercentile, chi2
>>> X, y = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)
>>> X_new.shape
(1797, 7)
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super().__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
threshold = np.percentile(scores, 100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
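# Illustrative sketch, not part of the scikit-learn source: reproduces the
# percentile masking above on a toy score vector to show how ties at the
# threshold are capped so that at most floor(n_features * percentile / 100)
# features are kept.  All values and the helper name are invented.
def _example_percentile_mask():
    import numpy as np
    scores = np.array([1., 2., 3., 3., 3., 3., 3., 3., 3., 4.])
    percentile = 50
    threshold = np.percentile(scores, 100 - percentile)  # == 3.0
    mask = scores > threshold                            # only the 4.0 so far
    ties = np.where(scores == threshold)[0]
    if len(ties):
        max_feats = int(len(scores) * percentile / 100)  # keep at most 5
        kept_ties = ties[:max_feats - mask.sum()]
        mask[kept_ties] = True
    return mask  # exactly 5 of the 10 features end up selected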
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> X, y = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
>>> X_new.shape
(1797, 20)
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super().__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features = %d; got %r. "
"Use k='all' to return all features."
% (X.shape[1], self.k))
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = True
return mask
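# Illustrative sketch, not part of the scikit-learn source: shows how the
# top-k mask above is built from a stable ascending argsort, which makes the
# tie-breaking between equal scores deterministic.  The toy scores and the
# helper name are invented.
def _example_top_k_mask():
    import numpy as np
    scores = np.array([0.1, 0.9, 0.5, 0.5, 0.7])
    k = 3
    mask = np.zeros(scores.shape, dtype=bool)
    # mergesort is stable: argsort gives [0, 2, 3, 4, 1], and the last k
    # entries pick indices 3, 4 and 1.
    mask[np.argsort(scores, kind="mergesort")[-k:]] = True
    return mask  # -> array([False,  True, False,  True,  True])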
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFpr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information between features and the target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super().__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
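# Illustrative sketch, not part of the scikit-learn source: the FPR filter
# above keeps every feature whose p-value is below alpha, with no correction
# for the number of comparisons.  The p-values and helper name are invented.
def _example_fpr_mask():
    import numpy as np
    pvalues = np.array([0.001, 0.004, 0.03, 0.2])
    alpha = 0.05
    return pvalues < alpha  # -> array([ True,  True,  True, False])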
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFdr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super().__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
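# Illustrative sketch, not part of the scikit-learn source: a worked
# Benjamini-Hochberg selection on invented p-values, mirroring the
# _get_support_mask logic above.  The i-th smallest p-value is compared with
# alpha * i / n_features, and every p-value at or below the largest one that
# passes is kept (the "step-up" behaviour).
def _example_benjamini_hochberg():
    import numpy as np
    pvalues = np.array([0.01, 0.12, 0.30, 0.14])
    alpha = 0.2
    n_features = len(pvalues)
    sv = np.sort(pvalues)  # [0.01, 0.12, 0.14, 0.30]
    # rank thresholds: [0.05, 0.10, 0.15, 0.20]
    selected = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
    if selected.size == 0:
        return np.zeros_like(pvalues, dtype=bool)
    # selected.max() == 0.14, so 0.12 is kept even though it fails its own
    # rank threshold of 0.10.
    return pvalues <= selected.max()  # -> array([ True,  True, False,  True])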
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFwe, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 15)
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super().__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
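# Illustrative sketch, not part of the scikit-learn source: the FWE filter
# above is a Bonferroni-style cut, keeping only p-values below alpha divided
# by the number of features.  The p-values and helper name are invented.
def _example_fwe_mask():
    import numpy as np
    pvalues = np.array([0.001, 0.004, 0.03, 0.2])
    alpha = 0.05
    # threshold 0.05 / 4 = 0.0125; SelectFpr with the same alpha would keep
    # the first three features instead.
    return pvalues < alpha / len(pvalues)  # -> array([ True,  True, False, False])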
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues). For modes 'percentile' or 'k_best' it can return
a single array of scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> transformer = GenericUnivariateSelect(chi2, 'k_best', param=20)
>>> X_new = transformer.fit_transform(X, y)
>>> X_new.shape
(569, 20)
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super().__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
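# Illustrative sketch, not part of the scikit-learn source: _make_selector
# forwards param to the single tuning parameter of the mode-specific selector,
# so the two selectors below choose the same columns.  The dataset and values
# are only an example.
def _example_generic_select():
    import numpy as np
    from sklearn.datasets import load_breast_cancer
    from sklearn.feature_selection import (GenericUnivariateSelect,
                                           SelectKBest, chi2)
    X, y = load_breast_cancer(return_X_y=True)
    generic = GenericUnivariateSelect(chi2, 'k_best', param=20).fit(X, y)
    k_best = SelectKBest(chi2, k=20).fit(X, y)
    return np.array_equal(generic.get_support(), k_best.get_support())  # True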
|
chrsrds/scikit-learn
|
sklearn/feature_selection/univariate_selection.py
|
Python
|
bsd-3-clause
| 28,149 | 0.000355 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_12/test_artificial_128_Difference_Lag1Trend_7_12_20.py
|
Python
|
bsd-3-clause
| 266 | 0.086466 |