# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of10']
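# The classes below all follow the same pattern: a base class keeps a
# `subtypes` registry keyed by a wire-format discriminator, each concrete
# class registers itself in that dict, and unpack() peeks the discriminator to
# dispatch to the matching subclass.  `ofp` aliases the loxi.of10 package so
# this generated module can reference siblings such as ofp.match and ofp.common.
#
# Illustrative sketch only (the reader class name is an assumption, not part
# of this file):
#   buf = barrier_request(xid=1).pack()
#   msg = message.unpack(loxi.generic_util.OFReader(buf))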
class message(loxi.OFObject):
subtypes = {}
version = 1
def __init__(self, type=None, xid=None):
if type != None:
self.type = type
else:
self.type = 0
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('B', 1)
subclass = message.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = message()
_version = reader.read("!B")[0]
assert(_version == 1)
obj.type = reader.read("!B")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("message {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
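# Every message shares the pack()/unpack() shape seen above: pack() appends the
# fixed OpenFlow header (version, type, a 2-byte length placeholder, xid), sums
# the lengths of the pieces, and backfills packed[2] with the real length;
# unpack() peeks a discriminator, delegates to a registered subclass if one
# matches, and otherwise reads the header itself and re-slices the reader so
# parsing stays bounded by the message's declared length.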
class stats_reply(message):
subtypes = {}
version = 1
type = 17
def __init__(self, xid=None, stats_type=None, flags=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if stats_type != None:
self.stats_type = stats_type
else:
self.stats_type = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = stats_reply.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.stats_type = reader.read("!H")[0]
obj.flags = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.stats_type != other.stats_type: return False
if self.flags != other.flags: return False
return True
def pretty_print(self, q):
q.text("stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
message.subtypes[17] = stats_reply
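# stats_reply (type 17) is itself a dispatching base: the 2-byte stats_type at
# offset 8 selects the concrete reply.  aggregate_stats_reply below carries the
# OFPST_AGGREGATE body: 64-bit packet and byte counters, a 32-bit flow count,
# and 4 bytes of padding.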
class aggregate_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 2
def __init__(self, xid=None, flags=None, packet_count=None, byte_count=None, flow_count=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if packet_count != None:
self.packet_count = packet_count
else:
self.packet_count = 0
if byte_count != None:
self.byte_count = byte_count
else:
self.byte_count = 0
if flow_count != None:
self.flow_count = flow_count
else:
self.flow_count = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!Q", self.packet_count))
packed.append(struct.pack("!Q", self.byte_count))
packed.append(struct.pack("!L", self.flow_count))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = aggregate_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 2)
obj.flags = reader.read("!H")[0]
obj.packet_count = reader.read("!Q")[0]
obj.byte_count = reader.read("!Q")[0]
obj.flow_count = reader.read("!L")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.flow_count != other.flow_count: return False
return True
def pretty_print(self, q):
q.text("aggregate_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("packet_count = ");
q.text("%#x" % self.packet_count)
q.text(","); q.breakable()
q.text("byte_count = ");
q.text("%#x" % self.byte_count)
q.text(","); q.breakable()
q.text("flow_count = ");
q.text("%#x" % self.flow_count)
q.breakable()
q.text('}')
stats_reply.subtypes[2] = aggregate_stats_reply
class stats_request(message):
subtypes = {}
version = 1
type = 16
def __init__(self, xid=None, stats_type=None, flags=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if stats_type != None:
self.stats_type = stats_type
else:
self.stats_type = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = stats_request.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.stats_type = reader.read("!H")[0]
obj.flags = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.stats_type != other.stats_type: return False
if self.flags != other.flags: return False
return True
def pretty_print(self, q):
q.text("stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
message.subtypes[16] = stats_request
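# aggregate_stats_request mirrors the reply side: after the stats header it
# packs the OpenFlow 1.0 match structure, a table_id byte, one pad byte, and
# the out_port filter, which goes through util.pack_port_no /
# util.unpack_port_no so special port numbers round-trip correctly.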
class aggregate_stats_request(stats_request):
version = 1
type = 16
stats_type = 2
def __init__(self, xid=None, flags=None, match=None, table_id=None, out_port=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if match != None:
self.match = match
else:
self.match = ofp.match()
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(self.match.pack())
packed.append(struct.pack("!B", self.table_id))
packed.append('\x00' * 1)
packed.append(util.pack_port_no(self.out_port))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = aggregate_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 2)
obj.flags = reader.read("!H")[0]
obj.match = ofp.match.unpack(reader)
obj.table_id = reader.read("!B")[0]
reader.skip(1)
obj.out_port = util.unpack_port_no(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def pretty_print(self, q):
q.text("aggregate_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("table_id = ");
q.text("%#x" % self.table_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.breakable()
q.text('}')
stats_request.subtypes[2] = aggregate_stats_request
class error_msg(message):
subtypes = {}
version = 1
type = 1
def __init__(self, xid=None, err_type=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if err_type != None:
self.err_type = err_type
else:
self.err_type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = error_msg.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.err_type = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.err_type != other.err_type: return False
return True
def pretty_print(self, q):
q.text("error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[1] = error_msg
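# Error messages (type 1) dispatch on the 2-byte err_type field.  The concrete
# error classes carry a 2-byte code plus `data`, the raw bytes of the offending
# request, which unpack() reads with read_all().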
class bad_action_error_msg(error_msg):
version = 1
type = 1
err_type = 2
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bad_action_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 2)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bad_action_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[2] = bad_action_error_msg
class bad_request_error_msg(error_msg):
version = 1
type = 1
err_type = 1
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bad_request_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 1)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bad_request_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[1] = bad_request_error_msg
class barrier_reply(message):
version = 1
type = 19
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = barrier_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 19)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("barrier_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[19] = barrier_reply
class barrier_request(message):
version = 1
type = 18
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = barrier_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 18)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("barrier_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[18] = barrier_request
class experimenter(message):
subtypes = {}
version = 1
type = 4
def __init__(self, xid=None, experimenter=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
message.subtypes[4] = experimenter
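# Experimenter (vendor) messages add one more level of dispatch: first on the
# 4-byte experimenter ID peeked at offset 8, then, for the Big Switch Networks
# ID 6035143 (0x5c16c7) handled by bsn_header, on the 4-byte subtype at
# offset 12.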
class bsn_header(experimenter):
subtypes = {}
version = 1
type = 4
experimenter = 6035143
def __init__(self, xid=None, subtype=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 12)
subclass = bsn_header.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn_header()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn_header {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn_header
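# Each BSN extension message below registers itself in bsn_header.subtypes
# under its numeric subtype; beyond that they differ only in the fixed fields
# appended after the 16-byte experimenter header.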
class bsn_bw_clear_data_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 22
def __init__(self, xid=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_clear_data_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 22)
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_clear_data_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[22] = bsn_bw_clear_data_reply
class bsn_bw_clear_data_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 21
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_clear_data_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 21)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_clear_data_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
bsn_header.subtypes[21] = bsn_bw_clear_data_request
class bsn_bw_enable_get_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 20
def __init__(self, xid=None, enabled=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if enabled != None:
self.enabled = enabled
else:
self.enabled = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.enabled))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_enable_get_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 20)
obj.enabled = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.enabled != other.enabled: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_enable_get_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("enabled = ");
q.text("%#x" % self.enabled)
q.breakable()
q.text('}')
bsn_header.subtypes[20] = bsn_bw_enable_get_reply
class bsn_bw_enable_get_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 19
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_enable_get_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 19)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_enable_get_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
bsn_header.subtypes[19] = bsn_bw_enable_get_request
class bsn_bw_enable_set_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 23
def __init__(self, xid=None, enable=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if enable != None:
self.enable = enable
else:
self.enable = 0
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.enable))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_enable_set_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 23)
obj.enable = reader.read("!L")[0]
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.enable != other.enable: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_enable_set_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("enable = ");
q.text("%#x" % self.enable)
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[23] = bsn_bw_enable_set_reply
class bsn_bw_enable_set_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 18
def __init__(self, xid=None, enable=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if enable != None:
self.enable = enable
else:
self.enable = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.enable))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_bw_enable_set_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 18)
obj.enable = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.enable != other.enable: return False
return True
def pretty_print(self, q):
q.text("bsn_bw_enable_set_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("enable = ");
q.text("%#x" % self.enable)
q.breakable()
q.text('}')
bsn_header.subtypes[18] = bsn_bw_enable_set_request
class bsn_get_interfaces_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 10
def __init__(self, xid=None, interfaces=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if interfaces != None:
self.interfaces = interfaces
else:
self.interfaces = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(loxi.generic_util.pack_list(self.interfaces))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_interfaces_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 10)
obj.interfaces = loxi.generic_util.unpack_list(reader, ofp.common.bsn_interface.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.interfaces != other.interfaces: return False
return True
def pretty_print(self, q):
q.text("bsn_get_interfaces_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("interfaces = ");
q.pp(self.interfaces)
q.breakable()
q.text('}')
bsn_header.subtypes[10] = bsn_get_interfaces_reply
class bsn_get_interfaces_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 9
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_interfaces_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 9)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("bsn_get_interfaces_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
bsn_header.subtypes[9] = bsn_get_interfaces_request
class bsn_get_ip_mask_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 2
def __init__(self, xid=None, index=None, mask=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if index != None:
self.index = index
else:
self.index = 0
if mask != None:
self.mask = mask
else:
self.mask = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.index))
packed.append('\x00' * 3)
packed.append(struct.pack("!L", self.mask))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_ip_mask_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
obj.index = reader.read("!B")[0]
reader.skip(3)
obj.mask = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.index != other.index: return False
if self.mask != other.mask: return False
return True
def pretty_print(self, q):
q.text("bsn_get_ip_mask_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("index = ");
q.text("%#x" % self.index)
q.text(","); q.breakable()
q.text("mask = ");
q.text("%#x" % self.mask)
q.breakable()
q.text('}')
bsn_header.subtypes[2] = bsn_get_ip_mask_reply
class bsn_get_ip_mask_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 1
def __init__(self, xid=None, index=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if index != None:
self.index = index
else:
self.index = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.index))
packed.append('\x00' * 7)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_ip_mask_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
obj.index = reader.read("!B")[0]
reader.skip(7)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.index != other.index: return False
return True
def pretty_print(self, q):
q.text("bsn_get_ip_mask_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("index = ");
q.text("%#x" % self.index)
q.breakable()
q.text('}')
bsn_header.subtypes[1] = bsn_get_ip_mask_request
class bsn_get_l2_table_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 14
def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if l2_table_enable != None:
self.l2_table_enable = l2_table_enable
else:
self.l2_table_enable = 0
if l2_table_priority != None:
self.l2_table_priority = l2_table_priority
else:
self.l2_table_priority = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.l2_table_enable))
packed.append('\x00' * 1)
packed.append(struct.pack("!H", self.l2_table_priority))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_l2_table_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 14)
obj.l2_table_enable = reader.read("!B")[0]
reader.skip(1)
obj.l2_table_priority = reader.read("!H")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.l2_table_enable != other.l2_table_enable: return False
if self.l2_table_priority != other.l2_table_priority: return False
return True
def pretty_print(self, q):
q.text("bsn_get_l2_table_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("l2_table_enable = ");
q.text("%#x" % self.l2_table_enable)
q.text(","); q.breakable()
q.text("l2_table_priority = ");
q.text("%#x" % self.l2_table_priority)
q.breakable()
q.text('}')
bsn_header.subtypes[14] = bsn_get_l2_table_reply
class bsn_get_l2_table_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 13
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_l2_table_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 13)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("bsn_get_l2_table_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
bsn_header.subtypes[13] = bsn_get_l2_table_request
class bsn_get_mirroring_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 5
def __init__(self, xid=None, report_mirror_ports=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if report_mirror_ports != None:
self.report_mirror_ports = report_mirror_ports
else:
self.report_mirror_ports = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.report_mirror_ports))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_mirroring_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 5)
obj.report_mirror_ports = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.report_mirror_ports != other.report_mirror_ports: return False
return True
def pretty_print(self, q):
q.text("bsn_get_mirroring_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("report_mirror_ports = ");
q.text("%#x" % self.report_mirror_ports)
q.breakable()
q.text('}')
bsn_header.subtypes[5] = bsn_get_mirroring_reply
class bsn_get_mirroring_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 4
def __init__(self, xid=None, report_mirror_ports=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if report_mirror_ports != None:
self.report_mirror_ports = report_mirror_ports
else:
self.report_mirror_ports = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.report_mirror_ports))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_get_mirroring_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
obj.report_mirror_ports = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.report_mirror_ports != other.report_mirror_ports: return False
return True
def pretty_print(self, q):
q.text("bsn_get_mirroring_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("report_mirror_ports = ");
q.text("%#x" % self.report_mirror_ports)
q.breakable()
q.text('}')
bsn_header.subtypes[4] = bsn_get_mirroring_request
class bsn_hybrid_get_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 28
def __init__(self, xid=None, hybrid_enable=None, hybrid_version=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if hybrid_enable != None:
self.hybrid_enable = hybrid_enable
else:
self.hybrid_enable = 0
if hybrid_version != None:
self.hybrid_version = hybrid_version
else:
self.hybrid_version = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.hybrid_enable))
packed.append('\x00' * 1)
packed.append(struct.pack("!H", self.hybrid_version))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_hybrid_get_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 28)
obj.hybrid_enable = reader.read("!B")[0]
reader.skip(1)
obj.hybrid_version = reader.read("!H")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.hybrid_enable != other.hybrid_enable: return False
if self.hybrid_version != other.hybrid_version: return False
return True
def pretty_print(self, q):
q.text("bsn_hybrid_get_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("hybrid_enable = ");
q.text("%#x" % self.hybrid_enable)
q.text(","); q.breakable()
q.text("hybrid_version = ");
q.text("%#x" % self.hybrid_version)
q.breakable()
q.text('}')
bsn_header.subtypes[28] = bsn_hybrid_get_reply
class bsn_hybrid_get_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 27
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_hybrid_get_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 27)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("bsn_hybrid_get_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
bsn_header.subtypes[27] = bsn_hybrid_get_request
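# The bsn_pdu_* family appears to control PDU transmit/receive offload on the
# switch (an interpretation, not stated in this file): requests identify a
# (port_no, slot_num) pair, carry the PDU payload in `data`, and set a
# timeout_ms or tx_interval_ms; replies and the rx timeout report status for
# the same pair.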
class bsn_pdu_rx_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 34
def __init__(self, xid=None, status=None, port_no=None, slot_num=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if slot_num != None:
self.slot_num = slot_num
else:
self.slot_num = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!B", self.slot_num))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_pdu_rx_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 34)
obj.status = reader.read("!L")[0]
obj.port_no = util.unpack_port_no(reader)
obj.slot_num = reader.read("!B")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
if self.port_no != other.port_no: return False
if self.slot_num != other.slot_num: return False
return True
def pretty_print(self, q):
q.text("bsn_pdu_rx_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("slot_num = ");
q.text("%#x" % self.slot_num)
q.breakable()
q.text('}')
bsn_header.subtypes[34] = bsn_pdu_rx_reply
class bsn_pdu_rx_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 33
def __init__(self, xid=None, timeout_ms=None, port_no=None, slot_num=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if timeout_ms != None:
self.timeout_ms = timeout_ms
else:
self.timeout_ms = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if slot_num != None:
self.slot_num = slot_num
else:
self.slot_num = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.timeout_ms))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!B", self.slot_num))
packed.append('\x00' * 3)
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_pdu_rx_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 33)
obj.timeout_ms = reader.read("!L")[0]
obj.port_no = util.unpack_port_no(reader)
obj.slot_num = reader.read("!B")[0]
reader.skip(3)
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.timeout_ms != other.timeout_ms: return False
if self.port_no != other.port_no: return False
if self.slot_num != other.slot_num: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bsn_pdu_rx_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("timeout_ms = ");
q.text("%#x" % self.timeout_ms)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("slot_num = ");
q.text("%#x" % self.slot_num)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
bsn_header.subtypes[33] = bsn_pdu_rx_request
class bsn_pdu_rx_timeout(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 35
def __init__(self, xid=None, port_no=None, slot_num=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if slot_num != None:
self.slot_num = slot_num
else:
self.slot_num = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!B", self.slot_num))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_pdu_rx_timeout()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 35)
obj.port_no = util.unpack_port_no(reader)
obj.slot_num = reader.read("!B")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.port_no != other.port_no: return False
if self.slot_num != other.slot_num: return False
return True
def pretty_print(self, q):
q.text("bsn_pdu_rx_timeout {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("slot_num = ");
q.text("%#x" % self.slot_num)
q.breakable()
q.text('}')
bsn_header.subtypes[35] = bsn_pdu_rx_timeout
class bsn_pdu_tx_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 32
def __init__(self, xid=None, status=None, port_no=None, slot_num=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if slot_num != None:
self.slot_num = slot_num
else:
self.slot_num = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!B", self.slot_num))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_pdu_tx_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 32)
obj.status = reader.read("!L")[0]
obj.port_no = util.unpack_port_no(reader)
obj.slot_num = reader.read("!B")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
if self.port_no != other.port_no: return False
if self.slot_num != other.slot_num: return False
return True
def pretty_print(self, q):
q.text("bsn_pdu_tx_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("slot_num = ");
q.text("%#x" % self.slot_num)
q.breakable()
q.text('}')
bsn_header.subtypes[32] = bsn_pdu_tx_reply
class bsn_pdu_tx_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 31
def __init__(self, xid=None, tx_interval_ms=None, port_no=None, slot_num=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if tx_interval_ms != None:
self.tx_interval_ms = tx_interval_ms
else:
self.tx_interval_ms = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if slot_num != None:
self.slot_num = slot_num
else:
self.slot_num = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.tx_interval_ms))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!B", self.slot_num))
packed.append('\x00' * 3)
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_pdu_tx_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 31)
obj.tx_interval_ms = reader.read("!L")[0]
obj.port_no = util.unpack_port_no(reader)
obj.slot_num = reader.read("!B")[0]
reader.skip(3)
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.tx_interval_ms != other.tx_interval_ms: return False
if self.port_no != other.port_no: return False
if self.slot_num != other.slot_num: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bsn_pdu_tx_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("tx_interval_ms = ");
q.text("%#x" % self.tx_interval_ms)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("slot_num = ");
q.text("%#x" % self.slot_num)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
bsn_header.subtypes[31] = bsn_pdu_tx_request
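# Illustrative sketch (assumed values, not part of the generated API):
# building and serializing a periodic-transmit request. 'data' is the raw
# PDU payload to send every tx_interval_ms.
#
#   msg = bsn_pdu_tx_request(xid=1, tx_interval_ms=1000, port_no=1,
#                            slot_num=0, data='\x01\x02\x03')
#   buf = msg.pack()  # the 16-bit length field is filled in automatically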
class bsn_set_ip_mask(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 0
def __init__(self, xid=None, index=None, mask=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if index != None:
self.index = index
else:
self.index = 0
if mask != None:
self.mask = mask
else:
self.mask = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.index))
packed.append('\x00' * 3)
packed.append(struct.pack("!L", self.mask))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_ip_mask()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 0)
obj.index = reader.read("!B")[0]
reader.skip(3)
obj.mask = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.index != other.index: return False
if self.mask != other.mask: return False
return True
def pretty_print(self, q):
q.text("bsn_set_ip_mask {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("index = ");
q.text("%#x" % self.index)
q.text(","); q.breakable()
q.text("mask = ");
q.text("%#x" % self.mask)
q.breakable()
q.text('}')
bsn_header.subtypes[0] = bsn_set_ip_mask
class bsn_set_l2_table_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 24
def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if l2_table_enable != None:
self.l2_table_enable = l2_table_enable
else:
self.l2_table_enable = 0
if l2_table_priority != None:
self.l2_table_priority = l2_table_priority
else:
self.l2_table_priority = 0
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.l2_table_enable))
packed.append('\x00' * 1)
packed.append(struct.pack("!H", self.l2_table_priority))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_l2_table_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 24)
obj.l2_table_enable = reader.read("!B")[0]
reader.skip(1)
obj.l2_table_priority = reader.read("!H")[0]
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.l2_table_enable != other.l2_table_enable: return False
if self.l2_table_priority != other.l2_table_priority: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_set_l2_table_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("l2_table_enable = ");
q.text("%#x" % self.l2_table_enable)
q.text(","); q.breakable()
q.text("l2_table_priority = ");
q.text("%#x" % self.l2_table_priority)
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[24] = bsn_set_l2_table_reply
class bsn_set_l2_table_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 12
def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if l2_table_enable != None:
self.l2_table_enable = l2_table_enable
else:
self.l2_table_enable = 0
if l2_table_priority != None:
self.l2_table_priority = l2_table_priority
else:
self.l2_table_priority = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.l2_table_enable))
packed.append('\x00' * 1)
packed.append(struct.pack("!H", self.l2_table_priority))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_l2_table_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 12)
obj.l2_table_enable = reader.read("!B")[0]
reader.skip(1)
obj.l2_table_priority = reader.read("!H")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.l2_table_enable != other.l2_table_enable: return False
if self.l2_table_priority != other.l2_table_priority: return False
return True
def pretty_print(self, q):
q.text("bsn_set_l2_table_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("l2_table_enable = ");
q.text("%#x" % self.l2_table_enable)
q.text(","); q.breakable()
q.text("l2_table_priority = ");
q.text("%#x" % self.l2_table_priority)
q.breakable()
q.text('}')
bsn_header.subtypes[12] = bsn_set_l2_table_request
class bsn_set_mirroring(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 3
def __init__(self, xid=None, report_mirror_ports=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if report_mirror_ports != None:
self.report_mirror_ports = report_mirror_ports
else:
self.report_mirror_ports = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.report_mirror_ports))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_mirroring()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 3)
obj.report_mirror_ports = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.report_mirror_ports != other.report_mirror_ports: return False
return True
def pretty_print(self, q):
q.text("bsn_set_mirroring {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("report_mirror_ports = ");
q.text("%#x" % self.report_mirror_ports)
q.breakable()
q.text('}')
bsn_header.subtypes[3] = bsn_set_mirroring
class bsn_set_pktin_suppression_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 25
def __init__(self, xid=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_pktin_suppression_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 25)
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_set_pktin_suppression_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[25] = bsn_set_pktin_suppression_reply
class bsn_set_pktin_suppression_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 11
def __init__(self, xid=None, enabled=None, idle_timeout=None, hard_timeout=None, priority=None, cookie=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if enabled != None:
self.enabled = enabled
else:
self.enabled = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!B", self.enabled))
packed.append('\x00' * 1)
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!Q", self.cookie))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_pktin_suppression_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 11)
obj.enabled = reader.read("!B")[0]
reader.skip(1)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.cookie = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.enabled != other.enabled: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.cookie != other.cookie: return False
return True
def pretty_print(self, q):
q.text("bsn_set_pktin_suppression_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("enabled = ");
q.text("%#x" % self.enabled)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.breakable()
q.text('}')
bsn_header.subtypes[11] = bsn_set_pktin_suppression_request
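# Illustrative sketch (assumed values): the timeout, priority and cookie
# fields describe the suppression flow the switch is asked to manage
# (interpretation inferred from the field names).
#
#   req = bsn_set_pktin_suppression_request(xid=2, enabled=1, idle_timeout=60,
#                                           hard_timeout=0, priority=0,
#                                           cookie=0xbeef)
#   buf = req.pack()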
class bsn_shell_command(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 6
def __init__(self, xid=None, service=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if service != None:
self.service = service
else:
self.service = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.service))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_shell_command()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 6)
obj.service = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.service != other.service: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bsn_shell_command {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("service = ");
q.text("%#x" % self.service)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
bsn_header.subtypes[6] = bsn_shell_command
class bsn_shell_output(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 7
def __init__(self, xid=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_shell_output()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 7)
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("bsn_shell_output {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
bsn_header.subtypes[7] = bsn_shell_output
class bsn_shell_status(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 8
def __init__(self, xid=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_shell_status()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 8)
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_shell_status {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[8] = bsn_shell_status
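# Taken together, bsn_shell_command, bsn_shell_output and bsn_shell_status
# tunnel a switch shell session over OpenFlow: the command carries the request,
# output messages stream the textual result, and status reports the final
# result code (interpretation inferred from the field names).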
class experimenter_stats_reply(stats_reply):
subtypes = {}
version = 1
type = 17
stats_type = 65535
def __init__(self, xid=None, flags=None, experimenter=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
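        # the experimenter id sits 12 bytes into the message: 8-byte OpenFlow
        # header, then 2-byte stats_type and 2-byte flags.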
subtype, = reader.peek('!L', 12)
subclass = experimenter_stats_reply.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 65535)
obj.flags = reader.read("!H")[0]
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
stats_reply.subtypes[65535] = experimenter_stats_reply
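# Stats replies dispatch in stages: stats_reply.subtypes is keyed by
# stats_type, experimenter_stats_reply.subtypes by the experimenter id, and
# bsn_stats_reply.subtypes by the BSN-specific subtype, so unpack() descends
# to the most specific registered class.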
class bsn_stats_reply(experimenter_stats_reply):
subtypes = {}
version = 1
type = 19
stats_type = 65535
experimenter = 6035143
def __init__(self, xid=None, flags=None, subtype=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append('\x00' * 4)
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
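        # the BSN subtype sits 20 bytes in: 8-byte header, stats_type and
        # flags (4 bytes), 4 bytes of padding, then the 4-byte experimenter id.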
subtype, = reader.peek('!L', 20)
subclass = bsn_stats_reply.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 19)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 65535)
obj.flags = reader.read("!H")[0]
reader.skip(4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
experimenter_stats_reply.subtypes[6035143] = bsn_stats_reply
class experimenter_stats_request(stats_request):
subtypes = {}
version = 1
type = 16
stats_type = 65535
def __init__(self, xid=None, flags=None, experimenter=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 12)
subclass = experimenter_stats_request.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 65535)
obj.flags = reader.read("!H")[0]
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
stats_request.subtypes[65535] = experimenter_stats_request
class bsn_stats_request(experimenter_stats_request):
subtypes = {}
version = 1
type = 18
stats_type = 65535
experimenter = 6035143
def __init__(self, xid=None, flags=None, subtype=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append('\x00' * 4)
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 20)
subclass = bsn_stats_request.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 18)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 65535)
obj.flags = reader.read("!H")[0]
reader.skip(4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
experimenter_stats_request.subtypes[6035143] = bsn_stats_request
class bsn_virtual_port_create_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 16
def __init__(self, xid=None, status=None, vport_no=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
if vport_no != None:
self.vport_no = vport_no
else:
self.vport_no = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
packed.append(struct.pack("!L", self.vport_no))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_virtual_port_create_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 16)
obj.status = reader.read("!L")[0]
obj.vport_no = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
if self.vport_no != other.vport_no: return False
return True
def pretty_print(self, q):
q.text("bsn_virtual_port_create_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.text(","); q.breakable()
q.text("vport_no = ");
q.text("%#x" % self.vport_no)
q.breakable()
q.text('}')
bsn_header.subtypes[16] = bsn_virtual_port_create_reply
class bsn_virtual_port_create_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 15
def __init__(self, xid=None, vport=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if vport != None:
self.vport = vport
else:
self.vport = ofp.bsn_vport()
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(self.vport.pack())
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_virtual_port_create_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 15)
obj.vport = ofp.bsn_vport.unpack(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.vport != other.vport: return False
return True
def pretty_print(self, q):
q.text("bsn_virtual_port_create_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("vport = ");
q.pp(self.vport)
q.breakable()
q.text('}')
bsn_header.subtypes[15] = bsn_virtual_port_create_request
class bsn_virtual_port_remove_reply(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 26
def __init__(self, xid=None, status=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if status != None:
self.status = status
else:
self.status = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.status))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_virtual_port_remove_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 26)
obj.status = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.status != other.status: return False
return True
def pretty_print(self, q):
q.text("bsn_virtual_port_remove_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("status = ");
q.text("%#x" % self.status)
q.breakable()
q.text('}')
bsn_header.subtypes[26] = bsn_virtual_port_remove_reply
class bsn_virtual_port_remove_request(bsn_header):
version = 1
type = 4
experimenter = 6035143
subtype = 17
def __init__(self, xid=None, vport_no=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if vport_no != None:
self.vport_no = vport_no
else:
self.vport_no = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.vport_no))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_virtual_port_remove_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 17)
obj.vport_no = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.vport_no != other.vport_no: return False
return True
def pretty_print(self, q):
q.text("bsn_virtual_port_remove_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("vport_no = ");
q.text("%#x" % self.vport_no)
q.breakable()
q.text('}')
bsn_header.subtypes[17] = bsn_virtual_port_remove_request
class desc_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 0
def __init__(self, xid=None, flags=None, mfr_desc=None, hw_desc=None, sw_desc=None, serial_num=None, dp_desc=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if mfr_desc != None:
self.mfr_desc = mfr_desc
else:
self.mfr_desc = ""
if hw_desc != None:
self.hw_desc = hw_desc
else:
self.hw_desc = ""
if sw_desc != None:
self.sw_desc = sw_desc
else:
self.sw_desc = ""
if serial_num != None:
self.serial_num = serial_num
else:
self.serial_num = ""
if dp_desc != None:
self.dp_desc = dp_desc
else:
self.dp_desc = ""
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!256s", self.mfr_desc))
packed.append(struct.pack("!256s", self.hw_desc))
packed.append(struct.pack("!256s", self.sw_desc))
packed.append(struct.pack("!32s", self.serial_num))
packed.append(struct.pack("!256s", self.dp_desc))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = desc_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 0)
obj.flags = reader.read("!H")[0]
obj.mfr_desc = reader.read("!256s")[0].rstrip("\x00")
obj.hw_desc = reader.read("!256s")[0].rstrip("\x00")
obj.sw_desc = reader.read("!256s")[0].rstrip("\x00")
obj.serial_num = reader.read("!32s")[0].rstrip("\x00")
obj.dp_desc = reader.read("!256s")[0].rstrip("\x00")
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.mfr_desc != other.mfr_desc: return False
if self.hw_desc != other.hw_desc: return False
if self.sw_desc != other.sw_desc: return False
if self.serial_num != other.serial_num: return False
if self.dp_desc != other.dp_desc: return False
return True
def pretty_print(self, q):
q.text("desc_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("mfr_desc = ");
q.pp(self.mfr_desc)
q.text(","); q.breakable()
q.text("hw_desc = ");
q.pp(self.hw_desc)
q.text(","); q.breakable()
q.text("sw_desc = ");
q.pp(self.sw_desc)
q.text(","); q.breakable()
q.text("serial_num = ");
q.pp(self.serial_num)
q.text(","); q.breakable()
q.text("dp_desc = ");
q.pp(self.dp_desc)
q.breakable()
q.text('}')
stats_reply.subtypes[0] = desc_stats_reply
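# Note: the description strings above are fixed-width, NUL-padded fields on
# the wire ("!256s"/"!32s"); unpack() strips trailing NULs and pack() relies
# on struct to re-pad shorter strings.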
class desc_stats_request(stats_request):
version = 1
type = 16
stats_type = 0
def __init__(self, xid=None, flags=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = desc_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 0)
obj.flags = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
return True
def pretty_print(self, q):
q.text("desc_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
stats_request.subtypes[0] = desc_stats_request
class echo_reply(message):
version = 1
type = 3
def __init__(self, xid=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = echo_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 3)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("echo_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
message.subtypes[3] = echo_reply
class echo_request(message):
version = 1
type = 2
def __init__(self, xid=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = echo_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 2)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("echo_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
message.subtypes[2] = echo_request
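# Illustrative echo round trip (transport not shown): a conforming peer
# answers an echo_request with an echo_reply carrying the same xid and data.
#
#   req = echo_request(xid=0x12, data='ping')
#   buf = req.pack()
#   # ... send buf, parse the response ...
#   # expected: reply.xid == 0x12 and reply.data == 'ping'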
class features_reply(message):
version = 1
type = 6
def __init__(self, xid=None, datapath_id=None, n_buffers=None, n_tables=None, capabilities=None, actions=None, ports=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if datapath_id != None:
self.datapath_id = datapath_id
else:
self.datapath_id = 0
if n_buffers != None:
self.n_buffers = n_buffers
else:
self.n_buffers = 0
if n_tables != None:
self.n_tables = n_tables
else:
self.n_tables = 0
if capabilities != None:
self.capabilities = capabilities
else:
self.capabilities = 0
if actions != None:
self.actions = actions
else:
self.actions = 0
if ports != None:
self.ports = ports
else:
self.ports = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!Q", self.datapath_id))
packed.append(struct.pack("!L", self.n_buffers))
packed.append(struct.pack("!B", self.n_tables))
packed.append('\x00' * 3)
packed.append(struct.pack("!L", self.capabilities))
packed.append(struct.pack("!L", self.actions))
packed.append(loxi.generic_util.pack_list(self.ports))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = features_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 6)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.datapath_id = reader.read("!Q")[0]
obj.n_buffers = reader.read("!L")[0]
obj.n_tables = reader.read("!B")[0]
reader.skip(3)
obj.capabilities = reader.read("!L")[0]
obj.actions = reader.read("!L")[0]
obj.ports = loxi.generic_util.unpack_list(reader, ofp.common.port_desc.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.datapath_id != other.datapath_id: return False
if self.n_buffers != other.n_buffers: return False
if self.n_tables != other.n_tables: return False
if self.capabilities != other.capabilities: return False
if self.actions != other.actions: return False
if self.ports != other.ports: return False
return True
def pretty_print(self, q):
q.text("features_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("datapath_id = ");
q.text("%#x" % self.datapath_id)
q.text(","); q.breakable()
q.text("n_buffers = ");
q.text("%#x" % self.n_buffers)
q.text(","); q.breakable()
q.text("n_tables = ");
q.text("%#x" % self.n_tables)
q.text(","); q.breakable()
q.text("capabilities = ");
q.text("%#x" % self.capabilities)
q.text(","); q.breakable()
q.text("actions = ");
q.text("%#x" % self.actions)
q.text(","); q.breakable()
q.text("ports = ");
q.pp(self.ports)
q.breakable()
q.text('}')
message.subtypes[6] = features_reply
class features_request(message):
version = 1
type = 5
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = features_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 5)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("features_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[5] = features_request
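# features_request/features_reply form the post-HELLO handshake in
# OpenFlow 1.0: the reply reports datapath_id, buffer and table counts,
# capability and action bitmaps, and the list of physical ports.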
class flow_mod(message):
subtypes = {}
version = 1
type = 14
def __init__(self, xid=None, match=None, cookie=None, _command=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if _command != None:
self._command = _command
else:
self._command = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
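        # the command field sits 56 bytes in: 8-byte header, 40-byte
        # OpenFlow 1.0 match structure, then the 8-byte cookie.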
subtype, = reader.peek('!H', 56)
subclass = flow_mod.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = flow_mod()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
obj._command = util.unpack_fm_cmd(reader)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self._command != other._command: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_mod {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
message.subtypes[14] = flow_mod
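# flow_mod.unpack() peeks at the command field and dispatches through
# flow_mod.subtypes to the concrete subclass (flow_add for command 0,
# flow_delete for command 3, and so on), so callers normally receive the
# specific class rather than flow_mod itself.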
class flow_add(flow_mod):
version = 1
type = 14
_command = 0
def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_add()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
__command = util.unpack_fm_cmd(reader)
assert(__command == 0)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_add {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
flow_mod.subtypes[0] = flow_add
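
# A minimal usage sketch for flow_add, assuming the of10 helpers ofp.match and
# ofp.action.output are available as in the rest of pyloxi; the port and buffer
# constants shown (0xffff = OFPP_NONE, 0xffffffff = no buffered packet) are the
# OpenFlow 1.0 values:
#
#   msg = flow_add(
#       xid=0x1234,
#       match=ofp.match(in_port=1),
#       idle_timeout=30,
#       priority=1000,
#       buffer_id=0xffffffff,                     # no buffered packet
#       out_port=0xffff,                          # OFPP_NONE
#       actions=[ofp.action.output(port=2, max_len=0xffff)],
#   )
#   wire_bytes = msg.pack()                       # pack() fills in the length field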
class flow_delete(flow_mod):
version = 1
type = 14
_command = 3
def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_delete()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
__command = util.unpack_fm_cmd(reader)
assert(__command == 3)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_delete {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
flow_mod.subtypes[3] = flow_delete
class flow_delete_strict(flow_mod):
version = 1
type = 14
_command = 4
def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_delete_strict()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
__command = util.unpack_fm_cmd(reader)
assert(__command == 4)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_delete_strict {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
flow_mod.subtypes[4] = flow_delete_strict
class flow_mod_failed_error_msg(error_msg):
version = 1
type = 1
err_type = 3
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_mod_failed_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 3)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("flow_mod_failed_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[3] = flow_mod_failed_error_msg
class flow_modify(flow_mod):
version = 1
type = 14
_command = 1
def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_modify()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
__command = util.unpack_fm_cmd(reader)
assert(__command == 1)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_modify {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
flow_mod.subtypes[1] = flow_modify
class flow_modify_strict(flow_mod):
version = 1
type = 14
_command = 2
def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if hard_timeout != None:
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
if actions != None:
self.actions = actions
else:
self.actions = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(util.pack_fm_cmd(self._command))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append(struct.pack("!H", self.hard_timeout))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.out_port))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.actions))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_modify_strict()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 14)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
__command = util.unpack_fm_cmd(reader)
assert(__command == 2)
obj.idle_timeout = reader.read("!H")[0]
obj.hard_timeout = reader.read("!H")[0]
obj.priority = reader.read("!H")[0]
obj.buffer_id = reader.read("!L")[0]
obj.out_port = util.unpack_port_no(reader)
obj.flags = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
return True
def pretty_print(self, q):
q.text("flow_modify_strict {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("hard_timeout = ");
q.text("%#x" % self.hard_timeout)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.breakable()
q.text('}')
flow_mod.subtypes[2] = flow_modify_strict
class flow_removed(message):
version = 1
type = 11
def __init__(self, xid=None, match=None, cookie=None, priority=None, reason=None, duration_sec=None, duration_nsec=None, idle_timeout=None, packet_count=None, byte_count=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if match != None:
self.match = match
else:
self.match = ofp.match()
if cookie != None:
self.cookie = cookie
else:
self.cookie = 0
if priority != None:
self.priority = priority
else:
self.priority = 0
if reason != None:
self.reason = reason
else:
self.reason = 0
if duration_sec != None:
self.duration_sec = duration_sec
else:
self.duration_sec = 0
if duration_nsec != None:
self.duration_nsec = duration_nsec
else:
self.duration_nsec = 0
if idle_timeout != None:
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if packet_count != None:
self.packet_count = packet_count
else:
self.packet_count = 0
if byte_count != None:
self.byte_count = byte_count
else:
self.byte_count = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(self.match.pack())
packed.append(struct.pack("!Q", self.cookie))
packed.append(struct.pack("!H", self.priority))
packed.append(struct.pack("!B", self.reason))
packed.append('\x00' * 1)
packed.append(struct.pack("!L", self.duration_sec))
packed.append(struct.pack("!L", self.duration_nsec))
packed.append(struct.pack("!H", self.idle_timeout))
packed.append('\x00' * 2)
packed.append(struct.pack("!Q", self.packet_count))
packed.append(struct.pack("!Q", self.byte_count))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_removed()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 11)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.match = ofp.match.unpack(reader)
obj.cookie = reader.read("!Q")[0]
obj.priority = reader.read("!H")[0]
obj.reason = reader.read("!B")[0]
reader.skip(1)
obj.duration_sec = reader.read("!L")[0]
obj.duration_nsec = reader.read("!L")[0]
obj.idle_timeout = reader.read("!H")[0]
reader.skip(2)
obj.packet_count = reader.read("!Q")[0]
obj.byte_count = reader.read("!Q")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.priority != other.priority: return False
if self.reason != other.reason: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.idle_timeout != other.idle_timeout: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
return True
def pretty_print(self, q):
q.text("flow_removed {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("cookie = ");
q.text("%#x" % self.cookie)
q.text(","); q.breakable()
q.text("priority = ");
q.text("%#x" % self.priority)
q.text(","); q.breakable()
q.text("reason = ");
q.text("%#x" % self.reason)
q.text(","); q.breakable()
q.text("duration_sec = ");
q.text("%#x" % self.duration_sec)
q.text(","); q.breakable()
q.text("duration_nsec = ");
q.text("%#x" % self.duration_nsec)
q.text(","); q.breakable()
q.text("idle_timeout = ");
q.text("%#x" % self.idle_timeout)
q.text(","); q.breakable()
q.text("packet_count = ");
q.text("%#x" % self.packet_count)
q.text(","); q.breakable()
q.text("byte_count = ");
q.text("%#x" % self.byte_count)
q.breakable()
q.text('}')
message.subtypes[11] = flow_removed
class flow_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 1
def __init__(self, xid=None, flags=None, entries=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if entries != None:
self.entries = entries
else:
self.entries = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.entries))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 1)
obj.flags = reader.read("!H")[0]
obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.flow_stats_entry.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.entries != other.entries: return False
return True
def pretty_print(self, q):
q.text("flow_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("entries = ");
q.pp(self.entries)
q.breakable()
q.text('}')
stats_reply.subtypes[1] = flow_stats_reply
class flow_stats_request(stats_request):
version = 1
type = 16
stats_type = 1
def __init__(self, xid=None, flags=None, match=None, table_id=None, out_port=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if match != None:
self.match = match
else:
self.match = ofp.match()
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
if out_port != None:
self.out_port = out_port
else:
self.out_port = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(self.match.pack())
packed.append(struct.pack("!B", self.table_id))
packed.append('\x00' * 1)
packed.append(util.pack_port_no(self.out_port))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = flow_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 1)
obj.flags = reader.read("!H")[0]
obj.match = ofp.match.unpack(reader)
obj.table_id = reader.read("!B")[0]
reader.skip(1)
obj.out_port = util.unpack_port_no(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def pretty_print(self, q):
q.text("flow_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("match = ");
q.pp(self.match)
q.text(","); q.breakable()
q.text("table_id = ");
q.text("%#x" % self.table_id)
q.text(","); q.breakable()
q.text("out_port = ");
q.text(util.pretty_port(self.out_port))
q.breakable()
q.text('}')
stats_request.subtypes[1] = flow_stats_request
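
# A minimal sketch of requesting all flow entries, assuming ofp.match as used
# above and the OpenFlow 1.0 constants OFPFW_ALL (0x3fffff) and OFPP_NONE
# (0xffff); table_id 0xff asks for all tables:
#
#   req = flow_stats_request(
#       xid=7,
#       match=ofp.match(wildcards=0x3fffff),      # match everything
#       table_id=0xff,                            # all tables
#       out_port=0xffff,                          # OFPP_NONE: no output-port filter
#   )
#   wire_bytes = req.pack()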
class get_config_reply(message):
version = 1
type = 8
def __init__(self, xid=None, flags=None, miss_send_len=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if miss_send_len != None:
self.miss_send_len = miss_send_len
else:
self.miss_send_len = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!H", self.miss_send_len))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = get_config_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 8)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.flags = reader.read("!H")[0]
obj.miss_send_len = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def pretty_print(self, q):
q.text("get_config_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("miss_send_len = ");
q.text("%#x" % self.miss_send_len)
q.breakable()
q.text('}')
message.subtypes[8] = get_config_reply
class get_config_request(message):
version = 1
type = 7
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = get_config_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 7)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("get_config_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[7] = get_config_request
class hello(message):
version = 1
type = 0
def __init__(self, xid=None):
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = hello()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 0)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("hello {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
message.subtypes[0] = hello
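
# A minimal round-trip sketch, assuming loxi.generic_util.OFReader wraps a raw
# byte string as elsewhere in pyloxi; message.unpack() peeks at the type byte
# and dispatches through message.subtypes, so a packed hello comes back as a
# hello instance:
#
#   buf = hello(xid=1).pack()
#   reader = loxi.generic_util.OFReader(buf)
#   msg = message.unpack(reader)                  # returns a hello via subtypes[0]
#   assert isinstance(msg, hello) and msg.xid == 1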
class hello_failed_error_msg(error_msg):
version = 1
type = 1
err_type = 0
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = hello_failed_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 0)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("hello_failed_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[0] = hello_failed_error_msg
class nicira_header(experimenter):
subtypes = {}
version = 1
type = 4
experimenter = 8992
def __init__(self, xid=None, subtype=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 12)
subclass = nicira_header.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = nicira_header()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
obj.subtype = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("nicira_header {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
experimenter.subtypes[8992] = nicira_header
class nicira_controller_role_reply(nicira_header):
version = 1
type = 4
experimenter = 8992
subtype = 11
def __init__(self, xid=None, role=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if role != None:
self.role = role
else:
self.role = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.role))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = nicira_controller_role_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
_subtype = reader.read("!L")[0]
assert(_subtype == 11)
obj.role = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.role != other.role: return False
return True
def pretty_print(self, q):
q.text("nicira_controller_role_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("role = ");
q.text("%#x" % self.role)
q.breakable()
q.text('}')
nicira_header.subtypes[11] = nicira_controller_role_reply
class nicira_controller_role_request(nicira_header):
version = 1
type = 4
experimenter = 8992
subtype = 10
def __init__(self, xid=None, role=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if role != None:
self.role = role
else:
self.role = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.role))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = nicira_controller_role_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 4)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
_subtype = reader.read("!L")[0]
assert(_subtype == 10)
obj.role = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.role != other.role: return False
return True
def pretty_print(self, q):
q.text("nicira_controller_role_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("role = ");
q.text("%#x" % self.role)
q.breakable()
q.text('}')
nicira_header.subtypes[10] = nicira_controller_role_request
class packet_in(message):
version = 1
type = 10
def __init__(self, xid=None, buffer_id=None, total_len=None, in_port=None, reason=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if total_len != None:
self.total_len = total_len
else:
self.total_len = 0
if in_port != None:
self.in_port = in_port
else:
self.in_port = 0
if reason != None:
self.reason = reason
else:
self.reason = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(struct.pack("!H", self.total_len))
packed.append(util.pack_port_no(self.in_port))
packed.append(struct.pack("!B", self.reason))
packed.append('\x00' * 1)
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = packet_in()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 10)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.buffer_id = reader.read("!L")[0]
obj.total_len = reader.read("!H")[0]
obj.in_port = util.unpack_port_no(reader)
obj.reason = reader.read("!B")[0]
reader.skip(1)
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.buffer_id != other.buffer_id: return False
if self.total_len != other.total_len: return False
if self.in_port != other.in_port: return False
if self.reason != other.reason: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("packet_in {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("total_len = ");
q.text("%#x" % self.total_len)
q.text(","); q.breakable()
q.text("in_port = ");
q.text(util.pretty_port(self.in_port))
q.text(","); q.breakable()
q.text("reason = ");
q.text("%#x" % self.reason)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
message.subtypes[10] = packet_in
class packet_out(message):
version = 1
type = 13
def __init__(self, xid=None, buffer_id=None, in_port=None, actions=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if buffer_id != None:
self.buffer_id = buffer_id
else:
self.buffer_id = 0
if in_port != None:
self.in_port = in_port
else:
self.in_port = 0
if actions != None:
self.actions = actions
else:
self.actions = []
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!L", self.buffer_id))
packed.append(util.pack_port_no(self.in_port))
packed.append(struct.pack("!H", 0)) # placeholder for actions_len at index 6
packed.append(loxi.generic_util.pack_list(self.actions))
packed[6] = struct.pack("!H", len(packed[-1]))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = packet_out()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 13)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.buffer_id = reader.read("!L")[0]
obj.in_port = util.unpack_port_no(reader)
_actions_len = reader.read("!H")[0]
obj.actions = loxi.generic_util.unpack_list(reader.slice(_actions_len), ofp.action.action.unpack)
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.buffer_id != other.buffer_id: return False
if self.in_port != other.in_port: return False
if self.actions != other.actions: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("packet_out {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("buffer_id = ");
q.text("%#x" % self.buffer_id)
q.text(","); q.breakable()
q.text("in_port = ");
q.text(util.pretty_port(self.in_port))
q.text(","); q.breakable()
q.text("actions = ");
q.pp(self.actions)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
message.subtypes[13] = packet_out
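
# A minimal sketch of the usual packet_in -> packet_out pattern, assuming the
# of10 output action (ofp.action.output) and the OpenFlow 1.0 OFPP_FLOOD port
# number (0xfffb); pkt_in and connection are placeholders for a previously
# unpacked packet_in and a socket-like transport:
#
#   reply = packet_out(
#       xid=pkt_in.xid,
#       buffer_id=pkt_in.buffer_id,
#       in_port=pkt_in.in_port,
#       actions=[ofp.action.output(port=0xfffb, max_len=0)],
#       data='' if pkt_in.buffer_id != 0xffffffff else pkt_in.data,
#   )
#   connection.send(reply.pack())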
class port_mod(message):
version = 1
type = 15
def __init__(self, xid=None, port_no=None, hw_addr=None, config=None, mask=None, advertise=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if hw_addr != None:
self.hw_addr = hw_addr
else:
self.hw_addr = [0,0,0,0,0,0]
if config != None:
self.config = config
else:
self.config = 0
if mask != None:
self.mask = mask
else:
self.mask = 0
if advertise != None:
self.advertise = advertise
else:
self.advertise = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(util.pack_port_no(self.port_no))
packed.append(struct.pack("!6B", *self.hw_addr))
packed.append(struct.pack("!L", self.config))
packed.append(struct.pack("!L", self.mask))
packed.append(struct.pack("!L", self.advertise))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = port_mod()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 15)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.port_no = util.unpack_port_no(reader)
obj.hw_addr = list(reader.read('!6B'))
obj.config = reader.read("!L")[0]
obj.mask = reader.read("!L")[0]
obj.advertise = reader.read("!L")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.config != other.config: return False
if self.mask != other.mask: return False
if self.advertise != other.advertise: return False
return True
def pretty_print(self, q):
q.text("port_mod {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("hw_addr = ");
q.text(util.pretty_mac(self.hw_addr))
q.text(","); q.breakable()
q.text("config = ");
q.text("%#x" % self.config)
q.text(","); q.breakable()
q.text("mask = ");
q.text("%#x" % self.mask)
q.text(","); q.breakable()
q.text("advertise = ");
q.text("%#x" % self.advertise)
q.breakable()
q.text('}')
message.subtypes[15] = port_mod
class port_mod_failed_error_msg(error_msg):
version = 1
type = 1
err_type = 4
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = port_mod_failed_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 4)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("port_mod_failed_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[4] = port_mod_failed_error_msg
class port_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 4
def __init__(self, xid=None, flags=None, entries=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if entries != None:
self.entries = entries
else:
self.entries = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.entries))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = port_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 4)
obj.flags = reader.read("!H")[0]
obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.port_stats_entry.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.entries != other.entries: return False
return True
def pretty_print(self, q):
q.text("port_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("entries = ");
q.pp(self.entries)
q.breakable()
q.text('}')
stats_reply.subtypes[4] = port_stats_reply
class port_stats_request(stats_request):
version = 1
type = 16
stats_type = 4
def __init__(self, xid=None, flags=None, port_no=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(util.pack_port_no(self.port_no))
packed.append('\x00' * 6)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = port_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 4)
obj.flags = reader.read("!H")[0]
obj.port_no = util.unpack_port_no(reader)
reader.skip(6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.port_no != other.port_no: return False
return True
def pretty_print(self, q):
q.text("port_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.breakable()
q.text('}')
stats_request.subtypes[4] = port_stats_request
class port_status(message):
version = 1
type = 12
def __init__(self, xid=None, reason=None, desc=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if reason != None:
self.reason = reason
else:
self.reason = 0
if desc != None:
self.desc = desc
else:
self.desc = ofp.port_desc()
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!B", self.reason))
packed.append('\x00' * 7)
packed.append(self.desc.pack())
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = port_status()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 12)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.reason = reader.read("!B")[0]
reader.skip(7)
obj.desc = ofp.port_desc.unpack(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.reason != other.reason: return False
if self.desc != other.desc: return False
return True
def pretty_print(self, q):
q.text("port_status {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("reason = ");
q.text("%#x" % self.reason)
q.text(","); q.breakable()
q.text("desc = ");
q.pp(self.desc)
q.breakable()
q.text('}')
message.subtypes[12] = port_status
class queue_get_config_reply(message):
version = 1
type = 21
def __init__(self, xid=None, port=None, queues=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if port != None:
self.port = port
else:
self.port = 0
if queues != None:
self.queues = queues
else:
self.queues = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(util.pack_port_no(self.port))
packed.append('\x00' * 6)
packed.append(loxi.generic_util.pack_list(self.queues))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = queue_get_config_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 21)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.port = util.unpack_port_no(reader)
reader.skip(6)
obj.queues = loxi.generic_util.unpack_list(reader, ofp.common.packet_queue.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.port != other.port: return False
if self.queues != other.queues: return False
return True
def pretty_print(self, q):
q.text("queue_get_config_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("port = ");
q.text(util.pretty_port(self.port))
q.text(","); q.breakable()
q.text("queues = ");
q.pp(self.queues)
q.breakable()
q.text('}')
message.subtypes[21] = queue_get_config_reply
class queue_get_config_request(message):
version = 1
type = 20
def __init__(self, xid=None, port=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if port != None:
self.port = port
else:
self.port = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(util.pack_port_no(self.port))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = queue_get_config_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 20)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.port = util.unpack_port_no(reader)
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.port != other.port: return False
return True
def pretty_print(self, q):
q.text("queue_get_config_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("port = ");
q.text(util.pretty_port(self.port))
q.breakable()
q.text('}')
message.subtypes[20] = queue_get_config_request
class queue_op_failed_error_msg(error_msg):
version = 1
type = 1
err_type = 5
def __init__(self, xid=None, code=None, data=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if code != None:
self.code = code
else:
self.code = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.err_type))
packed.append(struct.pack("!H", self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = queue_op_failed_error_msg()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_err_type = reader.read("!H")[0]
assert(_err_type == 5)
obj.code = reader.read("!H")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("queue_op_failed_error_msg {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("code = ");
q.text("%#x" % self.code)
q.text(","); q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
error_msg.subtypes[5] = queue_op_failed_error_msg
class queue_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 5
def __init__(self, xid=None, flags=None, entries=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if entries != None:
self.entries = entries
else:
self.entries = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.entries))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = queue_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 5)
obj.flags = reader.read("!H")[0]
obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.queue_stats_entry.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.entries != other.entries: return False
return True
def pretty_print(self, q):
q.text("queue_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("entries = ");
q.pp(self.entries)
q.breakable()
q.text('}')
stats_reply.subtypes[5] = queue_stats_reply
class queue_stats_request(stats_request):
version = 1
type = 16
stats_type = 5
def __init__(self, xid=None, flags=None, port_no=None, queue_id=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if port_no != None:
self.port_no = port_no
else:
self.port_no = 0
if queue_id != None:
self.queue_id = queue_id
else:
self.queue_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(util.pack_port_no(self.port_no))
packed.append('\x00' * 2)
packed.append(struct.pack("!L", self.queue_id))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = queue_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 5)
obj.flags = reader.read("!H")[0]
obj.port_no = util.unpack_port_no(reader)
reader.skip(2)
obj.queue_id = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
return True
def pretty_print(self, q):
q.text("queue_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("port_no = ");
q.text(util.pretty_port(self.port_no))
q.text(","); q.breakable()
q.text("queue_id = ");
q.text("%#x" % self.queue_id)
q.breakable()
q.text('}')
stats_request.subtypes[5] = queue_stats_request
class set_config(message):
version = 1
type = 9
def __init__(self, xid=None, flags=None, miss_send_len=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if miss_send_len != None:
self.miss_send_len = miss_send_len
else:
self.miss_send_len = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!H", self.miss_send_len))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_config()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 9)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.flags = reader.read("!H")[0]
obj.miss_send_len = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def pretty_print(self, q):
q.text("set_config {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("miss_send_len = ");
q.text("%#x" % self.miss_send_len)
q.breakable()
q.text('}')
message.subtypes[9] = set_config
class table_mod(message):
version = 1
type = 22
def __init__(self, xid=None, table_id=None, config=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if table_id != None:
self.table_id = table_id
else:
self.table_id = 0
if config != None:
self.config = config
else:
self.config = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!B", self.table_id))
packed.append('\x00' * 3)
packed.append(struct.pack("!L", self.config))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = table_mod()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 22)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.table_id = reader.read("!B")[0]
reader.skip(3)
obj.config = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.table_id != other.table_id: return False
if self.config != other.config: return False
return True
def pretty_print(self, q):
q.text("table_mod {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("table_id = ");
q.text("%#x" % self.table_id)
q.text(","); q.breakable()
q.text("config = ");
q.text("%#x" % self.config)
q.breakable()
q.text('}')
message.subtypes[22] = table_mod
class table_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 3
def __init__(self, xid=None, flags=None, entries=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if entries != None:
self.entries = entries
else:
self.entries = []
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(loxi.generic_util.pack_list(self.entries))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = table_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 3)
obj.flags = reader.read("!H")[0]
obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.table_stats_entry.unpack)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.entries != other.entries: return False
return True
def pretty_print(self, q):
q.text("table_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("entries = ");
q.pp(self.entries)
q.breakable()
q.text('}')
stats_reply.subtypes[3] = table_stats_reply
class table_stats_request(stats_request):
version = 1
type = 16
stats_type = 3
def __init__(self, xid=None, flags=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = table_stats_request()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 16)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 3)
obj.flags = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
return True
def pretty_print(self, q):
q.text("table_stats_request {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
stats_request.subtypes[3] = table_stats_request
def parse_header(buf):
if len(buf) < 8:
raise loxi.ProtocolError("too short to be an OpenFlow message")
return struct.unpack_from("!BBHL", buf)
def parse_message(buf):
msg_ver, msg_type, msg_len, msg_xid = parse_header(buf)
if msg_ver != ofp.OFP_VERSION and msg_type != ofp.OFPT_HELLO:
raise loxi.ProtocolError("wrong OpenFlow version (expected %d, got %d)" % (ofp.OFP_VERSION, msg_ver))
if len(buf) != msg_len:
raise loxi.ProtocolError("incorrect message size")
return message.unpack(loxi.generic_util.OFReader(buf))
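# Illustrative usage sketch (editor's addition, not part of the generated LOXI
# output): parse_message() above expects a complete wire-format buffer whose
# length field matches len(buf). The never-called helper below round-trips an
# assumed 8-byte OFPT_HELLO header (version=1, type=0, length=8, xid=0x12345678).
def _example_parse_hello():
    hello_buf = struct.pack("!BBHL", 1, 0, 8, 0x12345678)
    msg = parse_message(hello_buf)
    assert msg.xid == 0x12345678
    return msg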
| gzamboni/sdnResilience | loxi/of10/message.py | Python | gpl-2.0 | 248,179 | 0.006455 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_texture_expand_normal'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_texture_expand_normal',error_checker=_errors._error_checker)
GL_TEXTURE_UNSIGNED_REMAP_MODE_NV=_C('GL_TEXTURE_UNSIGNED_REMAP_MODE_NV',0x888F)
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/NV/texture_expand_normal.py | Python | lgpl-3.0 | 591 | 0.021997 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'SPUser', fields ['username']
db.create_unique(u'frontend_spuser', ['username'])
def backwards(self, orm):
# Removing unique constraint on 'SPUser', fields ['username']
db.delete_unique(u'frontend_spuser', ['username'])
models = {
u'frontend.backorder': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'BackOrder'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'back_orders'", 'to': u"orm['frontend.OrderProduct']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'frontend.catalog': {
'Meta': {'ordering': "('name',)", 'object_name': 'Catalog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.catalogissue': {
'Meta': {'object_name': 'CatalogIssue'},
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['frontend.Catalog']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'catalog_issues'", 'symmetrical': 'False', 'through': u"orm['frontend.CatalogIssueProduct']", 'to': u"orm['frontend.Product']"})
},
u'frontend.catalogissueproduct': {
'Meta': {'object_name': 'CatalogIssueProduct'},
'catalog_issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['frontend.CatalogIssue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_ref': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'page_ref': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'catalog_links'", 'to': u"orm['frontend.Product']"}),
'sub_ref': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
u'frontend.company': {
'Meta': {'object_name': 'Company'},
'fax': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'logo_img': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True'}),
'logo_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'registration': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.customer': {
'Meta': {'object_name': 'Customer'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'Australia'", 'max_length': '100'}),
'customer_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_attn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_country': ('django.db.models.fields.CharField', [], {'default': "'Australia'", 'max_length': '100'}),
'delivery_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'delivery_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'delivery_suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'from_src_company_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'from_src_membadd_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'c_notes'", 'blank': 'True', 'to': u"orm['frontend.Note']"}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'registration': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'telephone_clean': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'frontend.customercontact': {
'Meta': {'object_name': 'CustomerContact'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['frontend.Customer']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.importnote': {
'Meta': {'object_name': 'ImportNote'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'model_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'src_model_id_field': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'src_model_id_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'})
},
u'frontend.invoice': {
'Meta': {'object_name': 'Invoice'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['frontend.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['frontend.Order']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'})
},
u'frontend.medium': {
'Meta': {'ordering': "('name',)", 'object_name': 'Medium'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
u'frontend.note': {
'Meta': {'object_name': 'Note'},
'create_dt': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'frontend.order': {
'Meta': {'ordering': "('-order_date',)", 'object_name': 'Order'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'to': u"orm['frontend.Customer']"}),
'from_borders_fakeid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'from_src_order_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invoice_address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invoice_company_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invoice_company_reg': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'invoice_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'invoice_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'invoice_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'invoice_suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'last_read': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'o_notes'", 'symmetrical': 'False', 'to': u"orm['frontend.Note']"}),
'order_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'order_notes': ('django.db.models.fields.CharField', [], {'max_length': '510', 'null': 'True', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'through': u"orm['frontend.OrderProduct']", 'to': u"orm['frontend.Product']"}),
'shipping_address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'shipping_address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'shipping_attn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'shipping_cost': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '9', 'decimal_places': '2'}),
'shipping_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'total_cost': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'total_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'wanted_by': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'frontend.orderproduct': {
'Meta': {'ordering': "('product__code',)", 'object_name': 'OrderProduct'},
'back_order': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'discount_percentage': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '5', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_quantity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ordered_products'", 'to': u"orm['frontend.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ordered_list'", 'to': u"orm['frontend.Product']"}),
'quantity': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '9', 'decimal_places': '2'}),
'with_tax': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'frontend.orderstatus': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'OrderStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': u"orm['frontend.Order']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PS'", 'max_length': '2'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['frontend.SPUser']", 'null': 'True', 'blank': 'True'})
},
u'frontend.pricelevel': {
'Meta': {'ordering': "('-min_amount',)", 'object_name': 'PriceLevel'},
'cost_per_item': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_amount': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'price_levels'", 'null': 'True', 'to': u"orm['frontend.Product']"})
},
u'frontend.product': {
'Meta': {'ordering': "('name',)", 'object_name': 'Product'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'current_stock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'manual_royalty': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['frontend.Medium']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'minimum_stock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'p_notes'", 'blank': 'True', 'to': u"orm['frontend.Note']"}),
'royalty_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['frontend.RoyaltyGroup']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'on_delete': 'models.PROTECT', 'to': u"orm['frontend.Size']"}),
'sp_cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'on_delete': 'models.PROTECT', 'to': u"orm['frontend.Supplier']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'frontend.royaltygroup': {
'Meta': {'ordering': "('name',)", 'object_name': 'RoyaltyGroup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'royalty': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
u'frontend.size': {
'Meta': {'ordering': "('width', 'height', 'depth', 'units')", 'object_name': 'Size'},
'depth': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4'}),
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'sub_notes': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4'})
},
u'frontend.spuser': {
'Meta': {'object_name': 'SPUser'},
'bg_color': ('colorfield.fields.ColorField', [], {'default': "'#FFFFFF'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'font_color': ('colorfield.fields.ColorField', [], {'default': "'#2B2B2B'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'font_size': ('django.db.models.fields.IntegerField', [], {'default': '12', 'null': 'True', 'blank': 'True'}),
'font_weight': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jodabrian_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'label_bg_color': ('colorfield.fields.ColorField', [], {'default': "'#EEEEEE'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'frontend.stockadjust': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'StockAdjust'},
'added_amount': ('django.db.models.fields.IntegerField', [], {}),
'current_amount': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stock_adjust'", 'to': u"orm['frontend.Product']"}),
'reason': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['frontend.SPUser']"})
},
u'frontend.supplier': {
'Meta': {'ordering': "('code', 'name')", 'object_name': 'Supplier'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
}
}
    complete_apps = ['frontend']
| the0forge/sp | frontend/migrations/0038_auto__add_unique_spuser_username.py | Python | gpl-3.0 | 22,106 | 0.008097 |
#!/usr/bin/python
#
# Copyright (c) 2017 Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_managed_disk
version_added: "2.4"
short_description: Manage Azure Managed Disks
description:
- Create, update and delete an Azure Managed Disk
options:
resource_group:
description:
- Name of a resource group where the managed disk exists or will be created.
required: true
name:
description:
- Name of the managed disk.
required: true
state:
description:
- Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
storage_account_type:
description:
- "Type of storage for the managed disk: C(Standard_LRS) or C(Premium_LRS). If not specified the disk is created C(Standard_LRS)."
choices:
- Standard_LRS
- Premium_LRS
create_option:
description:
            - "Allowed values: empty, import, copy."
            - "C(import) from a VHD file in I(source_uri) and C(copy) from previous managed disk I(source_uri)."
choices:
- empty
- import
- copy
source_uri:
description:
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
aliases:
- source_resource_uri
os_type:
description:
- "Type of Operating System: C(linux) or C(windows)."
- "Used when I(create_option) is either C(copy) or C(import) and the source is an OS disk."
- "If omitted during creation, no value is set."
- "If omitted during an update, no change is made."
- "Once set, this value cannot be cleared."
choices:
- linux
- windows
disk_size_gb:
description:
- "Size in GB of the managed disk to be created."
- "If I(create_option) is C(copy) then the value must be greater than or equal to the source's size."
managed_by:
description:
            - Name of an existing virtual machine with which the disk is or will be associated; this VM should be in the same resource group.
- To detach a disk from a vm, explicitly set to ''.
- If this option is unset, the value will not be changed.
version_added: 2.5
tags:
description:
- Tags to assign to the managed disk.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Bruno Medina (@brusMX)"
'''
EXAMPLES = '''
- name: Create managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Create managed operating system disk from page blob
azure_rm_managed_disk:
name: mymanageddisk
location: eastus2
resource_group: myResourceGroup
create_option: import
source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
os_type: windows
storage_account_type: Premium_LRS
- name: Mount the managed disk to VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
managed_by: testvm001
- name: Unmount the managed disk from VM
  azure_rm_managed_disk:
    name: mymanageddisk
    location: eastus
    resource_group: myResourceGroup
    managed_by: ''
    disk_size_gb: 4
- name: Delete managed disk
  azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
id:
description: The managed disk resource ID.
returned: always
type: dict
state:
description: Current state of the managed disk
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
# duplicated in azure_rm_managed_disk_facts
def managed_disk_to_dict(managed_disk):
create_data = managed_disk.creation_data
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
create_option=create_data.create_option.lower(),
source_uri=create_data.source_uri or create_data.source_resource_id,
disk_size_gb=managed_disk.disk_size_gb,
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
managed_by=managed_disk.managed_by
)
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str'
),
storage_account_type=dict(
type='str',
choices=['Standard_LRS', 'Premium_LRS']
),
create_option=dict(
type='str',
choices=['empty', 'import', 'copy']
),
source_uri=dict(
type='str',
aliases=['source_resource_uri']
),
os_type=dict(
type='str',
choices=['linux', 'windows']
),
disk_size_gb=dict(
type='int'
),
managed_by=dict(
type='str'
)
)
required_if = [
('create_option', 'import', ['source_uri']),
('create_option', 'copy', ['source_uri']),
('create_option', 'empty', ['disk_size_gb'])
]
self.results = dict(
changed=False,
state=dict())
self.resource_group = None
self.name = None
self.location = None
self.storage_account_type = None
self.create_option = None
self.source_uri = None
self.os_type = None
self.disk_size_gb = None
self.tags = None
self.managed_by = None
super(AzureRMManagedDisk, self).__init__(
derived_arg_spec=self.module_arg_spec,
required_if=required_if,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
result = None
changed = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
disk_instance = self.get_managed_disk()
result = disk_instance
# need create or update
if self.state == 'present':
parameter = self.generate_managed_disk_property()
if not disk_instance or self.is_different(disk_instance, parameter):
changed = True
if not self.check_mode:
result = self.create_or_update_managed_disk(parameter)
else:
result = True
# unmount from the old virtual machine and mount to the new virtual machine
if self.managed_by or self.managed_by == '':
vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
vm_name = vm_name or ''
if self.managed_by != vm_name:
changed = True
if not self.check_mode:
if vm_name:
self.detach(vm_name, result)
if self.managed_by:
self.attach(self.managed_by, result)
result = self.get_managed_disk()
if self.state == 'absent' and disk_instance:
changed = True
if not self.check_mode:
self.delete_managed_disk()
result = True
self.results['changed'] = changed
self.results['state'] = result
return self.results
def attach(self, vm_name, disk):
vm = self._get_vm(vm_name)
# find the lun
luns = ([d.lun for d in vm.storage_profile.data_disks]
if vm.storage_profile.data_disks else [])
lun = max(luns) + 1 if luns else 0
# prepare the data disk
params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type'))
data_disk = self.compute_models.DataDisk(lun=lun, create_option=self.compute_models.DiskCreateOptionTypes.attach, managed_disk=params)
vm.storage_profile.data_disks.append(data_disk)
self._update_vm(vm_name, vm)
def detach(self, vm_name, disk):
vm = self._get_vm(vm_name)
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()]
if len(vm.storage_profile.data_disks) == len(leftovers):
self.fail("No disk with the name '{0}' was found".format(disk.get('name')))
vm.storage_profile.data_disks = leftovers
self._update_vm(vm_name, vm)
def _update_vm(self, name, params):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, name, params)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc)))
def _get_vm(self, name):
try:
return self.compute_client.virtual_machines.get(self.resource_group, name, expand='instanceview')
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc)))
def generate_managed_disk_property(self):
# TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite, Zones
disk_params = {}
creation_data = {}
disk_params['location'] = self.location
disk_params['tags'] = self.tags
if self.storage_account_type:
storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type)
disk_params['sku'] = storage_account_type
disk_params['disk_size_gb'] = self.disk_size_gb
creation_data['create_option'] = self.compute_models.DiskCreateOption.empty
if self.create_option == 'import':
creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum
creation_data['source_uri'] = self.source_uri
elif self.create_option == 'copy':
creation_data['create_option'] = self.compute_models.DiskCreateOption.copy
creation_data['source_resource_id'] = self.source_uri
if self.os_type:
typecon = {
'linux': self.compute_models.OperatingSystemTypes.linux,
'windows': self.compute_models.OperatingSystemTypes.windows
}
disk_params['os_type'] = typecon[self.os_type]
else:
disk_params['os_type'] = None
disk_params['creation_data'] = creation_data
return disk_params
def create_or_update_managed_disk(self, parameter):
try:
poller = self.compute_client.disks.create_or_update(
self.resource_group,
self.name,
parameter)
aux = self.get_poller_result(poller)
return managed_disk_to_dict(aux)
except CloudError as e:
self.fail("Error creating the managed disk: {0}".format(str(e)))
# This method accounts for the difference in structure between the
# Azure retrieved disk and the parameters for the new disk to be created.
def is_different(self, found_disk, new_disk):
resp = False
if new_disk.get('disk_size_gb'):
if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
resp = True
if new_disk.get('os_type'):
if not found_disk['os_type'] == new_disk['os_type']:
resp = True
if new_disk.get('sku'):
if not found_disk['storage_account_type'] == new_disk['sku'].name:
resp = True
# Check how to implement tags
if new_disk.get('tags') is not None:
if not found_disk['tags'] == new_disk['tags']:
resp = True
return resp
def delete_managed_disk(self):
try:
poller = self.compute_client.disks.delete(
self.resource_group,
self.name)
return self.get_poller_result(poller)
except CloudError as e:
self.fail("Error deleting the managed disk: {0}".format(str(e)))
def get_managed_disk(self):
try:
resp = self.compute_client.disks.get(
self.resource_group,
self.name)
return managed_disk_to_dict(resp)
except CloudError as e:
self.log('Did not find managed disk')
def main():
"""Main execution"""
AzureRMManagedDisk()
if __name__ == '__main__':
main()
| EvanK/ansible | lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py | Python | gpl-3.0 | 14,499 | 0.002 |
from contextlib import nested
from contextlib import contextmanager
import mock
import testing as T
import types
from core import db
from core.settings import Settings
from core.mail import MailQueue
from core.util import get_servlet_urlspec
from core.xmppclient import XMPPQueue
import servlets.newpush
from servlets.newpush import NewPushServlet
from servlets.newpush import send_notifications
class NewPushServletTest(T.TestCase, T.ServletTestMixin):
def get_handlers(self):
return [get_servlet_urlspec(NewPushServlet)]
def test_newpush(self):
pushes = []
def on_db_return(success, db_results):
assert success
pushes.extend(db_results.fetchall())
with nested(
mock.patch.dict(db.Settings, T.MockedSettings),
mock.patch.object(NewPushServlet, "get_current_user", return_value = "jblack"),
mock.patch.object(NewPushServlet, "redirect"),
mock.patch.object(MailQueue, "enqueue_user_email"),
):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
title = "BestPushInTheWorld"
branch = "jblack"
push_type = "regular"
uri = "/newpush?push-title=%s&branch=%s&push-type=%s" % (
title, branch, push_type
)
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_before = len(pushes)
response = self.fetch(uri)
assert response.error == None
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_after = len(pushes)
T.assert_equal(num_pushes_before + 1, num_pushes_after)
# There should be one call to nodebot after a push is created
T.assert_equal(servlets.newpush.subprocess.call.call_count, 1)
# Verify that we have a valid call to
# subprocess.call. Getting the arguments involves ugly
# mock magic
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
mock.ANY, # nickname
mock.ANY, # channel
mock.ANY, # msg
])
def call_on_db_complete(self, urgent=False):
mocked_self = mock.Mock()
mocked_self.check_db_results = mock.Mock(return_value=None)
mocked_self.redirect = mock.Mock(return_value=None)
mocked_self.pushtype = 'normal'
mocked_self.on_db_complete = types.MethodType(NewPushServlet.on_db_complete.im_func, mocked_self)
push = mock.Mock()
push.lastrowid = 0
no_watcher_req = {
'user': 'testuser',
'watchers': None,
}
watched_req = {
'user': 'testuser',
'watchers': 'testuser1,testuser2',
}
if urgent:
no_watcher_req['tags'] = 'urgent'
watched_req['tags'] = 'urgent'
mocked_self.pushtype = 'urgent'
reqs = [no_watcher_req, watched_req]
mocked_self.on_db_complete('success', [push, reqs])
@mock.patch('servlets.newpush.send_notifications')
def test_normal_people_on_db_complete(self, notify):
self.call_on_db_complete()
        notify.assert_called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
@mock.patch('servlets.newpush.send_notifications')
def test_urgent_people_on_db_complete(self, notify):
self.call_on_db_complete(urgent=True)
        notify.assert_called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
class NotificationsTestCase(T.TestCase):
@contextmanager
def mocked_notifications(self):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
with mock.patch.object(MailQueue, "enqueue_user_email") as mocked_mail:
with mock.patch.object(XMPPQueue, "enqueue_user_xmpp") as mocked_xmpp:
yield mocked_call, mocked_mail, mocked_xmpp
def test_send_notifications(self):
"""New push sends notifications via IRC, XMPP and emails."""
self.people = ["fake_user1", "fake_user2"]
self.pushurl = "/fake_push_url?id=123"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
url = "https://%s%s" % (Settings['main_app']['servername'], self.pushurl)
msg = "%s: %s push starting! %s" % (', '.join(self.people), self.pushtype, url)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
msg,
mock.ANY, # subject
)
mocked_xmpp.assert_called_once_with(
self.people,
"Push starting! %s" % url
)
def test_send_notifications_empty_user_list(self):
"""If there is no pending push request we'll only send IRC and
email notifications, but not XMPP messages."""
self.people = []
self.pushurl = "fake_push_url"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
mock.ANY, # msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
mock.ANY, # msg
mock.ANY, # subject
)
T.assert_is(mocked_xmpp.called, False)
if __name__ == '__main__':
T.run()
| pombredanne/pushmanager | tests/test_servlet_newpush.py | Python | apache-2.0 | 6,300 | 0.00381 |
import logging
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from celery import shared_task
from allianceauth.services.tasks import QueueOnce
from .models import MumbleUser
logger = logging.getLogger(__name__)
class MumbleTasks:
def __init__(self):
pass
@staticmethod
def has_account(user):
try:
return user.mumble.username != ''
except ObjectDoesNotExist:
return False
@staticmethod
def disable_mumble():
logger.info("Deleting all MumbleUser models")
MumbleUser.objects.all().delete()
@staticmethod
@shared_task(bind=True, name="mumble.update_groups", base=QueueOnce)
def update_groups(self, pk):
user = User.objects.get(pk=pk)
logger.debug("Updating mumble groups for user %s" % user)
if MumbleTasks.has_account(user):
try:
if not user.mumble.update_groups():
raise Exception("Group sync failed")
logger.debug("Updated user %s mumble groups." % user)
return True
except MumbleUser.DoesNotExist:
logger.info("Mumble group sync failed for {}, user does not have a mumble account".format(user))
except:
logger.exception("Mumble group sync failed for %s, retrying in 10 mins" % user)
raise self.retry(countdown=60 * 10)
else:
logger.debug("User %s does not have a mumble account, skipping" % user)
return False
@staticmethod
@shared_task(name="mumble.update_all_groups")
def update_all_groups():
logger.debug("Updating ALL mumble groups")
for mumble_user in MumbleUser.objects.exclude(username__exact=''):
MumbleTasks.update_groups.delay(mumble_user.user.pk)
| Kaezon/allianceauth | allianceauth/services/modules/mumble/tasks.py | Python | gpl-2.0 | 1,857 | 0.002154 |
# Bloom Framework
#
# John Boxall
# Copyright 2008 Handi Mobility
# www.handimobility.ca
| kevintom/django-bloom | bloom/image/templatetags/__init__.py | Python | gpl-3.0 | 89 | 0 |
#!/usr/bin/env python3
# Copyright 2018 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various code adapted from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/install-sysroot.py
import os
import shutil
import subprocess
import sys
import urllib.request
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Sysroot revision from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/sysroots.json
SERVER = 'https://commondatastorage.googleapis.com'
PATH = 'chrome-linux-sysroot/toolchain'
REVISION = '43a87bbebccad99325fdcf34166295b121ee15c7'
FILENAME = 'debian_sid_amd64_sysroot.tar.xz'
def main():
url = '%s/%s/%s/%s' % (SERVER, PATH, REVISION, FILENAME)
sysroot = os.path.join(SCRIPT_DIR, os.pardir, 'third_party', 'linux',
'sysroot')
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
return
print('Installing Debian root image from %s' % url)
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, FILENAME)
print('Downloading %s' % url)
for _ in range(3):
response = urllib.request.urlopen(url)
with open(tarball, 'wb') as f:
f.write(response.read())
break
else:
raise Exception('Failed to download %s' % url)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
if __name__ == '__main__':
main()
sys.exit(0)
| nwjs/chromium.src | third_party/crashpad/crashpad/build/install_linux_sysroot.py | Python | bsd-3-clause | 2,174 | 0 |
"""
Django settings for sassafras project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_NAME = 'sassafras'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*(^jxc&^d46%8bi)dzq3!kezs=bnnh&lbgalj0%zy5y9w!^voi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'debug_toolbar',
'bootstrap3',
'sass_processor',
'trello_cards',
'storages'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sassafras.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sassafras.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, PROJECT_NAME, STATIC_URL)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
)
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# Django storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_STORAGE_BUCKET_NAME = 'django-sassafras-test'
# django-storages' S3Boto backend reads AWS_ACCESS_KEY_ID, not AWS_ACCESS_KEY.
AWS_ACCESS_KEY_ID = os.getenv('AWS_S3_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_S3_SECRET_ACCESS_KEY')
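# Illustrative note (not part of the original settings): the two credentials above
# are read from the environment, so a local run might look like:
#   export AWS_S3_ACCESS_KEY_ID=...
#   export AWS_S3_SECRET_ACCESS_KEY=...
#   python manage.py collectstatic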
| mliudev/sassafras | sassafras/settings.py | Python | mit | 3,364 | 0 |
# Copyright (c) 2004 Gavin E. Crooks <gec@compbio.berkeley.edu>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Custom extensions to OptionParse for parsing command line options."""
# FIXME: Docstring
# TODO: Add profiling option
# DeOptionParser :
#
# http://docs.python.org/lib/module-optparse.html
#
# Random_options :
# Set random generator and seed. Use options.random as
# source of random numbers
# Copyright :
# print copyright information
# Documentation :
# print extended document information
#
# Additional file_in and file_out types
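#
# A minimal usage sketch (illustrative only; the option names below are made up
# and not defined elsewhere in this module):
#
#   parser = DeOptionParser(usage="%prog [options]",
#                           add_random_options=True)
#   parser.add_option("--data", dest="data", type="file_in",
#                     help="Input data file")
#   (options, args) = parser.parse_args(sys.argv[1:])
#   options.random.random()   # generator configured via --seed/--generator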
import sys
from copy import copy
from optparse import Option
from optparse import OptionParser
from optparse import IndentedHelpFormatter
from optparse import OptionValueError
import random
def _copyright_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print parser.copyright
sys.exit()
def _doc_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print parser.long_description
sys.exit()
class DeHelpFormatter(IndentedHelpFormatter) :
def __init__ (self,
indent_increment=2,
max_help_position=32,
width=78,
short_first=1):
IndentedHelpFormatter.__init__(
self, indent_increment, max_help_position,
width, short_first)
def format_option_strings (self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = option._short_opts
long_opts = [lopt + " " + metavar for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if not short_opts : short_opts = [" ",]
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return " ".join(opts)
def _check_file_in(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return file(value, "r")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_file_out(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return file(value, "w+")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_boolean(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = {'no': False, 'false':False, '0': False,
'yes': True, 'true': True, '1':True }
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from 'yes' or 'no', 'true' or 'false')" % (opt, value))
def _check_dict(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = option.choices
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from '%s')" % (opt, value, "', '".join(choices)))
class DeOption(Option):
TYPES = Option.TYPES + ("file_in","file_out", "boolean", "dict")
TYPE_CHECKER = copy(Option.TYPE_CHECKER)
TYPE_CHECKER["file_in"] = _check_file_in
TYPE_CHECKER["file_out"] = _check_file_out
TYPE_CHECKER["boolean"] = _check_boolean
TYPE_CHECKER["dict"] = _check_dict
choices = None
def _new_check_choice(self):
if self.type == "dict":
if self.choices is None:
raise OptionValueError(
"must supply a dictionary of choices for type 'dict'")
elif not isinstance(self.choices, dict):
raise OptionValueError(
"choices must be a dictionary ('%s' supplied)"
% str(type(self.choices)).split("'")[1])
return
self._check_choice()
# Have to override _check_choices so that we can parse
# a dict through to check_dict
CHECK_METHODS = Option.CHECK_METHODS
CHECK_METHODS[2] = _new_check_choice
class DeOptionParser(OptionParser) :
def __init__(self,
usage=None,
option_list=None,
option_class=DeOption,
version=None,
conflict_handler="error",
description=None,
long_description = None,
formatter=DeHelpFormatter(),
add_help_option=True,
prog=None,
copyright=None,
add_verbose_options=True,
add_random_options=False
):
OptionParser.__init__(self,
usage,
option_list,
option_class,
version,
conflict_handler,
description,
formatter,
add_help_option,
prog )
if long_description :
self.long_description = long_description
self.add_option("--doc",
action="callback",
callback=_doc_callback,
help="Detailed documentation")
if copyright :
self.copyright = copyright
self.add_option("--copyright",
action="callback",
callback=_copyright_callback,
help="")
if add_verbose_options :
self.add_option("-q", "--quite",
action="store_false",
dest="verbose",
default=False,
help="Run quietly (default)")
self.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose output (Not quite)")
self.random_options = False
if add_random_options :
self.random_options = True
self.add_option("--seed",
action="store",
type = "int",
dest="random_seed",
help="Initial seed for pseudo-random number generator. (default: System time)",
metavar="INTEGER" )
self.add_option("--generator",
action="store",
dest="random_generator",
default="MersenneTwister",
help="Select MersenneTwister (default) or WichmannHill pseudo-random number generator",
metavar="TYPE" )
def parse_args(self,args, values=None) :
(options, args) = OptionParser.parse_args(self, args, values)
if self.random_options :
if options.random_generator is None or options.random_generator =="MersenneTwister" :
r = random.Random()
elif options.random_generator == "WichmannHill" :
r = random.WichmannHill()
else :
self.error("Acceptible generators are MersenneTwister (default) or WichmannHill")
if options.random_seed :
r.seed(options.random_seed)
options.__dict__["random"] = r
return (options, args)
| NarlikarLab/DIVERSITY | weblogoMod/corebio/utils/deoptparse.py | Python | gpl-3.0 | 9,010 | 0.016426 |
# -*- coding: utf-8 -*-
from redis._compat import iteritems, iterkeys, itervalues
from redis.connection import Token
from redis.exceptions import RedisError
from .base import RedisBase
class ZsetCommands(RedisBase):
# SORTED SET COMMANDS
def zadd(self, name, *args, **kwargs):
"""
Set any number of score, element-name pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: score1, name1, score2, name2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
If using non-strict Redis (strict_redis=False), args are expected in swapped form:
redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(self.strict_redis and args or reversed(args))
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, value, amount=1):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
        ``desc`` a boolean indicating whether to sort the results in descending order
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
        ``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting the minimum number of returns
``score_cast_func`` a callable used to cast the score return value
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
options = {'score_cast_func': score_cast_func}
return self.execute_command('ZSCAN', *pieces, **options)
def zscan_iter(self, name, match=None, count=None,
score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting the minimum number of returns
``score_cast_func`` a callable used to cast the score return value
"""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count,
score_cast_func=score_cast_func)
for item in data:
yield item
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = iterkeys(keys), itervalues(keys)
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append(Token('WEIGHTS'))
pieces.extend(weights)
if aggregate:
pieces.append(Token('AGGREGATE'))
pieces.append(aggregate)
return self.execute_command(*pieces)
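# Illustrative usage (assumes a concrete client class that mixes in ZsetCommands
# and implements execute_command; the key and member names are examples only):
#
#   client.zadd('scores', 1.0, 'alice', 2.5, 'bob')         # strict arg order
#   client.zrangebyscore('scores', 1, 3, withscores=True)   # list of (member, score) pairs
#   for member, score in client.zscan_iter('scores'):
#       print(member, score)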
| katakumpo/niceredis | niceredis/client/zset.py | Python | mit | 11,593 | 0.000086 |
# -*- coding: utf-8 -*-
"""
Module for the generic handling of a connection to the site.
"""
#= IMPORT ======================================================================
import datetime
from twisted.internet import protocol
from src.config import config
from src.enums import LOG, OPTION, TRUST
from src.log import log
#= VARIABILI ===================================================================
# (TD) instead of using connections, use sessions, attributes inherited
# in this class; maybe that would solve the connections problem
connections = {} # Connections to the site
#= CLASSI ======================================================================
# (TD) Wrong! These attributes should be put into the site's Twisted Session
class Connection(protocol.Protocol):
"""
    This class manages the connections, and the related sessions, of the
    different clients to the Mud.
"""
def __init__(self):
        self.account = None # Account in use
        self.new_player = None # Character being created
        self.player = None # Character currently in game
        self.session = None # Twisted Session
        self.request = None # Twisted Request
        self.ip = "None" # Client IP
        self.buffer = "" # Buffer with all the game output to send to the client on each ajax request
        self.stop_buffering = False # If set to a truthy value, sending of the buffer ends with a session close
        self.already_closed = False # Whether the connection has already been closed
        self.defer_exit_from_game = None # Deferred fired when the player exits the game
        self.logged_on = datetime.datetime.now() # Date and time of the login
#- Fine Inizializzazione -
def reinit(self):
self.stop_buffering = False
self.buffer = ""
#- Fine Metodo -
def get_id(self, conn_looker=None):
"""
        Returns one or all of the following pieces of information: the IP of
        the connection, the account name and the character name.
        Very useful in log messages.
        This method pairs with the one in the Account class.
"""
account_name = "None"
player_name = "None"
if self.account:
account_name = self.account.name
if self.player:
player_name = self.player.code
        # Show the full IP only to those who have enough TRUST
if not conn_looker or (conn_looker and conn_looker.account and conn_looker.account.trust >= TRUST.IMPLEMENTOR):
return "%s %s.%s" % (self.ip, account_name, player_name)
else:
ip_number, port = self.ip.split(":")
return "*.*.*.%s:%s %s.%s" % (ip_number.split(".")[3], port, account_name, player_name)
#- Fine Metodo -
    # (TD) maybe consider converting these identifying strings into
    # elements of an enumeration
def get_browser(self):
"""
        Returns an identifying code for the browser the client is using.
        Useful when xhtml code has to be generated specifically for a browser.
"""
if (not self.request or not self.request.received_headers
or not "user-agent" in self.request.received_headers):
return ""
user_agent = self.request.received_headers["user-agent"]
if not user_agent:
return ""
browser = get_browser_from_ua(user_agent)
if browser == "???":
log.user_agent(self.request)
return browser
#- Fine Metodo -
def get_os(self):
"""
        Returns an identifying code for the operating system the client
        is using.
"""
if (not self.request or not self.request.received_headers
or not "user-agent" in self.request.received_headers):
return ""
user_agent = self.request.received_headers["user-agent"]
if not user_agent:
return ""
operating_system = get_os_from_ua(user_agent)
        # Not only unknown user agents are logged but also generic ones,
        # to see whether better information can be gleaned, or simply
        # out of curiosity
if operating_system in ("???", "WINDOWS", "LINUX", "MAC", "MOBILE"):
log.user_agent(self.request)
return operating_system
#- Fine Metodo -
def get_user_agent(self):
if not self.request:
return ""
if not self.request.received_headers:
return ""
if not "user-agent" in self.request.received_headers:
return ""
if not self.request.received_headers["user-agent"]:
return ""
return self.request.received_headers["user-agent"]
#- Fine Metodo -
def close_game_request(self):
"""
        Callback used to close any game-page connection still open when the
        web session related to the account expires.
"""
if not self.player:
return
if not self.account or OPTION.COMET not in self.account.options:
self.player.exit_from_game(True)
if not self.player or not self.player.game_request:
return
log.conn("Chiusura della connessione al gioco: %s" % self.get_id())
try:
self.player.game_request.finish()
except UserWarning:
pass
if self.player:
self.player.game_request = None
self.player = None
#- Fine Metodo -
#= FUNZIONI ====================================================================
def close_all_connections():
for conn in reversed(connections.values()):
conn.close_game_request()
#- Fine Metodo -
def get_browser_from_ua(user_agent):
if not user_agent:
log.bug("user_agent non è un parametro valido: r" % user_agent)
return ""
# -------------------------------------------------------------------------
if "MSIE " in user_agent:
version = user_agent.split("MSIE")[1].split(".")[0]
return "IE_" + version.strip()
elif "Firefox/" in user_agent:
version = user_agent.split("Firefox/")[1].split(".")[0]
return "FIREFOX_" + version.strip()
elif "Chrome/" in user_agent:
version = user_agent.split("Chrome/")[1].split(".")[0]
return "CHROME_" + version.strip()
elif "Safari/" in user_agent:
version = user_agent.split("Version/")[1].split(".")[0]
return "SAFARI_" + version.strip()
elif "Opera/" in user_agent:
version = user_agent.split("Version/")[1].split(".")[0]
return "OPERA_" + version.strip()
elif "Iceweasel/" in user_agent:
version = user_agent.split("Iceweasel/")[1].split(".")[0]
return "FIREFOX_" + version.strip()
elif "Kindle" in user_agent:
        version = user_agent.split("Kindle/")[1].split(".")[0]
return "KINDLE_" + version.strip()
elif "Links (2" in user_agent:
return "LINKS_2"
elif "ELinks/0" in user_agent:
return "ELINKS_0"
return "???"
#- Fine Funzione -
def get_os_from_ua(user_agent):
if not user_agent:
log.bug("user_agent non è un parametro valido: r" % user_agent)
return ""
# -------------------------------------------------------------------------
if "Windows NT 6.1" in user_agent:
return "WINDOWS_SEVEN"
elif "Windows NT 6.0" in user_agent:
return "WINDOWS_VISTA"
elif "Windows NT 5.2" in user_agent:
return "WINDOWS_2003"
elif "Windows NT 5.1" in user_agent:
return "WINDOWS_XP"
elif "Windows NT 5.0" in user_agent:
return "WINDOWS_2000"
elif "Windows" in user_agent:
return "WINDOWS"
elif "Ubuntu" in user_agent:
return "LINUX_UBUNTU"
elif "Sabayon" in user_agent:
return "LINUX_SABAYON"
elif "CentOS" in user_agent:
return "LINUX_CENTOS"
elif "Linux" in user_agent:
return "LINUX"
elif "OS X" in user_agent:
return "MAC_OSX"
elif "Macintosh" in user_agent:
return "MAC"
elif "Mobile" in user_agent:
if "Android" in user_agent:
return "MOBILE_ANDROID"
else:
return "MOBILE"
return "???"
#- Fine Funzione -
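# Illustrative example (not part of the original module): how a user agent string
# maps to the codes returned by the helpers above.
#
#   ua = "Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0"
#   get_browser_from_ua(ua)   # -> "FIREFOX_38"
#   get_os_from_ua(ua)        # -> "WINDOWS_SEVEN"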
| Onirik79/aaritmud | src/connection.py | Python | gpl-2.0 | 8,524 | 0.005397 |
#!/usr/bin/python2.4
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.tr_html'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import types
import unittest
from grit.gather import tr_html
from grit import clique
from grit import util
class ParserUnittest(unittest.TestCase):
def testChunking(self):
p = tr_html.HtmlChunks()
chunks = p.Parse('<p>Hello <b>dear</b> how <i>are</i>you?<p>Fine!')
self.failUnless(chunks == [
(False, '<p>', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, '<p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear</b> how <i>are</i>you? <p>Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, ' <p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear how <i>are you? <p> Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear how <i>are you?', ''),
(False, ' <p> ', ''), (True, 'Fine!', '')])
# Ensure translateable sections that start with inline tags contain
# the starting inline tag.
chunks = p.Parse('<b>Hello!</b> how are you?<p><i>I am fine.</i>')
self.failUnless(chunks == [
(True, '<b>Hello!</b> how are you?', ''), (False, '<p>', ''),
(True, '<i>I am fine.</i>', '')])
# Ensure translateable sections that end with inline tags contain
# the ending inline tag.
chunks = p.Parse("Hello! How are <b>you?</b><p><i>I'm fine!</i>")
self.failUnless(chunks == [
(True, 'Hello! How are <b>you?</b>', ''), (False, '<p>', ''),
(True, "<i>I'm fine!</i>", '')])
# Check capitals and explicit descriptions
chunks = p.Parse('<!-- desc=bingo! --><B>Hello!</B> how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
chunks = p.Parse('<B><!-- desc=bingo! -->Hello!</B> how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# Linebreaks get changed to spaces just like any other HTML content
chunks = p.Parse('<B>Hello!</B> <!-- desc=bi\nngo\n! -->how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bi ngo !'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# In this case, because the explicit description appears after the first
# translateable, it will actually apply to the second translateable.
chunks = p.Parse('<B>Hello!</B> how are you?<!-- desc=bingo! --><P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', ''), (False, '<P>', ''),
(True, '<I>I am fine.</I>', 'bingo!')])
# Check that replaceables within block tags (where attributes would go) are
# handled correctly.
chunks = p.Parse('<b>Hello!</b> how are you?<p [BINGO] [$~BONGO~$]>'
'<i>I am fine.</i>')
self.failUnless(chunks == [
(True, '<b>Hello!</b> how are you?', ''),
(False, '<p [BINGO] [$~BONGO~$]>', ''),
(True, '<i>I am fine.</i>', '')])
# Check that the contents of preformatted tags preserve line breaks.
chunks = p.Parse('<textarea>Hello\nthere\nhow\nare\nyou?</textarea>')
self.failUnless(chunks == [(False, '<textarea>', ''),
(True, 'Hello\nthere\nhow\nare\nyou?', ''), (False, '</textarea>', '')])
# ...and that other tags' line breaks are converted to spaces
chunks = p.Parse('<p>Hello\nthere\nhow\nare\nyou?</p>')
self.failUnless(chunks == [(False, '<p>', ''),
(True, 'Hello there how are you?', ''), (False, '</p>', '')])
def testTranslateableAttributes(self):
p = tr_html.HtmlChunks()
# Check that the translateable attributes in <img>, <submit>, <button> and
# <text> elements buttons are handled correctly.
chunks = p.Parse('<img src=bingo.jpg alt="hello there">'
'<input type=submit value="hello">'
'<input type="button" value="hello">'
'<input type=\'text\' value=\'Howdie\'>')
self.failUnless(chunks == [
(False, '<img src=bingo.jpg alt="', ''), (True, 'hello there', ''),
(False, '"><input type=submit value="', ''), (True, 'hello', ''),
(False, '"><input type="button" value="', ''), (True, 'hello', ''),
(False, '"><input type=\'text\' value=\'', ''), (True, 'Howdie', ''),
(False, '\'>', '')])
def testTranslateableHtmlToMessage(self):
msg = tr_html.HtmlToMessage(
'Hello <b>[USERNAME]</b>, <how> <i>are</i> you?')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, '
'<how> BEGIN_ITALICareEND_ITALIC you?')
msg = tr_html.HtmlToMessage('<b>Hello</b><I>Hello</I><b>Hello</b>')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_BOLD_1HelloEND_BOLD_1BEGIN_ITALICHelloEND_ITALIC'
'BEGIN_BOLD_2HelloEND_BOLD_2')
# Check that nesting (of the <font> tags) is handled correctly - i.e. that
# the closing placeholder numbers match the opening placeholders.
msg = tr_html.HtmlToMessage(
'''<font size=-1><font color=#FF0000>Update!</font> '''
'''<a href='http://desktop.google.com/whatsnew.html?hl=[$~LANG~$]'>'''
'''New Features</a>: Now search PDFs, MP3s, Firefox web history, and '''
'''more</font>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_FONT_1BEGIN_FONT_2Update!END_FONT_2 BEGIN_LINK'
'New FeaturesEND_LINK: Now search PDFs, MP3s, Firefox '
'web history, and moreEND_FONT_1')
msg = tr_html.HtmlToMessage('''<a href='[$~URL~$]'><b>[NUM][CAT]</b></a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres == 'BEGIN_LINKBEGIN_BOLDX_NUM_XX_CAT_XEND_BOLDEND_LINK')
msg = tr_html.HtmlToMessage(
'''<font size=-1><a class=q onClick='return window.qs?qs(this):1' '''
'''href='http://[WEBSERVER][SEARCH_URI]'>Desktop</a></font> '''
''' ''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'''BEGIN_FONTBEGIN_LINKDesktopEND_LINKEND_FONTSPACE''')
msg = tr_html.HtmlToMessage(
'''<br><br><center><font size=-2>©2005 Google </font></center>''', 1)
pres = msg.GetPresentableContent()
self.failUnless(pres ==
u'BEGIN_BREAK_1BEGIN_BREAK_2BEGIN_CENTERBEGIN_FONT\xa92005'
u' Google END_FONTEND_CENTER')
msg = tr_html.HtmlToMessage(
''' - <a class=c href=[$~CACHE~$]>Cached</a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
' - BEGIN_LINKCachedEND_LINK')
# Check that upper-case tags are handled correctly.
msg = tr_html.HtmlToMessage(
'''You can read the <A HREF='http://desktop.google.com/privacypolicy.'''
'''html?hl=[LANG_CODE]'>Privacy Policy</A> and <A HREF='http://desktop'''
'''.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'You can read the BEGIN_LINK_1Privacy PolicyEND_LINK_1 and '
'BEGIN_LINK_2Privacy FAQEND_LINK_2 online.')
# Check that tags with linebreaks immediately preceding them are handled
# correctly.
msg = tr_html.HtmlToMessage(
'''You can read the
<A HREF='http://desktop.google.com/privacypolicy.html?hl=[LANG_CODE]'>Privacy Policy</A>
and <A HREF='http://desktop.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres == '''You can read the
BEGIN_LINK_1Privacy PolicyEND_LINK_1
and BEGIN_LINK_2Privacy FAQEND_LINK_2 online.''')
class TrHtmlUnittest(unittest.TestCase):
def testTable(self):
html = tr_html.TrHtml('''<table class="shaded-header"><tr>
<td class="header-element b expand">Preferences</td>
<td class="header-element s">
<a href="http://desktop.google.com/preferences.html">Preferences Help</a>
</td>
</tr></table>''')
html.Parse()
self.failUnless(html.skeleton_[3].GetMessage().GetPresentableContent() ==
'BEGIN_LINKPreferences HelpEND_LINK')
def testSubmitAttribute(self):
html = tr_html.TrHtml('''</td>
<td class="header-element"><input type=submit value="Save Preferences"
name=submit2></td>
</tr></table>''')
html.Parse()
self.failUnless(html.skeleton_[1].GetMessage().GetPresentableContent() ==
'Save Preferences')
def testWhitespaceAfterInlineTag(self):
'''Test that even if there is whitespace after an inline tag at the start
of a translateable section the inline tag will be included.
'''
html = tr_html.TrHtml('''<label for=DISPLAYNONE><font size=-1> Hello</font>''')
html.Parse()
self.failUnless(html.skeleton_[1].GetMessage().GetRealContent() ==
'<font size=-1> Hello</font>')
def testSillyHeader(self):
html = tr_html.TrHtml('''[!]
title\tHello
bingo
bongo
bla
<p>Other stuff</p>''')
html.Parse()
content = html.skeleton_[1].GetMessage().GetRealContent()
self.failUnless(content == 'Hello')
self.failUnless(html.skeleton_[-1] == '</p>')
# Right after the translateable the nontranslateable should start with
# a linebreak (this catches a bug we had).
self.failUnless(html.skeleton_[2][0] == '\n')
def testExplicitDescriptions(self):
html = tr_html.TrHtml('Hello [USER]<br/><!-- desc=explicit --><input type="button">Go!</input>')
html.Parse()
msg = html.GetCliques()[1].GetMessage()
self.failUnless(msg.GetDescription() == 'explicit')
self.failUnless(msg.GetRealContent() == 'Go!')
def testRegressionInToolbarAbout(self):
html = tr_html.TrHtml.FromFile(
util.PathFromRoot(r'grit/testdata/toolbar_about.html'))
html.Parse()
cliques = html.GetCliques()
for cl in cliques:
content = cl.GetMessage().GetRealContent()
if content.count('De parvis grandis acervus erit'):
self.failIf(content.count('$/translate'))
def HtmlFromFileWithManualCheck(self, f):
html = tr_html.TrHtml.FromFile(f)
html.Parse()
# For manual results inspection only...
list = []
for item in html.skeleton_:
if isinstance(item, types.StringTypes):
list.append(item)
else:
list.append(item.GetMessage().GetPresentableContent())
return html
def testPrivacyHtml(self):
html = self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/testdata/privacy.html'))
self.failUnless(html.skeleton_[1].GetMessage().GetRealContent() ==
'Privacy and Google Desktop Search')
self.failUnless(html.skeleton_[3].startswith('<'))
self.failUnless(len(html.skeleton_) > 10)
def testPreferencesHtml(self):
html = self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/testdata/preferences.html'))
# Verify that we don't get '[STATUS-MESSAGE]' as the original content of
# one of the MessageClique objects (it would be a placeholder-only message
# and we're supposed to have stripped those).
for item in filter(lambda x: isinstance(x, clique.MessageClique),
html.skeleton_):
if (item.GetMessage().GetRealContent() == '[STATUS-MESSAGE]' or
item.GetMessage().GetRealContent() == '[ADDIN-DO] [ADDIN-OPTIONS]'):
self.fail()
self.failUnless(len(html.skeleton_) > 100)
def AssertNumberOfTranslateables(self, files, num):
'''Fails if any of the files in files don't have exactly
num translateable sections.
Args:
files: ['file1', 'file2']
num: 3
'''
for f in files:
f = util.PathFromRoot(r'grit/testdata/%s' % f)
html = self.HtmlFromFileWithManualCheck(f)
self.failUnless(len(html.GetCliques()) == num)
def testFewTranslateables(self):
self.AssertNumberOfTranslateables(['browser.html', 'email_thread.html',
'header.html', 'mini.html',
'oneclick.html', 'script.html',
'time_related.html', 'versions.html'], 0)
self.AssertNumberOfTranslateables(['footer.html', 'hover.html'], 1)
def testOtherHtmlFilesForManualInspection(self):
files = [
'about.html', 'bad_browser.html', 'cache_prefix.html',
'cache_prefix_file.html', 'chat_result.html', 'del_footer.html',
'del_header.html', 'deleted.html', 'details.html', 'email_result.html',
'error.html', 'explicit_web.html', 'footer.html',
'homepage.html', 'indexing_speed.html',
'install_prefs.html', 'install_prefs2.html',
'oem_enable.html', 'oem_non_admin.html', 'onebox.html',
'password.html', 'quit_apps.html', 'recrawl.html',
'searchbox.html', 'sidebar_h.html', 'sidebar_v.html', 'status.html',
]
for f in files:
self.HtmlFromFileWithManualCheck(
util.PathFromRoot(r'grit/testdata/%s' % f))
def testTranslate(self):
# Note that the English translation of documents that use character
# literals (e.g. ©) will not be the same as the original document
# because the character literal will be transformed into the Unicode
# character itself. So for this test we choose some relatively complex
# HTML without character entities (but with because that's handled
# specially).
html = tr_html.TrHtml(''' <script>
<!--
function checkOffice() { var w = document.getElementById("h7");
var e = document.getElementById("h8"); var o = document.getElementById("h10");
if (!(w.checked || e.checked)) { o.checked=0;o.disabled=1;} else {o.disabled=0;} }
// -->
</script>
<input type=checkbox [CHECK-DOC] name=DOC id=h7 onclick='checkOffice()'>
<label for=h7> Word</label><br>
<input type=checkbox [CHECK-XLS] name=XLS id=h8 onclick='checkOffice()'>
<label for=h8> Excel</label><br>
<input type=checkbox [CHECK-PPT] name=PPT id=h9>
<label for=h9> PowerPoint</label><br>
</span></td><td nowrap valign=top><span class="s">
<input type=checkbox [CHECK-PDF] name=PDF id=hpdf>
<label for=hpdf> PDF</label><br>
<input type=checkbox [CHECK-TXT] name=TXT id=h6>
<label for=h6> Text, media, and other files</label><br>
</tr>
<tr><td nowrap valign=top colspan=3><span class="s"><br />
<input type=checkbox [CHECK-SECUREOFFICE] name=SECUREOFFICE id=h10>
<label for=h10> Password-protected Office documents (Word, Excel)</label><br />
<input type=checkbox [DISABLED-HTTPS] [CHECK-HTTPS] name=HTTPS id=h12><label
for=h12> Secure pages (HTTPS) in web history</label></span></td></tr>
</table>''')
html.Parse()
trans = html.Translate('en')
if (html.GetText() != trans):
self.fail()
def testHtmlToMessageWithBlockTags(self):
msg = tr_html.HtmlToMessage(
'Hello<p>Howdie<img alt="bingo" src="image.gif">', True)
result = msg.GetPresentableContent()
self.failUnless(
result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')
msg = tr_html.HtmlToMessage(
'Hello<p>Howdie<input type="button" value="bingo">', True)
result = msg.GetPresentableContent()
self.failUnless(
result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK')
def testHtmlToMessageRegressions(self):
msg = tr_html.HtmlToMessage(' - ', True)
result = msg.GetPresentableContent()
self.failUnless(result == ' - ')
def testEscapeUnescaped(self):
text = '© & "<hello>"'
unescaped = util.UnescapeHtml(text)
self.failUnless(unescaped == u'\u00a9\u00a0 & "<hello>"')
escaped_unescaped = util.EscapeHtml(unescaped, True)
self.failUnless(escaped_unescaped ==
u'\u00a9\u00a0 & "<hello>"')
def testRegressionCjkHtmlFile(self):
# TODO(joi) Fix this problem where unquoted attributes that
    # have a value containing CJK characters cause the regular expression
    # match never to return. (culprit is the _ELEMENT regexp)
if False:
html = self.HtmlFromFileWithManualCheck(util.PathFromRoot(
r'grit/testdata/ko_oem_enable_bug.html'))
self.failUnless(True)
def testRegressionCpuHang(self):
# If this regression occurs, the unit test will never return
html = tr_html.TrHtml(
'''<input type=text size=12 id=advFileTypeEntry [~SHOW-FILETYPE-BOX~] value="[EXT]" name=ext>''')
html.Parse()
if __name__ == '__main__':
unittest.main()
| paul99/clank | tools/grit/grit/gather/tr_html_unittest.py | Python | bsd-3-clause | 17,175 | 0.004134 |
from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
"""
This class is used to parse a Python file, it then divides them into a
class structure of different scopes.
:param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
"""
node_map = {
'expr_stmt': tree.ExprStmt,
'classdef': tree.Class,
'funcdef': tree.Function,
'file_input': tree.Module,
'import_name': tree.ImportName,
'import_from': tree.ImportFrom,
'break_stmt': tree.KeywordStatement,
'continue_stmt': tree.KeywordStatement,
'return_stmt': tree.ReturnStmt,
'raise_stmt': tree.KeywordStatement,
'yield_expr': tree.YieldExpr,
'del_stmt': tree.KeywordStatement,
'pass_stmt': tree.KeywordStatement,
'global_stmt': tree.GlobalStmt,
'nonlocal_stmt': tree.KeywordStatement,
'print_stmt': tree.KeywordStatement,
'assert_stmt': tree.AssertStmt,
'if_stmt': tree.IfStmt,
'with_stmt': tree.WithStmt,
'for_stmt': tree.ForStmt,
'while_stmt': tree.WhileStmt,
'try_stmt': tree.TryStmt,
'sync_comp_for': tree.SyncCompFor,
# Not sure if this is the best idea, but IMO it's the easiest way to
# avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
'decorator': tree.Decorator,
'lambdef': tree.Lambda,
'lambdef_nocond': tree.Lambda,
'namedexpr_test': tree.NamedExpr,
}
default_node = tree.PythonNode
# Names/Keywords are handled separately
_leaf_map = {
PythonTokenTypes.STRING: tree.String,
PythonTokenTypes.NUMBER: tree.Number,
PythonTokenTypes.NEWLINE: tree.Newline,
PythonTokenTypes.ENDMARKER: tree.EndMarker,
PythonTokenTypes.FSTRING_STRING: tree.FStringString,
PythonTokenTypes.FSTRING_START: tree.FStringStart,
PythonTokenTypes.FSTRING_END: tree.FStringEnd,
}
def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
super().__init__(pgen_grammar, start_nonterminal,
error_recovery=error_recovery)
self.syntax_errors = []
self._omit_dedent_list = []
self._indent_counter = 0
def parse(self, tokens):
if self._error_recovery:
if self._start_nonterminal != 'file_input':
raise NotImplementedError
tokens = self._recovery_tokenize(tokens)
return super().parse(tokens)
def convert_node(self, nonterminal, children):
"""
Convert raw node information to a PythonBaseNode instance.
This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
try:
node = self.node_map[nonterminal](children)
except KeyError:
if nonterminal == 'suite':
# We don't want the INDENT/DEDENT in our parser tree. Those
# leaves are just cancer. They are virtual leaves and not real
# ones and therefore have pseudo start/end positions and no
# prefixes. Just ignore them.
children = [children[0]] + children[2:-1]
node = self.default_node(nonterminal, children)
for c in children:
c.parent = node
return node
def convert_leaf(self, type, value, prefix, start_pos):
# print('leaf', repr(value), token.tok_name[type])
if type == NAME:
if value in self._pgen_grammar.reserved_syntax_strings:
return tree.Keyword(value, start_pos, prefix)
else:
return tree.Name(value, start_pos, prefix)
return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)
def error_recovery(self, token):
tos_nodes = self.stack[-1].nodes
if tos_nodes:
last_leaf = tos_nodes[-1].get_last_leaf()
else:
last_leaf = None
if self._start_nonterminal == 'file_input' and \
(token.type == PythonTokenTypes.ENDMARKER
or token.type == DEDENT and not last_leaf.value.endswith('\n')
and not last_leaf.value.endswith('\r')):
# In Python statements need to end with a newline. But since it's
# possible (and valid in Python) that there's no newline at the
# end of a file, we have to recover even if the user doesn't want
# error recovery.
if self.stack[-1].dfa.from_rule == 'simple_stmt':
try:
plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
except KeyError:
pass
else:
if plan.next_dfa.is_final and not plan.dfa_pushes:
# We are ignoring here that the newline would be
# required for a simple_stmt.
self.stack[-1].dfa = plan.next_dfa
self._add_token(token)
return
if not self._error_recovery:
return super().error_recovery(token)
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for until_index, stack_node in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
if stack_node.nonterminal == 'file_input':
break
elif stack_node.nonterminal == 'suite':
# In the case where we just have a newline we don't want to
# do error recovery here. In all other cases, we want to do
# error recovery.
if len(stack_node.nodes) != 1:
break
return until_index
until_index = current_suite(self.stack)
if self._stack_removal(until_index + 1):
self._add_token(token)
else:
typ, value, start_pos, prefix = token
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
self.stack[-1].nodes.append(error_leaf)
tos = self.stack[-1]
if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
# error recovery above.
try:
tos.dfa = tos.dfa.arcs['stmt']
except KeyError:
# We're already in a final state.
pass
def _stack_removal(self, start_index):
all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
if all_nodes:
node = tree.PythonErrorNode(all_nodes)
for n in all_nodes:
n.parent = node
self.stack[start_index - 1].nodes.append(node)
self.stack[start_index:] = []
return bool(all_nodes)
def _recovery_tokenize(self, tokens):
for token in tokens:
typ = token[0]
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
self._indent_counter -= 1
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
yield token
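# Illustrative usage (via parso's public API rather than this Parser class
# directly; exact tree shapes depend on the parso version):
#
#   import parso
#   module = parso.parse('def f(x):\n    return x + 1\n')
#   func = next(module.iter_funcdefs())
#   print(func.name.value)   # 'f'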
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/parso/parso/python/parser.py | Python | gpl-3.0 | 8,227 | 0.000851 |
# -*- coding: utf-8 -*-
#
#
# Project name: OpenVAS2Report: A set of tools to manager OpenVAS XML report files.
# Project URL: https://github.com/cr0hn/openvas_to_report
#
# Copyright (c) 2015, cr0hn<-AT->cr0hn.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
| cr0hn/openvas_to_report | openvas_to_report/examples/__init__.py | Python | bsd-3-clause | 1,761 | 0.007382 |
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>
from common import *
if len(sys.argv) < 2:
log('ERROR output directory is required')
time.sleep(3)
exit()
# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# log in and load personal library
api = open_api()
library = load_personal_library()
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
# skip empty and no-name playlists
if not playlist_name: return
if len(playlist_tracks) == 0: return
# setup output files
playlist_name = playlist_name.replace('/', '')
open_log(os.path.join(output_dir,playlist_name+u'.log'))
outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
encoding='utf-8',mode='w')
# keep track of stats
stats = create_stats()
export_skipped = 0
# keep track of songids incase we need to skip duplicates
song_ids = []
log('')
log('============================================================')
log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
+playlist_name)
log('============================================================')
# add the playlist description as a "comment"
if playlist_description:
outfile.write(tsep)
outfile.write(playlist_description)
outfile.write(os.linesep)
for tnum, pl_track in enumerate(playlist_tracks):
track = pl_track.get('track')
# we need to look up these track in the library
if not track:
library_track = [
item for item in library if item.get('id')
in pl_track.get('trackId')]
if len(library_track) == 0:
log(u'!! '+str(tnum+1)+repr(pl_track))
export_skipped += 1
continue
track = library_track[0]
result_details = create_result_details(track)
if not allow_duplicates and result_details['songid'] in song_ids:
log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
export_skipped += 1
continue
# update the stats
update_stats(track,stats)
# export the track
song_ids.append(result_details['songid'])
outfile.write(create_details_string(result_details))
outfile.write(os.linesep)
# calculate the stats
stats_results = calculate_stats_results(stats,len(playlist_tracks))
# output the stats to the log
log('')
log_stats(stats_results)
log(u'export skipped: '+unicode(export_skipped))
# close the files
close_log()
outfile.close()
# the personal library is used so we can lookup tracks that fail to return
# info from the ...playlist_contents() call
playlist_contents = api.get_all_user_playlist_contents()
for playlist in playlist_contents:
playlist_name = playlist.get('name')
playlist_description = playlist.get('description')
playlist_tracks = playlist.get('tracks')
playlist_handler(playlist_name, playlist_description, playlist_tracks)
if export_thumbs_up:
# get thumbs up playlist
thumbs_up_tracks = []
for track in library:
if track.get('rating') is not None and int(track.get('rating')) > 1:
thumbs_up_tracks.append(track)
# modify format of each dictionary to match the data type
# of the other playlists
thumbs_up_tracks_formatted = []
for t in thumbs_up_tracks:
thumbs_up_tracks_formatted.append({'track': t})
playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)
if export_all:
all_tracks_formatted = []
for t in library:
all_tracks_formatted.append({'track': t})
playlist_handler('All', 'All tracks', all_tracks_formatted)
close_api()
| soulfx/gmusic-playlist | ExportLists.py | Python | mit | 3,890 | 0.004627 |
# Copyright (C) 2014 Dustin Spicuzza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import gio
import cgi
import json
from os.path import dirname, join
from contextlib import closing
from xl.nls import gettext as _
from xl import (
providers,
settings
)
from xl.metadata.tags import tag_data
from xlgui.widgets import menu
from analyzer_dialog import AnalyzerDialog
class PlaylistAnalyzerPlugin(object):
def __init__(self):
self.menu_items = []
self.dialog = None
self._get_track_groups = None
self.d3_loc = join(dirname(__file__), 'ext', 'd3.min.js')
def enable(self, exaile):
self.exaile = exaile
def on_gui_loaded(self):
# register menu items
item = menu.simple_menu_item('pz-run', [], _('Analyze playlists'),
callback=self.on_analyze_playlists)
item.register('menubar-tools-menu')
self.menu_items.append(item)
item = menu.simple_menu_item('pz-run', ['export-files'], _('Analyze playlist'),
callback=self.on_analyze_playlist)
item.register('playlist-panel-context-menu')
self.menu_items.append(item)
# -> this could have a submenu that gets filled in with all
# of the presets
def on_exaile_loaded(self):
pass
def disable(self, exaile):
if self.dialog is not None:
self.dialog.destroy()
self.dialog = None
for menu_item in self.menu_items:
menu_item.unregister()
#
# Misc
#
def get_track_groups(self, track):
if self._get_track_groups is None:
if 'grouptagger' not in self.exaile.plugins.enabled_plugins:
raise ValueError("GroupTagger plugin must be loaded to use the GroupTagger tag")
self._get_track_groups = self.exaile.plugins.enabled_plugins['grouptagger'].get_track_groups
return self._get_track_groups(track)
#
# Menu functions
#
def on_analyze_playlist(self, widget, name, parent, context):
if self.dialog is None:
self.dialog = AnalyzerDialog(self, context['selected-playlist'])
def on_analyze_playlists(self, widget, name, parent, context):
if self.dialog is None:
self.dialog = AnalyzerDialog(self)
#
# Functions to generate the analysis
#
def get_tag(self, track, tagname, extra):
data = tag_data.get(tagname)
if data is not None:
if data.type == 'int':
ret = track.get_tag_raw(tagname, join=True)
if ret is not None:
if extra == 0:
return int(ret)
else:
return int(ret) - (int(ret) % extra)
return
if data.use_disk:
return track.get_tag_disk(tagname)
if tagname == '__grouptagger':
return list(self.get_track_groups(track))
return track.get_tag_raw(tagname, join=True)
def generate_data(self, tracks, tagdata):
data = []
for track in tracks:
if track is None:
data.append(None)
else:
data.append([self.get_tag(track, tag, extra) for tag, extra in tagdata])
return data
def write_to_file(self, tmpl, uri, **kwargs):
'''
Opens a template file, performs substitution, writes it to the
output URI, and also writes d3.min.js to the output directory.
:param tmpl: Local pathname to template file
:param uri: URI of output file suitable for passing to gio.File
:param kwargs: Named parameters to substitute in template
'''
# read the template file
with open(tmpl, 'rb') as fp:
contents = fp.read()
try:
contents = contents % kwargs
        except Exception:
raise RuntimeError("Format string error in template (probably has unescaped % in it)")
outfile = gio.File(uri)
parent_dir = outfile.get_parent()
if parent_dir:
parent_dir = gio.File(parent_dir.get_uri() + "/d3.min.js")
with closing(outfile.replace('', False)) as fp:
fp.write(contents)
# copy d3 to the destination
# -> TODO: add checkbox to indicate whether it should write d3 there or not
if parent_dir:
with open(self.d3_loc, 'rb') as d3fp:
with closing(parent_dir.replace('', False)) as pfp:
pfp.write(d3fp.read())
# New plugin API; requires exaile 3.4.0 or later
plugin_class = PlaylistAnalyzerPlugin
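# Illustrative sketch (not part of the plugin API) of how write_to_file is meant
# to be used; the template path, output URI and keyword names are examples only:
#
#   tmpl = join(dirname(__file__), 'templates', 'chart.html')
#   plugin.write_to_file(tmpl, 'file:///tmp/chart.html',
#                        title='My playlist', data=json.dumps(data))
#
# The template would then contain placeholders such as %(title)s and %(data)s,
# and d3.min.js is copied next to the output file.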
| eri-trabiccolo/exaile | plugins/playlistanalyzer/__init__.py | Python | gpl-2.0 | 6,185 | 0.008892 |
# #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module for doing parallel builds. This uses a PBS-like cluster. You should be able to submit jobs (which can have
dependencies)
Support for PBS is provided via the PbsJob class. If you want you could create other job classes and use them here.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import math
import os
import subprocess
import easybuild.tools.config as config
from easybuild.framework.easyblock import get_easyblock_instance
from easybuild.framework.easyconfig.easyconfig import ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import get_repository, get_repositorypath
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.pbs_job import PbsJob, connect_to_server, disconnect_from_server, get_ppn
from easybuild.tools.repository.repository import init_repository
from vsc.utils import fancylogger
_log = fancylogger.getLogger('parallelbuild', fname=False)
def build_easyconfigs_in_parallel(build_command, easyconfigs, output_dir=None, prepare_first=True):
"""
    Build the given easyconfigs in parallel by submitting one job per easyconfig.
    The easyconfigs must be ready to be built, i.e. they have no unresolved dependencies.
    @param build_command: build command to use
    @param easyconfigs: list of easyconfig files
    @param output_dir: output directory
    returns the jobs
"""
_log.info("going to build these easyconfigs in parallel: %s", easyconfigs)
job_ids = {}
    # dependencies have already been resolved,
    # so we can linearly walk over the list and use previous job IDs
jobs = []
# create a single connection, and reuse it
conn = connect_to_server()
if conn is None:
_log.error("connect_to_server returned %s, can't submit jobs." % (conn))
    # determine ppn once, and pass it to each job being created;
    # this avoids having to figure out ppn over and over again, creating a temporary connection to the server every time
ppn = get_ppn()
def tokey(dep):
"""Determine key for specified dependency."""
return ActiveMNS().det_full_module_name(dep)
for ec in easyconfigs:
        # this is very important, otherwise we might have race conditions,
        # e.g. GCC-4.5.3 finds cloog.tar.gz but it was incorrectly downloaded by GCC-4.6.3;
        # running this step here prevents that
if prepare_first:
prepare_easyconfig(ec)
# the new job will only depend on already submitted jobs
_log.info("creating job for ec: %s" % str(ec))
new_job = create_job(build_command, ec, output_dir=output_dir, conn=conn, ppn=ppn)
        # unresolved_deps may contain dependencies that do not need to be built
job_deps = [job_ids[dep] for dep in map(tokey, ec['unresolved_deps']) if dep in job_ids]
new_job.add_dependencies(job_deps)
        # place a user hold on the job to prevent it from starting too quickly;
        # we might still need it in the queue to set it as a dependency for another job;
        # only set the hold for jobs without dependencies, other jobs have a dependency hold set anyway
with_hold = False
if not job_deps:
with_hold = True
# actually (try to) submit job
new_job.submit(with_hold)
_log.info("job for module %s has been submitted (job id: %s)" % (new_job.module, new_job.jobid))
# update dictionary
job_ids[new_job.module] = new_job.jobid
new_job.cleanup()
jobs.append(new_job)
# release all user holds on jobs after submission is completed
for job in jobs:
if job.has_holds():
_log.info("releasing hold on job %s" % job.jobid)
job.release_hold()
disconnect_from_server(conn)
return jobs
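# Hedged usage sketch (values are illustrative): given processed easyconfigs in
# dependency order, one job is submitted per easyconfig and jobs are chained via
# their dependencies, e.g.
#
#   jobs = build_easyconfigs_in_parallel("eb %(spec)s --testoutput=%(output_dir)s",
#                                        ordered_ecs, output_dir='/tmp/eb-jobs')
#   for job in jobs:
#       print job.module, job.jobid
#
# The build command is a format string; '%(spec)s' and '%(output_dir)s' are
# substituted per easyconfig in create_job().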
def submit_jobs(ordered_ecs, cmd_line_opts, testing=False):
"""
Submit jobs.
@param ordered_ecs: list of easyconfigs, in the order they should be processed
    @param cmd_line_opts: list of command line options (in 'longopt=value' form)
    @param testing: if True, skip actually submitting the jobs
    """
curdir = os.getcwd()
# the options to ignore (help options can't reach here)
ignore_opts = ['robot', 'job']
# generate_cmd_line returns the options in form --longopt=value
opts = [x for x in cmd_line_opts if not x.split('=')[0] in ['--%s' % y for y in ignore_opts]]
# compose string with command line options, properly quoted and with '%' characters escaped
opts_str = subprocess.list2cmdline(opts).replace('%', '%%')
command = "unset TMPDIR && cd %s && eb %%(spec)s %s --testoutput=%%(output_dir)s" % (curdir, opts_str)
_log.info("Command template for jobs: %s" % command)
job_info_lines = []
if testing:
_log.debug("Skipping actual submission of jobs since testing mode is enabled")
else:
jobs = build_easyconfigs_in_parallel(command, ordered_ecs)
job_info_lines = ["List of submitted jobs:"]
job_info_lines.extend(["%s (%s): %s" % (job.name, job.module, job.jobid) for job in jobs])
job_info_lines.append("(%d jobs submitted)" % len(jobs))
return '\n'.join(job_info_lines)
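# Hedged worked example of the escaping above (values are illustrative): if the
# user passed --prefix=100%, list2cmdline plus the replace() call turn it into
# '--prefix=100%%', and after the first substitution the template reads
#   "unset TMPDIR && cd /home/user && eb %(spec)s --prefix=100%% --testoutput=%(output_dir)s"
# The per-easyconfig 'build_command % {...}' in create_job() then expands
# %(spec)s and %(output_dir)s and collapses '%%' back to a literal '%'.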
def create_job(build_command, easyconfig, output_dir=None, conn=None, ppn=None):
"""
    Creates a job to build a *single* easyconfig
@param build_command: format string for command, full path to an easyconfig file will be substituted in it
@param easyconfig: easyconfig as processed by process_easyconfig
@param output_dir: optional output path; --regtest-output-dir will be used inside the job with this variable
@param conn: open connection to PBS server
@param ppn: ppn setting to use (# 'processors' (cores) per node to use)
returns the job
"""
if output_dir is None:
output_dir = 'easybuild-build'
# capture PYTHONPATH, MODULEPATH and all variables starting with EASYBUILD
easybuild_vars = {}
for name in os.environ:
if name.startswith("EASYBUILD"):
easybuild_vars[name] = os.environ[name]
others = ["PYTHONPATH", "MODULEPATH"]
for env_var in others:
if env_var in os.environ:
easybuild_vars[env_var] = os.environ[env_var]
_log.info("Dictionary of environment variables passed to job: %s" % easybuild_vars)
# obtain unique name based on name/easyconfig version tuple
ec_tuple = (easyconfig['ec']['name'], det_full_ec_version(easyconfig['ec']))
name = '-'.join(ec_tuple)
# create command based on build_command template
command = build_command % {
'spec': easyconfig['spec'],
'output_dir': os.path.join(os.path.abspath(output_dir), name),
}
# just use latest build stats
repo = init_repository(get_repository(), get_repositorypath())
buildstats = repo.get_buildstats(*ec_tuple)
resources = {}
if buildstats:
previous_time = buildstats[-1]['build_time']
resources['hours'] = int(math.ceil(previous_time * 2 / 60))
job = PbsJob(command, name, easybuild_vars, resources=resources, conn=conn, ppn=ppn)
job.module = easyconfig['ec'].full_mod_name
return job
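# Hedged example of the walltime estimate above (the unit of 'build_time' is an
# assumption here): a previous build that took 90 units requests
# int(math.ceil(90 * 2 / 60)) = 3, i.e. roughly twice the previous build time,
# rounded up to whole hours.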
def prepare_easyconfig(ec):
"""
Prepare for building specified easyconfig (fetch sources)
@param ec: parsed easyconfig (EasyConfig instance)
"""
try:
easyblock_instance = get_easyblock_instance(ec)
easyblock_instance.update_config_template_run_step()
easyblock_instance.fetch_step(skip_checksums=True)
_log.debug("Cleaning up log file %s..." % easyblock_instance.logfile)
easyblock_instance.close_log()
os.remove(easyblock_instance.logfile)
except (OSError, EasyBuildError), err:
        _log.error("An error occurred while preparing %s: %s" % (ec, err))
| pneerincx/easybuild-framework | easybuild/tools/parallelbuild.py | Python | gpl-2.0 | 8,870 | 0.003157 |
import sqlite3
TWITTER_CONSUMER_KEY = 'twitter_consumer_key'
TWITTER_CONSUMER_SECRET = 'twitter_consumer_secret'
TWITTER_ACCESS_TOKEN = 'twitter_access_token'
TWITTER_ACCESS_TOKEN_SECRET = 'twitter_access_token_secret'
LAST_LATITUDE = 'last_latitude'
LAST_LONGITUDE = 'last_longitude'
UPC_DATABASE_KEY = 'upc_database_key'
USER_TIMEOUT = 500
class Data(object):
conn = sqlite3.connect('CoD.db')
c = conn.cursor()
def __del__(self):
self.conn.commit()
self.c.close()
self.conn.close()
def _getter(self, key):
self.c.execute('SELECT value FROM kvs WHERE key=?', (key,))
out = self.c.fetchone()[0]
return out
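    # The helpers above assume a simple key/value schema along these lines (the
    # CREATE TABLE statements are not in this file, so treat this as an assumption):
    #
    #   CREATE TABLE kvs (key TEXT PRIMARY KEY, value TEXT);
    #
    # e.g. Data()._getter('last_latitude') returns the stored value for that key.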
@property
def last_latitude(self): return self._getter(LAST_LATITUDE)
@property
def last_longitude(self): return self._getter(LAST_LONGITUDE)
@property
def twitter_access_token(self): return self._getter(TWITTER_ACCESS_TOKEN)
@property
def twitter_access_token_secret(self):
return self._getter(TWITTER_ACCESS_TOKEN_SECRET)
@property
def twitter_consumer_key(self):
return self._getter(TWITTER_CONSUMER_KEY)
@property
def twitter_consumer_secret(self):
return self._getter(TWITTER_CONSUMER_SECRET)
@property
def upc_database_key(self):
return self._getter(UPC_DATABASE_KEY)
def get_beverage(self, upc):
self.c.execute('SELECT upc, description, untappd_id FROM beverages WHERE upc = ?',
(upc,))
ret = self.c.fetchone()
return Beverage(ret) if ret else None
def new_beverage(self, upc, description):
self.c.execute('''INSERT
INTO beverages (upc, description, untappd_id)
VALUES (?, ?, ?)''',
(upc, description, ''))
self.conn.commit()
def update_user(self, user_id):
print 'updating user ', user_id
self.c.execute('''UPDATE users
SET last_seen=datetime('now')
WHERE user_id=?''',
(user_id,))
def log(self, upc):
self.c.execute('''INSERT INTO log(upc, timestamp)
VALUES (?, datetime('now'))''',
(upc,))
def get_current_user(self):
        self.c.execute('''SELECT *
                          FROM users
                          WHERE last_seen BETWEEN datetime('now', ?)
                                              AND datetime('now')
                          ORDER BY last_seen DESC
                          LIMIT 1
                       ''',
                       ('-%d seconds' % USER_TIMEOUT,))
ret = self.c.fetchone()
if ret is not None: return User(ret)
def log_beverage(self, user, beverage):
self.c.execute('''INSERT
INTO drinks(user_id, beverage_id, timestamp)
VALUES (?, ?, datetime('now'))''',
                       (user.user_id, beverage.upc))
class Beverage(object):
def __init__(self, tup):
if type(tup) is tuple:
            self.upc, self.description, self.untappd_id = tup
class User(object):
def __init__(self, tup):
if type(tup) is tuple:
self.user_id, self.name, self.email, self.last_seen, self.twitter_handle = tup
data = Data()
| drewhutchison/coolerofdoom | data.py | Python | mit | 3,415 | 0.009663 |
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2011 Jekin Trivedi <jekintrivedi@gmail.com> (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
import server
import sys
class Repairing(server.Task):
"""A very simple Repair system for Repairing structures."""
materials = ["wood"]
def consume_materials (self) :
""" A method which gets the material to be consumed from the inventory & returns the consume operation """
for item in self.character.contains:
if item.type[0] == str(self.materials[0]):
set = Operation("set", Entity(item.id, status = -1), to = item)
return set
else :
print "No Wood in inventory"
return 0
    def repair_operation(self, op):
        """ The repair op is FROM the character,
        TO the structure that is getting repaired, which we
        term the target. """
        if len(op) < 1:
            sys.stderr.write("Repair task has no target in repair op")
            return
# FIXME Use weak references, once we have them
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
def tick_operation(self, op):
""" This method is called repeatedly, each time a Repair turn occurs.
In this example the interval is fixed, but it can be varied. """
# print "Repair.tick"
res=Oplist()
current_status = 0
if self.target() is None:
# print "Target is no more"
self.irrelevant()
return
if self.character.stamina <= 0:
# print "I am exhausted"
self.irrelevant()
return
if square_distance(self.character.location, self.target().location) > self.target().location.bbox.square_bounding_radius():
self.progress = current_status
self.rate = 0
return self.next_tick(1.75)
        # Some entities do not have a status defined. If it is not present we assume that the entity is unharmed & stop the task
if hasattr ( self.target(), 'status' ) :
current_status = self.target().status
else:
            set = Operation("set", Entity(self.target().id, status = 1),
                            to = self.target())
res.append(set)
current_status = 1.0
self.irrelevant()
if current_status < 0.9:
set=Operation("set", Entity(self.target().id, status=current_status+0.1), to=self.target())
res.append(set)
consume = self.consume_materials ()
if consume :
res.append(consume)
else :
self.irrelevant()
else:
set = Operation("set", Entity(self.target().id, status = 1),
to = self.target())
res.append(set)
self.irrelevant()
self.progress = current_status
self.rate = 0.1 / 1.75
res.append(self.next_tick(1.75))
return res
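    # Rough arithmetic for the constants above (my reading of the code, not
    # authoritative): ticks are scheduled 1.75 seconds apart and each one raises
    # the target's status by 0.1, so rate = 0.1 / 1.75 status per second; a
    # structure repaired from 0.0 to full takes about 10 ticks (~17.5 seconds)
    # and consumes one unit of material per tick.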
| alriddoch/cyphesis | rulesets/mason/world/tasks/Repairing.py | Python | gpl-2.0 | 3,147 | 0.013028 |
# -*- coding: utf-8 -*-
"""
wakatime.compat
~~~~~~~~~~~~~~~
For working with Python2 and Python3.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import codecs
import os
import platform
import subprocess
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_win = platform.system() == 'Windows'
if is_py2: # pragma: nocover
def u(text):
if text is None:
return None
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
try:
return unicode(text)
except:
return text.decode('utf-8', 'replace')
open = codecs.open
basestring = basestring
elif is_py3: # pragma: nocover
def u(text):
if text is None:
return None
if isinstance(text, bytes):
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
pass
try:
return str(text)
except:
return text.decode('utf-8', 'replace')
open = open
basestring = (str, bytes)
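# Hedged doctest-style sketch of u() (shown for the bytes case; both branches
# behave the same for bytes input):
#
#   >>> u(b'caf\xc3\xa9') == u'caf\xe9'
#   True
#   >>> u(None) is None
#   True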
try:
from importlib import import_module
except ImportError: # pragma: nocover
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import.
It specifies the package to use as the anchor point from which to
resolve the relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' "
"argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
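    # Worked example of the fallback above (package names are hypothetical):
    #   _resolve_name('utils', 'pkg.sub', 2)  ->  'pkg.utils'
    # so import_module('..utils', package='pkg.sub') imports 'pkg.utils', while
    # import_module('.utils', package='pkg.sub') imports 'pkg.sub.utils'.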
try:
from .packages import simplejson as json
except (ImportError, SyntaxError): # pragma: nocover
import json
class Popen(subprocess.Popen):
"""Patched Popen to prevent opening cmd window on Windows platform."""
def __init__(self, *args, **kwargs):
startupinfo = kwargs.get('startupinfo')
if is_win or True:
try:
startupinfo = startupinfo or subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
pass
kwargs['startupinfo'] = startupinfo
if 'env' not in kwargs:
kwargs['env'] = os.environ.copy()
kwargs['env']['LANG'] = 'en-US' if is_win else 'en_US.UTF-8'
subprocess.Popen.__init__(self, *args, **kwargs)
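# Hedged usage sketch: this subclass is a drop-in replacement for
# subprocess.Popen, e.g.
#
#   proc = Popen(['git', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   out, err = proc.communicate()
#
# the only differences being the STARTUPINFO tweak (no console window on
# Windows) and the LANG default applied above when no env is passed in.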
| wakatime/komodo-wakatime | components/wakatime/compat.py | Python | bsd-3-clause | 3,553 | 0.001689 |
#!/usr/bin/env python
import boto
from boto.s3.connection import OrdinaryCallingFormat
from fabric.api import prompt
def confirm(message):
"""
    Verify a user's intentions.
"""
answer = prompt(message, default="Not at all")
if answer.lower() not in ('y', 'yes', 'buzz off', 'screw you'):
exit()
def replace_in_file(filename, find, replace):
with open(filename, 'r') as f:
contents = f.read()
contents = contents.replace(find, replace)
with open(filename, 'w') as f:
f.write(contents)
def get_bucket(bucket_name):
"""
    Establishes a connection and gets the S3 bucket.
"""
if '.' in bucket_name:
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
s3 = boto.connect_s3()
return s3.get_bucket(bucket_name)
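# Hedged note on the branch above: bucket names containing dots break
# subdomain-style HTTPS requests (the wildcard certificate does not match
# 'my.bucket.example.s3.amazonaws.com'), so OrdinaryCallingFormat switches boto
# to path-style requests. Illustrative use, assuming boto credentials are set
# up in the environment:
#
#   bucket = get_bucket('apps.example.com')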
| swastvedt/dailygraphics | fabfile/utils.py | Python | mit | 813 | 0.00369 |
# coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from django.utils.translation import ugettext as _
@staff_member_required
def about_view(request):
"""
:param request: Django Request
:return: Django HttpResponse
:rtype: HttpResponse
"""
context = admin.site.each_context(request)
context.update({
'title': _('About'),
'version': "4.1",
})
template = 'admin/help/about.html'
return render(request, template, context)
| 82Flex/DCRM | WEIPDCRM/views/admin/help/about.py | Python | agpl-3.0 | 1,308 | 0 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pprint
import shutil
import sys
import tempfile
import numpy as np
import tensorflow as tf
from px.nmt import attention_model
from px.nmt import gnmt_model
from px.nmt import model
from px.nmt.utils import common_test_utils
from px.nmt.utils import nmt_utils
from px.nmt.utils import trie_decoder_utils
float32 = np.float32
int32 = np.int32
array = np.array
class ModelTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
cls.actual_vars_values = {}
cls.expected_vars_values = {
'AttentionMechanismBahdanau/att_layer_weight/shape': (10, 5),
'AttentionMechanismBahdanau/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismBahdanau/last_dec_weight/shape': (10, 20),
'AttentionMechanismBahdanau/last_dec_weight/sum':
0.058069646,
'AttentionMechanismBahdanau/last_enc_weight/shape': (10, 20),
'AttentionMechanismBahdanau/last_enc_weight/sum':
0.058028102,
'AttentionMechanismLuong/att_layer_weight/shape': (10, 5),
'AttentionMechanismLuong/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismLuong/last_dec_weight/shape': (10, 20),
'AttentionMechanismLuong/last_dec_weight/sum':
0.058069646,
'AttentionMechanismLuong/last_enc_weight/shape': (10, 20),
'AttentionMechanismLuong/last_enc_weight/sum':
0.058028102,
'AttentionMechanismNormedBahdanau/att_layer_weight/shape': (10, 5),
'AttentionMechanismNormedBahdanau/att_layer_weight/sum':
-0.64981973,
'AttentionMechanismNormedBahdanau/last_dec_weight/shape': (10, 20),
'AttentionMechanismNormedBahdanau/last_dec_weight/sum':
0.058067322,
'AttentionMechanismNormedBahdanau/last_enc_weight/shape': (10, 20),
'AttentionMechanismNormedBahdanau/last_enc_weight/sum':
0.058022559,
'AttentionMechanismScaledLuong/att_layer_weight/shape': (10, 5),
'AttentionMechanismScaledLuong/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismScaledLuong/last_dec_weight/shape': (10, 20),
'AttentionMechanismScaledLuong/last_dec_weight/sum':
0.058069646,
'AttentionMechanismScaledLuong/last_enc_weight/shape': (10, 20),
'AttentionMechanismScaledLuong/last_enc_weight/sum':
0.058028102,
'ContextModel_context_bilstm_last/ctx_resizer/shape': (10, 5),
'ContextModel_context_bilstm_last/ctx_resizer/sum':
-0.64984089,
'ContextModel_context_bilstm_pool/ctx_resizer/shape': (10, 5),
'ContextModel_context_bilstm_pool/ctx_resizer/sum':
-0.64984130,
'ContextModel_bilstm_last_decoder_hidden_state/last_dec_weight/shape':
(10, 20),
'ContextModel_bilstm_last_decoder_hidden_state/last_dec_weight/sum':
0.058056116,
'ContextModel_bilstm_last_decoder_hidden_state/last_enc_weight/shape':
(10, 20),
'ContextModel_bilstm_last_decoder_hidden_state/last_enc_weight/sum':
0.058025479,
'ContextModel_bilstm_pool_encoder_output/last_dec_weight/shape': (10,
20),
'ContextModel_bilstm_pool_encoder_output/last_dec_weight/sum':
0.058035135,
'ContextModel_bilstm_pool_encoder_output/last_enc_weight/shape': (10,
20),
'ContextModel_bilstm_pool_encoder_output/last_enc_weight/sum':
0.058024108,
'GNMTModel_gnmt/last_dec_weight/shape': (15, 20),
'GNMTModel_gnmt/last_dec_weight/sum':
-0.48634407,
'GNMTModel_gnmt/last_enc_weight/shape': (10, 20),
'GNMTModel_gnmt/last_enc_weight/sum':
0.058025002,
'GNMTModel_gnmt/mem_layer_weight/shape': (5, 5),
'GNMTModel_gnmt/mem_layer_weight/sum':
-0.44815454,
'GNMTModel_gnmt_v2/last_dec_weight/shape': (15, 20),
'GNMTModel_gnmt_v2/last_dec_weight/sum':
-0.48634392,
'GNMTModel_gnmt_v2/last_enc_weight/shape': (10, 20),
'GNMTModel_gnmt_v2/last_enc_weight/sum':
0.058024824,
'GNMTModel_gnmt_v2/mem_layer_weight/shape': (5, 5),
'GNMTModel_gnmt_v2/mem_layer_weight/sum':
-0.44815454,
'NoAttentionNoResidualUniEncoder/last_dec_weight/shape': (10, 20),
'NoAttentionNoResidualUniEncoder/last_dec_weight/sum':
0.057424068,
'NoAttentionNoResidualUniEncoder/last_enc_weight/shape': (10, 20),
'NoAttentionNoResidualUniEncoder/last_enc_weight/sum':
0.058453858,
'NoAttentionResidualBiEncoder/last_dec_weight/shape': (10, 20),
'NoAttentionResidualBiEncoder/last_dec_weight/sum':
0.058025062,
'NoAttentionResidualBiEncoder/last_enc_weight/shape': (10, 20),
'NoAttentionResidualBiEncoder/last_enc_weight/sum':
0.058053195,
'UniEncoderBottomAttentionArchitecture/last_dec_weight/shape': (10, 20),
'UniEncoderBottomAttentionArchitecture/last_dec_weight/sum':
0.058024943,
'UniEncoderBottomAttentionArchitecture/last_enc_weight/shape': (10, 20),
'UniEncoderBottomAttentionArchitecture/last_enc_weight/sum':
0.058025122,
'UniEncoderBottomAttentionArchitecture/mem_layer_weight/shape': (5, 5),
'UniEncoderBottomAttentionArchitecture/mem_layer_weight/sum':
-0.44815454,
'UniEncoderStandardAttentionArchitecture/last_dec_weight/shape': (10,
20),
'UniEncoderStandardAttentionArchitecture/last_dec_weight/sum':
0.058025002,
'UniEncoderStandardAttentionArchitecture/last_enc_weight/shape': (10,
20),
'UniEncoderStandardAttentionArchitecture/last_enc_weight/sum':
0.058024883,
'UniEncoderStandardAttentionArchitecture/mem_layer_weight/shape': (5,
5),
'UniEncoderStandardAttentionArchitecture/mem_layer_weight/sum':
-0.44815454,
}
cls.actual_train_values = {}
cls.expected_train_values = {
'AttentionMechanismBahdanau/loss': 8.8519039,
'AttentionMechanismLuong/loss': 8.8519039,
'AttentionMechanismNormedBahdanau/loss': 8.851902,
'AttentionMechanismScaledLuong/loss': 8.8519039,
'ContextModel_bilstm_last_decoder_hidden_state/loss': 8.8519096,
'ContextModel_bilstm_pool_encoder_output/loss': 8.8519124,
'GNMTModel_gnmt/loss': 8.8519087,
'GNMTModel_gnmt_v2/loss': 8.8519087,
'NoAttentionNoResidualUniEncoder/loss': 8.8516064,
'NoAttentionResidualBiEncoder/loss': 8.851984,
'UniEncoderStandardAttentionArchitecture/loss': 8.8519087,
'InitializerGlorotNormal/loss': 8.9779415,
'InitializerGlorotUniform/loss': 8.7643699,
}
cls.actual_eval_values = {}
cls.expected_eval_values = {
'AttentionMechanismBahdanau/loss': 8.8517132,
'AttentionMechanismBahdanau/predict_count': 11.0,
'AttentionMechanismLuong/loss': 8.8517132,
'AttentionMechanismLuong/predict_count': 11.0,
'AttentionMechanismNormedBahdanau/loss': 8.8517132,
'AttentionMechanismNormedBahdanau/predict_count': 11.0,
'AttentionMechanismScaledLuong/loss': 8.8517132,
'AttentionMechanismScaledLuong/predict_count': 11.0,
'ContextModel_bilstm_last_decoder_hidden_state/loss': 8.8517217,
'ContextModel_bilstm_last_decoder_hidden_state/predict_count': 11.0,
'ContextModel_bilstm_pool_encoder_output/loss': 8.8517265,
'ContextModel_bilstm_pool_encoder_output/predict_count': 11.0,
'GNMTModel_gnmt/loss': 8.8443403,
'GNMTModel_gnmt/predict_count': 11.0,
'GNMTModel_gnmt_v2/loss': 8.8443756,
'GNMTModel_gnmt_v2/predict_count': 11.0,
'NoAttentionNoResidualUniEncoder/loss': 8.8440113,
'NoAttentionNoResidualUniEncoder/predict_count': 11.0,
'NoAttentionResidualBiEncoder/loss': 8.8291245,
'NoAttentionResidualBiEncoder/predict_count': 11.0,
'UniEncoderBottomAttentionArchitecture/loss': 8.844492,
'UniEncoderBottomAttentionArchitecture/predict_count': 11.0,
'UniEncoderStandardAttentionArchitecture/loss': 8.8517151,
'UniEncoderStandardAttentionArchitecture/predict_count': 11.0
}
cls.actual_infer_values = {}
cls.expected_infer_values = {
'AttentionMechanismBahdanau/logits_sum': -0.026374687,
'AttentionMechanismLuong/logits_sum': -0.026374735,
'AttentionMechanismNormedBahdanau/logits_sum': -0.026376063,
'AttentionMechanismScaledLuong/logits_sum': -0.026374735,
'ContextModel_bilstm_last_decoder_hidden_state/logits_sum': -0.02681549,
'ContextModel_bilstm_pool_encoder_output/logits_sum': -0.024540234,
'GNMTModel_gnmt/logits_sum': -1.10848486,
'GNMTModel_gnmt_v2/logits_sum': -1.10950875,
'NoAttentionNoResidualUniEncoder/logits_sum': -1.0808625,
'NoAttentionResidualBiEncoder/logits_sum': -2.8147559,
'UniEncoderBottomAttentionArchitecture/logits_sum': -0.97026241,
'UniEncoderStandardAttentionArchitecture/logits_sum': -0.02665353
}
cls.actual_beam_sentences = {}
cls.expected_beam_sentences = {
'BeamSearchAttentionModel: batch 0 of beam 0': '',
'BeamSearchAttentionModel: batch 0 of beam 1': 'sos a sos a',
'BeamSearchAttentionModel: batch 1 of beam 0': '',
'BeamSearchAttentionModel: batch 1 of beam 1': 'b',
'BeamSearchBasicModel: batch 0 of beam 0': 'b b b b',
'BeamSearchBasicModel: batch 0 of beam 1': 'b b b sos',
'BeamSearchBasicModel: batch 0 of beam 2': 'b b b c',
'BeamSearchBasicModel: batch 1 of beam 0': 'b b b b',
'BeamSearchBasicModel: batch 1 of beam 1': 'a b b b',
'BeamSearchBasicModel: batch 1 of beam 2': 'b b b sos',
'TrieBeamSearchBasicModel: batch 0 of beam 0': 'b b b b',
'TrieBeamSearchBasicModel: batch 0 of beam 1': 'b b b sos',
'TrieBeamSearchBasicModel: batch 0 of beam 2': 'a b c b c',
'TrieBeamSearchBasicModel: batch 1 of beam 0': 'b b b b',
'TrieBeamSearchBasicModel: batch 1 of beam 1': 'b b b sos',
'TrieBeamSearchBasicModel: batch 1 of beam 2': 'a b c b c',
'TrieBeamSearchAttentionModel: batch 0 of beam 0': 'b b b sos',
'TrieBeamSearchAttentionModel: batch 0 of beam 1': 'b b b b',
'TrieBeamSearchAttentionModel: batch 0 of beam 2': 'a b c b c',
'TrieBeamSearchAttentionModel: batch 1 of beam 0': 'b b b sos',
'TrieBeamSearchAttentionModel: batch 1 of beam 1': 'b b b b',
'TrieBeamSearchAttentionModel: batch 1 of beam 2': 'a b c b c',
'BeamSearchGNMTModel: batch 0 of beam 0': '',
'BeamSearchGNMTModel: batch 1 of beam 0': '',
'TrieBeamSearchAttentionModelExclude: batch 0 of beam 0': 'b b b b',
'TrieBeamSearchAttentionModelExclude: batch 0 of beam 1': 'a b c b c',
'TrieBeamSearchAttentionModelExclude: batch 0 of beam 2': '',
'TrieBeamSearchAttentionModelExclude: batch 1 of beam 0': 'b b b b',
'TrieBeamSearchAttentionModelExclude: batch 1 of beam 1': 'a b c b c',
'TrieBeamSearchAttentionModelExclude: batch 1 of beam 2': '',
}
cls.actual_sample_sentences = {}
cls.expected_sample_sentences = {
'TrieSampleBasicModel: batch 0': 'a b c b c',
'TrieSampleAttentionModel: batch 0': 'a b c b c',
'TrieGreedyAttentionModel: batch 0': 'a b c b c',
'TrieSampleGNMTModel: batch 0': 'b c b c',
'TrieSampleGNMTModel: batch 1': 'a b c b c',
'TrieSampleGNMTModel: batch 2': 'a b c b',
'TrieSampleGNMTModelExcludeOne: batch 0': 'a b c b c',
'TrieSampleGNMTModelExcludeTwo: batch 0': 'a b c b',
}
cls.tmpdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
print('ModelTest - actual_vars_values: ')
pprint.pprint(cls.actual_vars_values)
sys.stdout.flush()
print('ModelTest - actual_train_values: ')
pprint.pprint(cls.actual_train_values)
sys.stdout.flush()
print('ModelTest - actual_eval_values: ')
pprint.pprint(cls.actual_eval_values)
sys.stdout.flush()
print('ModelTest - actual_infer_values: ')
pprint.pprint(cls.actual_infer_values)
sys.stdout.flush()
print('ModelTest - actual_beam_sentences: ')
pprint.pprint(cls.actual_beam_sentences)
sys.stdout.flush()
print('ModelTest - actual_sample_sentences: ')
pprint.pprint(cls.actual_sample_sentences)
sys.stdout.flush()
shutil.rmtree(cls.tmpdir)
def assertAllClose(self, *args, **kwargs):
kwargs['atol'] = 5e-2
kwargs['rtol'] = 5e-2
return super(ModelTest, self).assertAllClose(*args, **kwargs)
def _assertModelVariableNames(self, expected_var_names, model_var_names,
name):
print('{} variable names are: '.format(name), model_var_names)
self.assertEqual(len(expected_var_names), len(model_var_names))
self.assertEqual(sorted(expected_var_names), sorted(model_var_names))
def _assertModelVariable(self, variable, sess, name):
var_shape = tuple(variable.get_shape().as_list())
var_res = sess.run(variable)
var_weight_sum = np.sum(var_res)
print('{} weight sum is: '.format(name), var_weight_sum)
expected_sum = self.expected_vars_values[name + '/sum']
expected_shape = self.expected_vars_values[name + '/shape']
self.actual_vars_values[name + '/sum'] = var_weight_sum
self.actual_vars_values[name + '/shape'] = var_shape
self.assertEqual(expected_shape, var_shape)
self.assertAllClose(expected_sum, var_weight_sum)
def _assertTrainStepsLoss(self, m, sess, name, num_steps=1):
for _ in range(num_steps):
_, loss, _, _, _, _, _, _, _, _, _ = m.train(sess)
print('{} {}-th step loss is: '.format(name, num_steps), loss)
expected_loss = self.expected_train_values[name + '/loss']
self.actual_train_values[name + '/loss'] = loss
self.assertAllClose(expected_loss, loss)
def _assertEvalLossAndPredictCount(self, m, sess, name):
loss, _, _, predict_count, _ = m.eval(sess)
print('{} eval loss is: '.format(name), loss)
print('{} predict count is: '.format(name), predict_count)
expected_loss = self.expected_eval_values[name + '/loss']
expected_predict_count = self.expected_eval_values[name + '/predict_count']
self.actual_eval_values[name + '/loss'] = loss
self.actual_eval_values[name + '/predict_count'] = predict_count
self.assertAllClose(expected_loss, loss)
self.assertAllClose(expected_predict_count, predict_count)
def _assertInferLogits(self, m, sess, name):
results = m.infer(sess)
logits_sum = np.sum(results[0])
print('{} infer logits sum is: '.format(name), logits_sum)
expected_logits_sum = self.expected_infer_values[name + '/logits_sum']
self.actual_infer_values[name + '/logits_sum'] = logits_sum
self.assertAllClose(expected_logits_sum, logits_sum)
def _assertBeamSearchOutputs(self, m, sess, assert_top_k_sentence, name):
_, _, _, nmt_outputs, _ = m.infer(sess)
for i in range(assert_top_k_sentence):
output_words = nmt_outputs[i]
for j in range(output_words.shape[0]):
sentence = nmt_utils.get_translation(
output_words, j, tgt_eos='eos', subword_option='')
sentence_key = ('%s: batch %d of beam %d' % (name, j, i))
self.actual_beam_sentences[sentence_key] = sentence
expected_sentence = self.expected_beam_sentences[sentence_key]
self.assertEqual(expected_sentence, sentence)
def _assertTrieSampleOutputs(self, m, sess, name, max_len):
_, _, _, nmt_outputs, _ = m.infer(sess)
output_words = nmt_outputs
for j in range(output_words.shape[0]):
sentence = nmt_utils.get_translation(
output_words, j, tgt_eos='eos', subword_option='')
sentence_key = ('%s: batch %d' % (name, j))
exp_sentence_key = ('%s: batch 0' % (name,))
self.actual_sample_sentences[sentence_key] = sentence
expected_sentence = self.expected_sample_sentences[exp_sentence_key]
self.assertEqual(expected_sentence, sentence)
def _createTestTrainModel(self, m_creator, hparams, sess):
train_mode = tf.contrib.learn.ModeKeys.TRAIN
(train_iterator, src_vocab_table, tgt_vocab_table,
reverse_tgt_vocab_table) = (
common_test_utils.create_test_iterator(hparams, train_mode))
train_m = m_creator(
hparams,
train_mode,
train_iterator,
src_vocab_table,
tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope='dynamic_seq2seq')
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(train_iterator.initializer)
return train_m
def _createTestEvalModel(self, m_creator, hparams, sess):
eval_mode = tf.contrib.learn.ModeKeys.EVAL
(eval_iterator, src_vocab_table, tgt_vocab_table,
reverse_tgt_vocab_table) = (
common_test_utils.create_test_iterator(hparams, eval_mode))
eval_m = m_creator(
hparams,
eval_mode,
eval_iterator,
src_vocab_table,
tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope='dynamic_seq2seq')
sess.run(tf.tables_initializer())
sess.run(eval_iterator.initializer)
return eval_m
def _createTestInferModel(self,
m_creator,
hparams,
sess,
init_global_vars=False,
trie_excludes=None):
infer_mode = tf.contrib.learn.ModeKeys.INFER
(infer_iterator, src_vocab_table, tgt_vocab_table,
reverse_tgt_vocab_table) = (
common_test_utils.create_test_iterator(
hparams, infer_mode, trie_excludes=trie_excludes))
if hparams.infer_mode.startswith('trie_'):
trie = trie_decoder_utils.DecoderTrie(hparams.tgt_vocab_file, hparams.eos)
trie.populate_from_text_file(hparams.trie_path)
else:
trie = None
infer_m = m_creator(
hparams,
infer_mode,
infer_iterator,
src_vocab_table,
tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope='dynamic_seq2seq',
trie=trie)
if init_global_vars:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(infer_iterator.initializer)
return infer_m
def _get_session_config(self):
config = tf.ConfigProto()
config.allow_soft_placement = True
return config
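  # Hedged sketch of the three-phase pattern the tests below follow ('MyCase'
  # is a hypothetical label; a real test also needs matching entries in the
  # expected_* dictionaries above):
  #
  #   hparams = common_test_utils.create_test_hparams(
  #       encoder_type='uni', num_layers=2, attention='luong',
  #       attention_architecture='standard', use_residual=False)
  #   with tf.Graph().as_default():
  #     with tf.Session(config=self._get_session_config()) as sess:
  #       train_m = self._createTestTrainModel(attention_model.AttentionModel,
  #                                            hparams, sess)
  #       self._assertTrainStepsLoss(train_m, sess, 'MyCase')
  #
  # followed by analogous EVAL and INFER graphs using _createTestEvalModel /
  # _createTestInferModel.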
## Testing 3 encoders:
  # uni: no attention, no residual, 1 layer
# bi: no attention, with residual, 4 layers
def testNoAttentionNoResidualUniEncoder(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(model.Model, hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'NoAttentionNoResidualUniEncoder')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
last_enc_weight = tf.get_variable(
'encoder/rnn/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable('decoder/basic_lstm_cell/kernel')
self._assertTrainStepsLoss(train_m, sess,
'NoAttentionNoResidualUniEncoder')
self._assertModelVariable(
last_enc_weight, sess,
'NoAttentionNoResidualUniEncoder/last_enc_weight')
self._assertModelVariable(
last_dec_weight, sess,
'NoAttentionNoResidualUniEncoder/last_dec_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(model.Model, hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'NoAttentionNoResidualUniEncoder')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(model.Model, hparams, sess)
self._assertInferLogits(infer_m, sess,
'NoAttentionNoResidualUniEncoder')
def testNoAttentionResidualBiEncoder(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='bi',
num_layers=4,
attention='',
attention_architecture='',
use_residual=True,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_2/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_2/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_3/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_3/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(model.Model, hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'NoAttentionResidualBiEncoder')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
last_enc_weight = tf.get_variable(
'encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
)
last_dec_weight = tf.get_variable(
'decoder/multi_rnn_cell/cell_3/basic_lstm_cell/kernel')
self._assertTrainStepsLoss(train_m, sess,
'NoAttentionResidualBiEncoder')
self._assertModelVariable(
last_enc_weight, sess,
'NoAttentionResidualBiEncoder/last_enc_weight')
self._assertModelVariable(
last_dec_weight, sess,
'NoAttentionResidualBiEncoder/last_dec_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(model.Model, hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'NoAttentionResidualBiEncoder')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(model.Model, hparams, sess)
self._assertInferLogits(infer_m, sess, 'NoAttentionResidualBiEncoder')
## Test attention mechanisms: luong, scaled_luong, bahdanau, normed_bahdanau
def testAttentionMechanismLuong(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
attention='luong',
attention_architecture='standard',
num_layers=2,
use_residual=False,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'AttentionMechanismLuong')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
# pylint: disable=line-too-long
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
att_layer_weight = tf.get_variable(
'decoder/attention/attention_layer/kernel')
# pylint: enable=line-too-long
self._assertTrainStepsLoss(train_m, sess, 'AttentionMechanismLuong')
self._assertModelVariable(last_enc_weight, sess,
'AttentionMechanismLuong/last_enc_weight')
self._assertModelVariable(last_dec_weight, sess,
'AttentionMechanismLuong/last_dec_weight')
self._assertModelVariable(att_layer_weight, sess,
'AttentionMechanismLuong/att_layer_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'AttentionMechanismLuong')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess, 'AttentionMechanismLuong')
def testAttentionMechanismScaledLuong(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
attention='scaled_luong',
attention_architecture='standard',
num_layers=2,
use_residual=False,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/luong_attention/attention_g:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'AttentionMechanismScaledLuong')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
# pylint: disable=line-too-long
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
att_layer_weight = tf.get_variable(
'decoder/attention/attention_layer/kernel')
# pylint: enable=line-too-long
self._assertTrainStepsLoss(train_m, sess,
'AttentionMechanismScaledLuong')
self._assertModelVariable(
last_enc_weight, sess,
'AttentionMechanismScaledLuong/last_enc_weight')
self._assertModelVariable(
last_dec_weight, sess,
'AttentionMechanismScaledLuong/last_dec_weight')
self._assertModelVariable(
att_layer_weight, sess,
'AttentionMechanismScaledLuong/att_layer_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'AttentionMechanismScaledLuong')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess, 'AttentionMechanismScaledLuong')
def testAttentionMechanismBahdanau(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
attention='bahdanau',
attention_architecture='standard',
num_layers=2,
use_residual=False,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/query_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/attention_v:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'AttentionMechanismBahdanau')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
# pylint: disable=line-too-long
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
att_layer_weight = tf.get_variable(
'decoder/attention/attention_layer/kernel')
# pylint: enable=line-too-long
self._assertTrainStepsLoss(train_m, sess, 'AttentionMechanismBahdanau')
self._assertModelVariable(last_enc_weight, sess,
'AttentionMechanismBahdanau/last_enc_weight')
self._assertModelVariable(last_dec_weight, sess,
'AttentionMechanismBahdanau/last_dec_weight')
self._assertModelVariable(
att_layer_weight, sess,
'AttentionMechanismBahdanau/att_layer_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'AttentionMechanismBahdanau')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess, 'AttentionMechanismBahdanau')
def testAttentionMechanismNormedBahdanau(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
attention='normed_bahdanau',
attention_architecture='standard',
num_layers=2,
use_residual=False,
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/query_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/attention_v:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/attention_g:0',
'dynamic_seq2seq/decoder/attention/bahdanau_attention/attention_b:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars],
'AttentionMechanismNormedBahdanau')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
# pylint: disable=line-too-long
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
att_layer_weight = tf.get_variable(
'decoder/attention/attention_layer/kernel')
# pylint: enable=line-too-long
self._assertTrainStepsLoss(train_m, sess,
'AttentionMechanismNormedBahdanau')
self._assertModelVariable(
last_enc_weight, sess,
'AttentionMechanismNormedBahdanau/last_enc_weight')
self._assertModelVariable(
last_dec_weight, sess,
'AttentionMechanismNormedBahdanau/last_dec_weight')
self._assertModelVariable(
att_layer_weight, sess,
'AttentionMechanismNormedBahdanau/att_layer_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess,
'AttentionMechanismNormedBahdanau')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess,
'AttentionMechanismNormedBahdanau')
## Test encoder vs. attention (all use residual):
# uni encoder, standard attention
def testUniEncoderStandardAttentionArchitecture(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=4,
attention='scaled_luong',
attention_architecture='standard',
)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_2/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_2/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_2/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_2/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_3/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_3/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/luong_attention/attention_g:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(
expected_var_names, [v.name for v in m_vars],
'UniEncoderStandardAttentionArchitecture')
with tf.variable_scope('dynamic_seq2seq', reuse=True):
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_3/basic_lstm_cell/kernel')
mem_layer_weight = tf.get_variable('decoder/memory_layer/kernel')
self._assertTrainStepsLoss(train_m, sess,
'UniEncoderStandardAttentionArchitecture')
self._assertModelVariable(
last_enc_weight, sess,
'UniEncoderStandardAttentionArchitecture/last_enc_weight')
self._assertModelVariable(
last_dec_weight, sess,
'UniEncoderStandardAttentionArchitecture/last_dec_weight')
self._assertModelVariable(
mem_layer_weight, sess,
'UniEncoderStandardAttentionArchitecture/mem_layer_weight')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(
eval_m, sess, 'UniEncoderStandardAttentionArchitecture')
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess,
'UniEncoderStandardAttentionArchitecture')
# Test gnmt model.
def _testGNMTModel(self, architecture):
hparams = common_test_utils.create_test_hparams(
encoder_type='gnmt',
num_layers=4,
attention='scaled_luong',
attention_architecture=architecture)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/fw/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/bidirectional_rnn/bw/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_2/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_2/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_0_attention/attention/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_0_attention/attention/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_0_attention/attention/luong_attention/attention_g:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_2/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_2/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_3/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/multi_rnn_cell/cell_3/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
test_prefix = 'GNMTModel_%s' % architecture
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(gnmt_model.GNMTModel, hparams,
sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars], test_prefix)
with tf.variable_scope('dynamic_seq2seq', reuse=True):
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_2/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/multi_rnn_cell/cell_3/basic_lstm_cell/kernel')
mem_layer_weight = tf.get_variable('decoder/memory_layer/kernel')
self._assertTrainStepsLoss(train_m, sess, test_prefix)
self._assertModelVariable(last_enc_weight, sess,
'%s/last_enc_weight' % test_prefix)
self._assertModelVariable(last_dec_weight, sess,
'%s/last_dec_weight' % test_prefix)
self._assertModelVariable(mem_layer_weight, sess,
'%s/mem_layer_weight' % test_prefix)
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(gnmt_model.GNMTModel, hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess, test_prefix)
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(gnmt_model.GNMTModel, hparams,
sess)
self._assertInferLogits(infer_m, sess, test_prefix)
def testGNMTModel(self):
self._testGNMTModel('gnmt')
def testGNMTModelV2(self):
self._testGNMTModel('gnmt_v2')
# Test Context
def _testContextModel(self, context_vector, context_feed):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=2,
attention='scaled_luong',
attention_architecture='standard',
ctx='ctx',
context_vector=context_vector,
context_feed=context_feed)
workers, _ = tf.test.create_local_cluster(1, 0)
worker = workers[0]
# pylint: disable=line-too-long
expected_var_names = [
'dynamic_seq2seq/encoder/embedding_encoder:0',
'dynamic_seq2seq/decoder/embedding_decoder:0',
'dynamic_seq2seq/context_rnn_encoder/embedding_context:0',
'dynamic_seq2seq/context_rnn_encoder/bidirectional_rnn/fw/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/context_rnn_encoder/bidirectional_rnn/fw/basic_lstm_cell/bias:0',
'dynamic_seq2seq/context_rnn_encoder/bidirectional_rnn/bw/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/context_rnn_encoder/bidirectional_rnn/bw/basic_lstm_cell/bias:0',
'dynamic_seq2seq/context_resizer:0',
u'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/memory_layer/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0',
'dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0',
'dynamic_seq2seq/decoder/attention/luong_attention/attention_g:0',
'dynamic_seq2seq/decoder/attention/attention_layer/kernel:0',
'dynamic_seq2seq/decoder/output_projection/kernel:0'
]
# pylint: enable=line-too-long
test_prefix = 'ContextModel_%s_%s' % (context_vector, context_feed)
context_vector_prefix = 'ContextModel_context_%s' % context_vector
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
train_m = self._createTestTrainModel(attention_model.AttentionModel,
hparams, sess)
m_vars = tf.trainable_variables()
self._assertModelVariableNames(expected_var_names,
[v.name for v in m_vars], test_prefix)
with tf.variable_scope('dynamic_seq2seq', reuse=True):
last_enc_weight = tf.get_variable(
'encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
last_dec_weight = tf.get_variable(
'decoder/attention/multi_rnn_cell/cell_1/basic_lstm_cell/kernel')
ctx_resizer = tf.get_variable('context_resizer')
self._assertTrainStepsLoss(train_m, sess, test_prefix)
self._assertModelVariable(last_enc_weight, sess,
'%s/last_enc_weight' % test_prefix)
self._assertModelVariable(last_dec_weight, sess,
'%s/last_dec_weight' % test_prefix)
self._assertModelVariable(ctx_resizer, sess,
'%s/ctx_resizer' % context_vector_prefix)
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
eval_m = self._createTestEvalModel(attention_model.AttentionModel,
hparams, sess)
self._assertEvalLossAndPredictCount(eval_m, sess, test_prefix)
with tf.Graph().as_default():
with tf.Session(worker.target, config=self._get_session_config()) as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess)
self._assertInferLogits(infer_m, sess, test_prefix)
def testContextModel(self):
self._testContextModel('bilstm_pool', 'encoder_output')
self._testContextModel('bilstm_last', 'decoder_hidden_state')
# Test beam search.
def testBeamSearchBasicModel(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
infer_mode='beam_search')
hparams.beam_width = 3
hparams.tgt_max_len_infer = 4
assert_top_k_sentence = 3
with self.test_session() as sess:
infer_m = self._createTestInferModel(model.Model, hparams, sess, True)
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'BeamSearchBasicModel')
def testBeamSearchAttentionModel(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
attention='scaled_luong',
attention_architecture='standard',
num_layers=2,
use_residual=False,
infer_mode='beam_search')
hparams.beam_width = 3
hparams.tgt_max_len_infer = 4
assert_top_k_sentence = 2
with self.test_session() as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess, True)
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'BeamSearchAttentionModel')
def testBeamSearchGNMTModel(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='gnmt',
num_layers=4,
attention='scaled_luong',
attention_architecture='gnmt',
infer_mode='beam_search')
hparams.beam_width = 3
hparams.tgt_max_len_infer = 4
assert_top_k_sentence = 1
with self.test_session() as sess:
infer_m = self._createTestInferModel(gnmt_model.GNMTModel, hparams, sess,
True)
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'BeamSearchGNMTModel')
def testInitializerGlorotNormal(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
init_op='glorot_normal')
with self.test_session() as sess:
train_m = self._createTestTrainModel(model.Model, hparams, sess)
self._assertTrainStepsLoss(train_m, sess, 'InitializerGlorotNormal')
def testInitializerGlorotUniform(self):
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
init_op='glorot_uniform')
with self.test_session() as sess:
train_m = self._createTestTrainModel(model.Model, hparams, sess)
self._assertTrainStepsLoss(train_m, sess, 'InitializerGlorotUniform')
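  # Shared setup of the trie-constrained tests below: each test writes a small
  # target vocabulary plus the expected sentences into a temporary trie file,
  # points hparams.trie_path at it, and then checks the constrained inference
  # outputs via the _assertBeamSearchOutputs / _assertTrieSampleOutputs helpers.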
# Test trie search.
def testTrieBeamSearchBasicModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
infer_mode='trie_beam_search')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 3
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
assert_top_k_sentence = 3
trie_keys = [
'TrieBeamSearchBasicModel: batch 0 of beam 0',
'TrieBeamSearchBasicModel: batch 0 of beam 1',
'TrieBeamSearchBasicModel: batch 0 of beam 2',
'TrieBeamSearchBasicModel: batch 1 of beam 0',
'TrieBeamSearchBasicModel: batch 1 of beam 1',
'TrieBeamSearchBasicModel: batch 1 of beam 2',
]
for key in trie_keys:
trie_file.write(self.expected_beam_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(model.Model, hparams, sess, True)
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'TrieBeamSearchBasicModel')
# Test trie sampling.
def testTrieSampleBasicModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=1,
attention='',
attention_architecture='',
use_residual=False,
infer_mode='trie_sample')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.sampling_temperature = 0.
hparams.batch_size = 20
trie_keys = ['TrieSampleBasicModel: batch {}'.format(i) for i in range(1)]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(model.Model, hparams, sess, True)
self._assertTrieSampleOutputs(infer_m, sess, 'TrieSampleBasicModel',
hparams.tgt_max_len_infer)
# Test trie sampling.
def testTrieSampleAttentionModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=2,
attention='scaled_luong',
attention_architecture='standard',
use_residual=False,
infer_mode='trie_sample')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.sampling_temperature = 0.
hparams.batch_size = 20
trie_keys = [
'TrieSampleAttentionModel: batch {}'.format(i) for i in range(1)
]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess, True)
self._assertTrieSampleOutputs(infer_m, sess, 'TrieSampleAttentionModel',
hparams.tgt_max_len_infer)
# Test trie greedy.
def testTrieGreedyAttentionModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=2,
attention='scaled_luong',
attention_architecture='standard',
use_residual=False,
infer_mode='trie_greedy')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.batch_size = 20
trie_keys = [
'TrieGreedyAttentionModel: batch {}'.format(i) for i in range(1)
]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess, True)
self._assertTrieSampleOutputs(infer_m, sess, 'TrieGreedyAttentionModel',
hparams.tgt_max_len_infer)
# Test trie search.
def testTrieBeamSearchAttentionModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=2,
attention='scaled_luong',
attention_architecture='standard',
use_residual=False,
infer_mode='trie_beam_search')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 3
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
assert_top_k_sentence = 3
trie_keys = [
'TrieBeamSearchAttentionModel: batch 0 of beam 0',
'TrieBeamSearchAttentionModel: batch 0 of beam 1',
'TrieBeamSearchAttentionModel: batch 0 of beam 2',
'TrieBeamSearchAttentionModel: batch 1 of beam 0',
'TrieBeamSearchAttentionModel: batch 1 of beam 1',
'TrieBeamSearchAttentionModel: batch 1 of beam 2',
]
for key in trie_keys:
trie_file.write(self.expected_beam_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(attention_model.AttentionModel,
hparams, sess, True)
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'TrieBeamSearchAttentionModel')
# Test trie sampling.
def testTrieSampleGNMTModel(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='gnmt',
num_layers=4,
attention='scaled_luong',
attention_architecture='gnmt_v2',
infer_mode='trie_sample')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as voc_file:
for token in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
voc_file.write(token)
voc_file.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.sampling_temperature = 0.
hparams.batch_size = 20
trie_keys = ['TrieSampleGNMTModel: batch {}'.format(i) for i in range(1)]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(gnmt_model.GNMTModel, hparams,
sess, True)
self._assertTrieSampleOutputs(infer_m, sess, 'TrieSampleGNMTModel',
hparams.tgt_max_len_infer)
# Test trie sampling.
def testTrieSampleGNMTModelExcludeOne(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='gnmt',
num_layers=4,
attention='scaled_luong',
attention_architecture='gnmt_v2',
infer_mode='trie_sample')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as f_voc:
for t in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
f_voc.write(t)
f_voc.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.sampling_temperature = 0.
hparams.batch_size = 20
trie_keys = ['TrieSampleGNMTModel: batch {}'.format(i) for i in range(3)]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(
gnmt_model.GNMTModel,
hparams,
sess,
True,
trie_excludes=[self.expected_sample_sentences[trie_keys[0]]])
self._assertTrieSampleOutputs(infer_m, sess,
'TrieSampleGNMTModelExcludeOne',
hparams.tgt_max_len_infer)
# Test trie sampling.
def testTrieSampleGNMTModelExcludeTwo(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='gnmt',
num_layers=4,
attention='scaled_luong',
attention_architecture='gnmt_v2',
infer_mode='trie_sample')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as f_voc:
for t in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
f_voc.write(t)
f_voc.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 0
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
hparams.sampling_temperature = 0.
hparams.batch_size = 20
trie_keys = ['TrieSampleGNMTModel: batch {}'.format(i) for i in range(3)]
for key in trie_keys:
trie_file.write(self.expected_sample_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(
gnmt_model.GNMTModel,
hparams,
sess,
True,
trie_excludes=[
self.expected_sample_sentences[trie_keys[0]],
self.expected_sample_sentences[trie_keys[1]],
])
self._assertTrieSampleOutputs(infer_m, sess,
'TrieSampleGNMTModelExcludeTwo',
hparams.tgt_max_len_infer)
# Test trie search.
def testTrieBeamSearchAttentionModelExclude(self):
with tempfile.NamedTemporaryFile(mode='w+') as trie_file:
hparams = common_test_utils.create_test_hparams(
encoder_type='uni',
num_layers=2,
attention='scaled_luong',
attention_architecture='standard',
use_residual=False,
infer_mode='trie_beam_search')
vocab_path = os.path.join(ModelTest.tmpdir, 'vocab.tgt')
with open(vocab_path, 'w') as f_voc:
for t in (hparams.sos, hparams.eos, 'a', 'b', 'c'):
f_voc.write(t)
f_voc.write('\n')
hparams.src_vocab_file = vocab_path
hparams.tgt_vocab_file = vocab_path
hparams.beam_width = 3
hparams.tgt_max_len_infer = 7
hparams.trie_path = trie_file.name
assert_top_k_sentence = 2
trie_keys = [
'TrieBeamSearchAttentionModel: batch 0 of beam 0',
'TrieBeamSearchAttentionModel: batch 0 of beam 1',
'TrieBeamSearchAttentionModel: batch 0 of beam 2',
'TrieBeamSearchAttentionModel: batch 1 of beam 0',
'TrieBeamSearchAttentionModel: batch 1 of beam 1',
'TrieBeamSearchAttentionModel: batch 1 of beam 2',
]
for key in trie_keys:
trie_file.write(self.expected_beam_sentences[key] + '\n')
trie_file.flush()
with self.test_session() as sess:
infer_m = self._createTestInferModel(
attention_model.AttentionModel,
hparams,
sess,
True,
trie_excludes=[
self.expected_beam_sentences[trie_keys[0]],
])
self._assertBeamSearchOutputs(infer_m, sess, assert_top_k_sentence,
'TrieBeamSearchAttentionModelExclude')
if __name__ == '__main__':
tf.test.main()
| google/active-qa | px/nmt/model_test.py | Python | apache-2.0 | 70,985 | 0.005776 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.goal.error import GoalError
class Goal(object):
"""Factory for objects representing goals.
Ensures that we have exactly one instance per goal name.
:API: public
"""
_goal_by_name = dict()
def __new__(cls, *args, **kwargs):
raise TypeError('Do not instantiate {0}. Call by_name() instead.'.format(cls))
@classmethod
def register(cls, name, description):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
"""
cls.by_name(name)._description = description
@classmethod
def by_name(cls, name):
"""Returns the unique object representing the goal of the specified name.
:API: public
"""
if name not in cls._goal_by_name:
cls._goal_by_name[name] = _Goal(name)
return cls._goal_by_name[name]
@classmethod
def clear(cls):
"""Remove all goals and tasks.
This method is EXCLUSIVELY for use in tests and during pantsd startup.
:API: public
"""
cls._goal_by_name.clear()
@staticmethod
def scope(goal_name, task_name):
"""Returns options scope for specified task in specified goal.
:API: public
"""
return goal_name if goal_name == task_name else '{0}.{1}'.format(goal_name, task_name)
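  # Illustrative sketch (goal and task names are hypothetical): the scope is
  # "<goal>.<task>" unless the task shares the goal's name, e.g.
  #   Goal.scope('compile', 'zinc')        -> 'compile.zinc'
  #   Goal.scope('clean-all', 'clean-all') -> 'clean-all'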
@staticmethod
def all():
"""Returns all registered goals, sorted alphabetically by name.
:API: public
"""
return [pair[1] for pair in sorted(Goal._goal_by_name.items())]
@classmethod
def subsystems(cls):
"""Returns all subsystem types used by all tasks, in no particular order.
:API: public
"""
ret = set()
for goal in cls.all():
ret.update(goal.subsystems())
return ret
class _Goal(object):
def __init__(self, name):
"""Don't call this directly.
Create goals only through the Goal.by_name() factory.
"""
self.name = name
self._description = ''
self.serialize = False
self._task_type_by_name = {} # name -> Task subclass.
self._ordered_task_names = [] # The task names, in the order imposed by registration.
@property
def description(self):
if self._description:
return self._description
# Return the docstring for the Task registered under the same name as this goal, if any.
# This is a very common case, and therefore a useful idiom.
namesake_task = self._task_type_by_name.get(self.name)
if namesake_task and namesake_task.__doc__:
# First line of docstring.
# TODO: This is repetitive of Optionable.get_description(). We should probably just
# make Goal an Optionable, for uniformity.
return namesake_task.__doc__.partition('\n')[0].strip()
return ''
def register_options(self, options):
for task_type in sorted(self.task_types(), key=lambda cls: cls.options_scope):
task_type.register_options_on_scope(options)
def install(self, task_registrar, first=False, replace=False, before=None, after=None):
"""Installs the given task in this goal.
The placement of the task in this goal's execution list defaults to the end but its position
can be influenced by specifying exactly one of the following arguments:
first: Places the task 1st in the execution list.
replace: Removes all existing tasks in this goal and installs this task.
before: Places the task before the named task in the execution list.
after: Places the task after the named task in the execution list.
"""
if [bool(place) for place in [first, replace, before, after]].count(True) > 1:
raise GoalError('Can only specify one of first, replace, before or after')
task_name = task_registrar.name
options_scope = Goal.scope(self.name, task_name)
# Currently we need to support registering the same task type multiple times in different
# scopes. However we still want to have each task class know the options scope it was
# registered in. So we create a synthetic subclass here.
# TODO(benjy): Revisit this when we revisit the task lifecycle. We probably want to have
# a task *instance* know its scope, but this means converting option registration from
# a class method to an instance method, and instantiating the task much sooner in the
# lifecycle.
superclass = task_registrar.task_type
subclass_name = b'{0}_{1}'.format(superclass.__name__,
options_scope.replace('.', '_').replace('-', '_'))
task_type = type(subclass_name, (superclass,), {
'__doc__': superclass.__doc__,
'__module__': superclass.__module__,
'options_scope': options_scope,
'_stable_name': superclass.stable_name()
})
otn = self._ordered_task_names
if replace:
for tt in self.task_types():
tt.options_scope = None
del otn[:]
self._task_type_by_name = {}
if first:
otn.insert(0, task_name)
elif before in otn:
otn.insert(otn.index(before), task_name)
elif after in otn:
otn.insert(otn.index(after) + 1, task_name)
else:
otn.append(task_name)
self._task_type_by_name[task_name] = task_type
if task_registrar.serialize:
self.serialize = True
return self
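  # Hedged usage sketch (registrar names are hypothetical, assuming objects that
  # expose .name, .task_type and .serialize as used above):
  #   goal = Goal.by_name('test')
  #   goal.install(registrar_a)                        # appended at the end
  #   goal.install(registrar_b, before=registrar_a.name)
  # installs registrar_b ahead of registrar_a; passing more than one of
  # first/replace/before/after raises GoalError.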
def uninstall_task(self, name):
"""Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
"""
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError('Cannot uninstall unknown task: {0}'.format(name))
def known_scope_infos(self):
"""Yields ScopeInfos for all known scopes under this goal."""
# Note that we don't yield the goal's own scope. We don't need it (as we don't register
# options on it), and it's needlessly confusing when a task has the same name as its goal,
# in which case we shorten its scope to the goal's scope (e.g., idea.idea -> idea).
for task_type in self.task_types():
for scope_info in task_type.known_scope_infos():
yield scope_info
def subsystems(self):
"""Returns all subsystem types used by tasks in this goal, in no particular order."""
ret = set()
for task_type in self.task_types():
ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
return ret
def ordered_task_names(self):
"""The task names in this goal, in registration order."""
return self._ordered_task_names
def task_type_by_name(self, name):
"""The task type registered under the given name."""
return self._task_type_by_name[name]
def task_types(self):
"""Returns the task types in this goal, unordered."""
return self._task_type_by_name.values()
def task_items(self):
for name, task_type in self._task_type_by_name.items():
yield name, task_type
def has_task_of_type(self, typ):
"""Returns True if this goal has a task of the given type (or a subtype of it)."""
for task_type in self.task_types():
if issubclass(task_type, typ):
return True
return False
def __repr__(self):
return self.name
| dbentley/pants | src/python/pants/goal/goal.py | Python | apache-2.0 | 8,127 | 0.010336 |
import psycopg2
import unittest
import sys
import os
class GFugaRotaTest(unittest.TestCase):
def setUp(self):
self.table = open(os.path.abspath('../') + '/sql/createsTable/FugaRota.sql', 'r')
self.constraints = open(os.path.abspath('../') + '/sql/createsTable/FugaRota_const.sql', 'r')
self.insert = open(os.path.abspath('../') + '/sql/inserts/Horarios_inserts.sql', 'r')
self.falho = open(os.path.abspath('../') + '/sql/inserts/FugaRota_inserts_falhos.sql', 'r')
self.FugaRota = self.table.read()
self.cons = self.constraints.read()
self.inserts = self.insert.readlines()
self.falhos = self.falho.readlines()
self.table.close()
self.constraints.close()
self.insert.close()
self.falho.close()
conn = psycopg2.connect("dbname=teste user=postgres")
conn.set_isolation_level(0) # set autocommit
self.cur = conn.cursor()
def tearDown(self):
self.cur.close()
def testBCreateTable(self):
self.cur.execute(self.FugaRota)
self.assertEqual(self.cur.statusmessage, "CREATE TABLE")
def testCConstraints(self):
self.cur.execute(self.cons)
self.assertEqual(self.cur.statusmessage, "ALTER TABLE")
def testDInsertTable(self):
for self.dados in self.inserts:
self.cur.execute(self.dados)
self.assertEqual(self.cur.statusmessage, "INSERT 0 1")
    def testEInsertTableFalhos(self):
        for self.dadosFalhos in self.falhos:
            # Each of these inserts violates a constraint, so execute() must
            # raise; a bare try/except asserting True could never fail, hence
            # assertRaises is used instead.
            with self.assertRaises(psycopg2.Error):
                self.cur.execute(self.dadosFalhos)
| UFCGProjects/sig | src/tests/FugaRotaTest.py | Python | mit | 1,680 | 0.008333 |
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.test import SimpleTestCase
from ddd.logic.shared_kernel.academic_year.domain.model.academic_year import AcademicYear, AcademicYearIdentity
from infrastructure.shared_kernel.academic_year.repository.in_memory.academic_year import AcademicYearInMemoryRepository
class TestAcademicYearInMemoryRepository(SimpleTestCase):
def setUp(self):
self.academic_year_repository = AcademicYearInMemoryRepository()
for annee in range(2016, 2021):
self.academic_year_repository.save(AcademicYear(
entity_id=AcademicYearIdentity(year=annee),
start_date=datetime.date(annee, 9, 15),
end_date=datetime.date(annee+1, 9, 30),
))
def test_search_should_return_specific_academic_years_if_specified_year(self):
years = self.academic_year_repository.search(from_year=2018)
self.assertEqual(len(years), 3)
for index, annee in enumerate(range(2018, 2021)):
self.assertEqual(years[index].year, annee)
def test_search_should_return_all_academic_years_if_not_specified_year(self):
years = self.academic_year_repository.search()
self.assertEqual(len(years), 5)
for index, annee in enumerate(range(2016, 2021)):
self.assertEqual(years[index].year, annee)
| uclouvain/osis | infrastructure/tests/shared_kernel/academic_year/repository/in_memory/test_academic_year.py | Python | agpl-3.0 | 2,533 | 0.001975 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestDefaults(helpers.AdminTestCase):
def setUp(self):
super(TestDefaults, self).setUp()
self.defaults_page = self.home_pg.go_to_admin_system_defaultspage()
self.add_up = random.randint(1, 10)
def test_update_compute_defaults(self):
"""Tests the Update Default Compute Quotas functionality:
1) Login as Admin and go to Admin > System > Defaults
2) Updates default compute Quotas by adding a random
number between 1 and 10
3) Verifies that the updated values are present in the
Compute Quota Defaults table
"""
default_quota_values = self.defaults_page.compute_quota_values
self.defaults_page.update_compute_defaults(self.add_up)
self.assertTrue(
self.defaults_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.defaults_page.find_message_and_dismiss(messages.ERROR))
self.assertGreater(len(default_quota_values), 0)
for quota_name in default_quota_values:
self.assertTrue(
self.defaults_page.is_compute_quota_a_match(
quota_name,
default_quota_values[quota_name] + self.add_up
))
def test_update_volume_defaults(self):
"""Tests the Update Default Volume Quotas functionality:
1) Login as Admin and go to Admin > System > Defaults
2) Clicks on Volume Quotas tab
3) Updates default volume Quotas by adding a random
number between 1 and 10
4) Verifies that the updated values are present in the
Volume Quota Defaults table
"""
self.defaults_page.go_to_volume_quotas_tab()
default_quota_values = self.defaults_page.volume_quota_values
self.defaults_page.update_volume_defaults(self.add_up)
self.assertTrue(
self.defaults_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.defaults_page.find_message_and_dismiss(messages.ERROR))
self.assertGreater(len(default_quota_values), 0)
for quota_name in default_quota_values:
self.assertTrue(
self.defaults_page.is_volume_quota_a_match(
quota_name,
default_quota_values[quota_name] + self.add_up
))
| NeCTAR-RC/horizon | openstack_dashboard/test/integration_tests/tests/test_defaults.py | Python | apache-2.0 | 3,108 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-01-24 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0009_auto_20180124_0105'),
]
operations = [
migrations.AlterField(
model_name='page',
name='template_key',
field=models.CharField(choices=[(b'content/pages/page.html', 'Page'), (b'content/pages/index_page.html', 'Index Page')], default=b'content/pages/page.html', max_length=255, verbose_name='template'),
),
]
| mcmaxwell/idea_digital_agency | idea/feincms/module/page/migrations/0010_auto_20180124_1945.py | Python | mit | 612 | 0.001634 |
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.decorators import classonlymethod
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
view.view_class = cls
view.view_initkwargs = initkwargs
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
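    # Usage sketch (URL pattern and view name are illustrative): in a URLconf
    # one would typically write
    #   url(r'^about/$', AboutView.as_view(template_name='about.html')),
    # where AboutView subclasses TemplateView; as_view() returns the `view`
    # closure above, and a fresh instance is created per request.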
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
template_engine = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the URLconf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = False
url = None
pattern_name = None
query_string = False
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
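    # Sketch (the pattern name is hypothetical): a redirect that resolves a
    # named URL and keeps the query string could be wired as
    #   RedirectView.as_view(pattern_name='article-detail', permanent=True,
    #                        query_string=True)
    # Note that `url` takes precedence over `pattern_name` when both are set.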
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', request.path,
extra={
'status_code': 410,
'request': request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
| yephper/django | django/views/generic/base.py | Python | bsd-3-clause | 7,898 | 0.001013 |
def fake_get_value_from_db():
return 5
def check_outdated():
total = fake_get_value_from_db()
return total > 10
def task_put_more_stuff_in_db():
def put_stuff(): pass
return {'actions': [put_stuff],
'uptodate': [check_outdated],
}
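# Reading of the snippet above: callables listed under 'uptodate' are evaluated
# before the task runs, and a True result marks the task as up to date. With
# the fake DB value fixed at 5, check_outdated() returns False, so put_stuff()
# runs on every invocation.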
| gh0std4ncer/doit | doc/tutorial/uptodate_callable.py | Python | mit | 279 | 0.003584 |
#!/usr/bin/env python
#============================================================================
# Copyright (c) Microsoft Corporation. All rights reserved. See license.txt for license information.
#============================================================================
from __future__ import print_function
from __future__ import with_statement
import os
import sys
import tempfile
import re
import platform
import imp
import socket
protocol=imp.load_source('protocol','../protocol.py')
"""
MOF:
[ClassVersion("1.0.0"), FriendlyName("nxIPAddress")]
class MSFT_nxIPAddress : OMI_BaseResource
{
[write] string IPAddress;
[Key] string InterfaceName;
[write,ValueMap{"Automatic", "Static"},Values{"Automatic", "Static"}] string BootProtocol;
[write] string DefaultGateway;
[write,ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] string Ensure;
[write] integer PrefixLength;
[Key,write,ValueMap{"IPv4", "IPv6"},Values{"IPv4", "IPv6"}] string AddressFamily;
};
"""
def ValidateAddresses(IPAddress,AddressFamily,PrefixLength):
if 'IPv4' in AddressFamily:
ptype=socket.AF_INET
elif 'IPv6' in AddressFamily:
ptype=socket.AF_INET6
else:
return False
try:
socket.inet_pton(ptype,IPAddress)
except:
print('Error: IPAddress "'+IPAddress+'" is invalid.',file=sys.stderr)
return False
if type(PrefixLength) == int or type(PrefixLength) == long :
if 'IPv4' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 32) :
print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-32.',file=sys.stderr)
return False
if 'IPv6' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 128) :
print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-128.',file=sys.stderr)
return False
return True
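# Illustrative checks of the validator above (addresses are made up):
#   ValidateAddresses('10.0.0.5', 'IPv4', 24)  -> True
#   ValidateAddresses('10.0.0.5', 'IPv4', 40)  -> False (IPv4 prefixes are 0-32)
#   ValidateAddresses('fe80::1', 'IPv6', 64)   -> True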
def bitNetmaskConversion(PrefixLength):
if PrefixLength == '':
return ''
if type(PrefixLength) != long and type(PrefixLength) != int :
N = int(PrefixLength)
else :
N = PrefixLength
M = int(N / 8) #number of 255 sections (full octets)
MASK = 255
netmaskIP = ""
count = 0
while count < M:
netmaskIP = netmaskIP + "255."
count += 1
if N % 8 != 0:
netmaskIP += str((MASK << (8 - N%8)) & MASK) + "."
count += 1
while count < 4:
netmaskIP = netmaskIP + "0."
count += 1
if netmaskIP[-1] == ".":
netmaskIP = netmaskIP[:-1]
return netmaskIP
def netmaskBitConversion(netmask):
if netmask==None or netmask=='' :
return 0
arrTmp = netmask.strip("'")
arr = arrTmp.split(".")
sumT = 0
for i in arr:
i = int(i)
if i == 255:
sumT += 8
else:
j = 0
while j < 8:
sumT += (i >> j) & 1
j+=1
return sumT
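# The two helpers above are inverses for contiguous masks, e.g.
#   bitNetmaskConversion(19)              -> '255.255.224.0'
#   netmaskBitConversion('255.255.224.0') -> 19
# An empty PrefixLength yields '' and an empty/None netmask yields 0.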
def init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if PrefixLength == None:
PrefixLength=''
if BootProtocol == None or len(BootProtocol)<1:
BootProtocol='Automatic'
else :
BootProtocol=BootProtocol[0].upper()+BootProtocol[1:].lower()
if Ensure == None or len(Ensure)<1:
Ensure='Present'
else :
Ensure=Ensure[0].upper()+Ensure[1:].lower()
if AddressFamily == None or len(AddressFamily)<1:
AddressFamily='IPv4'
else :
AddressFamily=AddressFamily[0].upper()+AddressFamily[1].upper()+AddressFamily[2].lower()+AddressFamily[3:]
if IPAddress == None:
IPAddress=''
if len(IPAddress)>0:
if ValidateAddresses(IPAddress,AddressFamily,PrefixLength) == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
elif BootProtocol != 'Automatic' and Ensure == 'Present':
        print('ERROR: BootProtocol != Automatic. IPAddress is required.',file=sys.stdout)
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
if DefaultGateway == None:
DefaultGateway=''
if len(DefaultGateway) > 0 and ValidateAddresses(DefaultGateway,AddressFamily,'') == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
return True,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
def Set_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
retval = MyDistro.Set(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
return retval
def Test_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
return MyDistro.Test(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
def Get_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
arg_names=list(locals().keys())
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily]
retval = 0
MyDistro=GetMyDistro()
(retval, IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily) = MyDistro.Get(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
Ensure = protocol.MI_String(Ensure.encode("utf-8"))
IPAddress = protocol.MI_String(IPAddress.encode("utf-8"))
AddressFamily= protocol.MI_String(AddressFamily.encode("utf-8"))
InterfaceName = protocol.MI_String(InterfaceName.encode("utf-8"))
BootProtocol = protocol.MI_String(BootProtocol.encode("utf-8"))
DefaultGateway = protocol.MI_String(DefaultGateway.encode("utf-8"))
if type(PrefixLength) == int or type(PrefixLength) == long :
PrefixLength=protocol.MI_Uint32(PrefixLength)
else:
PrefixLength=protocol.MI_Uint32(int(PrefixLength))
retd={}
ld=locals()
for k in arg_names :
retd[k]=ld[k]
return retval, retd
def ReplaceFileContentsAtomic(filepath, contents):
"""
Write 'contents' to 'filepath' by creating a temp file, and replacing original.
"""
handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath))
if type(contents) == str :
contents=contents.encode('latin-1')
try:
os.write(handle, contents)
except IOError, e:
print('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
return None
finally:
os.close(handle)
try:
os.rename(temp, filepath)
return None
except IOError, e:
print('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' +str(e),file=sys.stderr)
try:
os.remove(filepath)
except IOError, e:
print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
try:
os.rename(temp,filepath)
except IOError, e:
print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
return 1
return 0
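# Sketch of the intended call pattern (path and contents are illustrative):
#   ReplaceFileContentsAtomic('/etc/sysconfig/network', 'GATEWAY=10.0.0.1\n')
# writes to a temp file in the same directory and renames it over the target,
# so readers never observe a partially written file.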
def GetMyDistro(dist_class_name=''):
"""
Return MyDistro object.
NOTE: Logging is not initialized at this point.
"""
if dist_class_name == '':
if 'Linux' in platform.system():
Distro=platform.dist()[0]
else : # I know this is not Linux!
if 'FreeBSD' in platform.system():
Distro=platform.system()
Distro=Distro.strip('"')
Distro=Distro.strip(' ')
dist_class_name=Distro+'Distro'
else:
Distro=dist_class_name
if not globals().has_key(dist_class_name):
print(Distro+' is not a supported distribution.')
return None
return globals()[dist_class_name]() # the distro class inside this module.
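# Resolution example (distro strings depend on what platform.dist() reports):
# 'debian' maps to debianDistro and 'SuSE' to SuSEDistro below; any name
# without a matching '<name>Distro' class prints a message and returns None.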
class AbstractDistro(object):
def __init__(self):
self.gateway_file='/etc/sysconfig/network'
self.gateway_prefix=''
self.ifcfg_prefix='/etc/sysconfig/network-scripts/ifcfg-'
def init_re_dict(self,src_dict):
re_dict=dict()
for k in src_dict:
re_dict[k]=re.compile(r'\s*'+k+'.*')
return re_dict
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.gateway_dict=dict()
self.ifcfg_v4_dict=dict()
self.ifcfg_v4_dict['ONBOOT=']='yes'
self.ifcfg_v4_dict['DEVICE=']=InterfaceName
if BootProtocol.lower() == 'static':
self.ifcfg_v4_dict['BOOTPROTO=']='none'
else:
self.ifcfg_v4_dict['BOOTPROTO=']='dhcp'
self.ifcfg_v4_dict['DHCPCLASS=']=''
self.ifcfg_v4_dict['IPADDR=']=IPAddress
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v4_dict['NETMASK=']=bitNetmaskConversion(PrefixLength)
else:
self.ifcfg_v4_dict['NETMASK=']=''
self.ifcfg_v6_dict=dict()
self.ifcfg_v6_dict['ONBOOT=']='yes'
self.ifcfg_v6_dict['DEVICE=']=InterfaceName
if BootProtocol.lower() == 'static':
self.ifcfg_v6_dict['BOOTPROTO=']='none'
else:
self.ifcfg_v6_dict['BOOTPROTO=']='dhcp'
self.ifcfg_v6_dict['DHCPCLASS=']=''
if BootProtocol.lower() == 'static':
self.ifcfg_v6_dict['IPV6INIT=']='yes'
self.ifcfg_v6_dict['IPV6_AUTOCONF=']='no'
else :
self.ifcfg_v6_dict['IPV6INIT=']='yes'
self.ifcfg_v6_dict['IPV6_AUTOCONF=']='yes'
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v6_dict['IPV6ADDR=']=IPAddress+'/'+str(PrefixLength)
else:
self.ifcfg_v6_dict['IPV6ADDR=']=IPAddress
self.gateway_dict['GATEWAY=']=DefaultGateway
if AddressFamily == 'IPv4':
self.ifcfg_dict=self.ifcfg_v4_dict
self.addr_key='IPADDR='
else :
self.ifcfg_dict=self.ifcfg_v6_dict
self.addr_key='IPV6ADDR='
self.gateway_dict['NETWORKING_IPV6=']='yes'
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if AddressFamily=='IPv4':
if 'NETMASK=' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['NETMASK=']) > 0 :
PrefixLength=netmaskBitConversion(self.ifcfg_dict['NETMASK='])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict[self.addr_key] :
PrefixLength=int(self.ifcfg_dict[self.addr_key].split('/')[1])
self.ifcfg_dict[self.addr_key]=self.ifcfg_dict[self.addr_key].split('/')[0]
bootproto=''
if BootProtocol != None and len(BootProtocol) > 0 :
if self.ifcfg_dict['BOOTPROTO='] == 'dhcp':
bootproto='Automatic'
else:
bootproto='Static'
gateway=''
if len(self.gateway_dict['GATEWAY=']) >0:
gateway=self.gateway_dict['GATEWAY=']
return self.ifcfg_dict[self.addr_key],self.ifcfg_dict['DEVICE='],bootproto,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown ' + Interface)
os.system('ifup ' + Interface)
return [0]
def interface_down(self,Interface):
os.system('ifconfig ' + Interface + ' down')
return [0]
def UpdateValuesInFile(self,fname,src_dict,re_dict,Ensure):
updated=''
if os.path.exists(fname) != True:
# if this file is not here - we will create it
with open(fname,'w+') as F:
F.write('# Created by Microsoft DSC nxIPAddress Provider\n')
F.close()
try:
with open(fname,'r') as F:
for l in F.readlines():
if l[0]=='#':
updated+=l
continue
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if len(src_dict[k])==0 :
l=''
re_dict[k]=None
break
else:
l=re.sub(re_dict[k],k+src_dict[k],l)
re_dict[k]=None
if len(l)>2:
updated+=l
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
except:
raise
ReplaceFileContentsAtomic(fname,updated)
return [0]
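    # Sketch of the update semantics above (keys and values are hypothetical):
    # given src_dict = {'IPADDR=': '10.0.0.5', 'NETMASK=': ''}, an existing
    # 'IPADDR=...' line is rewritten in place, a matching 'NETMASK=...' line is
    # dropped because its new value is empty, and keys that never matched but
    # have non-empty values are appended at the end of the file.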
def GetValuesFromFile(self,fname,src_dict,re_dict):
if os.path.exists(fname) != True:
return
try:
with (open(fname,'r')) as F:
for l in F.readlines():
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
src_dict[k]=l.split(k[-1])[1].strip('\n')
re_dict[k]=None
except:
raise
def Set(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
retval=[-1]
if len(self.ifcfg_prefix)>0:
self.ifcfg_file=self.ifcfg_prefix+InterfaceName
if len(self.gateway_prefix)>0:
self.gateway_file=self.gateway_prefix+InterfaceName
self.init_src_dicts(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
gateway_re_dict=self.init_re_dict(self.gateway_dict)
ifcfg_re_dict=self.init_re_dict(self.ifcfg_dict)
if Ensure == 'Absent':
if len(self.ifcfg_prefix)>0:
if os.path.exists(self.ifcfg_file):
os.remove(self.ifcfg_file)
retval=[0]
else:
retval=self.UpdateValuesInFile(self.ifcfg_file,self.ifcfg_dict,ifcfg_re_dict,Ensure)
if len(self.gateway_prefix)>0:
if os.path.exists(self.gateway_file):
os.remove(self.gateway_file)
retval=[0]
else:
retval=self.UpdateValuesInFile(self.gateway_file,self.gateway_dict,gateway_re_dict,Ensure)
self.interface_down(InterfaceName)
else:
retval=self.UpdateValuesInFile(self.gateway_file,self.gateway_dict,gateway_re_dict,Ensure)
retval=self.UpdateValuesInFile(self.ifcfg_file,self.ifcfg_dict,ifcfg_re_dict,Ensure)
retval=self.restart_network(InterfaceName)
return retval
def Test(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if len(self.ifcfg_prefix)>0:
self.ifcfg_file=self.ifcfg_prefix+InterfaceName
if len(self.gateway_prefix)>0:
self.gateway_file=self.gateway_prefix+InterfaceName
self.init_src_dicts(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
test_gateway=dict(self.gateway_dict)
for k in test_gateway:
test_gateway[k]=''
test_gateway_re_dict=self.init_re_dict(self.gateway_dict)
self.GetValuesFromFile(self.gateway_file,test_gateway,test_gateway_re_dict)
for k in self.gateway_dict:
if k == 'default ' and len(self.gateway_dict[k]) >0: # SuSE
self.gateway_dict[k]=self.gateway_dict[k].split(' ')[0]
if self.gateway_dict[k] != test_gateway[k]:
return [-1]
test_ifcfg=dict(self.ifcfg_dict)
for k in test_ifcfg:
if k != 'iface ':
test_ifcfg[k]=''
test_ifcfg_re_dict=self.init_re_dict(self.ifcfg_dict)
self.GetValuesFromFile(self.ifcfg_file,test_ifcfg,test_ifcfg_re_dict)
if Ensure == 'Absent':
if 'iface ' in test_ifcfg.keys() and test_ifcfg['iface ']!=None and len(test_ifcfg['iface '])>0:
return [-1]
elif len(self.ifcfg_prefix)>0 and os.path.exists(self.ifcfg_file) :
return [-1]
else:
return [0]
for k in self.ifcfg_dict:
if self.ifcfg_dict[k] != test_ifcfg[k]:
return [-1]
return [0]
def Get(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
# calling Test here will fill the dicts with values
self.Test(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
self.src_dicts_to_params(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if PrefixLength=='':
PrefixLength=0
return 0,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
class SuSEDistro(AbstractDistro):
def __init__(self):
super(SuSEDistro,self).__init__()
self.gateway_prefix='/etc/sysconfig/network/ifroute-'
self.ifcfg_prefix='/etc/sysconfig/network/ifcfg-'
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.gateway_v4_dict=dict()
self.gateway_v6_dict=dict()
if BootProtocol.lower() != 'static' or len(DefaultGateway) == 0:
self.gateway_v4_dict['default ']=''
self.gateway_v6_dict['default ']=''
else:
self.gateway_v4_dict['default ']=DefaultGateway+' '+bitNetmaskConversion(PrefixLength)+' '+InterfaceName
self.gateway_v6_dict['default ']=DefaultGateway+' '+InterfaceName
self.ifcfg_v4_dict=dict()
if BootProtocol.lower() != 'static':
self.ifcfg_v4_dict['BOOTPROTO=']='dhcp'
else:
self.ifcfg_v4_dict['BOOTPROTO=']='static'
self.ifcfg_v4_dict['STARTMODE=']='auto'
self.ifcfg_v4_dict['IPADDR=']=IPAddress
self.ifcfg_v4_dict['NETMASK=']=bitNetmaskConversion(PrefixLength)
self.ifcfg_v6_dict=dict()
if BootProtocol.lower() != 'static':
self.ifcfg_v6_dict['BOOTPROTO=']='autoip'
else:
self.ifcfg_v6_dict['BOOTPROTO=']='static'
self.ifcfg_v6_dict['STARTMODE=']='auto'
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v6_dict['IPADDR=']=IPAddress+'/'+str(PrefixLength)
else:
self.ifcfg_v6_dict['IPADDR=']=IPAddress
if AddressFamily == 'IPv4':
self.ifcfg_dict=self.ifcfg_v4_dict
self.addr_key='IPADDR='
self.gateway_dict=self.gateway_v4_dict
else :
self.ifcfg_dict=self.ifcfg_v6_dict
self.addr_key='IPADDR='
self.gateway_dict=self.gateway_v6_dict
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if AddressFamily=='IPv4':
if 'NETMASK=' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['NETMASK=']) > 0 :
PrefixLength=netmaskBitConversion(self.ifcfg_dict['NETMASK='])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict[self.addr_key] :
PrefixLength=int(self.ifcfg_dict[self.addr_key].split('/')[1])
self.ifcfg_dict[self.addr_key]=self.ifcfg_dict[self.addr_key].split('/')[0]
bootproto=''
if BootProtocol != '' and 'BOOTPROTO=' in self.ifcfg_v4_dict.keys() and len(self.ifcfg_v4_dict['BOOTPROTO=']) >0 :
if self.ifcfg_v4_dict['BOOTPROTO='] != 'static':
bootproto='Automatic'
else:
bootproto='Static'
gateway=''
# The gateway line here for SuSE is 'default <addr> <interface>'.
# Remove the <interface> so it can match <addr>.
if len(self.gateway_dict['default ']) >0:
gateway=self.gateway_dict['default '].split(' ')[0]
return self.ifcfg_dict['IPADDR='],self.ifcfg_file.split('-')[-1],bootproto,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown ' + Interface)
os.system('ifup ' + Interface)
return [0]
class debianDistro(AbstractDistro):
def __init__(self):
super(debianDistro,self).__init__()
self.ifcfg_prefix=''
self.gateway_prefix=''
self.ifcfg_file='/etc/network/interfaces'
self.gateway_file='/etc/network/interfaces'
def init_re_dict(self,src_dict):
re_dict=dict()
for k in src_dict:
re_dict[k]=re.compile(r'\s*'+k+'.*')
if 'iface ' in re_dict:
re_dict['iface ']=re.compile(r'\s*iface '+src_dict['iface '])
if 'inet ' in re_dict:
re_dict['inet ']=re.compile(r'\s*iface '+src_dict['iface '] + ' inet .*')
if 'inet6 ' in re_dict:
re_dict['inet6 ']=re.compile(r'\s*iface '+src_dict['iface '] + ' inet6 .*')
return re_dict
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.ifcfg_v4_dict={}
self.ifcfg_v6_dict={}
self.gateway_dict={}
if BootProtocol.lower() == 'static' :
self.ifcfg_v4_dict['inet '] = 'static'
elif BootProtocol.lower() == 'automatic':
self.ifcfg_v4_dict['inet '] = 'dhcp'
else:
self.ifcfg_v4_dict['inet '] = ''
self.ifcfg_v4_dict['iface ']=InterfaceName
self.ifcfg_v4_dict['autoconf ']=''
self.ifcfg_v4_dict['network ']=''
self.ifcfg_v4_dict['address ']=IPAddress
if PrefixLength !=0 and PrefixLength != '':
self.ifcfg_v4_dict['netmask ']=bitNetmaskConversion(PrefixLength)
self.ifcfg_v6_dict['netmask ']=str(PrefixLength)
else:
self.ifcfg_v4_dict['netmask ']=''
self.ifcfg_v6_dict['netmask ']=''
self.ifcfg_v4_dict['gateway ']=DefaultGateway
if len(BootProtocol) > 0:
self.ifcfg_v6_dict['inet6 ']='static' # static is used for autoconf as well
else:
self.ifcfg_v6_dict['inet6 ']=''
self.ifcfg_v6_dict['iface ']=InterfaceName
        # The IPv6 prefix length is carried in the 'netmask ' entry above,
        # so the address is stored without a '/<prefix>' suffix.
        self.ifcfg_v6_dict['address ']=IPAddress
self.ifcfg_v6_dict['gateway ']=DefaultGateway
if AddressFamily == "IPv4":
self.ifcfg_dict=self.ifcfg_v4_dict
self.inet='inet '
else:
if BootProtocol.lower() != 'static':
self.ifcfg_v6_dict['autoconf ']='1'
else:
self.ifcfg_v6_dict['autoconf ']='0'
self.ifcfg_dict=self.ifcfg_v6_dict
self.inet='inet6 '
if Ensure == "Absent":
auto='auto '+InterfaceName
self.ifcfg_dict[auto]=''
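    # Illustrative sketch (not part of the original provider): for a static
    # IPv4 configuration the dictionaries above correspond to an
    # /etc/network/interfaces stanza roughly like the following (interface
    # name and addresses are made-up example values):
    #
    #   auto eth0
    #   iface eth0 inet static
    #   address 192.168.1.10
    #   netmask 255.255.255.0
    #   gateway 192.168.1.1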
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
inet=''
if BootProtocol != None and len(BootProtocol) > 0 :
if AddressFamily=='IPv6':
if self.ifcfg_dict['autoconf '] == '1' :
inet = 'Automatic'
else:
inet = 'Static'
else:
if self.ifcfg_dict[self.inet] == 'dhcp':
inet = 'Automatic'
else:
inet = 'Static'
if AddressFamily=='IPv4':
            if 'netmask ' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['netmask ']) > 0 :
                PrefixLength=netmaskBitConversion(self.ifcfg_dict['netmask '])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict['address '] :
PrefixLength=int(self.ifcfg_dict['address '].split('/')[1])
self.ifcfg_dict['address ']=self.ifcfg_dict['address '].split('/')[0]
gateway=''
if len(self.ifcfg_dict['gateway ']) >0:
gateway=self.ifcfg_dict['gateway ']
return self.ifcfg_dict['address '],self.ifcfg_dict['iface '],inet,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown --exclude=lo ' + Interface +'; ifup --exclude=lo '+ Interface)
return [0]
def UpdateValuesInFile(self,fname,src_dict,re_dict,Ensure):
if len(src_dict) == 0:
return [0]
removing=False
if self.inet in src_dict.keys() and Ensure=='Absent': # we are trying to remove
removing=True
        if not removing and not os.path.exists(fname):
            # if this file is not here, we will create it
            with open(fname,'w+') as F:
                F.write('# Created by nxIPAddress DSC Provider\n')
with open(fname,'r') as F:
txt=F.read()
if 'iface ' in src_dict.keys():
srch=r'(^auto '+src_dict['iface ']+'$.*?^iface '+src_dict['iface ']+'.*?$|^iface '+src_dict['iface ']+'.*?$).*?((^auto )|(^iface )|(^$))'
updated=''
r=re.search(srch,txt,flags=re.S|re.M)
if r == None:
if removing: #nothing to remove
return [0]
else : # append values to the end
l='auto ' + src_dict['iface '] + '\niface '+src_dict['iface '] + ' ' + self.inet+src_dict[self.inet] + '\n'
if len(updated) > 0 and updated[-1] != '\n':
updated+='\n'
updated+=l
re_dict['iface ']=None
re_dict[self.inet]=None
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
txt=txt+updated
else: #matched
if removing:
tail=''
rpl=re.compile(r.group(0),flags=re.S|re.M)
txt=rpl.sub(tail,txt)
if txt[-2:] == '\n\n':
txt=txt[:-1]
else : # replace tags - preserve unknown tags
t=r.group(0)
for l in t.splitlines():
if len(l)>1:
l+='\n'
else:
continue
if 'iface ' in re_dict.keys() and re_dict['iface '] != None :
if re.match(re_dict['iface '],l) :
l='iface '+src_dict['iface '] + ' ' + self.inet+src_dict[self.inet] + '\n'
re_dict['iface ']=None
re_dict[self.inet]=None
updated+=l
continue
for k in re_dict.keys():
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if len(src_dict[k])==0 :
l=''
else:
l=re.sub(re_dict[k],k+src_dict[k],l)
if len(l)>0 and l[-1]!='\n':
l+='\n'
re_dict[k]=None
break
if len(l)>2:
updated+=l
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
tail=''
if updated[-1] != '\n':
tail='\n'
updated+=tail
rpl=re.compile(r.group(0),flags=re.S|re.M)
txt=rpl.sub(updated,txt)
if txt[-2:] == '\n\n':
txt=txt[:-1]
ReplaceFileContentsAtomic(fname,txt)
return [0]
def GetValuesFromFile(self,fname,src_dict,re_dict):
if os.path.exists(fname) != True:
return
try:
with (open(fname,'r')) as F:
txt=F.read()
if 'iface ' in src_dict.keys():
srch=r'(^auto '+src_dict['iface ']+'$.*?^iface '+src_dict['iface ']+'.*?$|^iface '+src_dict['iface ']+'.*?$).*?((^auto )|(^iface )|(^$))'
r=re.search(srch,txt,flags=re.S|re.M)
if r == None:
return
txt=r.group(0)
for l in txt.splitlines():
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if k == self.inet:
src_dict[k]=l.split(k[-1])[3].strip('\n')
else:
src_dict[k]=l.split(k[-1])[1].strip('\n')
re_dict[k]=None
except:
raise
class redhatDistro(AbstractDistro):
def __init__(self):
super(redhatDistro,self).__init__()
class centosDistro(redhatDistro):
def __init__(self):
super(centosDistro,self).__init__()
class UbuntuDistro(debianDistro):
def __init__(self):
super(UbuntuDistro,self).__init__()
class LinuxMintDistro(UbuntuDistro):
def __init__(self):
super(LinuxMintDistro,self).__init__()
class fedoraDistro(redhatDistro):
def __init__(self):
super(fedoraDistro,self).__init__()
| MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/2.6x-2.7x/Scripts/nxIPAddress.py | Python | mit | 30,909 | 0.027727 |
"""
Support for Wink fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.wink/
"""
import logging
from homeassistant.components.fan import (
SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SUPPORT_DIRECTION,
SUPPORT_SET_SPEED, FanEntity)
from homeassistant.components.wink import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_AUTO = 'auto'
SPEED_LOWEST = 'lowest'
SUPPORTED_FEATURES = SUPPORT_DIRECTION + SUPPORT_SET_SPEED
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for fan in pywink.get_fans():
if fan.object_id() + fan.name() not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkFanDevice(fan, hass)])
class WinkFanDevice(WinkDevice, FanEntity):
"""Representation of a Wink fan."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['fan'].append(self)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self.wink.set_fan_direction(direction)
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
self.wink.set_state(True, speed)
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
self.wink.set_state(True, speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
self.wink.set_state(False)
@property
def is_on(self):
"""Return true if the entity is on."""
return self.wink.state()
@property
def speed(self) -> str:
"""Return the current speed."""
current_wink_speed = self.wink.current_fan_speed()
if SPEED_AUTO == current_wink_speed:
return SPEED_AUTO
if SPEED_LOWEST == current_wink_speed:
return SPEED_LOWEST
if SPEED_LOW == current_wink_speed:
return SPEED_LOW
if SPEED_MEDIUM == current_wink_speed:
return SPEED_MEDIUM
if SPEED_HIGH == current_wink_speed:
return SPEED_HIGH
return None
@property
def current_direction(self):
"""Return direction of the fan [forward, reverse]."""
return self.wink.current_fan_direction()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
wink_supported_speeds = self.wink.fan_speeds()
supported_speeds = []
if SPEED_AUTO in wink_supported_speeds:
supported_speeds.append(SPEED_AUTO)
if SPEED_LOWEST in wink_supported_speeds:
supported_speeds.append(SPEED_LOWEST)
if SPEED_LOW in wink_supported_speeds:
supported_speeds.append(SPEED_LOW)
if SPEED_MEDIUM in wink_supported_speeds:
supported_speeds.append(SPEED_MEDIUM)
if SPEED_HIGH in wink_supported_speeds:
supported_speeds.append(SPEED_HIGH)
return supported_speeds
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORTED_FEATURES
| PetePriority/home-assistant | homeassistant/components/wink/fan.py | Python | apache-2.0 | 3,231 | 0 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow input/output utilities."""
import collections
import json
import math
import os
import numpy as np
import tensorflow.compat.v1 as tf
class Features(object):
"""Feature keys."""
# Waveform(s) of audio observed at receiver(s).
RECEIVER_AUDIO = 'receiver_audio'
# Images of each source at each microphone, including reverberation.
# Images are real valued with shape [sources, microphones, length].
SOURCE_IMAGES = 'source_images'
# Boolean diarization labels of shape (sources, length) which indicates
# whether a source is active or not. For nonexisting source, it is all zeros.
DIARIZATION_LABELS = 'diarization_labels'
# Speaker indices (global indices which are contiguous over all training data
# starting with 0) that are present in this meeting or meeting chunk with
# shape (sources,). If number of speakers present in the meeting is less
# than sources, for a non-existing speaker/source, the speaker index is
# set to -1. Note that, for a meeting sub-block, we still have all the
# speaker indices in the meeting even if not all the speakers are present
# in that meeting sub-block.
SPEAKER_INDEX = 'speaker_indices'
def get_inference_spec(num_receivers=1,
num_samples=None):
"""Returns a specification of features in tf.Examples in roomsim format."""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
return spec
def get_roomsim_spec(num_sources,
num_receivers,
num_samples):
"""Returns a specification of features in tf.Examples in roomsim format.
Args:
num_sources: Expected number of sources.
num_receivers: Number of microphones in array.
num_samples: Expected length of sources in samples. 'None' for variable.
Returns:
Feature specifications suitable to pass to tf.parse_example.
"""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature(
[num_sources, num_receivers, num_samples], tf.float32)
return spec
def placeholders_from_spec(feature_spec):
"""Returns placeholders compatible with a given feature spec."""
placeholders = {}
for key, feature in feature_spec.items():
placeholders[key] = tf.placeholder(dtype=feature.dtype,
shape=[1] + feature.shape,
name=key)
return placeholders
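# Illustrative sketch (not part of the original module): building feed
# placeholders for single-channel inference. The channel and sample counts
# are assumptions for the example only.
#
#   spec = get_inference_spec(num_receivers=1, num_samples=16000)
#   placeholders = placeholders_from_spec(spec)
#   # placeholders['receiver_audio'] is a tf.placeholder with shape
#   # [1, 1, 16000] (batch, receivers, samples).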
def _read_meeting_list(meeting_list, meeting_length_type):
"""Reads meeting list from json file to get necessary information.
Args:
meeting_list: A meeting list read from a json file.
meeting_length_type: One of 'maximum', 'minimum' or 'average'.
Since typically meeting lengths are not fixed, we can
set the training/eval length to the maximum, minimum or average meeting
length in the json file based on the value of this argument. We
eventually pad or clip individual meetings to attain the desired constant
meeting length in our data reading pipeline.
Returns:
num_meetings: Number of meetings.
max_num_spk_per_meeting: Maximum number of speakers in a meeting.
max_num_utt_per_spk: Maximum number of utterances per speaker.
max_dia_seg_per_utt: Maximum diarization segments per utterance.
max_utt_length: Maximum utterance length.
meeting_length: Meeting length that will be used.
speaker_ids: A list of speaker ids that appear in meetings.
"""
max_num_spk_per_meeting = 0
max_num_utt_per_meeting = 0
meeting_lengths = []
speaker_id_to_count = collections.defaultdict(int)
num_meetings = len(meeting_list)
total_spk = 0
total_utt = 0
max_utt_length = 0
max_num_utt_per_spk = 0
max_dia_seg_per_utt = 0
for one_meeting in meeting_list:
sources_start_end = one_meeting['utterance_start_end']
meeting_length = int(one_meeting['duration'])
num_utt_in_meeting = len(sources_start_end)
max_num_utt_per_meeting = max(max_num_utt_per_meeting, num_utt_in_meeting)
utt2spk = []
spk2wavs = collections.defaultdict(list)
spk_utt_idx = collections.defaultdict(int)
for start, end, spkid, wav_path in sources_start_end:
max_utt_length = max(max_utt_length, end - start)
utt2spk.append(spkid)
spk2wavs[spkid].append(wav_path)
speaker_id_to_count[spkid] += 1
spk_utt_idx[spkid] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spkid] - 1]
num_seg_in_utt = len(diarization_info)
max_dia_seg_per_utt = max(max_dia_seg_per_utt, num_seg_in_utt)
speakers_in_meeting = list(set(utt2spk))
num_spk = len(speakers_in_meeting)
for spkid in speakers_in_meeting:
max_num_utt_per_spk = max(max_num_utt_per_spk,
len(set(spk2wavs[spkid])))
max_num_spk_per_meeting = max(max_num_spk_per_meeting, num_spk)
total_spk += num_spk
total_utt += num_utt_in_meeting
meeting_lengths.append(meeting_length)
if meeting_length_type == 'maximum':
meeting_length = int(math.ceil(np.max(meeting_lengths)))
elif meeting_length_type == 'minimum':
meeting_length = int(math.floor(np.min(meeting_lengths)))
elif meeting_length_type == 'average':
meeting_length = int(round(np.mean(meeting_lengths)))
elif isinstance(meeting_length_type, int):
meeting_length = meeting_length_type
else:
raise ValueError(f'Unknown meeting_length_type={meeting_length_type}')
speaker_ids = sorted(speaker_id_to_count.keys())
tf.logging.info('Read %s meetings from json file.', num_meetings)
tf.logging.info('Average number of speakers per meeting = %f.',
total_spk / num_meetings)
tf.logging.info('Average number of utterances per speaker = %f.',
total_utt / total_spk)
return (num_meetings, max_num_spk_per_meeting, max_num_utt_per_spk,
max_dia_seg_per_utt, max_utt_length,
meeting_length, speaker_ids)
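# Illustrative sketch (not part of the original module) of the meeting list
# layout that _read_meeting_list() and json_to_dataset() expect. Values are
# made up; only the keys and nesting are taken from the code above:
#
#   [
#     {
#       "duration": 480000,
#       "meeting_scale": 0.3,
#       "utterance_start_end": [[start, end, "spk0", "utt0.wav"], ...],
#       "utterance_gain": [0.9, ...],
#       "diarization_label": {"spk0": [[[seg_start, seg_end], ...], ...], ...}
#     },
#     ...
#   ]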
def _pad_mics_tf(signal, new_mics):
"""Pads new mic channels to an input tensor and returns the updated tensor.
Args:
signal: A tf.tensor of shape (input_mics, samples)
new_mics: The number of new mic channels to be added (integer scalar tensor)
Returns:
padded_signal: A tf.tensor of shape (input_mics + new_mics, samples)
"""
# Take first new_mics channels and shift them by 1 sample.
new_inputs = tf.roll(signal[:new_mics, :], shift=1, axis=-1)
# Add noise 1e-3 times the RMS value in the signal.
noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(new_inputs)))
new_inputs += noise_scale * tf.random.normal(tf.shape(new_inputs))
return tf.concat((signal, new_inputs), axis=0)
def json_to_dataset(json_file,
batch_size,
parallel_readers=tf.data.experimental.AUTOTUNE,
randomize_order=False,
num_examples=-1,
prefetch_buffer_size=tf.data.experimental.AUTOTUNE,
shuffle_buffer_size=5,
repeat=True,
num_mics=1,
sample_rate=16000,
use_relative_path=True,
meeting_length_type='maximum',
num_meeting_subdivisions=1,
sensor_noise_range=(0.0, 0.0)):
r"""Fetches features from a dictionary and source .wav files.
Args:
json_file: A json file containing meeting information.
batch_size: The number of examples to read.
parallel_readers: Number of dataset.map operations that should happen in
parallel.
randomize_order: Whether to randomly shuffle features.
num_examples: Limit number of examples to this value. Unlimited if -1.
prefetch_buffer_size: How many batches to prefecth.
shuffle_buffer_size: The size of the shuffle buffer.
repeat: If True, repeat the dataset.
num_mics: The expected number of mics in source wav files.
sample_rate: Sample rate of wav files read.
use_relative_path: If True, the path for .wav files is relative to the
json file, otherwise, the paths are absolute.
meeting_length_type: 'maximum', 'minimum' or 'average'. Can also specify
an integer value which is the length in samples, which will be used.
num_meeting_subdivisions: If > 1, chop the meeting in time into this
many chunks.
sensor_noise_range: Range of standard deviation for sensor noise. If
sensor_noise_range[1] <= 0.0, then no sensor noise is added. Otherwise,
white Gaussian sensor noise with uniformly random standard deviation
from the provided range is added as the first reference signal.
Returns:
A batch_size number of features constructed from wav files.
Raises:
ValueError if max_sources_override is less than assumed max number sources.
"""
tf.logging.info('Reading %s.', json_file)
with open(json_file, 'r') as f:
meeting_list = json.load(f)
(num_meetings, max_num_spk, max_num_utt_per_spk, max_dia_seg_per_utt,
max_utt_length, samples, speaker_id_list) = _read_meeting_list(
meeting_list, meeting_length_type)
tf.logging.info('Maximum number of speakers per meeting = %s', max_num_spk)
tf.logging.info('Maximum number of utterances per speaker = %s',
max_num_utt_per_spk)
tf.logging.info('Maximum diarization segments per utterance = %s',
max_dia_seg_per_utt)
tf.logging.info('Maximum utterance length in seconds = %s',
max_utt_length/sample_rate)
tf.logging.info('Used meeting length in seconds = %s', samples/sample_rate)
tf.logging.info('Number of speakers seen in all meetings = %s',
len(speaker_id_list))
tf.logging.info('Using %s parallel readers.', parallel_readers)
tf.logging.info('shuffle_buffer=%s, prefetch_buffer=%s, num_mics=%s, '
'randomize=%s.', shuffle_buffer_size, prefetch_buffer_size,
num_mics, randomize_order)
if use_relative_path:
base_path = os.path.dirname(json_file)
spkid2idx = {key: idx for idx, key in enumerate(speaker_id_list)}
def utterance_info_generator():
"""Yields utterance informations from each meeting.
Utterance info is in the form of a 6-tuple:
wav_path, diarization, spkidx, meeting_scale, start, gain.
"""
default_diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
default_utt = ('0', default_diarization, -1, 0.0, 0, 0.0)
for one_meeting in meeting_list:
meeting_info = collections.defaultdict(list)
sources_start_end = one_meeting['utterance_start_end']
num_utt_in_meeting = len(sources_start_end)
spk_num_in_meeting = {}
new_spknum = 0
spkids_in_meeting = []
spk_utt_idx = collections.defaultdict(int)
meeting_scale = float(one_meeting['meeting_scale'])
for utt_idx in range(num_utt_in_meeting):
start, end, spkid, wav_path = sources_start_end[utt_idx]
spkidx = spkid2idx[spkid]
if start >= samples:
continue
if end >= samples:
end = samples
if spkidx in spk_num_in_meeting:
spknum = spk_num_in_meeting[spkidx]
else:
spknum = new_spknum
if spknum > max_num_spk:
continue
spkids_in_meeting.append(spkidx)
spk_num_in_meeting[spkidx] = spknum
new_spknum += 1
if use_relative_path:
wav_path = os.path.join(base_path, wav_path)
gain = one_meeting['utterance_gain'][utt_idx]
# Make diarization_labels array.
diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
spk_utt_idx[spknum] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spknum] - 1]
# Go over diarization segments in utterance.
for i, segment_st_end in enumerate(diarization_info):
segment_start, segment_end = segment_st_end
if segment_start >= samples:
continue
if segment_end > samples:
segment_end = samples
adjusted_start = segment_start - start
adjusted_end = segment_end - start
diarization[i, 0] = adjusted_start
diarization[i, 1] = adjusted_end
meeting_info[spknum].append((wav_path, diarization, spkidx,
meeting_scale, start, gain))
for spknum in range(max_num_spk):
if spknum in meeting_info:
for utt in range(max_num_utt_per_spk):
if utt < len(meeting_info[spknum]):
yield meeting_info[spknum][utt]
else:
yield default_utt
else:
for utt in range(max_num_utt_per_spk):
yield default_utt
utterance_info_list = list(utterance_info_generator())
# No need for the original meeting_list from now on.
del meeting_list
num_utterances = len(utterance_info_list)
tensor_shape = [(num_utterances, 1),
(num_utterances, max_dia_seg_per_utt, 2),
(num_utterances, 1),
(num_utterances, 1),
(num_utterances, 1),
(num_utterances, 1)]
tensor_type = [np.string_, np.int32, np.int32, np.float32,
np.int32, np.float32]
(wav_paths, diarizations, spkindices, meeting_scales, start_samples,
utterance_gains) = [np.reshape(
tensor, tensor_shape[i]).astype(tensor_type[i]) for i, tensor in
enumerate(list(zip(*utterance_info_list)))]
dataset = tf.data.Dataset.from_tensor_slices(
(wav_paths, diarizations, spkindices, meeting_scales, start_samples,
utterance_gains))
if repeat:
dataset = dataset.repeat()
if randomize_order:
# Randomize meeting order for each epoch through the dataset.
dataset = dataset.batch(max_num_spk * max_num_utt_per_spk)
dataset = dataset.shuffle(num_meetings)
dataset = dataset.flat_map(
lambda w, d, s, m, t, u: tf.data.Dataset.from_tensor_slices(
(w, d, s, m, t, u)))
# Read in wav files.
def decode_wav(wav):
audio_bytes = tf.read_file(wav)
waveform, _ = tf.audio.decode_wav(audio_bytes,
desired_samples=max_utt_length)
waveform = tf.transpose(waveform)
num_read_mics = tf.shape(waveform)[0]
waveform = tf.cond(num_read_mics >= num_mics,
lambda: waveform[:num_mics, :],
lambda: _pad_mics_tf(waveform, num_mics - num_read_mics))
waveform = tf.reshape(waveform, (num_mics, max_utt_length))
return waveform
def decode_wav_or_return_zeros(wav, gain=1.0):
return tf.cond(
tf.equal(wav, '0'),
lambda: tf.zeros((num_mics, max_utt_length), dtype=tf.float32),
lambda: gain * decode_wav(wav))
def utterance_reader(wav_path, diarization, spkidx, meet_scale, start, gain):
"""Reads wave file for utterance and scale it."""
utt_tensor = decode_wav_or_return_zeros(wav_path[0], gain=gain)
return utt_tensor, diarization, spkidx, meet_scale, start
# Sandwich heavy IO part between prefetch's.
dataset = dataset.prefetch(parallel_readers)
dataset = dataset.map(utterance_reader,
num_parallel_calls=parallel_readers)
dataset = dataset.prefetch(parallel_readers)
def pad_utterance(utt_tensor, diarization, spkidx, meeting_scale, start):
"""Pads utterance to meeting length.
Args:
utt_tensor: Utterance with shape (num_mics, max_utt_length).
diarization: Diarization with shape (max_dia_seg_per_utt, 2).
spkidx: Speaker index (global) for the utterance.
meeting_scale: Target meeting scale.
start: Start index of utterance in the meeting.
Returns:
utt_tensor_padded: Padded utt tensor (num_mics, samples + max_utt_length)
diarization_padded: Diarization updated using the start index.
spkidx: Speaker index passed unchanged.
meeting_scale: Target meeting scale passed unchanged.
"""
start = start[0]
end_paddings = samples - start
utt_tensor_padded = tf.pad(utt_tensor, ((0, 0), (start, end_paddings)))
diarization_padded = start + diarization
return utt_tensor_padded, diarization_padded, spkidx, meeting_scale
dataset = dataset.map(pad_utterance,
num_parallel_calls=parallel_readers)
dataset = dataset.batch(max_num_utt_per_spk)
def make_reference(utt_tensor, diarization, spkidx, meeting_scale):
"""Makes a reference from fixed length utterance tensors.
Args:
utt_tensor: Utterances with shape
(max_num_utt_per_spk, num_mics, samples + max_utt_len)
diarization: Diarization ranges with shape
(max_num_utt_per_spk, max_dia_seg_per_utt, 2).
spkidx: Speaker indices (repeated) with shape (max_num_utt_per_spk)
meeting_scale: Target meeting scale (repeated).
Returns:
reference: Meeting audio with shape (num_mics, samples)
diarization_labels: tf.bool with shape (samples)
spkidx: Scalar speaker index.
meeting_scale: Target meeting scale.
"""
reference_waveform = tf.reduce_sum(utt_tensor, axis=0)
reference_waveform = reference_waveform[:, :samples]
diarization = tf.reshape(diarization,
(max_num_utt_per_spk * max_dia_seg_per_utt, 2))
active_samples_list = [
tf.range(diarization[i, 0], diarization[i, 1]) for i in
range(max_num_utt_per_spk * max_dia_seg_per_utt)]
active_samples = tf.reshape(
tf.concat(active_samples_list, axis=0), (-1, 1))
dia_full_init = tf.zeros((samples + max_utt_length, 1), dtype=tf.int32)
dia_full = tf.tensor_scatter_add(
dia_full_init, active_samples, tf.ones(tf.shape(active_samples),
dtype=tf.int32))
dia_full = tf.cast(dia_full[:samples, 0], dtype=tf.bool)
spkidx = spkidx[0]
meeting_scale = meeting_scale[0]
return reference_waveform, dia_full, spkidx, meeting_scale
dataset = dataset.map(make_reference,
num_parallel_calls=parallel_readers)
dataset = dataset.batch(max_num_spk)
# If num_meeting_subdivisions > 1, split time-dependent meeting data in time
# into num_meeting_subdivisions equal chunks. Note that speaker ids and
# meeting_scale are repeated for each chunk.
if num_meeting_subdivisions > 1:
def chop_meeting_data(reference_waveforms, diarization_labels, speaker_ids,
meeting_scale, nsplit=num_meeting_subdivisions):
samples = tf.shape(reference_waveforms)[-1]
new_samples = nsplit * (samples // nsplit)
reference_waveforms = tf.stack(
tf.split(reference_waveforms[..., :new_samples],
nsplit, axis=-1), axis=0)
diarization_labels = tf.stack(
tf.split(diarization_labels[..., :new_samples],
nsplit, axis=-1), axis=0)
speaker_ids = tf.reshape(speaker_ids, (1, max_num_spk))
speaker_ids = tf.broadcast_to(speaker_ids, (nsplit, max_num_spk))
meeting_scale = meeting_scale[0] * tf.ones((nsplit, max_num_spk))
return tf.data.Dataset.from_tensor_slices((reference_waveforms,
diarization_labels,
speaker_ids,
meeting_scale))
dataset = dataset.flat_map(chop_meeting_data)
samples = (samples // num_meeting_subdivisions)
# Build mixture and sources waveforms.
def combine_mixture_and_sources(reference_waveforms, diarization_labels,
speaker_ids, meeting_scale):
# waveforms has shape (num_sources, num_mics, num_samples).
speaker_ids = tf.reshape(speaker_ids, (max_num_spk,))
meeting_scale = meeting_scale[0]
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
current_mixture_scale = tf.reduce_max(tf.abs(mixture_waveform))
# Note that when meetings are chopped, we cannot apply a meeting level
# scale. Instead, we apply the scale in the chunk level so that each
# chunk has a maximum scale equal to the meeting_scale. However, we should
# not apply any gain to an all noise chunk to avoid amplifying the noise,
# so we try not to scale those chunks by checking the current_mixture_scale
# value.
scale_refs = tf.cond(current_mixture_scale > 0.005,
lambda: meeting_scale / current_mixture_scale,
lambda: 1.0)
reference_waveforms *= scale_refs
num_sources = max_num_spk
if sensor_noise_range[1] > 0.0:
num_sources += 1
sensor_noise_gain = tf.random.uniform((), minval=sensor_noise_range[0],
maxval=sensor_noise_range[1])
sensor_noise = sensor_noise_gain * tf.random.normal(
(1, num_mics, samples))
reference_waveforms = tf.concat(
(sensor_noise, reference_waveforms), axis=0)
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
reference_waveforms.set_shape((num_sources, num_mics, samples))
mixture_waveform.set_shape((num_mics, samples))
diarization_labels.set_shape((max_num_spk, samples))
speaker_ids.set_shape((max_num_spk,))
return {'receiver_audio': mixture_waveform,
'source_images': reference_waveforms,
'diarization_labels': diarization_labels,
'speaker_indices': speaker_ids,
}
dataset = dataset.map(combine_mixture_and_sources,
num_parallel_calls=parallel_readers)
if randomize_order and num_meeting_subdivisions > 1:
# It would be good to shuffle examples to avoid having all examples
# coming from a single meeting when we split a meeting.
dataset = dataset.shuffle(shuffle_buffer_size * num_meeting_subdivisions)
dataset = dataset.prefetch(prefetch_buffer_size)
dataset = dataset.take(num_examples)
dataset = dataset.batch(batch_size, drop_remainder=True)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
def input_fn(params):
"""An input function that uses params['feature_spec'].
Args:
params: A dictionary of experiment params.
Returns:
Features specified by params['feature_spec']. If 'inference' exists and is
True in params, then placeholders will be returned based on the spec in
params['inference_spec'], otherwise a dataset of examples read from
params['input_data'] will be returned.
"""
if params.get('inference', False):
feature_spec = params['inference_spec']
with tf.variable_scope('input_audio'):
return placeholders_from_spec(feature_spec)
else:
json_file = params.get('input_data', None)
io_params = params.get('io_params', {})
batch_size = params.get('batch_size', None)
randomize_order = params.get('randomize_order', False)
io_params['randomize_order'] = randomize_order
return json_to_dataset(json_file,
batch_size,
**io_params)
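# Illustrative sketch (not part of the original module): a minimal params
# dictionary for training-time use of input_fn(). The file path and values
# are assumptions for the example only.
#
#   params = {
#       'input_data': '/path/to/train_meetings.json',
#       'batch_size': 4,
#       'randomize_order': True,
#       'io_params': {'num_mics': 1, 'num_meeting_subdivisions': 2},
#   }
#   features = input_fn(params)
#   # features is a dict with 'receiver_audio', 'source_images',
#   # 'diarization_labels' and 'speaker_indices' tensors.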
| google-research/sound-separation | models/train/data_meeting_io.py | Python | apache-2.0 | 23,738 | 0.007162 |
class MethodMissing(object):
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
def method(*args, **kw):
return self.method_missing(name, *args, **kw)
return method
def method_missing(self, name, *args, **kw):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
class ValMissing(object):
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
return self.val_missing(name)
def val_missing(self, name):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
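

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): a subclass
    # overrides method_missing to handle calls to otherwise undefined methods.
    class Proxy(MethodMissing):
        def method_missing(self, name, *args, **kw):
            return 'called %s with %r %r' % (name, args, kw)

    print(Proxy().anything(1, key='value'))
    # -> called anything with (1,) {'key': 'value'}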
| ganow/gq | gq/missing.py | Python | mit | 788 | 0.002538 |
#Music Class and support functions
import pygame
import parameters
from filemanager import filemanager
from pygame.locals import *
from pygame import *
from pygame.mixer import *
#Pygame Module for Music and Sound
pigmusic = None
currentStdMusic=None
currentMenuMusic=None
currentType = None
def initmusic():
global pigmusic
#Init pygame mixer and music
print "music init GO"
try:
if pygame.mixer and not pygame.mixer.get_init():
pygame.mixer.init()
if not pygame.mixer:
print 'Warning, sound disabled'
else:
pigmusic=pygame.mixer.music
except (pygame.error):
print 'Warning, unable to init music'
print "music init OUT ",pigmusic
def upmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol <= 0.9:
pigmusic.set_volume(vol+0.1)
def downmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol > 0.0:
pigmusic.set_volume(vol-0.1)
def stopmusic():
global pigmusic
if not pygame.mixer.get_init():
return
if not pigmusic:
return
if pigmusic.get_busy():
pigmusic.stop()
def setvolume(vol):
global pigmusic
pigmusic.set_volume(vol)
def getcurrentStdMusic():
global currentStdMusic
return currentStdMusic
def getcurrentMenuMusic():
global currentMenuMusic
return currentMenuMusic
def returtostdmusic():
#called when we want to force the music to play std music
cur=currentStdMusic
cur.playmusic()
class Music:
def __init__(self, name, filename, musictype='std', vol=0.5):
self._name=name
self._file=filename
self._type=musictype
self._vol=vol
def playmusic(self,loop=-1):
global pigmusic,currentStdMusic,currentMenuMusic,currentType
print "music play",self._file
if not pigmusic:
initmusic()
if self._type == 'std':
#print "music std type current is ",currentType
if not currentStdMusic:
#print "music std no currentStdMusic, we create it with ",self._file
currentStdMusic=self
#print "is pigmusic busy ? ",pigmusic.get_busy()
if pigmusic.get_busy():
#print "music std, music is busy"
if currentType == 'std':
#print "music std, currentType is std isn't it : ",currentType
if currentStdMusic.getfile()==self._file:
#print "music std, same music don't do anything"
return
else:
#print "music std, not the same we change, currentStdMusic=",self._file
currentStdMusic=self
#print "is pigmusic busy ? ",pigmusic.get_busy()
if pigmusic.get_busy():
print " music std, music is busy"
if currentType == 'std':
print " music std, currentType is std isn't it : ",currentType
if currentStdMusic.getfile()==self._file:
print " music std, same music don't do anything"
return
else:
print " music std, not the same we change, currentStdMusic=",self._file
currentStdMusic=self
else:
print " music std, current type is menu isn't it :", currentType ," so we change it to std\n"
#we change menu slide to standard slide
currentType='std'
else:
#print "music std, current type is menu isn't it :", currentType ," so we change it to std\n"
#we change menu slide to standard slide
currentType='std'
else:
#print "music std, music is not busy we start it"
currentType='std'
currentStdMusic=self
else:
#print "music menu type current is ",currentType
if not currentMenuMusic:
#print "music menu no currentMenuMusic, we create it with ",self._file
currentMenuMusic=self
if pigmusic.get_busy():
#print "music menu, music is busy"
if currentType == 'menu':
#print "music menu, currentType is menu isn't it : ",currentType
if currentMenuMusic.getfile()==self._file:
#print "music menu, same music don't do anything"
#return
pass
else:
#print "music menu, not the same we change, currentMenuMusic=",self._file
currentMenuMusic=self
if pigmusic.get_busy():
print " music menu, music is busy"
if currentType == 'menu':
print " music menu, currentType is menu isn't it : ",currentType
if currentMenuMusic.getfile()==self._file:
print " music menu, same music don't do anything"
return
else:
print " music menu, not the same we change, currentMenuMusic=",self._file
currentMenuMusic=self
else:
print " music menu, current type is std isn't it :", currentType ," so we change it to menu\n"
#we change standard slide to menu slide
currentType='menu'
else:
#print "music menu, current type is std isn't it :", currentType ," so we change it to menu\n"
#we change standard slide to menu slide
currentType='menu'
else:
#print "music menu ,music is not busy we start it"
currentType='menu'
currentMenuMusic=self
pigmusic.load(filemanager.find_music(self._file))
pigmusic.set_volume(self._vol)
pigmusic.play(loop)
def getfile(self):
return self._file
def getname(self):
return self._name
def stopmusic(self):
print "we stop music!!!!! ",self._file
global pigmusic
if not pigmusic:
return
if pigmusic.get_busy():
if self._type == 'std':
if currentStdMusic.getfile()==self._file:
pigmusic.stop()
else:
if currentMenuMusic.getfile()==self._file:
pigmusic.stop()
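# Illustrative sketch (not part of the original module): a game scene would
# typically create one 'std' track and one 'menu' track and switch between
# them; the file names below are made-up examples.
#
#   level_track = Music('level', 'level_theme.ogg', musictype='std', vol=0.5)
#   menu_track = Music('menu', 'menu_theme.ogg', musictype='menu', vol=0.5)
#   level_track.playmusic()   # starts the standard music
#   menu_track.playmusic()    # switches to the menu music
#   returtostdmusic()         # forces the standard music back on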
| ilathid/ilathidEngine | engine/music.py | Python | epl-1.0 | 6,871 | 0.015282 |
import pytest
def test_unknown_virtual_host_is_503(docker_compose, nginxproxy):
r = nginxproxy.get("http://unknown.nginx-proxy.tld/port")
assert r.status_code == 503
def test_webA_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webA.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
def test_webB_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webB.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
| jwilder/nginx-proxy | test/test_multiple-hosts.py | Python | mit | 562 | 0.003559 |
# -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
# dictonary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
# imports an instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
'Modul is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
# adds the module to the list of instantieated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the module's version entry
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
raise VersionError('Old module version detected!' +
'Module: {} - Expected: {} - Found: {}'
.format(module_name, result[0][0], module_version))
############################################################################
def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
| andreas-kowasch/DomainSearch | DomainSearchViewer/additional/Scheduler.py | Python | bsd-2-clause | 3,767 | 0.003186 |
import copy
import six
from eclcli.common import command
from eclcli.common import utils
class ListUsage(command.Lister):
def get_parser(self, prog_name):
parser = super(ListUsage, self).get_parser(prog_name)
parser.add_argument(
"--From",
help="Date to list usage from",
metavar='<from>'
)
parser.add_argument(
"--to",
help="Date to list usage upto. Month of the parameter should be same as 'from'",
metavar='<to>'
)
parser.add_argument(
"--license-type",
help="Name of license type to list",
metavar='<license-type>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"from":parsed_args.From,
"to":parsed_args.to,
"license_type":parsed_args.license_type,
}
self.log.debug('search options: %s',search_opts)
columns = [
'ID', 'Type', 'Value', 'Unit', 'Name', 'Has License Key', 'Resource ID'
]
column_headers = columns
data = dh_client.usages.list(search_opts=search_opts)
return (column_headers,
(utils.get_item_properties(
s, columns
) for s in data))
class ShowUsageHistory(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowUsageHistory, self).get_parser(prog_name)
parser.add_argument(
"usage",
help="Usage id whose history to be shown",
metavar='<usage>'
)
parser.add_argument(
"--From",
help="Date to list usage from",
metavar='<from>'
)
parser.add_argument(
"--to",
help="Date to list usage upto. Month of the parameter should be same as 'from'",
metavar='<to>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"from":parsed_args.From,
"to":parsed_args.to
}
self.log.debug('search options: %s',search_opts)
rows = [
'Tenant ID',
'Unit',
'Resource ID',
'License Type',
'Histories'
]
row_headers = rows
data = dh_client.usages.get_histories(search_opts=search_opts,usage=parsed_args.usage)
return (row_headers, (utils.get_item_properties(
data, rows
        )))
| anythingrandom/eclcli | eclcli/dh/v2/usage.py | Python | apache-2.0 | 2,651 | 0.010562 |
#!/usr/bin/python3
# -*- coding: utf-8 -*
import cgi
import base64
import random
import requests, json
def getCurrentPhoto(currentPhotoMD5):
response = requests.get("http://localhost:9200/photodisplayer/photo/"+currentPhotoMD5)
jsonPhoto = json.loads(response.text)
return jsonPhoto
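# Illustrative sketch (not part of the original script) of the Elasticsearch
# document shape the functions below rely on; the MD5 id and note value are
# made-up examples:
#
#   GET /photodisplayer/photo/<md5>
#   {
#     "_id": "<md5>",
#     "_source": { "note": 12, ... }
#   }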
def add_note(currentPhotoMD5,ajout) :
jsonCurrentPhoto=getCurrentPhoto(currentPhotoMD5)
note = jsonCurrentPhoto["_source"]["note"]
jsonCurrentPhoto["_source"]["note"] = note+ajout
returnJson=jsonCurrentPhoto["_source"]
query2 = json.dumps(returnJson)
print(query2)
url="http://localhost:9200/photodisplayer/photo/"+jsonCurrentPhoto["_id"];
response2 = requests.put(url, data=query2)
print(json.loads(response2.text))
def ban(currentPhotoMD5):
jsonCurrentPhoto=getCurrentPhoto(currentPhotoMD5)
note = jsonCurrentPhoto["_source"]["note"]
jsonCurrentPhoto["_source"]["note"] = 0
returnJson=jsonCurrentPhoto["_source"]
query2 = json.dumps(returnJson)
url="http://localhost:9200/photodisplayer/photo/"+jsonCurrentPhoto["_id"];
response2 = requests.put(url, data=query2)
def getRandom():
query = json.dumps(
{
"query": {
"function_score": {
"functions": [
{
"random_score": {},
"weight": 1
},
{
"field_value_factor": {
"field": "note"
},
"weight": 1
}
],
"score_mode": "multiply"
}
}
})
response = requests.get("http://localhost:9200/photodisplayer/photo/_search?size=1", data=query)
results = json.loads(response.text)
photoMD5=results["hits"]["hits"][0]["_id"]
return photoMD5
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
#Init
html = ""
#Get the action
action = form.getvalue("action")
idcurrent = form.getvalue("idcurrent")
idprevious = form.getvalue("previous")
#Switch "action"
if action=="ban":
ban(idcurrent)
html="ok"
if action=="next":
html=getRandom();
if action=="like":
add_note(idcurrent,1)
html="ok"
if action=="dislike":
add_note(idcurrent,-1)
html="ok"
if action=="" or str(action) == "None":
getRandom();
mon_fichier = open("main.html", "r")
contenu = mon_fichier.read()
html=contenu
#Return the content
#print("<!--" +str(action) +"-->")
print(html)
| Annubis45/PhotoDisplayer | cgi-bin/index.py | Python | gpl-2.0 | 2,228 | 0.04623 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#rapidshare config
USER = ''
PASS = ''
USSL = False
import sys
import os
import urllib
import time
import re
import argparse
import md5
def say(message):
print message
def error(message):
print 'ERROR: ' + message
def progress(done, ind_size, size):
total = float((done * ind_size * 100) / size)
progress = float(done * ind_size / 1024)
speed = (float(done * ind_size) / float(time.time() - starttime)) / 1024
sys.stdout.write('Progress: %.0f%%, Complete: %.2fKb, Speed: %.3fkb/s\r' % (total, progress, speed))
sys.stdout.flush()
def download(src, dest):
global starttime
starttime = time.time()
filename, headers = urllib.urlretrieve(src, dest, progress)
sys.stdout.write('Complete: 100%\n')
sys.stdout.flush()
for a in headers:
if a.lower() == 'content-disposition':
filename = headers[a][headers[a].find('filename=') + 9:]
urllib.urlcleanup()
return filename
# Based on rsapiget by George Notaras
def rsdl(link, USER=None, PASS=None):
try:
rapidshare, files, fileid, filename = link.rsplit('/') [-4:]
except ValueError:
error('Invalid Rapidshare link')
return
if not rapidshare.endswith('rapidshare.com') or files != 'files':
error('Invalid Rapidshare link')
return
if USSL:
proto = 'https'
else:
proto = 'http'
say('Downloading: %s' % link)
if filename.endswith('.html'):
target_filename = filename[:-5]
else:
target_filename = filename
say('Save file as: %s' % target_filename)
params = {
'sub': 'download',
'fileid': fileid,
'filename': filename,
'try': '1',
'withmd5hex': '0',
}
if USER and PASS:
params.update({
'login': USER,
'password': PASS,
})
params_string = urllib.urlencode(params)
api_url = '%s://api.rapidshare.com/cgi-bin/rsapi.cgi' % proto
conn = urllib.urlopen('%s?%s' % (api_url, params_string))
data = conn.read()
conn.close()
try:
key, value = data.split(':')
except ValueError:
error(data)
return
try:
server, dlauth, countdown, remote_md5sum = value.split(',')
except ValueError:
error(data)
return
#free account wait
if int(countdown):
for t in range(int(countdown), 0, -1):
sys.stdout.write('Waiting for %s seconds..\r' % t)
sys.stdout.flush()
time.sleep(1)
say('Waited for %s seconds, downloading' % countdown)
dl_params = {
'sub': 'download',
'fileid': fileid,
'filename': filename,
}
if USER and PASS:
dl_params.update({
'login': USER,
'password': PASS,
})
else:
dl_params.update({
'dlauth': dlauth,
})
dl_params_string = urllib.urlencode(dl_params)
download_link = '%s://%s/cgi-bin/rsapi.cgi?%s' % (proto, server, dl_params_string)
download(download_link, target_filename)
def mfdl(link):
conn = urllib.urlopen(link)
data = conn.read()
conn.close()
dlink = re.search("kNO = \"(.*)\";", data).group(0)
dlink = dlink[7:-2]
filename = dlink.split('/')[5:]
say('Downloading: %s' % filename[0])
download(dlink, filename[0])
def hfdl(link, USER=None, PASS=None):
	# The target file name is derived from the link. The hotfile API call is
	# assumed to take the download link as a 'link' parameter alongside the
	# account credentials.
	apilink = 'http://api.hotfile.com/?action=getdirectdownloadlink'
	filename = link.rstrip('/').split('/')[-1]
	if USER and PASS:
		conn = urllib.urlopen(apilink + '&link=' + urllib.quote(link, safe='') + '&username=' + USER + '&password=' + PASS)
		data = conn.read()
		conn.close()
		if "premium required" in data:
			error('A premium account is required to download from hotfile.')
			return
		say('Downloading: %s' % filename)
		download(data, filename)
def checkLink(link, USER=None, PASS=None):
if "rapidshare.com" in link:
rsdl(link, USER, PASS)
elif "mediafire.com" in link:
mfdl(link)
elif "hotfile.com" in link:
if USER or PASS:
hfdl(link, USER, PASS)
else:
error('You need to enter a username and password for hotfile')
return
elif "http://" in link:
filename = link.split('/')
filename = filename[len(filename)-1]
say('Downloading: %s' % filename)
download(link, filename)
else:
error('Invalid or unsupported link')
return
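# Illustrative sketch (not part of the original script): batch mode is driven
# by a plain text file with one link per line (the URLs are placeholders):
#
#   links.txt
#       http://rapidshare.com/files/123456789/example.zip
#       http://www.mediafire.com/?abcdef123456
#
# and is invoked as:  python pyrf.py links.txt -u USER -p PASS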
def main():
parser = argparse.ArgumentParser(description='Command-line Python Rapidshare, Mediafire and Hotfile downloader.')
parser.add_argument('file_url')
parser.add_argument('--user', '-u')
parser.add_argument('--password', '-p')
USER = parser.parse_args().user
PASS = parser.parse_args().password
file_link = parser.parse_args().file_url
if ".txt" in file_link and not "http://" in file_link:
f = open(file_link, 'r')
if f.read(1) == '\xef':
f.seek(3)
file_list = list(f.readlines())
for item in file_list:
checkLink(item.strip('\n'), USER, PASS)
else:
checkLink(file_link, USER, PASS)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
error('\nAborted')
		sys.exit(1)
| yehialicious/pyrf | pyrf.py | Python | gpl-3.0 | 5,240 | 0.033206 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Reference.year'
db.add_column(u'citations_reference', 'year',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Reference.year'
db.delete_column(u'citations_reference', 'year')
models = {
u'citations.reference': {
'Meta': {'object_name': 'Reference'},
'abstract': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'edition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'series': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'BK'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['citations']
| will-hart/django-citations | citations/migrations/0006_auto__add_field_reference_year.py | Python | mit | 2,201 | 0.008178 |
# References:
#
# https://www.tensorflow.org/guide/low_level_intro
#
# only needed for python 2.7
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import numpy as np
from numpy import array
from numpy import float32
# a complete input set on 7 bits
# useful for training various sorts of data
bin7 = array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 1],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 0, 1],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 1],
[0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 1],
[0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 1, 1],
[1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 0],
[1, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0],
[1, 0, 1, 1, 0, 1, 1],
[1, 0, 1, 1, 1, 0, 0],
[1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 0, 0],
[1, 1, 0, 0, 1, 0, 1],
[1, 1, 0, 0, 1, 1, 0],
[1, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 1, 0, 0, 0],
[1, 1, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 0, 1, 0],
[1, 1, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 0, 0],
[1, 1, 0, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 1, 0],
[1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 1, 0, 0],
[1, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
])
'''
Train the network to count to 3
column 0: less than 3
column 1: exactly 3
column 2: more than 3
'''
count3 = array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
])
# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(8,
activation=keras.activations.sigmoid,
))
model.add(Dense(3,
activation=keras.activations.sigmoid,
))
model.compile(
optimizer=tf.train.AdamOptimizer(0.001),
# loss=keras.losses.categorical_crossentropy,
loss=keras.losses.mse,
metrics=[keras.metrics.binary_accuracy]
)
# This is the process I used to train my weights
# model.fit(bin7, count3, epochs=2000)
# myWeights = model.get_weights()
# np.set_printoptions(suppress=True)
# np.set_printoptions(precision=2)
# print('myWeights =', myWeights)
# These are the weights I got, pretty-printed
myWeights = [
# first layer, 7x8
array([[ 1.2 , -1.16, -1.97, 2.16, 0.97, 0.86, -1.2 , 1.12],
[ 1.21, -1.17, -1.97, 2.16, 0.84, 0.76, -1.19, 1.22],
[ 1.19, -1.2 , -1.98, 2.15, 0.87, 0.84, -1.19, 1.13],
[ 1.21, -1.2 , -1.97, 2.15, 0.89, 0.8 , -1.2 , 1.16],
[ 1.21, -1.12, -1.97, 2.16, 0.99, 0.8 , -1.21, 1.18],
[ 1.23, -1.09, -1.98, 2.15, 1.12, 0.81, -1.24, 1.13],
[ 1.24, -1.11, -1.99, 2.14, 1. , 0.77, -1.23, 1.17]],
dtype=float32),
# biases for 8 intermediate nodes
array([-4.57, 3.13, 4. , -4.44, -1.08, -3.11, 4.39, -4.35],
dtype=float32),
# second layer, 8x3
array([[-2.37, -1.54, 2.82],
[ 2.57, -0.09, -3. ],
[ 3.42, -2.18, -4.26],
[-3.27, 1.66, 2.1 ],
[-1.64, 0.12, -0.26],
[-1.85, -1.73, 2.25],
[ 2.71, 0.95, -4.85],
[-2.82, -1.4 , 2.69]], dtype=float32),
# biases for 3 output nodes
array([ 0.21, -0.39, -1.22], dtype=float32)
]
# test the model and your weights
# model.fit(bin7, count3, epochs=1)
# model.set_weights(myWeights)
# predict3 = model.predict(bin7)
# np.set_printoptions(suppress=True)
# np.set_printoptions(precision=1)
# print('prediction =', predict3)
Examples = {
'count3' : [ bin7, count3, model, myWeights ],
}
| WmHHooper/aima-python | submissions/aardvark/myNN.py | Python | mit | 8,182 | 0.004033 |
"""
.. image::
../_static/mongodb.png
`GridFS <https://docs.mongodb.com/manual/core/gridfs/>`_ is a specification for storing large files
(>16 MB) in MongoDB. See :py:mod:`~requests_cache.backends.mongodb` for more general info on MongoDB.
API Reference
^^^^^^^^^^^^^
.. automodsumm:: requests_cache.backends.gridfs
:classes-only:
:nosignatures:
"""
from logging import getLogger
from threading import RLock
from gridfs import GridFS
from gridfs.errors import CorruptGridFile, FileExists
from pymongo import MongoClient
from .._utils import get_valid_kwargs
from .base import BaseCache, BaseStorage
from .mongodb import MongoDict
logger = getLogger(__name__)
class GridFSCache(BaseCache):
"""GridFS cache backend.
Example:
>>> session = CachedSession('http_cache', backend='gridfs')
Args:
db_name: Database name
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name: str, **kwargs):
super().__init__(**kwargs)
self.responses = GridFSPickleDict(db_name, **kwargs)
self.redirects = MongoDict(
db_name, collection_name='redirects', connection=self.responses.connection, **kwargs
)
def remove_expired_responses(self, *args, **kwargs):
with self.responses._lock:
return super().remove_expired_responses(*args, **kwargs)
class GridFSPickleDict(BaseStorage):
"""A dictionary-like interface for a GridFS database
Args:
db_name: Database name
collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name, collection_name=None, connection=None, **kwargs):
super().__init__(**kwargs)
connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
self.connection = connection or MongoClient(**connection_kwargs)
self.db = self.connection[db_name]
self.fs = GridFS(self.db)
self._lock = RLock()
def __getitem__(self, key):
try:
with self._lock:
result = self.fs.find_one({'_id': key})
if result is None:
raise KeyError
return self.serializer.loads(result.read())
except CorruptGridFile as e:
logger.warning(e, exc_info=True)
raise KeyError
def __setitem__(self, key, item):
value = self.serializer.dumps(item)
encoding = None if isinstance(value, bytes) else 'utf-8'
with self._lock:
try:
self.fs.delete(key)
self.fs.put(value, encoding=encoding, **{'_id': key})
# This can happen because GridFS is not thread-safe for concurrent writes
except FileExists as e:
logger.warning(e, exc_info=True)
def __delitem__(self, key):
with self._lock:
res = self.fs.find_one({'_id': key})
if res is None:
raise KeyError
self.fs.delete(res._id)
def __len__(self):
return self.db['fs.files'].estimated_document_count()
def __iter__(self):
for d in self.fs.find():
yield d._id
def clear(self):
self.db['fs.files'].drop()
self.db['fs.chunks'].drop()
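# Rough usage sketch (illustrative only; 'example_db' and the key are
# placeholders, and values round-trip through whatever serializer BaseStorage
# was configured with):
#     storage = GridFSPickleDict('example_db')
#     storage['cache-key'] = {'status': 200, 'body': b'...'}
#     print(storage['cache-key'])
#     del storage['cache-key']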
| reclosedev/requests-cache | requests_cache/backends/gridfs.py | Python | bsd-2-clause | 3,582 | 0.002792 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# all regexes are case insensitive
normal_regexes = [
('standard_repeat',
# Show.Name.S01E02.S01E03.Source.Quality.Etc-Group
# Show Name - S01E02 - S01E03 - S01E04 - Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator
e(?P<extra_ep_num>\d+))+ # E03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('fov_repeat',
# Show.Name.1x02.1x03.Source.Quality.Etc-Group
# Show Name - 1x02 - 1x03 - 1x04 - Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
([. _-]+(?P=season_num)x # 1x
(?P<extra_ep_num>\d+))+ # 03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('standard',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
\(?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)\)? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
([. _,-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?)?$ # Group
"""),
('newpct',
# American Horror Story - Temporada 4 HDTV x264[Cap.408_409]SPANISH AUDIO -NEWPCT
# American Horror Story - Temporada 4 [HDTV][Cap.408][Espanol Castellano]
# American Horror Story - Temporada 4 HDTV x264[Cap.408]SPANISH AUDIO –NEWPCT)
r"""
(?P<series_name>.+?).-.+\d{1,2}[ ,.] # Show name: American Horror Story
(?P<extra_info>.+)\[Cap\. # Quality: HDTV x264, [HDTV], HDTV x264
(?P<season_num>\d{1,2}) # Season Number: 4
(?P<ep_num>\d{2}) # Episode Number: 08
((_\d{1,2}(?P<extra_ep_num>\d{2}))|.*\]) # Episode number2: 09
"""),
('fov',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
r"""
^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('scene_date_format',
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('scene_sports_format',
# Show.Name.100.Event.2010.11.23.Source.Quality.Etc-Group
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r"""
^(?P<series_name>.*?(UEFA|MLB|ESPN|WWE|MMA|UFC|TNA|EPL|NASCAR|NBA|NFL|NHL|NRL|PGA|SUPER LEAGUE|FORMULA|FIFA|NETBALL|MOTOGP).*?)[. _-]+
((?P<series_num>\d{1,3})[. _-]+)?
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))[. _-]+
((?P<extra_info>.+?)((?<![. _-])
(?<!WEB)-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$
"""),
('stupid',
# tpz-abc102
r"""
(?P<release_group>.+?)(?<!WEB)-\w+?[\. ]? # tpz-abc
(?!264) # don't count x264
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2})$ # 02
"""),
('verbose',
# Show Name Season 1 Episode 2 Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show Name and separator
(season|series)[. _-]+ # season and separator
(?P<season_num>\d+)[. _-]+ # 1
episode[. _-]+ # episode and separator
(?P<ep_num>\d+)[. _-]+ # 02 and separator
(?P<extra_info>.+)$ # Source_Quality_Etc-
"""),
('season_only',
# Show.Name.S01.Source.Quality.Etc-Group
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('no_season_multi_ep',
# Show.Name.E02-03
# Show.Name.E02.2010
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|(?<!e)[ivx]+)) # first ep num
((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner
(?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|(?<!e)[ivx]+))[. _-]) # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('no_season_general',
# Show.Name.E23.Test
# Show.Name.Part.3.Source.Quality.Etc-Group
# Show.Name.Part.1.and.Part.2.Blah-Group
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|((?<!e)[ivx]+(?=[. _-])))) # first ep num
([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner
((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part
(?P<extra_ep_num>(?!(1080|720|480)[pi])
(\d+|((?<!e)[ivx]+(?=[. _-]))))[. _-])* # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('bare',
# Show.Name.102.Source.Quality.Etc-Group
r"""
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2}) # 02 and separator
([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc-
(-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('no_season',
# Show Name - 01 - Ep Name
# 01 - Ep Name
# 01 - Ep Name
r"""
^((?P<series_name>.+?)(?:[. _-]{2,}|[. _]))? # Show_Name and separator
(?P<ep_num>\d{1,3}) # 02
(?:-(?P<extra_ep_num>\d{1,3}))* # -03-04-05 etc
(\s*(?:of)?\s*\d{1,3})? # of joiner (with or without spaces) and series total ep
[. _-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
]
anime_regexes = [
('anime_horriblesubs',
# [HorribleSubs] Maria the Virgin Witch - 01 [720p].mkv
r"""
^(?:\[(?P<release_group>HorribleSubs)\][\s\.])
(?:(?P<series_name>.+?)[\s\.]-[\s\.])
(?P<ep_ab_num>((?!(1080|720|480)[pi]))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?
(?:v(?P<version>[0-9]))?
(?:[\w\.\s]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
.*?
"""),
('anime_ultimate',
r"""
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]+?
(?:v(?P<version>[0-9]))?
(?:[\w\.]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
(?:[ ._]?\[(?P<crc>\w+)\])?
.*?
"""),
('anime_french_fansub',
# [Kaerizaki-Fansub]_One_Piece_727_[VOSTFR][HD_1280x720].mp4
# [Titania-Fansub]_Fairy_Tail_269_[VOSTFR]_[720p]_[1921E00C].mp4
# [ISLAND]One_Piece_726_[VOSTFR]_[V1]_[8bit]_[720p]_[2F7B3FA2].mp4
# Naruto Shippuden 445 VOSTFR par Fansub-Resistance (1280*720) - version MQ
# Dragon Ball Super 015 VOSTFR par Fansub-Resistance (1280x720) - HQ version
# [Z-Team][DBSuper.pw] Dragon Ball Super - 028 (VOSTFR)(720p AAC)(MP4)
# [SnF] Shokugeki no Souma - 24 VOSTFR [720p][41761A60].mkv
# [Y-F] Ao no Kanata no Four Rhythm - 03 Vostfr HD 8bits
# Phantasy Star Online 2 - The Animation 04 vostfr FHD
# Detective Conan 804 vostfr HD
# Active Raid 04 vostfr [1080p]
# Sekko Boys 04 vostfr [720p]
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (Optional)
     ((\[|\().+?(\]|\))[ ._-]*)? # Extra info (Optional)
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>\d{1,3})[ ._-]+ # Episode number and separator
(((\[|\())?(VOSTFR|vostfr|Vostfr|VostFR)((\]|\)))?([ ._-])*)+ # Subtitle Language and separator
     (par Fansub-Resistance)? # Sentence for special fansub (Optional)
(\[((v|V)(?P<version>[0-9]))\]([ ._-])*)? # Version and separator (Optional)
((\[(8|10)(Bits|bits|Bit|bit)\])?([ ._-])*)? # Colour resolution and separator (Optional)
((\[|\()((FHD|HD|SD)*([ ._-])*((?P<extra_info>\d{3,4}[xp*]?\d{0,4}[\.\w\s-]*)))(\]|\)))? # Source_Quality_Etc-
([ ._-]*\[(?P<crc>\w{8})\])? # CRC (Optional)
.* # Separator and EOL
"""),
('anime_standard',
# [Group Name] Show Name.13-14
# [Group Name] Show Name - 13-14
# Show Name 13-14
# [Group Name] Show Name.13
# [Group Name] Show Name - 13
# Show Name 13
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
"""),
('anime_standard_round',
# [Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]
# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\((?P<extra_info>(CX[ ._-]?)?\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
"""),
('anime_slash',
# [SGKK] Bleach 312v1 [720p/MKV]
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}p) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
"""),
('anime_standard_codec',
# [Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]
# [Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]
# [Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._]* # Show_Name and separator
([ ._-]+-[ ._-]+[A-Z]+[ ._-]+)?[ ._-]+ # funny stuff, this is sooo nuts ! this will kick me in the butt one day
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
([ ._-](\[\w{1,2}\])?\[[a-z][.]?\w{2,4}\])? #codec
[ ._-]*\[(?P<extra_info>(\d{3,4}[xp]?\d{0,4})?[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])?
.*? # Separator and EOL
"""),
('anime_codec_crc',
r"""
^(?:\[(?P<release_group>.*?)\][ ._-]*)?
(?:(?P<series_name>.*?)[ ._-]*)?
(?:(?P<ep_ab_num>(((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))[ ._-]*).+?
(?:\[(?P<codec>.*?)\][ ._-]*)
(?:\[(?P<crc>\w{8})\])?
.*?
"""),
('anime_SxxExx',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(\()?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)(\))? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('anime_and_normal',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
r"""
     ^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non-optional separator
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
     [eE](?P<ep_num>\d+) # episode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
     ([ ._-]{2,}|[ ._]+) # if "-" is used to separate, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
"""),
('anime_and_normal_x',
     # Bleach - 16x03-04 - 313-314
     # Bleach.16x03-04.313-314
     # Bleach 16x03-04 313-314
r"""
     ^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non-optional separator
(?P<season_num>\d+)[. _-]* # S01 and optional separator
     [xX](?P<ep_num>\d+) # episode 02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
     ([ ._-]{2,}|[ ._]+) # if "-" is used to separate, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
"""),
('anime_and_normal_reverse',
# Bleach - 313-314 - s16e03-04
r"""
     ^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non-optional separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))? # the version e.g. "v2"
     ([ ._-]{2,}|[ ._]+) # if "-" is used to separate, at least something else has to be there (->{2,}); "s16e03-04-313-314" would make sense anyway
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
     [eE](?P<ep_num>\d+) # episode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
"""),
('anime_and_normal_front',
# 165.Naruto Shippuuden.s08e014
r"""
^(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # start of string and absolute number
     (-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and additional absolute number, all optional
(v(?P<version>[0-9]))?[ ._-]+ # the version e.g. "v2"
(?P<series_name>.+?)[ ._-]+
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+)
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
"""),
('anime_ep_name',
r"""
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]*?
(?:v(?P<version>[0-9])[ ._-]+?)?
(?:.+?[ ._-]+?)?
\[(?P<extra_info>\w+)\][ ._-]?
(?:\[(?P<crc>\w{8})\])?
.*?
"""),
('anime_WarB3asT',
# 003. Show Name - Ep Name.ext
# 003-004. Show Name - Ep Name.ext
r"""
^(?P<ep_ab_num>\d{3,4})(-(?P<extra_ab_ep_num>\d{3,4}))?\.\s+(?P<series_name>.+?)\s-\s.*
"""),
('anime_bare',
# One Piece - 102
# [ACX]_Wolf's_Spirit_001.mkv
r"""
^(\[(?P<release_group>.+?)\][ ._-]*)?
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # v2
.*? # Separator and EOL
"""),
('anime_season_only',
# Show.Name.S01.Source.Quality.Etc-Group
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
""")
]
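# Minimal self-test sketch (illustrative; the real matching logic lives in the
# accompanying parser module). Each entry above is a verbose pattern, so it is
# compiled with re.VERBOSE, and re.IGNORECASE gives the case-insensitive
# behavior noted at the top of this file.
if __name__ == '__main__':
    import re
    sample = 'Show.Name.S01E02.Source.Quality.Etc-Group'
    for regex_name, pattern in normal_regexes:
        match = re.compile(pattern, re.VERBOSE | re.IGNORECASE).match(sample)
        if match:
            print('%s: %s S%sE%s' % (regex_name, match.group('series_name'),
                                     match.group('season_num'), match.group('ep_num')))
            break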
| Thraxis/pymedusa | sickbeard/name_parser/regexes.py | Python | gpl-3.0 | 23,532 | 0.0034 |
import os
from time import localtime, strftime
import re
import sys
from fabric.api import local, lcd, settings, task
from fabric.utils import puts
from blog_config import INPUT_PATH, OUTPUT_PATH
SETTINGS_FILE = 'blog_config'
# Load paths
ABS_DIR_PATH = os.path.dirname(os.path.abspath(__file__))
ABS_SETTINGS_FILE = os.path.join(ABS_DIR_PATH, SETTINGS_FILE)
# ABS_OUTPUT_PATH = os.path.join(ABS_DIR_PATH, os.path.normpath(OUTPUT_PATH))
ABS_INPUT_PATH = os.path.normpath(os.path.join(ABS_DIR_PATH, INPUT_PATH))
__all__ = ['generate_new_post']
@task(alias="np")
def generate_new_post(name = "", extension = ".md",
should_open = True, list_existing = False):
""" Make a new post """
if list_existing:
path = _post_path()
existing_files = os.listdir(path)
puts("Files in today's folder already:")
for n in existing_files:
puts("\t" + n)
if not name:
puts("Enter a post name, or 'quit' to exit':")
name = raw_input("\t:")
if name == "quit":
puts("Done!")
sys.exit(0)
path = _post_path()
file_name = _post_name(name) + extension
full_post_uri = os.path.join(path, file_name)
if not _name_is_unique(full_post_uri):
puts("Name not unique!")
generate_new_post(list_existing = True)
sys.exit(0)
puts("Generated new post: ", file_name)
puts("Stored it in: ", path)
puts("Adding default metadata")
_write_default_metadata(name, full_post_uri)
if should_open:
puts("Opening new post")
_open_file(full_post_uri)
else:
puts("Complete.")
sys.exit(0)
def _write_default_metadata(post_real_name, post_full_path):
# Control structure for metadata order
def load_config_or_else(key, default):
""" Try to load a value from config; if not found, return default """
try:
val = getattr(__import__(SETTINGS_FILE, globals(),
locals(), key.upper()), key.upper())
return val
except AttributeError:
return default
metadata_keys = [
"Title", "Author", "Date", "Slug", "Category", "Tags", "Summary", "Status"
]
metadata_defaults = {
"Title": post_real_name,
"Date": strftime("%Y-%m-%d %H:%M", localtime()),
"Category": "",
"Tags": "",
"Slug": os.path.basename(post_full_path[:-3]),
"Author": "",
"Summary": "",
"Status": "draft"
}
for key in metadata_keys:
metadata_defaults[key] = load_config_or_else(key, metadata_defaults[key])
with open(post_full_path, 'w') as pointer:
for key in metadata_keys:
pointer.write("%s: %s\n" % (key, metadata_defaults[key]))
def _name_is_unique(candidate_path):
""" Check if the generated path name is unique or not """
return False if os.path.isfile(candidate_path) else True
def _post_path():
""" Generate the correct post path and make sure it exists """
abs_path = os.path.join(ABS_INPUT_PATH, 'posts')
if not os.path.exists(abs_path):
local("mkdir -p %s" % abs_path)
return abs_path
def _post_name(input_string):
""" Generate a valid post name """
def is_not_empty(entry): return True if entry else False
    first_pass = re.sub(r"\s", "_", input_string.lower())
    second_pass = "".join(filter(is_not_empty, re.findall(r"\w", first_pass)))
third_pass = re.search("([a-z0-9]*_){,4}[a-z0-9]*", second_pass).group()
timestamp = strftime("%Y-%m-%d", localtime())
return "_".join([timestamp, third_pass])
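# For example (illustrative; the date prefix depends on when it runs):
#   _post_name("Hello, World! This is a Very Long Post Name")
#   -> "2016-01-05_hello_world_this_is_a"  (timestamp plus at most the first five words)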
def _open_file(filepath):
""" Open the given file for editing """
cmd = "$EDITOR " + filepath
local(cmd)
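# Typical invocation, assuming Fabric loads this module as (or via) the fabfile:
#   fab np                        # prompts for a post name
#   fab np:name="my new post"     # creates posts/<date>_my_new_post.md and opens it in $EDITOR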
| Gastove/blogric | new_post.py | Python | epl-1.0 | 3,746 | 0.004271 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import warnings
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.annotations import experimental
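# For context, a rough sketch of the decorators under test (see
# apache_beam.utils.annotations for the real implementation): each one wraps
# the target function and emits a warning when it is called, e.g.
#
#   @deprecated(since='2.0', current='new_fn', extra_message='Use new_fn.')
#   def old_fn():
#     ...
#
# Calling old_fn() then issues a DeprecationWarning whose message names
# old_fn, the 'since' version, and the suggested replacement.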
class AnnotationTests(unittest.TestCase):
  # Note: use different names for each of the functions decorated
# so that a warning is produced for each of them.
def test_deprecated_with_since_current_message(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1', current='multiply', extra_message='Do this')
def fnc_test_deprecated_with_since_current_message():
return 'lol'
fnc_test_deprecated_with_since_current_message()
self.check_annotation(
warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_with_since_current_message',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', True),
('Do this', True)])
def test_deprecated_with_since_current(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1', current='multiply')
def fnc_test_deprecated_with_since_current():
return 'lol'
fnc_test_deprecated_with_since_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_with_since_current',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', True)])
def test_deprecated_without_current(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1')
def fnc_test_deprecated_without_current():
return 'lol'
fnc_test_deprecated_without_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_without_current',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', False)])
def test_deprecated_without_since_should_fail(self):
with warnings.catch_warnings(record=True) as w:
with self.assertRaises(TypeError):
@deprecated()
def fnc_test_deprecated_without_since_should_fail():
return 'lol'
fnc_test_deprecated_without_since_should_fail()
assert not w
def test_experimental_with_current_message(self):
with warnings.catch_warnings(record=True) as w:
@experimental(current='multiply', extra_message='Do this')
def fnc_test_experimental_with_current_message():
return 'lol'
fnc_test_experimental_with_current_message()
self.check_annotation(
warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_with_current_message',
annotation_type='experimental',
label_check_list=[('instead', True),
('Do this', True)])
def test_experimental_with_current(self):
with warnings.catch_warnings(record=True) as w:
@experimental(current='multiply')
def fnc_test_experimental_with_current():
return 'lol'
fnc_test_experimental_with_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_with_current',
annotation_type='experimental',
label_check_list=[('instead', True)])
def test_experimental_without_current(self):
with warnings.catch_warnings(record=True) as w:
@experimental()
def fnc_test_experimental_without_current():
return 'lol'
fnc_test_experimental_without_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_without_current',
annotation_type='experimental',
label_check_list=[('instead', False)])
def test_frequency(self):
"""Tests that the filter 'once' is sufficient to print once per
warning independently of location."""
with warnings.catch_warnings(record=True) as w:
@experimental()
def fnc_test_annotate_frequency():
return 'lol'
@experimental()
def fnc2_test_annotate_frequency():
return 'lol'
fnc_test_annotate_frequency()
fnc_test_annotate_frequency()
fnc2_test_annotate_frequency()
self.check_annotation(warning=[w[0]], warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_annotate_frequency',
annotation_type='experimental',
label_check_list=[])
self.check_annotation(warning=[w[1]], warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc2_test_annotate_frequency',
annotation_type='experimental',
label_check_list=[])
# helper function
def check_annotation(self, warning, warning_size, warning_type, fnc_name,
annotation_type, label_check_list):
self.assertEqual(1, warning_size)
self.assertTrue(issubclass(warning[-1].category, warning_type))
self.assertIn(fnc_name + ' is ' + annotation_type, str(warning[-1].message))
for label in label_check_list:
if label[1] is True:
self.assertIn(label[0], str(warning[-1].message))
else:
self.assertNotIn(label[0], str(warning[-1].message))
if __name__ == '__main__':
unittest.main()
| shakamunyi/beam | sdks/python/apache_beam/utils/annotations_test.py | Python | apache-2.0 | 6,739 | 0.007716 |
############################################################################
#
# Copyright (C) 2015 The Qt Company Ltd.
# Contact: http://www.qt.io/licensing
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms and
# conditions see http://www.qt.io/terms-conditions. For further information
# use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, The Qt Company gives you certain additional
# rights. These rights are described in The Qt Company LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
import platform
from dumper import *
def qdump__QAtomicInt(d, value):
d.putValue(int(value["_q_value"]))
d.putNumChild(0)
def qdump__QBasicAtomicInt(d, value):
d.putValue(int(value["_q_value"]))
d.putNumChild(0)
def qdump__QAtomicPointer(d, value):
d.putType(value.type)
q = value["_q_value"]
p = toInteger(q)
d.putValue("@0x%x" % p)
d.putNumChild(1 if p else 0)
if d.isExpanded():
with Children(d):
d.putSubItem("_q_value", q.dereference())
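# The dumpers below generally follow the same minimal protocol (sketch only;
# the full Dumper/Children API is defined in dumper.py):
#
#   def qdump__SomeType(d, value):
#       d.putValue(...)         # one-line display value for the watch view
#       d.putNumChild(n)        # how many children the item can expand to
#       if d.isExpanded():
#           with Children(d):
#               d.putSubItem("member", value["member"])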
def qform__QByteArray():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qdump__QByteArray(d, value):
data, size, alloc = d.byteArrayData(value)
d.putNumChild(size)
elided, p = d.encodeByteArrayHelper(d.extractPointer(value), d.displayStringLimit)
displayFormat = d.currentItemFormat()
if displayFormat == AutomaticFormat or displayFormat == Latin1StringFormat:
d.putValue(p, Hex2EncodedLatin1, elided=elided)
elif displayFormat == SeparateLatin1StringFormat:
d.putValue(p, Hex2EncodedLatin1, elided=elided)
d.putField("editformat", DisplayLatin1String)
d.putField("editvalue", d.encodeByteArray(value, limit=100000))
elif displayFormat == Utf8StringFormat:
d.putValue(p, Hex2EncodedUtf8, elided=elided)
elif displayFormat == SeparateUtf8StringFormat:
d.putValue(p, Hex2EncodedUtf8, elided=elided)
d.putField("editformat", DisplayUtf8String)
d.putField("editvalue", d.encodeByteArray(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.charType())
def qdump__QByteArrayData(d, value):
data, size, alloc = d.byteArrayDataHelper(d.addressOf(value))
d.putValue(d.readMemory(data, size), Hex2EncodedLatin1)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem("size", size)
d.putIntItem("alloc", alloc)
def qdump__QChar(d, value):
d.putValue(int(value["ucs"]))
d.putNumChild(0)
def qform__QAbstractItemModel():
return [SimpleFormat, EnhancedFormat]
def qdump__QAbstractItemModel(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
#displayFormat == EnhancedFormat:
# Create a default-constructed QModelIndex on the stack.
try:
ri = d.makeValue(d.qtNamespace() + "QModelIndex", "-1, -1, 0, 0")
this_ = d.makeExpression(value)
ri_ = d.makeExpression(ri)
rowCount = int(d.parseAndEvaluate("%s.rowCount(%s)" % (this_, ri_)))
columnCount = int(d.parseAndEvaluate("%s.columnCount(%s)" % (this_, ri_)))
except:
d.putPlainChildren(value)
return
d.putValue("%d x %d" % (rowCount, columnCount))
d.putNumChild(rowCount * columnCount)
if d.isExpanded():
with Children(d, numChild=rowCount * columnCount, childType=ri.type):
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with SubItem(d, i):
d.putName("[%s, %s]" % (row, column))
mi = d.parseAndEvaluate("%s.index(%d,%d,%s)"
% (this_, row, column, ri_))
#warn("MI: %s " % mi)
#name = "[%d,%d]" % (row, column)
#d.putValue("%s" % mi)
d.putItem(mi)
i = i + 1
#warn("MI: %s " % mi)
#d.putName("[%d,%d]" % (row, column))
#d.putValue("%s" % mi)
#d.putNumChild(0)
#d.putType(mi.type)
#gdb.execute("call free($ri)")
def qform__QModelIndex():
return [SimpleFormat, EnhancedFormat]
def qdump__QModelIndex(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
r = value["r"]
c = value["c"]
try:
p = value["p"]
except:
p = value["i"]
m = value["m"]
if d.isNull(m) or r < 0 or c < 0:
d.putValue("(invalid)")
d.putPlainChildren(value)
return
mm = m.dereference()
mm = mm.cast(mm.type.unqualified())
ns = d.qtNamespace()
try:
mi = d.makeValue(ns + "QModelIndex", "%s,%s,%s,%s" % (r, c, p, m))
mm_ = d.makeExpression(mm)
mi_ = d.makeExpression(mi)
rowCount = int(d.parseAndEvaluate("%s.rowCount(%s)" % (mm_, mi_)))
columnCount = int(d.parseAndEvaluate("%s.columnCount(%s)" % (mm_, mi_)))
except:
d.putPlainChildren(value)
return
try:
# Access DisplayRole as value
val = d.parseAndEvaluate("%s.data(%s, 0)" % (mm_, mi_))
v = val["d"]["data"]["ptr"]
d.putStringValue(d.makeValue(ns + 'QString', v))
except:
d.putValue("")
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putFields(value, False)
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with UnnamedSubItem(d, i):
d.putName("[%s, %s]" % (row, column))
mi2 = d.parseAndEvaluate("%s.index(%d,%d,%s)"
% (mm_, row, column, mi_))
d.putItem(mi2)
i = i + 1
d.putCallItem("parent", value, "parent")
#gdb.execute("call free($mi)")
def qdump__QDate(d, value):
jd = int(value["jd"])
if jd:
d.putValue(jd, JulianDate)
d.putNumChild(1)
if d.isExpanded():
# FIXME: This improperly uses complex return values.
with Children(d):
if d.canCallLocale():
d.putCallItem("toString", value, "toString",
d.enumExpression("DateFormat", "TextDate"))
d.putCallItem("(ISO)", value, "toString",
d.enumExpression("DateFormat", "ISODate"))
d.putCallItem("(SystemLocale)", value, "toString",
d.enumExpression("DateFormat", "SystemLocaleDate"))
d.putCallItem("(Locale)", value, "toString",
d.enumExpression("DateFormat", "LocaleDate"))
d.putFields(value)
else:
d.putValue("(invalid)")
d.putNumChild(0)
def qdump__QTime(d, value):
mds = int(value["mds"])
if mds >= 0:
d.putValue(mds, MillisecondsSinceMidnight)
d.putNumChild(1)
if d.isExpanded():
# FIXME: This improperly uses complex return values.
with Children(d):
d.putCallItem("toString", value, "toString",
d.enumExpression("DateFormat", "TextDate"))
d.putCallItem("(ISO)", value, "toString",
d.enumExpression("DateFormat", "ISODate"))
if d.canCallLocale():
d.putCallItem("(SystemLocale)", value, "toString",
d.enumExpression("DateFormat", "SystemLocaleDate"))
d.putCallItem("(Locale)", value, "toString",
d.enumExpression("DateFormat", "LocaleDate"))
d.putFields(value)
else:
d.putValue("(invalid)")
d.putNumChild(0)
def qdump__QTimeZone(d, value):
base = d.extractPointer(value)
if base == 0:
d.putValue("(null)")
d.putNumChild(0)
return
idAddr = base + 2 * d.ptrSize() # [QSharedData] + [vptr]
d.putByteArrayValue(idAddr)
d.putPlainChildren(value["d"])
def qdump__QDateTime(d, value):
qtVersion = d.qtVersion()
isValid = False
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), ...
base = d.extractPointer(value)
is32bit = d.is32bit()
if qtVersion >= 0x050200:
if d.isWindowsTarget():
msecsOffset = 8
specOffset = 16
offsetFromUtcOffset = 20
timeZoneOffset = 24
statusOffset = 28 if is32bit else 32
else:
msecsOffset = 4 if is32bit else 8
specOffset = 12 if is32bit else 16
offsetFromUtcOffset = 16 if is32bit else 20
timeZoneOffset = 20 if is32bit else 24
statusOffset = 24 if is32bit else 32
status = d.extractInt(base + statusOffset)
if int(status & 0x0c == 0x0c): # ValidDate and ValidTime
isValid = True
msecs = d.extractInt64(base + msecsOffset)
spec = d.extractInt(base + specOffset)
offset = d.extractInt(base + offsetFromUtcOffset)
tzp = d.extractPointer(base + timeZoneOffset)
if tzp == 0:
tz = ""
else:
idBase = tzp + 2 * d.ptrSize() # [QSharedData] + [vptr]
tz = d.encodeByteArrayHelper(d.extractPointer(idBase), limit=100)
d.putValue("%s/%s/%s/%s/%s" % (msecs, spec, offset, tz, status),
DateTimeInternal)
else:
# This relies on the Qt4/Qt5 internal structure layout:
# {sharedref(4), date(8), time(4+x)}
# QDateTimePrivate:
# - QAtomicInt ref; (padded on 64 bit)
# - [QDate date;]
# - - uint jd in Qt 4, qint64 in Qt 5.0 and Qt 5.1; padded on 64 bit
# - [QTime time;]
# - - uint mds;
# - Spec spec;
dateSize = 8 if qtVersion >= 0x050000 else 4 # Qt5: qint64, Qt4 uint
# 4 byte padding after 4 byte QAtomicInt if we are on 64 bit and QDate is 64 bit
refPlusPadding = 8 if qtVersion >= 0x050000 and not d.is32bit() else 4
dateBase = base + refPlusPadding
timeBase = dateBase + dateSize
mds = d.extractInt(timeBase)
isValid = mds > 0
if isValid:
jd = d.extractInt(dateBase)
d.putValue("%s/%s" % (jd, mds), JulianDateAndMillisecondsSinceMidnight)
if isValid:
d.putNumChild(1)
if d.isExpanded():
# FIXME: This improperly uses complex return values.
with Children(d):
d.putCallItem("toTime_t", value, "toTime_t")
if d.canCallLocale():
d.putCallItem("toString", value, "toString",
d.enumExpression("DateFormat", "TextDate"))
d.putCallItem("(ISO)", value, "toString",
d.enumExpression("DateFormat", "ISODate"))
d.putCallItem("toUTC", value, "toTimeSpec",
d.enumExpression("TimeSpec", "UTC"))
d.putCallItem("(SystemLocale)", value, "toString",
d.enumExpression("DateFormat", "SystemLocaleDate"))
d.putCallItem("(Locale)", value, "toString",
d.enumExpression("DateFormat", "LocaleDate"))
d.putCallItem("toLocalTime", value, "toTimeSpec",
d.enumExpression("TimeSpec", "LocalTime"))
d.putFields(value)
else:
d.putValue("(invalid)")
d.putNumChild(0)
def qdump__QDir(d, value):
d.putNumChild(1)
privAddress = d.extractPointer(value)
bit32 = d.is32bit()
qt5 = d.qtVersion() >= 0x050000
# Change 9fc0965 reorders members again.
# bool fileListsInitialized;\n"
# QStringList files;\n"
# QFileInfoList fileInfos;\n"
# QStringList nameFilters;\n"
# QDir::SortFlags sort;\n"
# QDir::Filters filters;\n"
# Before 9fc0965:
# QDirPrivate:
# QAtomicInt ref
# QStringList nameFilters;
# QDir::SortFlags sort;
# QDir::Filters filters;
# // qt3support:
# QChar filterSepChar;
# bool matchAllDirs;
# // end qt3support
# QScopedPointer<QAbstractFileEngine> fileEngine;
# bool fileListsInitialized;
# QStringList files;
# QFileInfoList fileInfos;
# QFileSystemEntry dirEntry;
# QFileSystemEntry absoluteDirEntry;
# QFileSystemEntry:
# QString m_filePath
# QByteArray m_nativeFilePath
# qint16 m_lastSeparator
# qint16 m_firstDotInFileName
# qint16 m_lastDotInFileName
# + 2 byte padding
fileSystemEntrySize = 2 * d.ptrSize() + 8
if d.qtVersion() < 0x050200:
case = 0
elif d.qtVersion() >= 0x050300:
case = 1
else:
# Try to distinguish bool vs QStringList at the first item
# after the (padded) refcount. If it looks like a bool assume
# this is after 9fc0965. This is not safe.
firstValue = d.extractInt(privAddress + d.ptrSize())
case = 1 if firstValue == 0 or firstValue == 1 else 0
if case == 1:
if bit32:
filesOffset = 4
fileInfosOffset = 8
dirEntryOffset = 0x20
absoluteDirEntryOffset = 0x30
else:
filesOffset = 0x08
fileInfosOffset = 0x10
dirEntryOffset = 0x30
absoluteDirEntryOffset = 0x48
else:
# Assume this is before 9fc0965.
qt3support = d.isQt3Support()
qt3SupportAddition = d.ptrSize() if qt3support else 0
filesOffset = (24 if bit32 else 40) + qt3SupportAddition
fileInfosOffset = filesOffset + d.ptrSize()
dirEntryOffset = fileInfosOffset + d.ptrSize()
absoluteDirEntryOffset = dirEntryOffset + fileSystemEntrySize
d.putStringValue(privAddress + dirEntryOffset)
if d.isExpanded():
with Children(d):
ns = d.qtNamespace()
d.call(value, "count") # Fill cache.
#d.putCallItem("absolutePath", value, "absolutePath")
#d.putCallItem("canonicalPath", value, "canonicalPath")
with SubItem(d, "absolutePath"):
typ = d.lookupType(ns + "QString")
d.putItem(d.createValue(privAddress + absoluteDirEntryOffset, typ))
with SubItem(d, "entryInfoList"):
typ = d.lookupType(ns + "QList<" + ns + "QFileInfo>")
d.putItem(d.createValue(privAddress + fileInfosOffset, typ))
with SubItem(d, "entryList"):
typ = d.lookupType(ns + "QStringList")
d.putItem(d.createValue(privAddress + filesOffset, typ))
d.putFields(value)
def qdump__QFile(d, value):
# 9fc0965 and a373ffcd change the layout of the private structure
qtVersion = d.qtVersion()
is32bit = d.is32bit()
if qtVersion >= 0x050500:
if d.isWindowsTarget():
offset = 164 if is32bit else 248
else:
offset = 156 if is32bit else 248
elif qtVersion >= 0x050400:
if d.isWindowsTarget():
offset = 188 if is32bit else 272
else:
offset = 180 if is32bit else 272
elif qtVersion > 0x050200:
if d.isWindowsTarget():
offset = 180 if is32bit else 272
else:
offset = 176 if is32bit else 272
elif qtVersion >= 0x050000:
offset = 176 if is32bit else 280
else:
if d.isWindowsTarget():
offset = 144 if is32bit else 232
else:
offset = 140 if is32bit else 232
privAddress = d.extractPointer(d.addressOf(value) + d.ptrSize())
fileNameAddress = privAddress + offset
d.putStringValue(fileNameAddress)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem("exists", value, "exists")
d.putFields(value)
def qdump__QFileInfo(d, value):
privAddress = d.extractPointer(value)
#bit32 = d.is32bit()
#qt5 = d.qtVersion() >= 0x050000
#try:
# d.putStringValue(value["d_ptr"]["d"].dereference()["fileNames"][3])
#except:
# d.putPlainChildren(value)
# return
filePathAddress = privAddress + d.ptrSize()
d.putStringValue(filePathAddress)
d.putNumChild(1)
if d.isExpanded():
ns = d.qtNamespace()
with Children(d, childType=d.lookupType(ns + "QString")):
d.putCallItem("absolutePath", value, "absolutePath")
d.putCallItem("absoluteFilePath", value, "absoluteFilePath")
d.putCallItem("canonicalPath", value, "canonicalPath")
d.putCallItem("canonicalFilePath", value, "canonicalFilePath")
d.putCallItem("completeBaseName", value, "completeBaseName")
d.putCallItem("completeSuffix", value, "completeSuffix")
d.putCallItem("baseName", value, "baseName")
if False:
#ifdef Q_OS_MACX
d.putCallItem("isBundle", value, "isBundle")
d.putCallItem("bundleName", value, "bundleName")
d.putCallItem("fileName", value, "fileName")
d.putCallItem("filePath", value, "filePath")
# Crashes gdb (archer-tromey-python, at dad6b53fe)
#d.putCallItem("group", value, "group")
#d.putCallItem("owner", value, "owner")
d.putCallItem("path", value, "path")
d.putCallItem("groupid", value, "groupId")
d.putCallItem("ownerid", value, "ownerId")
#QFile::Permissions permissions () const
perms = d.call(value, "permissions")
if perms is None:
d.putValue("<not available>")
else:
with SubItem(d, "permissions"):
d.putEmptyValue()
d.putType(ns + "QFile::Permissions")
d.putNumChild(10)
if d.isExpanded():
with Children(d, 10):
perms = perms['i']
d.putBoolItem("ReadOwner", perms & 0x4000)
d.putBoolItem("WriteOwner", perms & 0x2000)
d.putBoolItem("ExeOwner", perms & 0x1000)
d.putBoolItem("ReadUser", perms & 0x0400)
d.putBoolItem("WriteUser", perms & 0x0200)
d.putBoolItem("ExeUser", perms & 0x0100)
d.putBoolItem("ReadGroup", perms & 0x0040)
d.putBoolItem("WriteGroup", perms & 0x0020)
d.putBoolItem("ExeGroup", perms & 0x0010)
d.putBoolItem("ReadOther", perms & 0x0004)
d.putBoolItem("WriteOther", perms & 0x0002)
d.putBoolItem("ExeOther", perms & 0x0001)
#QDir absoluteDir () const
#QDir dir () const
d.putCallItem("caching", value, "caching")
d.putCallItem("exists", value, "exists")
d.putCallItem("isAbsolute", value, "isAbsolute")
d.putCallItem("isDir", value, "isDir")
d.putCallItem("isExecutable", value, "isExecutable")
d.putCallItem("isFile", value, "isFile")
d.putCallItem("isHidden", value, "isHidden")
d.putCallItem("isReadable", value, "isReadable")
d.putCallItem("isRelative", value, "isRelative")
d.putCallItem("isRoot", value, "isRoot")
d.putCallItem("isSymLink", value, "isSymLink")
d.putCallItem("isWritable", value, "isWritable")
d.putCallItem("created", value, "created")
d.putCallItem("lastModified", value, "lastModified")
d.putCallItem("lastRead", value, "lastRead")
d.putFields(value)
def qdump__QFixed(d, value):
v = int(value["val"])
d.putValue("%s/64 = %s" % (v, v/64.0))
d.putNumChild(0)
def qform__QFiniteStack():
return arrayForms()
def qdump__QFiniteStack(d, value):
alloc = int(value["_alloc"])
size = int(value["_size"])
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(value["_array"], size, d.templateArgument(value.type, 0))
def qdump__QFlags(d, value):
i = value["i"]
try:
enumType = d.templateArgument(value.type.unqualified(), 0)
d.putValue("%s (%s)" % (i.cast(enumType), i))
except:
d.putValue("%s" % i)
d.putNumChild(0)
def qform__QHash():
return mapForms()
def qdump__QHash(d, value):
def hashDataFirstNode(dPtr, numBuckets):
ePtr = dPtr.cast(nodeTypePtr)
bucket = dPtr.dereference()["buckets"]
for n in xrange(numBuckets - 1, -1, -1):
n = n - 1
if n < 0:
break
if d.pointerValue(bucket.dereference()) != d.pointerValue(ePtr):
return bucket.dereference()
bucket = bucket + 1
return ePtr;
def hashDataNextNode(nodePtr, numBuckets):
nextPtr = nodePtr.dereference()["next"]
if d.pointerValue(nextPtr.dereference()["next"]):
return nextPtr
start = (int(nodePtr.dereference()["h"]) % numBuckets) + 1
dPtr = nextPtr.cast(dataTypePtr)
bucket = dPtr.dereference()["buckets"] + start
for n in xrange(numBuckets - start):
if d.pointerValue(bucket.dereference()) != d.pointerValue(nextPtr):
return bucket.dereference()
bucket += 1
return nextPtr
keyType = d.templateArgument(value.type, 0)
valueType = d.templateArgument(value.type, 1)
anon = d.childAt(value, 0)
d_ptr = anon["d"]
e_ptr = anon["e"]
size = int(d_ptr["size"])
dataTypePtr = d_ptr.type # QHashData * = { Node *fakeNext, Node *buckets }
nodeTypePtr = d_ptr.dereference()["fakeNext"].type # QHashData::Node
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.checkRef(d_ptr["ref"])
d.putItemCount(size)
if d.isExpanded():
numBuckets = int(d_ptr.dereference()["numBuckets"])
innerType = e_ptr.dereference().type
isCompact = d.isMapCompact(keyType, valueType)
childType = valueType if isCompact else innerType
with Children(d, size, maxNumChild=1000, childType=childType):
j = 0
for i in d.childRange():
if i == 0:
node = hashDataFirstNode(d_ptr, numBuckets)
else:
node = hashDataNextNode(node, numBuckets)
it = node.dereference().cast(innerType)
with SubItem(d, i):
if isCompact:
key = it["key"]
if not key:
# LLDB can't access directly since it's in anonymous union
# for Qt4 optimized int keytype
key = it[1]["key"]
d.putMapName(key, j)
d.putItem(it["value"])
d.putType(valueType)
j += 1
else:
d.putItem(it)
def qform__QHashNode():
return mapForms()
def qdump__QHashNode(d, value):
key = value["key"]
if not key:
# LLDB can't access directly since it's in anonymous union
# for Qt4 optimized int keytype
key = value[1]["key"]
val = value["value"]
if d.isMapCompact(key.type, val.type):
d.putMapName(key)
d.putItem(val)
d.putType(value.type)
else:
d.putEmptyValue()
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putSubItem("key", key)
d.putSubItem("value", val)
def qHashIteratorHelper(d, value):
typeName = str(value.type)
hashTypeName = typeName[0:typeName.rfind("::")]
hashType = d.lookupType(hashTypeName)
keyType = d.templateArgument(hashType, 0)
valueType = d.templateArgument(hashType, 1)
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
# We need something like QHash<int, float>::iterator
# -> QHashNode<int, float> with 'proper' spacing,
# as space changes confuse LLDB.
innerTypeName = hashTypeName.replace("QHash", "QHashNode", 1)
node = value["i"].cast(d.lookupType(innerTypeName).pointer()).dereference()
key = node["key"]
if not key:
# LLDB can't access directly since it's in anonymous union
# for Qt4 optimized int keytype
key = node[1]["key"]
d.putSubItem("key", key)
d.putSubItem("value", node["value"])
def qdump__QHash__const_iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHash__iterator(d, value):
qHashIteratorHelper(d, value)
def qdump__QHostAddress(d, value):
# QHostAddress in Qt 4.5 (byte offsets)
# quint32 a (0)
# Q_IPV6ADDR a6 (4)
# protocol (20)
# QString ipString (24)
# QString scopeId (24 + ptrSize)
# bool isParsed (24 + 2 * ptrSize)
# QHostAddress in Qt 5.0
# QString ipString (0)
# QString scopeId (ptrSize)
# quint32 a (2*ptrSize)
# Q_IPV6ADDR a6 (2*ptrSize + 4)
# protocol (2*ptrSize + 20)
# bool isParsed (2*ptrSize + 24)
privAddress = d.extractPointer(value)
isQt5 = d.qtVersion() >= 0x050000
sizeofQString = d.ptrSize()
ipStringAddress = privAddress + (0 if isQt5 else 24)
isParsedAddress = privAddress + 24 + 2 * sizeofQString
# value.d.d->ipString
ipString = d.encodeString(ipStringAddress, limit=100)
if d.extractByte(isParsedAddress) and len(ipString) > 0:
d.putValue(ipString, Hex4EncodedLittleEndian)
else:
# value.d.d->protocol:
# QAbstractSocket::IPv4Protocol = 0
# QAbstractSocket::IPv6Protocol = 1
protoAddress = privAddress + 20 + (2 * sizeofQString if isQt5 else 0);
proto = d.extractInt(protoAddress)
if proto == 1:
# value.d.d->a6
a6Offset = 4 + (2 * sizeofQString if isQt5 else 0)
data = d.readMemory(privAddress + a6Offset, 16)
address = ':'.join("%x" % int(data[i:i+4], 16) for i in xrange(0, 32, 4))
scopeId = privAddress + sizeofQString + (0 if isQt5 else 24)
scopeId = d.encodeString(scopeId, limit=100)
d.putValue("%s%%%s" % (address, scopeId), IPv6AddressAndHexScopeId)
elif proto == 0:
# value.d.d->a
a = d.extractInt(privAddress + (2 * sizeofQString if isQt5 else 0))
a, n4 = divmod(a, 256)
a, n3 = divmod(a, 256)
a, n2 = divmod(a, 256)
a, n1 = divmod(a, 256)
d.putValue("%d.%d.%d.%d" % (n1, n2, n3, n4));
else:
d.putValue("<unspecified>")
d.putPlainChildren(value["d"]["d"].dereference())
def qdump__QIPv6Address(d, value):
#warn("IPV6.VALUE: %s" % value)
#warn("IPV6.ADDR: 0x%x" % d.addressOf(value))
#warn("IPV6.LOADADDR: 0x%x" % value.GetLoadAddress())
c = value["c"]
data = d.readMemory(d.addressOf(c), 16)
d.putValue(':'.join("%x" % int(data[i:i+4], 16) for i in xrange(0, 32, 4)))
#d.putValue('xx')
#d.putValue("0x%x - 0x%x" % (d.addressOf(value), d.addressOf(c)))
#d.putValue("0x%x - 0x%x" % (value.GetAddress(), c.GetAddress()))
#d.putValue("0x%x - 0x%x" % (value.GetLoadAddress(), c.GetLoadAddress()))
d.putPlainChildren(c)
def qform__QList():
return [DirectQListStorageFormat, IndirectQListStorageFormat]
def qdump__QList(d, value):
base = d.extractPointer(value)
begin = d.extractInt(base + 8)
end = d.extractInt(base + 12)
array = base + 16
if d.qtVersion() < 0x50000:
array += d.ptrSize()
d.check(begin >= 0 and end >= 0 and end <= 1000 * 1000 * 1000)
size = end - begin
d.check(size >= 0)
#d.checkRef(private["ref"])
innerType = d.templateArgument(value.type, 0)
d.putItemCount(size)
if d.isExpanded():
innerSize = innerType.sizeof
stepSize = d.ptrSize()
addr = array + begin * stepSize
# The exact condition here is:
# QTypeInfo<T>::isLarge || QTypeInfo<T>::isStatic
# but this data is available neither in the compiled binary nor
# in the frontend.
# So as first approximation only do the 'isLarge' check:
displayFormat = d.currentItemFormat()
if displayFormat == DirectQListStorageFormat:
isInternal = True
elif displayFormat == IndirectQListStorageFormat:
isInternal = False
else:
isInternal = innerSize <= stepSize and d.isMovableType(innerType)
if isInternal:
if innerSize == stepSize:
d.putArrayData(addr, size, innerType)
else:
with Children(d, size, childType=innerType):
for i in d.childRange():
p = d.createValue(addr + i * stepSize, innerType)
d.putSubItem(i, p)
else:
# about 0.5s / 1000 items
with Children(d, size, maxNumChild=2000, childType=innerType):
for i in d.childRange():
p = d.extractPointer(addr + i * stepSize)
x = d.createValue(p, innerType)
d.putSubItem(i, x)
def qform__QImage():
return [SimpleFormat, SeparateFormat]
def qdump__QImage(d, value):
# This relies on current QImage layout:
# QImageData:
# - QAtomicInt ref
# - int width, height, depth, nbytes
# - padding on 64 bit machines
# - qreal devicePixelRatio (+20 + padding) # Assume qreal == double, Qt 5 only
# - QVector<QRgb> colortable (+20 + padding + gap)
# - uchar *data (+20 + padding + gap + ptr)
    # [- uchar **jumptable, with Qt 3 support only]
# - enum format (+20 + padding + gap + 2 * ptr)
ptrSize = d.ptrSize()
isQt5 = d.qtVersion() >= 0x050000
offset = (3 if isQt5 else 2) * ptrSize
base = d.extractPointer(d.addressOf(value) + offset)
if base == 0:
d.putValue("(invalid)")
return
qt3Support = d.isQt3Support()
width = d.extractInt(base + 4)
height = d.extractInt(base + 8)
nbytes = d.extractInt(base + 16)
padding = d.ptrSize() - d.intSize()
pixelRatioSize = 8 if isQt5 else 0
jumpTableSize = ptrSize if qt3Support else 0
bits = d.extractPointer(base + 20 + padding + pixelRatioSize + ptrSize)
iformat = d.extractInt(base + 20 + padding + pixelRatioSize + jumpTableSize + 2 * ptrSize)
d.putValue("(%dx%d)" % (width, height))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem("width", width)
d.putIntItem("height", height)
d.putIntItem("nbytes", nbytes)
d.putIntItem("format", iformat)
with SubItem(d, "data"):
d.putValue("0x%x" % bits)
d.putNumChild(0)
d.putType("void *")
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
# This is critical for performance. Writing to an external
# file using the following is faster when using GDB.
# file = tempfile.mkstemp(prefix="gdbpy_")
# filename = file[1].replace("\\", "\\\\")
# gdb.execute("dump binary memory %s %s %s" %
# (filename, bits, bits + nbytes))
# d.putDisplay(DisplayImageFile, " %d %d %d %d %s"
# % (width, height, nbytes, iformat, filename))
d.putField("editformat", DisplayImageData)
d.put('editvalue="')
d.put('%08x%08x%08x%08x' % (width, height, nbytes, iformat))
d.put(d.readMemory(bits, nbytes))
d.put('",')
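# Rough sketch (an assumption, not part of the dumper protocol itself) of how
# the "editvalue" blob written above could be unpacked on the consumer side:
# four 8-digit hex fields (width, height, nbytes, format) followed by the
# hex-encoded pixel data returned by readMemory():
#   header, payload = blob[:32], blob[32:]
#   width, height, nbytes, fmt = (int(header[i:i + 8], 16) for i in range(0, 32, 8))
#   pixels = bytes.fromhex(payload)  # nbytes bytes of raw image data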
def qdump__QLinkedList(d, value):
dd = d.extractPointer(value)
ptrSize = d.ptrSize()
n = d.extractInt(dd + 4 + 2 * ptrSize);
ref = d.extractInt(dd + 2 * ptrSize);
d.check(0 <= n and n <= 100*1000*1000)
d.check(-1 <= ref and ref <= 1000)
d.putItemCount(n)
if d.isExpanded():
innerType = d.templateArgument(value.type, 0)
with Children(d, n, maxNumChild=1000, childType=innerType):
pp = d.extractPointer(dd)
for i in d.childRange():
d.putSubItem(i, d.createValue(pp + 2 * ptrSize, innerType))
pp = d.extractPointer(pp)
qqLocalesCount = None
def qdump__QLocale(d, value):
# Check for uninitialized 'index' variable. Retrieve size of
# QLocale data array from variable in qlocale.cpp.
# Default is 368 in Qt 4.8, 438 in Qt 5.0.1, the last one
# being 'System'.
#global qqLocalesCount
#if qqLocalesCount is None:
# #try:
# qqLocalesCount = int(value(ns + 'locale_data_size'))
# #except:
# qqLocalesCount = 438
#try:
# index = int(value["p"]["index"])
#except:
# try:
# index = int(value["d"]["d"]["m_index"])
# except:
# index = int(value["d"]["d"]["m_data"]...)
#d.check(index >= 0)
#d.check(index <= qqLocalesCount)
d.putStringValue(d.call(value, "name"))
d.putNumChild(0)
return
# FIXME: Poke back for variants.
if d.isExpanded():
ns = d.qtNamespace()
with Children(d, childType=d.lookupType(ns + "QChar"), childNumChild=0):
d.putCallItem("country", value, "country")
d.putCallItem("language", value, "language")
d.putCallItem("measurementSystem", value, "measurementSystem")
d.putCallItem("numberOptions", value, "numberOptions")
d.putCallItem("timeFormat_(short)", value,
"timeFormat", ns + "QLocale::ShortFormat")
d.putCallItem("timeFormat_(long)", value,
"timeFormat", ns + "QLocale::LongFormat")
d.putCallItem("decimalPoint", value, "decimalPoint")
d.putCallItem("exponential", value, "exponential")
d.putCallItem("percent", value, "percent")
d.putCallItem("zeroDigit", value, "zeroDigit")
d.putCallItem("groupSeparator", value, "groupSeparator")
d.putCallItem("negativeSign", value, "negativeSign")
d.putFields(value)
def qdump__QMapNode(d, value):
d.putEmptyValue()
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putSubItem("key", value["key"])
d.putSubItem("value", value["value"])
def qdumpHelper__Qt4_QMap(d, value):
anon = d.childAt(value, 0)
d_ptr = anon["d"].dereference()
e_ptr = anon["e"].dereference()
n = int(d_ptr["size"])
d.check(0 <= n and n <= 100*1000*1000)
d.checkRef(d_ptr["ref"])
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
keyType = d.templateArgument(value.type, 0)
valueType = d.templateArgument(value.type, 1)
it = e_ptr["forward"].dereference()
# QMapPayloadNode is QMapNode except for the 'forward' member, so
# its size is most likely the offset of the 'forward' member therein.
# Or possibly 2 * sizeof(void *)
# Note: Keeping the spacing in the type lookup
# below is important for LLDB.
            needle = d.qtNamespace() + "QMapNode<%s,%s>" % (keyType, valueType)
nodeType = d.lookupType(needle)
nodePointerType = nodeType.pointer()
# symbols reports payload size at wrong size 24
if d.isArmArchitecture() and d.isQnxTarget() and str(valueType) == 'QVariant':
payloadSize = 28
else:
payloadSize = nodeType.sizeof - 2 * nodePointerType.sizeof
with PairedChildren(d, n, useKeyAndValue=True,
keyType=keyType, valueType=valueType, pairType=nodeType):
for i in xrange(n):
base = it.cast(d.charPtrType()) - payloadSize
node = base.cast(nodePointerType).dereference()
with SubItem(d, i):
#d.putField("iname", d.currentIName)
d.putPair(node, i)
it = it.dereference()["forward"].dereference()
def qdumpHelper__Qt5_QMap(d, value):
d_ptr = value["d"].dereference()
n = int(d_ptr["size"])
d.check(0 <= n and n <= 100*1000*1000)
d.checkRef(d_ptr["ref"])
d.putItemCount(n)
if d.isExpanded():
if n > 10000:
n = 10000
keyType = d.templateArgument(value.type, 0)
valueType = d.templateArgument(value.type, 1)
# Note: Keeping the spacing in the type lookup
# below is important for LLDB.
needle = str(d_ptr.type).replace("QMapData", "QMapNode", 1)
nodeType = d.lookupType(needle)
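        # Walk the map's node tree in order (left child, node, right child),
        # emitting key/value pairs until n items have been produced.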
def helper(d, node, nodeType, i):
left = node["left"]
if not d.isNull(left):
i = helper(d, left.dereference(), nodeType, i)
if i >= n:
return i
nodex = node.cast(nodeType)
with SubItem(d, i):
d.putPair(nodex, i)
i += 1
if i >= n:
return i
right = node["right"]
if not d.isNull(right):
i = helper(d, right.dereference(), nodeType, i)
return i
with PairedChildren(d, n, useKeyAndValue=True,
keyType=keyType, valueType=valueType, pairType=nodeType):
node = d_ptr["header"]
helper(d, node, nodeType, 0)
def qform__QMap():
return mapForms()
def qdump__QMap(d, value):
if d.qtVersion() < 0x50000:
qdumpHelper__Qt4_QMap(d, value)
else:
qdumpHelper__Qt5_QMap(d, value)
def qform__QMultiMap():
return mapForms()
def qdump__QMultiMap(d, value):
qdump__QMap(d, value)
def qdump__QMetaObjectPrivate(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# int revision;
# int className;
# int classInfoCount, classInfoData;
# int methodCount, methodData;
# int propertyCount, propertyData;
# int enumeratorCount, enumeratorData;
# int constructorCount, constructorData; //since revision 2
# int flags; //since revision 3
# int signalCount; //since revision 4
d.putIntItem("revision", value["revision"])
d.putIntItem("methodCount", value["methodCount"])
d.putIntItem("propertyCount", value["propertyCount"])
d.putIntItem("enumeratorCount", value["enumeratorCount"])
d.putIntItem("constructorCount", value["constructorCount"])
d.putIntItem("flags", value["flags"])
d.putIntItem("signalCount", value["signalCount"])
def qdump__QMetaObject(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
dd = value["d"]
d.putSubItem("d", dd)
data = d.extractPointer(dd["data"])
propertyNames = d.staticQObjectPropertyNames(value)
propertyIndex = 0
for propertyName in propertyNames:
with SubItem(d, "property_%s" % propertyIndex):
d.putValue(propertyName)
propertyIndex += 1
#byteArrayDataType = d.lookupType(d.qtNamespace() + "QByteArrayData")
#byteArrayDataSize = byteArrayDataType.sizeof
#sd = d.extractPointer(dd["stringdata"])
#stringdata, size, alloc = d.byteArrayDataHelper(sd)
#propertyCount = d.extractInt(data + 24)
#propertyData = d.extractInt(data + 28)
## This is the 'data' member in the qt_meta_stringdata_qobject__*_t struct
#d.putIntItem("_byteArrayDataSize", byteArrayDataSize)
#d.putAddressItem("_data", data)
#d.putAddressItem("_sd_", stringdata)
#with SubItem(d, "_sd"):
# d.putValue(d.readMemory(stringdata, size), Hex2EncodedLatin1)
#with SubItem(d, "_cn"):
# d.putValue(d.readMemory(stringdata + d.extractInt(data + 4), size), Hex2EncodedLatin1)
#for i in range(propertyCount):
# with SubItem(d, "property_%s" % i):
# x = data + (propertyData + 3 * i) * 4
# literal = sd + d.extractInt(x) * byteArrayDataSize
# ldata, lsize, lalloc = d.byteArrayDataHelper(literal)
# d.putValue(d.readMemory(ldata, lsize), Hex2EncodedLatin1)
# d.putNumChild(1)
# if d.isExpanded():
# with Children(d):
# if d.isExpanded():
# d.putAddressItem("_literal", literal)
# d.putIntItem("__data", ldata)
# d.putIntItem("__size", lsize)
# d.putIntItem("__alloc", lalloc)
# d.putIntItem("name", d.extractInt(x))
# d.putIntItem("type", d.extractInt(x + 4))
# d.putIntItem("flags", d.extractInt(x + 8))
            methodCount = d.extractInt(data + 16)
            methodData = d.extractInt(data + 20)
            # char array in Qt 4, QByteArrayData array in Qt 5.
            stringdata = d.extractPointer(dd["stringdata"])
for i in range(methodCount):
with SubItem(d, "method_%s" % i):
x = data + (methodData + 5 * i) * 4
#d.putEmptyValue()
d.putValue(d.readCString(stringdata + d.extractInt(x)))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
if d.isExpanded():
d.putIntItem("name", d.extractInt(x))
d.putIntItem("argc", d.extractInt(x + 4))
d.putIntItem("argv", d.extractInt(x + 8))
d.putIntItem("type", d.extractInt(x + 12))
d.putIntItem("flags", d.extractInt(x + 16))
d.putSubItem("stringData", dd["stringdata"])
d.putIntItem("revision", d.extractInt(data))
d.putIntItem("className", d.extractInt(data + 4))
d.putIntItem("classInfoCount", d.extractInt(data + 8))
d.putIntItem("className", d.extractInt(data + 12))
d.putIntItem("methodCount", d.extractInt(data + 16))
d.putIntItem("methodData", d.extractInt(data + 20))
d.putIntItem("propertyCount", d.extractInt(data + 24))
d.putIntItem("propertyData", d.extractInt(data + 28))
d.putIntItem("enumeratorCount", d.extractInt(data + 32))
d.putIntItem("enumeratorData", d.extractInt(data + 36))
d.putIntItem("constructorCount", d.extractInt(data + 40))
d.putIntItem("constructorData", d.extractInt(data + 44))
d.putIntItem("flags", d.extractInt(data + 48))
d.putIntItem("signalCount", d.extractInt(data + 52))
def _qdump__QObject(d, value):
d.putQObjectNameValue(value)
ns = d.qtNamespace()
try:
privateTypeName = ns + "QObjectPrivate"
privateType = d.lookupType(privateTypeName)
staticMetaObject = value["staticMetaObject"]
except:
d.putPlainChildren(value)
return
#warn("SMO: %s " % staticMetaObject)
#warn("SMO DATA: %s " % staticMetaObject["d"]["stringdata"])
superData = staticMetaObject["d"]["superdata"]
#warn("SUPERDATA: %s" % superData)
#while not d.isNull(superData):
# superData = superData.dereference()["d"]["superdata"]
# warn("SUPERDATA: %s" % superData)
if privateType is None:
#d.putValue(d.cleanAddress(d.pointerValue(value))
d.putPlainChildren(value)
return
#warn("OBJECTNAME: %s " % objectName)
dd = value["d_ptr"]["d"]
d_ptr = dd.cast(privateType.pointer()).dereference()
#warn("D_PTR: %s " % d_ptr)
mo = d_ptr["metaObject"]
if d.isNull(mo):
mo = staticMetaObject
#warn("MO: %s " % mo)
#warn("MO.D: %s " % mo["d"])
metaData = mo["d"]["data"]
metaStringData = mo["d"]["stringdata"]
# This is char * in Qt 4 and ByteArrayData * in Qt 5.
# Force it to the char * data in the Qt 5 case.
try:
offset = metaStringData["offset"]
metaStringData = metaStringData.cast(d.charPtrType()) + int(offset)
except:
pass
#extradata = mo["d"]["extradata"] # Capitalization!
#warn("METADATA: %s " % metaData)
#warn("STRINGDATA: %s " % metaStringData)
#warn("TYPE: %s " % value.type)
#warn("INAME: %s " % d.currentIName)
d.putEmptyValue()
#QSignalMapper::staticMetaObject
#d.checkRef(d_ptr["ref"])
d.putNumChild(4)
if d.isExpanded():
with Children(d):
d.putQObjectGuts(value)
# Local data.
if privateTypeName != ns + "QObjectPrivate":
if not privateType is None:
with SubItem(d, "data"):
d.putEmptyValue()
d.putNoType()
d.putPlainChildren(d_ptr, False)
d.putFields(value)
# Parent and children.
if d.stripClassTag(str(value.type)) == ns + "QObject":
d.putSubItem("parent", d_ptr["parent"])
d.putSubItem("children", d_ptr["children"])
# Metaobject.
d.putSubItem("metaobject", mo)
# Dynamic Properties.
with SubItem(d, "dynamics"):
# Prolog
extraData = d_ptr["extraData"] # Capitalization!
if d.isNull(extraData):
dynamicPropertyCount = 0
else:
extraDataType = d.lookupType(
ns + "QObjectPrivate::ExtraData").pointer()
extraData = extraData.cast(extraDataType)
ed = extraData.dereference()
names = ed["propertyNames"]
values = ed["propertyValues"]
#userData = ed["userData"]
namesBegin = names["d"]["begin"]
namesEnd = names["d"]["end"]
namesArray = names["d"]["array"]
dynamicPropertyCount = namesEnd - namesBegin
d.putNoType()
d.putItemCount(dynamicPropertyCount)
if d.isExpanded() and d.isGdb:
import gdb
# FIXME: Make this global. Don't leak.
variant = "'%sQVariant'" % ns
# Avoid malloc symbol clash with QVector
gdb.execute("set $d = (%s*)calloc(sizeof(%s), 1)"
% (variant, variant))
gdb.execute("set $d.d.is_shared = 0")
with Children(d):
dummyType = d.voidPtrType().pointer()
namesType = d.lookupType(ns + "QByteArray")
valuesBegin = values["d"]["begin"]
valuesEnd = values["d"]["end"]
valuesArray = values["d"]["array"]
valuesType = d.lookupType(ns + "QVariant")
p = namesArray.cast(dummyType) + namesBegin
q = valuesArray.cast(dummyType) + valuesBegin
for i in xrange(dynamicPropertyCount):
with SubItem(d, i):
pp = p.cast(namesType.pointer()).dereference();
d.putField("key", d.encodeByteArray(pp))
d.putField("keyencoded", Hex2EncodedLatin1)
qq = q.cast(valuesType.pointer().pointer())
qq = qq.dereference();
d.putField("addr", d.cleanAddress(qq))
d.putField("exp", "*(%s*)%s"
% (variant, d.cleanAddress(qq)))
t = qdump__QVariant(d, qq)
# Override the "QVariant (foo)" output.
d.putBetterType(t)
p += 1
q += 1
# Connections.
with SubItem(d, "connections"):
d.putNoType()
connections = d_ptr["connectionLists"]
connectionListCount = 0
if not d.isNull(connections):
connectionListCount = connections["d"]["size"]
d.putItemCount(connectionListCount, 0)
if d.isExpanded():
pp = 0
with Children(d):
vectorType = d.fieldAt(connections.type.target(), 0).type
innerType = d.templateArgument(vectorType, 0)
# Should check: innerType == ns::QObjectPrivate::ConnectionList
p = gdb.Value(connections["p"]["array"]).cast(innerType.pointer())
for i in xrange(connectionListCount):
first = p.dereference()["first"]
while not d.isNull(first):
with SubItem(d, pp):
connection = first.dereference()
d.putItem(connection)
d.putValue(connection["callFunction"])
first = first["nextConnectionList"]
# We need to enforce some upper limit.
pp += 1
if pp > 1000:
break
p += 1
if pp < 1000:
d.putItemCount(pp)
# Active connection.
with SubItem(d, "currentSender"):
d.putNoType()
sender = d_ptr["currentSender"]
d.putPointerValue(sender)
if d.isNull(sender):
d.putNumChild(0)
else:
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# Sending object
d.putSubItem("object", sender["sender"])
# Signal in sending object
with SubItem(d, "signal"):
d.putValue(sender["signal"])
d.putNoType()
d.putNumChild(0)
# QObject
# static const uint qt_meta_data_QObject[] = {
# int revision;
# int className;
# int classInfoCount, classInfoData;
# int methodCount, methodData;
# int propertyCount, propertyData;
# int enumeratorCount, enumeratorData;
# int constructorCount, constructorData; //since revision 2
# int flags; //since revision 3
# int signalCount; //since revision 4
# // content:
# 4, // revision
# 0, // classname
# 0, 0, // classinfo
# 4, 14, // methods
# 1, 34, // properties
# 0, 0, // enums/sets
# 2, 37, // constructors
# 0, // flags
# 2, // signalCount
# /* 14 */
# // signals: signature, parameters, type, tag, flags
# 9, 8, 8, 8, 0x05,
# 29, 8, 8, 8, 0x25,
# /* 24 */
# // slots: signature, parameters, type, tag, flags
# 41, 8, 8, 8, 0x0a,
# 55, 8, 8, 8, 0x08,
# /* 34 */
# // properties: name, type, flags
# 90, 82, 0x0a095103,
# /* 37 */
# // constructors: signature, parameters, type, tag, flags
# 108, 101, 8, 8, 0x0e,
# 126, 8, 8, 8, 0x2e,
# 0 // eod
# };
# static const char qt_meta_stringdata_QObject[] = {
# "QObject\0\0destroyed(QObject*)\0destroyed()\0"
# "deleteLater()\0_q_reregisterTimers(void*)\0"
# "QString\0objectName\0parent\0QObject(QObject*)\0"
# "QObject()\0"
# };
# QSignalMapper
# static const uint qt_meta_data_QSignalMapper[] = {
# // content:
# 4, // revision
# 0, // classname
# 0, 0, // classinfo
# 7, 14, // methods
# 0, 0, // properties
# 0, 0, // enums/sets
# 0, 0, // constructors
# 0, // flags
# 4, // signalCount
# // signals: signature, parameters, type, tag, flags
# 15, 14, 14, 14, 0x05,
# 27, 14, 14, 14, 0x05,
# 43, 14, 14, 14, 0x05,
# 60, 14, 14, 14, 0x05,
# // slots: signature, parameters, type, tag, flags
# 77, 14, 14, 14, 0x0a,
# 90, 83, 14, 14, 0x0a,
# 104, 14, 14, 14, 0x08,
# 0 // eod
# };
# static const char qt_meta_stringdata_QSignalMapper[] = {
# "QSignalMapper\0\0mapped(int)\0mapped(QString)\0"
# "mapped(QWidget*)\0mapped(QObject*)\0"
# "map()\0sender\0map(QObject*)\0"
# "_q_senderDestroyed()\0"
# };
# const QMetaObject QSignalMapper::staticMetaObject = {
# { &QObject::staticMetaObject, qt_meta_stringdata_QSignalMapper,
# qt_meta_data_QSignalMapper, 0 }
# };
# // Meta enumeration helpers
# static inline void dumpMetaEnumType(QDumper &d, const QMetaEnum &me)
# {
# QByteArray type = me.scope()
# if !type.isEmpty())
# type += "::"
# type += me.name()
# d.putField("type", type.constData())
# }
#
# static inline void dumpMetaEnumValue(QDumper &d, const QMetaProperty &mop,
# int value)
# {
#
# const QMetaEnum me = mop.enumerator()
# dumpMetaEnumType(d, me)
# if const char *enumValue = me.valueToKey(value)) {
# d.putValue(enumValue)
# } else {
# d.putValue(value)
# }
# d.putField("numchild", 0)
# }
#
# static inline void dumpMetaFlagValue(QDumper &d, const QMetaProperty &mop,
# int value)
# {
# const QMetaEnum me = mop.enumerator()
# dumpMetaEnumType(d, me)
# const QByteArray flagsValue = me.valueToKeys(value)
# if flagsValue.isEmpty():
# d.putValue(value)
# else:
# d.putValue(flagsValue.constData())
# d.putNumChild(0)
# }
def qdump__QPixmap(d, value):
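    # The width/height live in the pixmap's private data object; the offset
    # of its d-pointer inside QPixmap differs between Qt 4 and Qt 5.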
offset = (3 if d.qtVersion() >= 0x050000 else 2) * d.ptrSize()
base = d.extractPointer(d.addressOf(value) + offset)
if base == 0:
d.putValue("(invalid)")
else:
width = d.extractInt(base + d.ptrSize())
height = d.extractInt(base + d.ptrSize() + 4)
d.putValue("(%dx%d)" % (width, height))
d.putNumChild(0)
def qdump__QPoint(d, value):
x = int(value["xp"])
y = int(value["yp"])
d.putValue("(%s, %s)" % (x, y))
d.putPlainChildren(value)
def qdump__QPointF(d, value):
x = float(value["xp"])
y = float(value["yp"])
d.putValue("(%s, %s)" % (x, y))
d.putPlainChildren(value)
def qdump__QRect(d, value):
def pp(l):
if l >= 0: return "+%s" % l
return l
x1 = int(value["x1"])
y1 = int(value["y1"])
x2 = int(value["x2"])
y2 = int(value["y2"])
w = x2 - x1 + 1
h = y2 - y1 + 1
d.putValue("%sx%s%s%s" % (w, h, pp(x1), pp(y1)))
d.putPlainChildren(value)
def qdump__QRectF(d, value):
def pp(l):
if l >= 0: return "+%s" % l
return l
x = float(value["xp"])
y = float(value["yp"])
w = float(value["w"])
h = float(value["h"])
d.putValue("%sx%s%s%s" % (w, h, pp(x), pp(y)))
d.putPlainChildren(value)
def qdump__QRegExp(d, value):
# value.priv.engineKey.pattern
privAddress = d.extractPointer(value)
engineKeyAddress = privAddress + d.ptrSize()
patternAddress = engineKeyAddress
d.putStringValue(patternAddress)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
# QRegExpPrivate:
# - QRegExpEngine *eng (+0)
# - QRegExpEngineKey: (+1ptr)
# - QString pattern; (+1ptr)
# - QRegExp::PatternSyntax patternSyntax; (+2ptr)
# - Qt::CaseSensitivity cs; (+2ptr +1enum +pad?)
# - bool minimal (+2ptr +2enum +2pad?)
# - QString t (+2ptr +2enum +1bool +3pad?)
# - QStringList captures (+3ptr +2enum +1bool +3pad?)
# FIXME: Remove need to call. Needed to warm up cache.
d.call(value, "capturedTexts") # create cache
ns = d.qtNamespace()
with SubItem(d, "syntax"):
# value["priv"]["engineKey"["capturedCache"]
address = engineKeyAddress + d.ptrSize()
typ = d.lookupType(ns + "QRegExp::PatternSyntax")
d.putItem(d.createValue(address, typ))
with SubItem(d, "captures"):
# value["priv"]["capturedCache"]
address = privAddress + 3 * d.ptrSize() + 12
typ = d.lookupType(ns + "QStringList")
d.putItem(d.createValue(address, typ))
def qdump__QRegion(d, value):
p = value["d"].dereference()["qt_rgn"]
if d.isNull(p):
d.putValue("<empty>")
d.putNumChild(0)
else:
# struct QRegionPrivate:
# int numRects;
# QVector<QRect> rects;
# QRect extents;
# QRect innerRect;
# int innerArea;
pp = d.extractPointer(p)
n = d.extractInt(pp)
d.putItemCount(n)
if d.isExpanded():
with Children(d):
v = d.ptrSize()
ns = d.qtNamespace()
rectType = d.lookupType(ns + "QRect")
d.putIntItem("numRects", n)
if d.qtVersion() >= 0x050400:
# Changed in ee324e4ed
d.putSubItem("extents", d.createValue(pp + 8 + v, rectType))
d.putSubItem("innerRect", d.createValue(pp + 8 + v + rectType.sizeof, rectType))
d.putIntItem("innerArea", d.extractInt(pp + 4))
rectsOffset = 8
else:
d.putSubItem("extents", d.createValue(pp + 2 * v, rectType))
d.putSubItem("innerRect", d.createValue(pp + 2 * v + rectType.sizeof, rectType))
d.putIntItem("innerArea", d.extractInt(pp + 2 * v + 2 * rectType.sizeof))
rectsOffset = v
# FIXME
try:
# Can fail if QVector<QRect> debuginfo is missing.
vectType = d.lookupType("%sQVector<%sQRect>" % (ns, ns))
d.putSubItem("rects", d.createValue(pp + rectsOffset, vectType))
except:
with SubItem(d, "rects"):
d.putItemCount(n)
d.putType("%sQVector<%sQRect>" % (ns, ns))
d.putNumChild(0)
def qdump__QScopedPointer(d, value):
d.putBetterType(d.currentType)
d.putItem(value["d"])
def qdump__QSet(d, value):
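    # QSet<T> is a thin wrapper around QHash<T, QHashDummyValue>; walk the
    # hash's bucket array and node chains directly to enumerate the keys.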
def hashDataFirstNode(dPtr, numBuckets):
ePtr = dPtr.cast(nodeTypePtr)
bucket = dPtr["buckets"]
for n in xrange(numBuckets - 1, -1, -1):
n = n - 1
if n < 0:
break
if d.pointerValue(bucket.dereference()) != d.pointerValue(ePtr):
return bucket.dereference()
bucket = bucket + 1
return ePtr
def hashDataNextNode(nodePtr, numBuckets):
nextPtr = nodePtr.dereference()["next"]
if d.pointerValue(nextPtr.dereference()["next"]):
return nextPtr
dPtr = nodePtr.cast(hashDataType.pointer()).dereference()
start = (int(nodePtr.dereference()["h"]) % numBuckets) + 1
bucket = dPtr.dereference()["buckets"] + start
for n in xrange(numBuckets - start):
if d.pointerValue(bucket.dereference()) != d.pointerValue(nextPtr):
return bucket.dereference()
bucket += 1
return nodePtr
anon = d.childAt(value, 0)
if d.isLldb: # Skip the inheritance level.
anon = d.childAt(anon, 0)
d_ptr = anon["d"]
e_ptr = anon["e"]
size = int(d_ptr.dereference()["size"])
d.check(0 <= size and size <= 100 * 1000 * 1000)
d.checkRef(d_ptr["ref"])
d.putItemCount(size)
if d.isExpanded():
hashDataType = d_ptr.type
nodeTypePtr = d_ptr.dereference()["fakeNext"].type
numBuckets = int(d_ptr.dereference()["numBuckets"])
innerType = e_ptr.dereference().type
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
if i == 0:
node = hashDataFirstNode(d_ptr, numBuckets)
else:
node = hashDataNextNode(node, numBuckets)
it = node.dereference().cast(innerType)
with SubItem(d, i):
key = it["key"]
if not key:
# LLDB can't access directly since it's in anonymous union
# for Qt4 optimized int keytype
key = it[1]["key"]
d.putItem(key)
def qdump__QSharedData(d, value):
d.putValue("ref: %s" % value["ref"]["_q_value"])
d.putNumChild(0)
def qdump__QSharedDataPointer(d, value):
d_ptr = value["d"]
if d.isNull(d_ptr):
d.putValue("(null)")
d.putNumChild(0)
else:
# This replaces the pointer by the pointee, making the
# pointer transparent.
try:
innerType = d.templateArgument(value.type, 0)
except:
d.putValue(d_ptr)
d.putPlainChildren(value)
return
d.putBetterType(d.currentType)
d.putItem(d_ptr.cast(innerType.pointer()).dereference())
def qdump__QSharedPointer(d, value):
qdump__QWeakPointer(d, value)
def qdump__QSize(d, value):
w = int(value["wd"])
h = int(value["ht"])
d.putValue("(%s, %s)" % (w, h))
d.putPlainChildren(value)
def qdump__QSizeF(d, value):
w = float(value["wd"])
h = float(value["ht"])
d.putValue("(%s, %s)" % (w, h))
d.putPlainChildren(value)
def qform__QStack():
return arrayForms()
def qdump__QStack(d, value):
qdump__QVector(d, value)
def qdump__QPolygonF(d, value):
qdump__QVector(d, value.cast(d.directBaseClass(value.type, 0)))
d.putBetterType(d.currentType)
def qdump__QPolygon(d, value):
qdump__QVector(d, value.cast(d.directBaseClass(value.type, 0)))
d.putBetterType(d.currentType)
def qdump__QGraphicsPolygonItem(d, value):
dptr = d.extractPointer(d.addressOf(value) + d.ptrSize()) # Skip vtable
# Assume sizeof(QGraphicsPolygonItemPrivate) == 400
offset = 308 if d.is32bit() else 384
data, size, alloc = d.vectorDataHelper(d.extractPointer(dptr + offset))
d.putItemCount(size)
d.putPlotData(data, size, d.lookupQtType("QPointF"))
def qdump__QStandardItem(d, value):
d.putBetterType(d.currentType)
try:
d.putItem(value["d_ptr"])
except:
d.putPlainChildren(value)
def qedit__QString(d, value, data):
d.call(value, "resize", str(len(data)))
(base, size, alloc) = d.stringData(value)
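    # QString stores UTF-16 code units, so write the new contents as shorts.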
d.setValues(base, "short", [ord(c) for c in data])
def qform__QString():
return [SimpleFormat, SeparateFormat]
def qdump__QString(d, value):
d.putStringValue(value)
data, size, alloc = d.stringData(value)
d.putNumChild(size)
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putField("editformat", DisplayUtf16String)
d.putField("editvalue", d.encodeString(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.lookupType(d.qtNamespace() + "QChar"))
def qdump__QStringData(d, value):
d.putStringValueByAddress(toInteger(value))
d.putNumChild(0)
def qdump__QHashedString(d, value):
stringType = d.directBaseClass(value.type)
qdump__QString(d, value.cast(stringType))
d.putBetterType(value.type)
def qdump__QQmlRefCount(d, value):
d.putItem(value["refCount"])
d.putBetterType(value.type)
def qdump__QStringRef(d, value):
if d.isNull(value["m_string"]):
d.putValue("(null)");
d.putNumChild(0)
return
s = value["m_string"].dereference()
data, size, alloc = d.stringData(s)
data += 2 * int(value["m_position"])
size = int(value["m_size"])
s = d.readMemory(data, 2 * size)
d.putValue(s, Hex4EncodedLittleEndian)
d.putPlainChildren(value)
def qdump__QStringList(d, value):
listType = d.directBaseClass(value.type)
qdump__QList(d, value.cast(listType))
d.putBetterType(value.type)
def qdump__QTemporaryFile(d, value):
qdump__QFile(d, value)
def qdump__QTextCodec(d, value):
name = d.call(value, "name")
d.putValue(d.encodeByteArray(name, limit=100), 6)
d.putNumChild(2)
if d.isExpanded():
with Children(d):
d.putCallItem("name", value, "name")
d.putCallItem("mibEnum", value, "mibEnum")
d.putFields(value)
def qdump__QTextCursor(d, value):
privAddress = d.extractPointer(value)
if privAddress == 0:
d.putValue("(invalid)")
d.putNumChild(0)
else:
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putValue(d.extractInt(positionAddress))
d.putNumChild(1)
if d.isExpanded():
with Children(d):
positionAddress = privAddress + 2 * d.ptrSize() + 8
d.putIntItem("position", d.extractInt(positionAddress))
d.putIntItem("anchor", d.extractInt(positionAddress + d.intSize()))
d.putCallItem("selected", value, "selectedText")
d.putFields(value)
def qdump__QTextDocument(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putCallItem("blockCount", value, "blockCount")
d.putCallItem("characterCount", value, "characterCount")
d.putCallItem("lineCount", value, "lineCount")
d.putCallItem("revision", value, "revision")
d.putCallItem("toPlainText", value, "toPlainText")
d.putFields(value)
def qform__QUrl():
return [SimpleFormat, SeparateFormat]
def qdump__QUrl(d, value):
if d.qtVersion() < 0x050000:
privAddress = d.extractPointer(value)
if not privAddress:
# d == 0 if QUrl was constructed with default constructor
d.putValue("<invalid>")
return
encodedOriginalAddress = privAddress + 8 * d.ptrSize()
d.putValue(d.encodeByteArrayHelper(d.extractPointer(encodedOriginalAddress), 100), Hex2EncodedLatin1)
d.putNumChild(8)
if d.isExpanded():
stringType = d.lookupType(d.qtNamespace() + "QString")
baType = d.lookupType(d.qtNamespace() + "QByteArray")
with Children(d):
# Qt 4 only decodes the original string if some detail is requested
d.putCallItem("scheme", value, "scheme")
d.putCallItem("userName", value, "userName")
d.putCallItem("password", value, "password")
d.putCallItem("host", value, "host")
d.putCallItem("path", value, "path")
d.putCallItem("query", value, "encodedQuery")
d.putCallItem("fragment", value, "fragment")
d.putCallItem("port", value, "port")
d.putFields(value)
else:
# QUrlPrivate:
# - QAtomicInt ref;
# - int port;
# - QString scheme;
# - QString userName;
# - QString password;
# - QString host;
# - QString path;
# - QString query;
# - QString fragment;
privAddress = d.extractPointer(value)
if not privAddress:
# d == 0 if QUrl was constructed with default constructor
d.putValue("<invalid>")
return
schemeAddr = privAddress + 2 * d.intSize()
scheme = d.encodeString(schemeAddr, limit=1000)
userName = d.encodeString(schemeAddr + 1 * d.ptrSize(), limit=100)
password = d.encodeString(schemeAddr + 2 * d.ptrSize(), limit=100)
host = d.encodeString(schemeAddr + 3 * d.ptrSize(), limit=100)
path = d.encodeString(schemeAddr + 4 * d.ptrSize(), limit=1000)
query = d.encodeString(schemeAddr + 5 * d.ptrSize(), limit=10000)
fragment = d.encodeString(schemeAddr + 6 * d.ptrSize(), limit=10000)
port = d.extractInt(d.extractPointer(value) + d.intSize())
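        # Assemble a UTF-16 hex-encoded URL for display: "3a002f002f00" is
        # "://", "4000" is "@" and "3a00" is ":" in UTF-16LE.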
url = scheme
url += "3a002f002f00"
if len(userName):
url += userName
url += "4000"
url += host
if port >= 0:
url += "3a00"
url += ''.join(["%02x00" % ord(c) for c in str(port)])
url += path
d.putValue(url, Hex4EncodedLittleEndian)
displayFormat = d.currentItemFormat()
if displayFormat == SeparateFormat:
d.putField("editformat", DisplayUtf16String)
d.putField("editvalue", url)
d.putNumChild(8)
if d.isExpanded():
stringType = d.lookupType(d.qtNamespace() + "QString")
with Children(d):
d.putIntItem("port", port)
d.putGenericItem("scheme", stringType, scheme, Hex4EncodedLittleEndian)
d.putGenericItem("userName", stringType, userName, Hex4EncodedLittleEndian)
d.putGenericItem("password", stringType, password, Hex4EncodedLittleEndian)
d.putGenericItem("host", stringType, host, Hex4EncodedLittleEndian)
d.putGenericItem("path", stringType, path, Hex4EncodedLittleEndian)
d.putGenericItem("query", stringType, query, Hex4EncodedLittleEndian)
d.putGenericItem("fragment", stringType, fragment, Hex4EncodedLittleEndian)
d.putFields(value)
def qdumpHelper_QVariant_0(d, blob):
# QVariant::Invalid
d.putBetterType("%sQVariant (invalid)" % d.qtNamespace())
d.putValue("(invalid)")
def qdumpHelper_QVariant_1(d, blob):
# QVariant::Bool
d.putBetterType("%sQVariant (bool)" % d.qtNamespace())
d.putValue("true" if blob.extractByte() else "false")
def qdumpHelper_QVariant_2(d, blob):
# QVariant::Int
d.putBetterType("%sQVariant (int)" % d.qtNamespace())
d.putValue("%s" % blob.extractInt())
def qdumpHelper_QVariant_3(d, blob):
# uint
d.putBetterType("%sQVariant (uint)" % d.qtNamespace())
d.putValue(blob.extractUInt())
def qdumpHelper_QVariant_4(d, blob):
# qlonglong
d.putBetterType("%sQVariant (qlonglong)" % d.qtNamespace())
d.putValue(blob.extractInt64())
def qdumpHelper_QVariant_5(d, blob):
# qulonglong
d.putBetterType("%sQVariant (qulonglong)" % d.qtNamespace())
d.putValue(blob.extractUInt64())
def qdumpHelper_QVariant_6(d, blob):
# QVariant::Double
d.putBetterType("%sQVariant (double)" % d.qtNamespace())
d.putValue(blob.extractDouble())
qdumpHelper_QVariants_A = [
qdumpHelper_QVariant_0,
qdumpHelper_QVariant_1,
qdumpHelper_QVariant_2,
qdumpHelper_QVariant_3,
qdumpHelper_QVariant_4,
qdumpHelper_QVariant_5,
qdumpHelper_QVariant_6
]
qdumpHelper_QVariants_B = [
"QChar", # 7
"QVariantMap", # 8
"QVariantList",# 9
"QString", # 10
"QStringList", # 11
"QByteArray", # 12
"QBitArray", # 13
"QDate", # 14
"QTime", # 15
"QDateTime", # 16
"QUrl", # 17
"QLocale", # 18
"QRect", # 19
"QRectF", # 20
"QSize", # 21
"QSizeF", # 22
"QLine", # 23
"QLineF", # 24
"QPoint", # 25
"QPointF", # 26
"QRegExp", # 27
"QVariantHash",# 28
]
def qdumpHelper_QVariant_31(d, blob):
# QVariant::VoidStar
d.putBetterType("%sQVariant (void *)" % d.qtNamespace())
d.putValue("0x%x" % d.extractPointer(blob))
def qdumpHelper_QVariant_32(d, blob):
# QVariant::Long
d.putBetterType("%sQVariant (long)" % d.qtNamespace())
d.putValue("%s" % blob.extractLong())
def qdumpHelper_QVariant_33(d, blob):
# QVariant::Short
d.putBetterType("%sQVariant (short)" % d.qtNamespace())
d.putValue("%s" % blob.extractShort())
def qdumpHelper_QVariant_34(d, blob):
# QVariant::Char
d.putBetterType("%sQVariant (char)" % d.qtNamespace())
d.putValue("%s" % blob.extractByte())
def qdumpHelper_QVariant_35(d, blob):
# QVariant::ULong
d.putBetterType("%sQVariant (unsigned long)" % d.qtNamespace())
d.putValue("%s" % blob.extractULong())
def qdumpHelper_QVariant_36(d, blob):
# QVariant::UShort
d.putBetterType("%sQVariant (unsigned short)" % d.qtNamespace())
d.putValue("%s" % blob.extractUShort())
def qdumpHelper_QVariant_37(d, blob):
# QVariant::UChar
d.putBetterType("%sQVariant (unsigned char)" % d.qtNamespace())
d.putValue("%s" % blob.extractByte())
def qdumpHelper_QVariant_38(d, blob):
# QVariant::Float
d.putBetterType("%sQVariant (float)" % d.qtNamespace())
d.putValue("%s" % blob.extractFloat())
qdumpHelper_QVariants_D = [
qdumpHelper_QVariant_31,
qdumpHelper_QVariant_32,
qdumpHelper_QVariant_33,
qdumpHelper_QVariant_34,
qdumpHelper_QVariant_35,
qdumpHelper_QVariant_36,
qdumpHelper_QVariant_37,
qdumpHelper_QVariant_38
]
qdumpHelper_QVariants_E = [
"QFont", # 64
"QPixmap", # 65
"QBrush", # 66
"QColor", # 67
"QPalette", # 68
"QIcon", # 69
"QImage", # 70
"QPolygon", # 71
"QRegion", # 72
"QBitmap", # 73
"QCursor", # 74
]
qdumpHelper_QVariants_F = [
# Qt 5. In Qt 4 add one.
"QKeySequence",# 75
"QPen", # 76
"QTextLength", # 77
"QTextFormat", # 78
"X",
"QTransform", # 80
"QMatrix4x4", # 81
"QVector2D", # 82
"QVector3D", # 83
"QVector4D", # 84
"QQuaternion", # 85
"QPolygonF" # 86
]
def qdump__QVariant(d, value):
variantType = int(value["d"]["type"])
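    # Dispatch on the variant's type id: small built-in types are decoded
    # straight from the data blob, known Qt core/GUI types are re-dumped via
    # their own dumpers, and user types fall back to QMetaType::typeName().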
#warn("VARIANT TYPE: %s : " % variantType)
# Well-known simple type.
if variantType <= 6:
blob = d.toBlob(value)
qdumpHelper_QVariants_A[variantType](d, blob)
d.putNumChild(0)
return None
# Extended Core type (Qt 5)
if variantType >= 31 and variantType <= 38 and d.qtVersion() >= 0x050000:
blob = d.toBlob(value)
qdumpHelper_QVariants_D[variantType - 31](d, blob)
d.putNumChild(0)
return None
# Extended Core type (Qt 4)
if variantType >= 128 and variantType <= 135 and d.qtVersion() < 0x050000:
if variantType == 128:
p = d.extractPointer(value)
d.putBetterType("%sQVariant (void *)" % d.qtNamespace())
d.putValue("0x%x" % p)
else:
if variantType == 135:
blob = d.toBlob(value)
else:
p = d.extractPointer(value)
p = d.extractPointer(p)
blob = d.extractBlob(p, 8)
qdumpHelper_QVariants_D[variantType - 128](d, blob)
d.putNumChild(0)
return None
if variantType <= 86:
# Known Core or Gui type.
if variantType <= 28:
innert = qdumpHelper_QVariants_B[variantType - 7]
elif variantType <= 74:
innert = qdumpHelper_QVariants_E[variantType - 64]
elif d.qtVersion() < 0x050000:
innert = qdumpHelper_QVariants_F[variantType - 76]
else:
innert = qdumpHelper_QVariants_F[variantType - 75]
data = value["d"]["data"]
ns = d.qtNamespace()
inner = ns + innert
if d.isLldb:
# Looking up typedefs is problematic.
if innert == "QVariantMap":
inner = "%sQMap<%sQString, %sQVariant>" % (ns, ns, ns)
elif innert == "QVariantHash":
inner = "%sQHash<%sQString, %sQVariant>" % (ns, ns, ns)
elif innert == "QVariantList":
inner = "%sQList<%sQVariant>" % (ns, ns)
innerType = d.lookupType(inner)
if toInteger(value["d"]["is_shared"]):
val = data["ptr"].cast(innerType.pointer().pointer()).dereference().dereference()
else:
val = data["ptr"].cast(innerType)
d.putEmptyValue(-99)
d.putItem(val)
d.putBetterType("%sQVariant (%s)" % (d.qtNamespace(), innert))
return innert
# User types.
d_ptr = value["d"]
typeCode = int(d_ptr["type"])
ns = d.qtNamespace()
try:
exp = "((const char *(*)(int))%sQMetaType::typeName)(%d)" % (ns, typeCode)
type = str(d.parseAndEvaluate(exp))
except:
exp = "%sQMetaType::typeName(%d)" % (ns, typeCode)
type = str(d.parseAndEvaluate(exp))
type = type[type.find('"') + 1 : type.rfind('"')]
type = type.replace("Q", ns + "Q") # HACK!
type = type.replace("uint", "unsigned int") # HACK!
type = type.replace("COMMA", ",") # HACK!
type = type.replace(" ,", ",") # Lldb
#warn("TYPE: %s" % type)
data = d.call(value, "constData")
#warn("DATA: %s" % data)
d.putEmptyValue(-99)
d.putType("%sQVariant (%s)" % (ns, type))
d.putNumChild(1)
tdata = data.cast(d.lookupType(type).pointer()).dereference()
if d.isExpanded():
with Children(d):
with NoAddress(d):
d.putSubItem("data", tdata)
return tdata.type
def qedit__QVector(d, value, data):
values = data.split(',')
size = len(values)
d.call(value, "resize", str(size))
innerType = d.templateArgument(value.type, 0)
try:
# Qt 5. Will fail on Qt 4 due to the missing 'offset' member.
offset = value["d"]["offset"]
base = d.pointerValue(value["d"].cast(d.charPtrType()) + offset)
except:
# Qt 4.
base = d.pointerValue(value["p"]["array"])
d.setValues(base, innerType, values)
def qform__QVector():
return arrayForms()
def qdump__QVector(d, value):
data, size, alloc = d.vectorDataHelper(d.extractPointer(value))
d.check(0 <= size and size <= alloc and alloc <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putPlotData(data, size, d.templateArgument(value.type, 0))
def qdump__QVarLengthArray(d, value):
data = d.extractPointer(value["ptr"])
size = int(value["s"])
d.check(0 <= size)
d.putItemCount(size)
d.putPlotData(data, size, d.templateArgument(value.type, 0))
def qdump__QWeakPointer(d, value):
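    # 'd' points to the shared ExternalRefCountData: 'weakref' counts weak
    # references, 'strongref' counts strong (QSharedPointer) references.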
d_ptr = value["d"]
val = value["value"]
if d.isNull(d_ptr) and d.isNull(val):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isNull(d_ptr) or d.isNull(val):
d.putValue("<invalid>")
d.putNumChild(0)
return
weakref = int(d_ptr["weakref"]["_q_value"])
strongref = int(d_ptr["strongref"]["_q_value"])
d.check(strongref >= -1)
d.check(strongref <= weakref)
d.check(weakref <= 10*1000*1000)
innerType = d.templateArgument(value.type, 0)
if d.isSimpleType(innerType):
d.putSimpleValue(val.dereference())
else:
d.putEmptyValue()
d.putNumChild(3)
if d.isExpanded():
with Children(d):
d.putSubItem("data", val.dereference().cast(innerType))
d.putIntItem("weakref", weakref)
d.putIntItem("strongref", strongref)
def qdump__QXmlAttributes(d, value):
qdump__QList(d, value["attList"])
def qdump__QXmlStreamStringRef(d, value):
s = value["m_string"]
data, size, alloc = d.stringData(s)
data += 2 * int(value["m_position"])
size = int(value["m_size"])
s = d.readMemory(data, 2 * size)
d.putValue(s, Hex4EncodedLittleEndian)
d.putPlainChildren(value)
def qdump__QXmlStreamAttribute(d, value):
s = value["m_name"]["m_string"]
data, size, alloc = d.stringData(s)
data += 2 * int(value["m_name"]["m_position"])
size = int(value["m_name"]["m_size"])
s = d.readMemory(data, 2 * size)
d.putValue(s, Hex4EncodedLittleEndian)
d.putPlainChildren(value)
#######################################################################
#
# V4
#
#######################################################################
def qdump__QV4__Object(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__FunctionObject(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__CompilationUnit(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__CallContext(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__ScriptFunction(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__SimpleScriptFunction(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__ExecutionContext(d, value):
d.putBetterType(d.currentType)
d.putItem(d.extractQmlData(value))
def qdump__QV4__TypedValue(d, value):
d.putBetterType(d.currentType)
qdump__QV4__Value(d, d.directBaseObject(value))
def qdump__QV4__CallData(d, value):
argc = toInteger(value["argc"])
d.putItemCount(argc)
if d.isExpanded():
with Children(d):
d.putSubItem("[this]", value["thisObject"])
for i in range(0, argc):
d.putSubItem(i, value["args"][i])
def qdump__QV4__String(d, value):
d.putStringValue(d.addressOf(value) + 2 * d.ptrSize())
def qdump__QV4__Value(d, value):
v = toInteger(str(value["val"]))
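    # QV4::Value is NaN-boxed: the upper bits of the 64-bit word encode the
    # type (int32, double, null/undefined/bool) or mark a managed heap object.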
NaNEncodeMask = 0xffff800000000000
IsInt32Mask = 0x0002000000000000
IsDoubleMask = 0xfffc000000000000
IsNumberMask = IsInt32Mask | IsDoubleMask
IsNullOrUndefinedMask = 0x0000800000000000
IsNullOrBooleanMask = 0x0001000000000000
IsConvertibleToIntMask = IsInt32Mask | IsNullOrBooleanMask
ns = d.qtNamespace()
if v & IsInt32Mask:
d.putBetterType("%sQV4::Value (int32)" % ns)
d.putValue(value["int_32"])
elif v & IsDoubleMask:
d.putBetterType("%sQV4::Value (double)" % ns)
d.putValue("%x" % (v ^ 0xffff800000000000), Hex2EncodedFloat8)
elif d.isNull(v):
d.putBetterType("%sQV4::Value (null)" % ns)
d.putValue("(null)")
elif v & IsNullOrUndefinedMask:
d.putBetterType("%sQV4::Value (null/undef)" % ns)
d.putValue("(null/undef)")
elif v & IsNullOrBooleanMask:
d.putBetterType("%sQV4::Value (null/bool)" % ns)
d.putValue("(null/bool)")
else:
vtable = value["m"]["vtable"]
if toInteger(vtable["isString"]):
d.putBetterType("%sQV4::Value (string)" % ns)
d.putStringValue(d.extractPointer(value) + 2 * d.ptrSize())
elif toInteger(vtable["isObject"]):
d.putBetterType("%sQV4::Value (object)" % ns)
d.putValue("[0x%x]" % v)
else:
d.putBetterType("%sQV4::Value (unknown)" % ns)
d.putValue("[0x%x]" % v)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
with SubItem(d, "[raw]"):
d.putValue("[0x%x]" % v)
d.putType(" ");
d.putNumChild(0)
d.putFields(value)
#######################################################################
#
# Webkit
#
#######################################################################
def jstagAsString(tag):
# enum { Int32Tag = 0xffffffff };
# enum { CellTag = 0xfffffffe };
# enum { TrueTag = 0xfffffffd };
# enum { FalseTag = 0xfffffffc };
# enum { NullTag = 0xfffffffb };
# enum { UndefinedTag = 0xfffffffa };
# enum { EmptyValueTag = 0xfffffff9 };
# enum { DeletedValueTag = 0xfffffff8 };
if tag == -1:
return "Int32"
if tag == -2:
return "Cell"
if tag == -3:
return "True"
    if tag == -4:
        return "False"
    if tag == -5:
        return "Null"
    if tag == -6:
        return "Undefined"
    if tag == -7:
        return "Empty"
    if tag == -8:
        return "Deleted"
return "Unknown"
def qdump__QTJSC__JSValue(d, value):
d.putEmptyValue()
d.putNumChild(1)
if d.isExpanded():
with Children(d):
tag = value["u"]["asBits"]["tag"]
payload = value["u"]["asBits"]["payload"]
#d.putIntItem("tag", tag)
with SubItem(d, "tag"):
d.putValue(jstagAsString(int(tag)))
d.putNoType()
d.putNumChild(0)
d.putIntItem("payload", int(payload))
d.putFields(value["u"])
if tag == -2:
cellType = d.lookupType("QTJSC::JSCell").pointer()
d.putSubItem("cell", payload.cast(cellType))
try:
# FIXME: This might not always be a variant.
delegateType = d.lookupType(d.qtNamespace() + "QScript::QVariantDelegate").pointer()
delegate = scriptObject["d"]["delegate"].cast(delegateType)
#d.putSubItem("delegate", delegate)
variant = delegate["m_value"]
d.putSubItem("variant", variant)
except:
pass
def qdump__QScriptValue(d, value):
# structure:
# engine QScriptEnginePrivate
# jscValue QTJSC::JSValue
# next QScriptValuePrivate *
# numberValue 5.5987310416280426e-270 myns::qsreal
# prev QScriptValuePrivate *
# ref QBasicAtomicInt
# stringValue QString
# type QScriptValuePrivate::Type: { JavaScriptCore, Number, String }
#d.putEmptyValue()
dd = value["d_ptr"]["d"]
ns = d.qtNamespace()
if d.isNull(dd):
d.putValue("(invalid)")
d.putNumChild(0)
return
if int(dd["type"]) == 1: # Number
d.putValue(dd["numberValue"])
d.putType("%sQScriptValue (Number)" % ns)
d.putNumChild(0)
return
if int(dd["type"]) == 2: # String
d.putStringValue(dd["stringValue"])
d.putType("%sQScriptValue (String)" % ns)
return
d.putType("%sQScriptValue (JSCoreValue)" % ns)
x = dd["jscValue"]["u"]
tag = x["asBits"]["tag"]
payload = x["asBits"]["payload"]
#isValid = int(x["asBits"]["tag"]) != -6 # Empty
#isCell = int(x["asBits"]["tag"]) == -2
#warn("IS CELL: %s " % isCell)
#isObject = False
#className = "UNKNOWN NAME"
#if isCell:
# # isCell() && asCell()->isObject();
# # in cell: m_structure->typeInfo().type() == ObjectType;
# cellType = d.lookupType("QTJSC::JSCell").pointer()
# cell = payload.cast(cellType).dereference()
# dtype = "NO DYNAMIC TYPE"
# try:
# dtype = cell.dynamic_type
# except:
# pass
# warn("DYNAMIC TYPE: %s" % dtype)
# warn("STATUC %s" % cell.type)
# type = cell["m_structure"]["m_typeInfo"]["m_type"]
# isObject = int(type) == 7 # ObjectType;
# className = "UNKNOWN NAME"
#warn("IS OBJECT: %s " % isObject)
#inline bool JSCell::inherits(const ClassInfo* info) const
#for (const ClassInfo* ci = classInfo(); ci; ci = ci->parentClass) {
# if (ci == info)
# return true;
#return false;
try:
# This might already fail for "native" payloads.
scriptObjectType = d.lookupType(ns + "QScriptObject").pointer()
scriptObject = payload.cast(scriptObjectType)
# FIXME: This might not always be a variant.
delegateType = d.lookupType(ns + "QScript::QVariantDelegate").pointer()
delegate = scriptObject["d"]["delegate"].cast(delegateType)
#d.putSubItem("delegate", delegate)
variant = delegate["m_value"]
#d.putSubItem("variant", variant)
t = qdump__QVariant(d, variant)
# Override the "QVariant (foo)" output
d.putBetterType("%sQScriptValue (%s)" % (ns, t))
if t != "JSCoreValue":
return
except:
pass
# This is a "native" JSCore type for e.g. QDateTime.
d.putValue("<native>")
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putSubItem("jscValue", dd["jscValue"])
def qdump__QQmlAccessorProperties__Properties(d, value):
size = int(value["count"])
d.putItemCount(size)
if d.isExpanded():
d.putArrayData(value["properties"], size)
| Distrotech/qtcreator | share/qtcreator/debugger/qttypes.py | Python | lgpl-2.1 | 90,089 | 0.003519 |
"""
Django settings for superlists project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '47@j(z&b(+=1kr7i)l4&_x#$el3)4h0p*+k$u&k(v5bcw7(pta'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| alwaysrookie/superlists | superlists/settings.py | Python | apache-2.0 | 2,067 | 0 |
#! /usr/bin/env python
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'and',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
'return',
'try',
'while',
'yield',
#--end keywords--
]
kwdict = {}
for keyword in kwlist:
kwdict[keyword] = 1
iskeyword = kwdict.has_key
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
fp = open(iptfile)
strprog = re.compile('"([^"]+)"')
lines = []
while 1:
line = fp.readline()
if not line: break
if line.find('{1, "') > -1:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()
# load the output skeleton from the target
fp = open(optfile)
format = fp.readlines()
fp.close()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
| DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/keyword.py | Python | gpl-3.0 | 2,162 | 0.004625 |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
import os
import arrayio
from genomicode import filelib
from Betsy import bie3
from Betsy import rulebase
from Betsy import read_label_file
cls_node, data_node = antecedents
M = arrayio.read(data_node.identifier)
x = read_label_file.read(cls_node.identifier)
a, training_label, second_line = x
predict_model = __import__(
'Betsy.modules.' + 'classify_with_random_forest',
globals(), locals(),
['classify_with_random_forest'], -2)
evaluate_model = __import__(
'Betsy.modules.' + 'evaluate_prediction',
globals(), locals(), ['evaluate_prediction'], -2)
full_index = range(M.ncol())
f = file(outfile, 'w')
f.write('\t'.join(['sample_name', 'Predicted_class', 'Confidence',
'Actual_class', 'Correct?']))
f.write('\n')
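        # Leave-one-out cross-validation: for each sample, train on the other
        # N-1 samples, predict the held-out sample, and record the evaluation.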
for i in range(M.ncol()):
# Make filenames
# gene expression for N samples.
merge_file = 'merge' + '_' + str(i)
# class label file for the training samples (samples 1-(N-1)).
train_label = 'train_label' + '_' + str(i)
# class label file for the test sample (sample N).
test_label = 'test_label' + '_' + str(i)
# Save the output of the prediction and evaluation.
predict_file = "predict.txt"
evaluate_file = "evaluate.txt"
test_index = i
train_index = full_index[:]
train_index.remove(test_index)
merge_index = train_index + [test_index]
y_training = [training_label[x] for x in train_index]
y_test = [training_label[test_index]]
# Write the files for this iteration.
M_merge = M.matrix(None, merge_index)
arrayio.gct_format.write(M_merge, open(merge_file, 'w'))
read_label_file.write(train_label, second_line, y_training)
read_label_file.write(test_label, second_line, y_test[0])
# Make objects to be used in this analysis.
x = rulebase.SignalFile.output(
format='gct', contents='class0,class1,test')
merge_data = bie3.IdentifiedDataNode(x, identifier=merge_file)
x = rulebase.ClassLabelFile.output(contents='class0,class1')
train_label_data = bie3.IdentifiedDataNode(
x, identifier=train_label)
x = rulebase.ClassLabelFile.output(contents='test')
test_label_data = bie3.IdentifiedDataNode(x, identifier=test_label)
# Make a fake object to pass to evaluate_model.run.
out_node = filelib.GenericObject()
out_node.identifier = predict_file
# Run the predictions.
x = train_label_data, merge_data
predict_model.Module().run(
network, x, out_attributes, user_options, num_cores,
predict_file)
# Run the evaluation.
new_parameters = out_attributes.copy()
x = test_label_data, out_node
evaluate_model.Module().run(
network, x, new_parameters, user_options, num_cores,
evaluate_file)
# Is this the right line?
lines = open(evaluate_file).readlines()
f.write(lines[1])
os.remove(merge_file)
os.remove(train_label)
os.remove(test_label)
os.remove(predict_file)
os.remove(evaluate_file)
f.close()
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
data_node, cls_node = antecedents
original_file = module_utils.get_inputid(data_node.identifier)
        filename = 'prediction_loocv_random_forest' + original_file + '.txt'
return filename
| jefftc/changlab | Betsy/Betsy/modules/run_loocv_random_forest.py | Python | mit | 4,207 | 0.002615 |
# Copyright (c) 2014 Idiap Research Institute, http://www.idiap.ch/
# Written by Nikolaos Pappas <nikolaos.pappas@idiap.ch>,
#
# This file is part of CBRec.
#
# CBRec is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# CBRec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CBRec. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
import re
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from utils import Unbuffered, write
class Data:
def __init__(self, items, attrs=None, preprocess=True, debug=False):
self.texts = [] # item texts
self.items = items # item hash representations
self.attrs = attrs # item attributes to keep
self.regex = '\r|\t|\n|--' # symbols to be removed
self.min_w = 2 # minimum length of each word
self.debug = debug # print status and debug messages
if attrs is None:
self.attrs = list(set(items[0].keys()) - set(['id']))
self.extract_text()
self.preprocess() if preprocess else ''
def extract_text(self):
write("\n "+"-> Extracting text".ljust(50,'.')) if self.debug else ''
for idx, item in enumerate(self.items):
attr_texts = []
for attr in self.attrs:
attr_texts.append(item[attr])
text = " ".join(attr_texts).replace(self.regex,"")
self.texts.append(text)
write("[OK]") if self.debug else ''
def preprocess(self):
write("\n "+"-> Preprocessing text".ljust(50,'.')) if self.debug else ''
stoplist = stopwords.words('english')
wregex = RegexpTokenizer(r'\w+')
for idx, item in enumerate(self.items):
words = wregex.tokenize(self.texts[idx].lower())
final_words = []
for iw, word in enumerate(words):
if word not in stoplist and len(word) > self.min_w:
final_words.append(word)
self.texts[idx] = ' '.join(final_words)
write("[OK]") if self.debug else ''
if __name__== '__main__':
items = json.loads(open('example.json').read())
data = Data(items)
example = data.items[0]
print "Total items: %d" % len(data.texts)
print "Example (id=%d, title=%s):" % (example['id'], example['title'])
print data.texts[0]
| idiap/cbrec | data.py | Python | gpl-3.0 | 2,541 | 0.023219 |
import random
def pivot(items, a, b):
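    """
    Lomuto partition: use items[b] as the pivot, move elements <= pivot to
    the front of items[a:b], and return the pivot's final index.
    """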
p = items[b]
i = a
for j in range(a,b):
if items[j] <= p:
items[i], items[j] = items[j], items[i]
i += 1
items[i], items[b] = items[b], items[i]
return i
def quicksort(items, i, j):
"""
    In-place quicksort of items[i..j] (both indices inclusive).
"""
if i < j:
p = pivot(items, i, j)
quicksort(items, i, p-1)
quicksort(items, p+1, j)
letters = random.choices('abcdefghijklmnopqrstuvwxyz', k=100)
quicksort(letters, 0, len(letters)-1)
print(''.join(letter for letter in letters))
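# Optional sanity check: quicksort should agree with the built-in sorted()
# on a fresh list of random integers.
nums = [random.randint(0, 999) for _ in range(50)]
expected = sorted(nums)
quicksort(nums, 0, len(nums) - 1)
assert nums == expected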
| cbare/Etudes | python/quick_sort.py | Python | apache-2.0 | 572 | 0.001748 |
#!/usr/bin/python3
"""This file is part of asyncoro; see http://asyncoro.sourceforge.net for
details.
This program can be used to start discoro server processes so discoro scheduler
(see 'discoro.py') can send computations to these server processes for executing
distributed communicating proceses (coroutines). All coroutines in a server
execute in the same thread, so multiple CPUs are not used by one server. If CPU
intensive computations are to be run on systems with multiple processors, then
this program should be run with multiple instances (see below for '-c' option to
this program).
See 'discoro_client*.py' files for example use cases.
"""
__author__ = "Giridhar Pemmasani (pgiri@yahoo.com)"
__copyright__ = "Copyright (c) 2014 Giridhar Pemmasani"
__license__ = "MIT"
__url__ = "http://asyncoro.sourceforge.net"
def _discoro_server_coro_proc():
# coroutine
"""Server process receives computations and runs coroutines for it.
"""
import os
import shutil
import traceback
import sys
import time
from asyncoro.discoro import MinPulseInterval, MaxPulseInterval, \
DiscoroNodeInfo, DiscoroNodeAvailInfo, Scheduler
import asyncoro.disasyncoro as asyncoro
from asyncoro.disasyncoro import Coro, SysCoro, Location
_discoro_coro = asyncoro.AsynCoro.cur_coro()
_discoro_config = yield _discoro_coro.receive()
_discoro_node_coro = asyncoro.deserialize(_discoro_config['node_coro'])
_discoro_scheduler_coro = asyncoro.deserialize(_discoro_config['scheduler_coro'])
assert isinstance(_discoro_scheduler_coro, Coro)
_discoro_computation_auth = _discoro_config.pop('computation_auth', None)
if _discoro_config['min_pulse_interval'] > 0:
MinPulseInterval = _discoro_config['min_pulse_interval']
if _discoro_config['max_pulse_interval'] > 0:
MaxPulseInterval = _discoro_config['max_pulse_interval']
_discoro_busy_time = _discoro_config.pop('busy_time')
asyncoro.MsgTimeout = _discoro_config.pop('msg_timeout')
_discoro_name = asyncoro.AsynCoro.instance().name
_discoro_dest_path = os.path.join(asyncoro.AsynCoro.instance().dest_path,
'discoroproc-%s' % _discoro_config['id'])
if os.path.isdir(_discoro_dest_path):
shutil.rmtree(_discoro_dest_path)
asyncoro.AsynCoro.instance().dest_path = _discoro_dest_path
os.chdir(_discoro_dest_path)
sys.path.insert(0, _discoro_dest_path)
for _discoro_var in _discoro_config.pop('peers', []):
Coro(asyncoro.AsynCoro.instance().peer, asyncoro.deserialize(_discoro_var))
for _discoro_var in ['clean', 'min_pulse_interval', 'max_pulse_interval']:
del _discoro_config[_discoro_var]
_discoro_coro.register('discoro_server')
asyncoro.logger.info('discoro server %s started at %s; '
'computation files will be saved in "%s"',
_discoro_config['id'], _discoro_coro.location, _discoro_dest_path)
_discoro_req = _discoro_client = _discoro_auth = _discoro_msg = None
_discoro_peer_status = _discoro_monitor_coro = _discoro_monitor_proc = _discoro_job = None
_discoro_job_coros = set()
_discoro_jobs_done = asyncoro.Event()
def _discoro_peer_status(coro=None):
coro.set_daemon()
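        # Monitor peer status notifications; if the scheduler's peer goes
        # offline, ask this server to close the current computation.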
while 1:
status = yield coro.receive()
if not isinstance(status, asyncoro.PeerStatus):
asyncoro.logger.warning('Invalid peer status %s ignored', type(status))
continue
if status.status == asyncoro.PeerStatus.Offline:
if (_discoro_scheduler_coro and
_discoro_scheduler_coro.location == status.location):
if _discoro_computation_auth:
_discoro_coro.send({'req': 'close', 'auth': _discoro_computation_auth})
def _discoro_monitor_proc(zombie_period, coro=None):
coro.set_daemon()
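        # Reap finished job coroutines and refresh the busy timestamp so the
        # node can detect idle (zombie) servers.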
while 1:
msg = yield coro.receive(timeout=zombie_period)
if isinstance(msg, asyncoro.MonitorException):
asyncoro.logger.debug('coro %s done', msg.args[0])
_discoro_job_coros.discard(msg.args[0])
if not _discoro_job_coros:
_discoro_jobs_done.set()
_discoro_busy_time.value = int(time.time())
elif not msg:
if _discoro_job_coros:
_discoro_busy_time.value = int(time.time())
else:
asyncoro.logger.warning('invalid message to monitor ignored: %s', type(msg))
_discoro_var = _discoro_config['computation_location']
_discoro_var = asyncoro.Location(_discoro_var.addr, _discoro_var.port)
if (yield asyncoro.AsynCoro.instance().peer(_discoro_var)):
raise StopIteration(-1)
asyncoro.AsynCoro.instance().peer_status(SysCoro(_discoro_peer_status))
yield asyncoro.AsynCoro.instance().peer(_discoro_node_coro.location)
yield asyncoro.AsynCoro.instance().peer(_discoro_scheduler_coro.location)
_discoro_scheduler_coro.send({'status': Scheduler.ServerDiscovered,
'coro': _discoro_coro, 'name': _discoro_name,
'auth': _discoro_computation_auth})
if _discoro_config['_server_setup']:
if _discoro_config['_disable_servers']:
while 1:
_discoro_var = yield _discoro_coro.receive()
if (isinstance(_discoro_var, dict) and
_discoro_var.get('req', None) == 'enable_server' and
_discoro_var.get('auth', None) == _discoro_computation_auth):
_discoro_var = _discoro_var['setup_args']
if not isinstance(_discoro_var, tuple):
_discoro_var = tuple(_discoro_var)
break
else:
asyncoro.logger.warning('Ignoring invalid request to run server setup')
else:
_discoro_var = ()
_discoro_var = yield asyncoro.Coro(globals()[_discoro_config['_server_setup']],
*_discoro_var).finish()
if _discoro_var:
asyncoro.logger.debug('discoro server %s @ %s setup failed',
_discoro_config['id'], _discoro_coro.location)
raise StopIteration(_discoro_var)
_discoro_config['_server_setup'] = None
_discoro_scheduler_coro.send({'status': Scheduler.ServerInitialized,
'coro': _discoro_coro, 'name': _discoro_name,
'auth': _discoro_computation_auth})
_discoro_var = _discoro_config['zombie_period']
if _discoro_var:
_discoro_var /= 3
else:
_discoro_var = None
_discoro_monitor_coro = SysCoro(_discoro_monitor_proc, _discoro_var)
_discoro_node_coro.send({'req': 'server_setup', 'id': _discoro_config['id'],
'coro': _discoro_coro})
_discoro_busy_time.value = int(time.time())
asyncoro.logger.debug('discoro server "%s": Computation "%s" from %s',
_discoro_name, _discoro_computation_auth,
_discoro_scheduler_coro.location)
while 1:
_discoro_msg = yield _discoro_coro.receive()
if not isinstance(_discoro_msg, dict):
continue
_discoro_req = _discoro_msg.get('req', None)
if _discoro_req == 'run':
_discoro_client = _discoro_msg.get('client', None)
_discoro_auth = _discoro_msg.get('auth', None)
_discoro_job = _discoro_msg.get('job', None)
if (not isinstance(_discoro_client, Coro) or
_discoro_auth != _discoro_computation_auth):
asyncoro.logger.warning('invalid run: %s', type(_discoro_job))
if isinstance(_discoro_client, Coro):
_discoro_client.send(None)
continue
try:
if _discoro_job.code:
exec(_discoro_job.code, globals())
_discoro_job.args = asyncoro.deserialize(_discoro_job.args)
_discoro_job.kwargs = asyncoro.deserialize(_discoro_job.kwargs)
except:
asyncoro.logger.debug('invalid computation to run')
_discoro_var = (sys.exc_info()[0], _discoro_job.name, traceback.format_exc())
_discoro_client.send(_discoro_var)
else:
Coro._asyncoro._lock.acquire()
try:
_discoro_var = Coro(globals()[_discoro_job.name],
*(_discoro_job.args), **(_discoro_job.kwargs))
except:
_discoro_var = (sys.exc_info()[0], _discoro_job.name, traceback.format_exc())
else:
_discoro_job_coros.add(_discoro_var)
_discoro_busy_time.value = int(time.time())
asyncoro.logger.debug('coro %s created', _discoro_var)
_discoro_var.notify(_discoro_monitor_coro)
_discoro_var.notify(_discoro_scheduler_coro)
_discoro_client.send(_discoro_var)
Coro._asyncoro._lock.release()
elif _discoro_req == 'close' or _discoro_req == 'quit':
_discoro_auth = _discoro_msg.get('auth', None)
if (_discoro_auth == _discoro_computation_auth):
pass
elif (_discoro_msg.get('node_auth', None) == _discoro_config['node_auth']):
if _discoro_scheduler_coro:
_discoro_scheduler_coro.send({'status': Scheduler.ServerClosed,
'location': _discoro_coro.location})
while _discoro_job_coros:
asyncoro.logger.debug('discoro server "%s": Waiting for %s coroutines to '
'terminate before closing computation',
_discoro_name, len(_discoro_job_coros))
if (yield _discoro_jobs_done.wait(timeout=5)):
break
else:
continue
_discoro_var = _discoro_msg.get('client', None)
if isinstance(_discoro_var, Coro):
_discoro_var.send(0)
break
elif _discoro_req == 'terminate':
_discoro_auth = _discoro_msg.get('node_auth', None)
if (_discoro_auth != _discoro_config['node_auth']):
continue
if _discoro_scheduler_coro:
_discoro_scheduler_coro.send({'status': Scheduler.ServerDisconnected,
'location': _discoro_coro.location})
break
elif _discoro_req == 'status':
if _discoro_msg.get('node_auth', None) != _discoro_config['node_auth']:
continue
if _discoro_scheduler_coro:
print(' discoro server "%s" @ %s with PID %s running %d coroutines for %s' %
(_discoro_name, _discoro_coro.location, os.getpid(),
len(_discoro_job_coros), _discoro_scheduler_coro.location))
else:
print(' discoro server "%s" @ %s with PID %s not used by any computation' %
(_discoro_name, _discoro_coro.location, os.getpid()))
elif _discoro_req == 'peers':
_discoro_auth = _discoro_msg.get('auth', None)
if (_discoro_auth == _discoro_computation_auth):
for _discoro_var in _discoro_msg.get('peers', []):
asyncoro.Coro(asyncoro.AsynCoro.instance().peer, _discoro_var)
else:
asyncoro.logger.warning('invalid command "%s" ignored', _discoro_req)
_discoro_client = _discoro_msg.get('client', None)
if not isinstance(_discoro_client, Coro):
continue
_discoro_client.send(-1)
# kill any pending jobs
while _discoro_job_coros:
for _discoro_job_coro in _discoro_job_coros:
_discoro_job_coro.terminate()
asyncoro.logger.debug('discoro server "%s": Waiting for %s coroutines to terminate '
'before closing computation', _discoro_name, len(_discoro_job_coros))
if (yield _discoro_jobs_done.wait(timeout=5)):
break
asyncoro.logger.debug('discoro server %s @ %s done',
_discoro_config['id'], _discoro_coro.location)
def _discoro_server_process(_discoro_config, _discoro_mp_queue, _discoro_computation):
import os
import sys
import time
# import traceback
for _discoro_var in list(sys.modules.keys()):
if _discoro_var.startswith('asyncoro'):
sys.modules.pop(_discoro_var)
globals().pop('asyncoro', None)
global asyncoro
import asyncoro.disasyncoro as asyncoro
_discoro_pid_path = os.path.join(_discoro_config['dest_path'],
'discoroproc-%s.pid' % _discoro_config['id'])
if os.path.isfile(_discoro_pid_path):
with open(_discoro_pid_path, 'r') as _discoro_req:
_discoro_var = _discoro_req.read()
_discoro_var = int(_discoro_var)
if not _discoro_config['clean']:
print('\n Another discoronode seems to be running;\n'
' make sure the server with PID %d has quit and remove "%s"\n' %
(_discoro_var, _discoro_pid_path))
_discoro_var = os.getpid()
import signal
try:
os.kill(_discoro_var, signal.SIGINT)
time.sleep(0.1)
os.kill(_discoro_var, signal.SIGKILL)
except:
pass
else:
time.sleep(0.1)
try:
if os.waitpid(_discoro_var, os.WNOHANG)[0] != _discoro_var:
asyncoro.logger.warning('Killing process %d failed', _discoro_var)
except:
pass
del signal, _discoro_req
with open(_discoro_pid_path, 'w') as _discoro_var:
_discoro_var.write('%s' % os.getpid())
if _discoro_config['loglevel']:
asyncoro.logger.setLevel(asyncoro.logger.DEBUG)
# asyncoro.logger.show_ms(True)
else:
asyncoro.logger.setLevel(asyncoro.logger.INFO)
del _discoro_config['loglevel']
server_id = _discoro_config['id']
mp_queue, _discoro_mp_queue = _discoro_mp_queue, None
config = {}
for _discoro_var in ['udp_port', 'tcp_port', 'node', 'ext_ip_addr', 'name', 'discover_peers',
'secret', 'certfile', 'keyfile', 'dest_path', 'max_file_size']:
config[_discoro_var] = _discoro_config.pop(_discoro_var, None)
while 1:
try:
_discoro_scheduler = asyncoro.AsynCoro(**config)
except:
print('discoro server %s failed for port %s; retrying in 5 seconds' %
(server_id, config['tcp_port']))
# print(traceback.format_exc())
time.sleep(5)
else:
break
if os.name == 'nt':
_discoro_computation = asyncoro.deserialize(_discoro_computation)
if _discoro_computation._code:
exec(_discoro_computation._code, globals())
if __name__ == '__mp_main__': # Windows multiprocessing process
sys.modules['__mp_main__'].__dict__.update(globals())
_discoro_config['_disable_servers'] = _discoro_computation._disable_servers
_discoro_config['_server_setup'] = _discoro_computation._server_setup
_discoro_config['computation_location'] = _discoro_computation._pulse_coro.location
_discoro_coro = asyncoro.SysCoro(_discoro_server_coro_proc)
assert isinstance(_discoro_coro, asyncoro.Coro)
mp_queue.put((server_id, asyncoro.serialize(_discoro_coro)))
_discoro_coro.send(_discoro_config)
_discoro_config = None
del config, _discoro_var
_discoro_coro.value()
_discoro_scheduler.ignore_peers(ignore=True)
for location in _discoro_scheduler.peers():
asyncoro.Coro(_discoro_scheduler.close_peer, location)
_discoro_scheduler.finish()
try:
os.remove(_discoro_pid_path)
except:
pass
mp_queue.put((server_id, None))
exit(0)
def _discoro_spawn(_discoro_config, _discoro_id_ports, _discoro_mp_queue,
_discoro_pipe, _discoro_computation, _discoro_setup_args):
import os
import sys
import signal
import multiprocessing
# import traceback
try:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
except:
pass
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGABRT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
for _discoro_var in list(sys.modules.keys()):
if _discoro_var.startswith('asyncoro'):
sys.modules.pop(_discoro_var)
globals().pop('asyncoro', None)
import asyncoro
os.chdir(_discoro_config['dest_path'])
sys.path.insert(0, _discoro_config['dest_path'])
os.environ['PATH'] = _discoro_config['dest_path'] + os.pathsep + os.environ['PATH']
procs = [None] * len(_discoro_id_ports)
def terminate(status):
for i in range(len(_discoro_id_ports)):
proc = procs[i]
if not proc:
continue
if proc.is_alive():
try:
proc.terminate()
except:
pass
else:
proc.join(1)
if (not proc.is_alive()) and proc.exitcode:
asyncoro.logger.warning('Server %s (process %s) reaped', _discoro_id_ports[i][0],
proc.pid)
_discoro_mp_queue.put((_discoro_id_ports[i][0], None))
_discoro_pid_path = os.path.join(_discoro_config['dest_path'],
'discoroproc-%s.pid' % _discoro_id_ports[i][0])
try:
os.remove(_discoro_pid_path)
except:
pass
_discoro_pipe.send('closed')
exit(status)
if os.name != 'nt':
if _discoro_computation._code:
exec(_discoro_computation._code, globals())
if _discoro_computation._node_setup:
try:
if not isinstance(_discoro_setup_args, tuple):
_discoro_setup_args = tuple(_discoro_setup_args)
ret = asyncoro.Coro(globals()[_discoro_computation._node_setup],
*_discoro_setup_args).value()
except:
asyncoro.logger.warning('node_setup failed for %s', _discoro_computation._auth)
# print(traceback.format_exc())
ret = -1
if ret != 0:
_discoro_pipe.send(0)
terminate(ret)
_discoro_computation._node_setup = None
def start_process(i, procs):
server_config = dict(_discoro_config)
server_config['id'] = _discoro_id_ports[i][0]
server_config['name'] = '%s_proc-%s' % (_discoro_config['name'], server_config['id'])
server_config['tcp_port'] = _discoro_id_ports[i][1]
server_config['peers'] = _discoro_config['peers'][:]
procs[i] = multiprocessing.Process(target=_discoro_server_process,
name=server_config['name'],
args=(server_config, _discoro_mp_queue,
_discoro_computation))
procs[i].start()
for i in range(len(procs)):
start_process(i, procs)
asyncoro.logger.debug('discoro server %s started with PID %s',
_discoro_id_ports[i][0], procs[i].pid)
_discoro_pipe.send(len(procs))
while 1:
req = _discoro_pipe.recv()
if req['req'] == 'quit':
break
else:
asyncoro.logger.warning('Ignoring invalid pipe cmd: %s' % str(req))
for proc in procs:
proc.join(1)
terminate(0)
if __name__ == '__main__':
"""
See http://asyncoro.sourceforge.net/discoro.html#node-servers for details on
options to start this program.
"""
import sys
import time
import argparse
import multiprocessing
import threading
import socket
import os
import hashlib
import re
import signal
import platform
import shutil
try:
import readline
except:
pass
try:
import psutil
except ImportError:
print('\n \'psutil\' module is not available; '
'CPU, memory, disk status will not be sent!\n')
psutil = None
else:
psutil.cpu_percent(0.1)
from asyncoro.discoro import MinPulseInterval, MaxPulseInterval, Scheduler, Computation
import asyncoro.disasyncoro as asyncoro
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='config', default='',
help='use configuration in given file')
parser.add_argument('--save_config', dest='save_config', default='',
help='save configuration in given file and exit')
parser.add_argument('-c', '--cpus', dest='cpus', type=int, default=0,
help='number of CPUs/discoro instances to run; '
'if negative, that many CPUs are not used')
parser.add_argument('-i', '--ip_addr', dest='node', default='',
help='IP address or host name of this node')
parser.add_argument('--ext_ip_addr', dest='ext_ip_addr', default='',
help='External IP address to use (needed in case of NAT firewall/gateway)')
parser.add_argument('-u', '--udp_port', dest='udp_port', type=int, default=51351,
help='UDP port number to use')
parser.add_argument('--tcp_ports', dest='tcp_ports', action='append', default=[],
help='TCP port numbers to use')
parser.add_argument('--scheduler_port', dest='scheduler_port', type=int, default=51350,
help='UDP port number used by discoro scheduler')
parser.add_argument('-n', '--name', dest='name', default='',
help='(symbolic) name given to AsynCoro schedulers on this node')
parser.add_argument('--dest_path', dest='dest_path', default='',
help='path prefix to where files sent by peers are stored')
parser.add_argument('--max_file_size', dest='max_file_size', default='',
help='maximum file size of any file transferred')
parser.add_argument('-s', '--secret', dest='secret', default='',
help='authentication secret for handshake with peers')
parser.add_argument('--certfile', dest='certfile', default='',
help='file containing SSL certificate')
parser.add_argument('--keyfile', dest='keyfile', default='',
help='file containing SSL key')
parser.add_argument('--serve', dest='serve', default=-1, type=int,
help='number of clients to serve before exiting')
parser.add_argument('--service_start', dest='service_start', default='',
help='time of day in HH:MM format when to start service')
parser.add_argument('--service_stop', dest='service_stop', default='',
help='time of day in HH:MM format when to stop service '
'(continue to execute running jobs, but no new jobs scheduled)')
parser.add_argument('--service_end', dest='service_end', default='',
help='time of day in HH:MM format when to end service '
'(terminate running jobs)')
parser.add_argument('--msg_timeout', dest='msg_timeout', default=asyncoro.MsgTimeout, type=int,
help='timeout for delivering messages')
parser.add_argument('--min_pulse_interval', dest='min_pulse_interval',
default=MinPulseInterval, type=int,
help='minimum pulse interval clients can use in number of seconds')
parser.add_argument('--max_pulse_interval', dest='max_pulse_interval',
default=MaxPulseInterval, type=int,
help='maximum pulse interval clients can use in number of seconds')
parser.add_argument('--zombie_period', dest='zombie_period', default=(10 * MaxPulseInterval),
type=int, help='maximum number of seconds a client can go without running a computation')
parser.add_argument('--ping_interval', dest='ping_interval', default=0, type=int,
help='interval in number of seconds for node to broadcast its address')
parser.add_argument('--daemon', action='store_true', dest='daemon', default=False,
help='if given, input is not read from terminal')
parser.add_argument('--clean', action='store_true', dest='clean', default=False,
help='if given, server processes from previous run will be killed '
'and new server process started')
parser.add_argument('--peer', dest='peers', action='append', default=[],
help='peer location (in the form node:TCPport) to communicate with')
parser.add_argument('-d', '--debug', action='store_true', dest='loglevel', default=False,
help='if given, debug messages are printed')
_discoro_config = vars(parser.parse_args(sys.argv[1:]))
_discoro_var = _discoro_config.pop('config')
if _discoro_var:
import configparser
cfg = configparser.ConfigParser()
cfg.read(_discoro_var)
cfg = dict(cfg.items('DEFAULT'))
cfg['cpus'] = int(cfg['cpus'])
cfg['udp_port'] = int(cfg['udp_port'])
cfg['serve'] = int(cfg['serve'])
cfg['msg_timeout'] = int(cfg['msg_timeout'])
cfg['min_pulse_interval'] = int(cfg['min_pulse_interval'])
cfg['max_pulse_interval'] = int(cfg['max_pulse_interval'])
cfg['zombie_period'] = int(cfg['zombie_period'])
cfg['ping_interval'] = int(cfg['ping_interval'])
cfg['daemon'] = cfg['daemon'] == 'True'
cfg['clean'] = cfg['clean'] == 'True'
# cfg['discover_peers'] = cfg['discover_peers'] == 'True'
cfg['loglevel'] = cfg['loglevel'] == 'True'
cfg['tcp_ports'] = [_discoro_var.strip()[1:-1] for _discoro_var in
cfg['tcp_ports'][1:-1].split(',')]
cfg['tcp_ports'] = [_discoro_var for _discoro_var in cfg['tcp_ports'] if _discoro_var]
cfg['peers'] = [_discoro_var.strip()[1:-1] for _discoro_var in
cfg['peers'][1:-1].split(',')]
cfg['peers'] = [_discoro_var for _discoro_var in cfg['peers'] if _discoro_var]
for key, value in _discoro_config.items():
if _discoro_config[key] != parser.get_default(key) or key not in cfg:
cfg[key] = _discoro_config[key]
_discoro_config = cfg
del key, value, cfg
del parser, MinPulseInterval, MaxPulseInterval
del sys.modules['argparse'], globals()['argparse']
_discoro_var = _discoro_config.pop('save_config')
if _discoro_var:
import configparser
cfg = configparser.ConfigParser(_discoro_config)
cfgfp = open(_discoro_var, 'w')
cfg.write(cfgfp)
cfgfp.close()
exit(0)
if not _discoro_config['min_pulse_interval']:
_discoro_config['min_pulse_interval'] = MinPulseInterval
if not _discoro_config['max_pulse_interval']:
_discoro_config['max_pulse_interval'] = MaxPulseInterval
if _discoro_config['msg_timeout'] < 1:
raise Exception('msg_timeout must be at least 1')
if (_discoro_config['min_pulse_interval'] and
_discoro_config['min_pulse_interval'] < _discoro_config['msg_timeout']):
raise Exception('min_pulse_interval must be at least msg_timeout')
if (_discoro_config['max_pulse_interval'] and _discoro_config['min_pulse_interval'] and
_discoro_config['max_pulse_interval'] < _discoro_config['min_pulse_interval']):
raise Exception('max_pulse_interval must be at least min_pulse_interval')
if _discoro_config['zombie_period']:
if _discoro_config['zombie_period'] < _discoro_config['min_pulse_interval']:
raise Exception('zombie_period must be at least min_pulse_interval')
else:
_discoro_config['zombie_period'] = 0
_discoro_cpus = multiprocessing.cpu_count()
if _discoro_config['cpus'] > 0:
if _discoro_config['cpus'] > _discoro_cpus:
raise Exception('CPU count must be <= %s' % _discoro_cpus)
_discoro_cpus = _discoro_config['cpus']
elif _discoro_config['cpus'] < 0:
if -_discoro_config['cpus'] >= _discoro_cpus:
raise Exception('CPU count must be > -%s' % _discoro_cpus)
_discoro_cpus += _discoro_config['cpus']
del _discoro_config['cpus']
_discoro_tcp_ports = set()
tcp_port = tcp_ports = None
for tcp_port in _discoro_config.pop('tcp_ports', []):
tcp_ports = tcp_port.split('-')
if len(tcp_ports) == 1:
_discoro_tcp_ports.add(int(tcp_ports[0]))
elif len(tcp_ports) == 2:
_discoro_tcp_ports = _discoro_tcp_ports.union(range(int(tcp_ports[0]),
int(tcp_ports[1]) + 1))
else:
raise Exception('Invalid TCP port range "%s"' % tcp_ports)
_discoro_tcp_ports = sorted(_discoro_tcp_ports)
if _discoro_tcp_ports:
for tcp_port in range(_discoro_tcp_ports[-1] + 1,
_discoro_tcp_ports[-1] + 1 +
(_discoro_cpus + 1) - len(_discoro_tcp_ports)):
_discoro_tcp_ports.append(int(tcp_port))
# _discoro_tcp_ports = _discoro_tcp_ports[:(_discoro_cpus + 1)]
else:
_discoro_tcp_ports = [0] * (_discoro_cpus + 1)
del tcp_port, tcp_ports
peers, _discoro_config['peers'] = _discoro_config['peers'], []
peer = None
for peer in peers:
peer = peer.split(':')
if len(peer) != 2:
raise Exception('peer "%s" is not valid' % ':'.join(peer))
_discoro_config['peers'].append(asyncoro.serialize(asyncoro.Location(peer[0], peer[1])))
del peer, peers
_discoro_name = _discoro_config['name']
if not _discoro_name:
_discoro_name = socket.gethostname()
if not _discoro_name:
_discoro_name = 'discoro_server'
_discoro_daemon = _discoro_config.pop('daemon', False)
if not _discoro_daemon:
try:
if os.getpgrp() != os.tcgetpgrp(sys.stdin.fileno()):
_discoro_daemon = True
except:
pass
if os.name == 'nt':
# Python 3 under Windows blocks multiprocessing.Process on reading
# input; pressing "Enter" twice works (for one subprocess). Until
# this is understood / fixed, disable reading input.
print('\nReading standard input disabled, as multiprocessing does not seem to work '
'with reading input under Windows\n')
_discoro_daemon = True
_discoro_config['discover_peers'] = False
# time at start of day
_discoro_var = time.localtime()
_discoro_var = (int(time.time()) - (_discoro_var.tm_hour * 3600) -
(_discoro_var.tm_min * 60))
_discoro_service_start = _discoro_service_stop = _discoro_service_end = None
if _discoro_config['service_start']:
_discoro_service_start = time.strptime(_discoro_config.pop('service_start'), '%H:%M')
_discoro_service_start = (_discoro_var + (_discoro_service_start.tm_hour * 3600) +
(_discoro_service_start.tm_min * 60))
if _discoro_config['service_stop']:
_discoro_service_stop = time.strptime(_discoro_config.pop('service_stop'), '%H:%M')
_discoro_service_stop = (_discoro_var + (_discoro_service_stop.tm_hour * 3600) +
(_discoro_service_stop.tm_min * 60))
if _discoro_config['service_end']:
_discoro_service_end = time.strptime(_discoro_config.pop('service_end'), '%H:%M')
_discoro_service_end = (_discoro_var + (_discoro_service_end.tm_hour * 3600) +
(_discoro_service_end.tm_min * 60))
if (_discoro_service_start or _discoro_service_stop or _discoro_service_end):
if not _discoro_service_start:
_discoro_service_start = int(time.time())
if _discoro_service_stop:
if _discoro_service_start >= _discoro_service_stop:
raise Exception('"service_start" must be before "service_stop"')
if _discoro_service_end:
if _discoro_service_start >= _discoro_service_end:
raise Exception('"service_start" must be before "service_end"')
if _discoro_service_stop and _discoro_service_stop >= _discoro_service_end:
raise Exception('"service_stop" must be before "service_end"')
if not _discoro_service_stop and not _discoro_service_end:
raise Exception('"service_stop" or "service_end" must also be given')
if _discoro_config['max_file_size']:
_discoro_var = re.match(r'(\d+)([kKmMgGtT]?)', _discoro_config['max_file_size'])
if not _discoro_var or len(_discoro_var.group(0)) != len(_discoro_config['max_file_size']):
raise Exception('Invalid max_file_size option')
_discoro_config['max_file_size'] = int(_discoro_var.group(1))
if _discoro_var.group(2):
_discoro_var = _discoro_var.group(2).lower()
_discoro_config['max_file_size'] *= 1024**({'k': 1, 'm': 2, 'g': 3,
't': 4}[_discoro_var])
else:
_discoro_config['max_file_size'] = 0
if _discoro_config['certfile']:
_discoro_config['certfile'] = os.path.abspath(_discoro_config['certfile'])
else:
_discoro_config['certfile'] = None
if _discoro_config['keyfile']:
_discoro_config['keyfile'] = os.path.abspath(_discoro_config['keyfile'])
else:
_discoro_config['keyfile'] = None
_discoro_node_auth = ''.join(hex(_)[2:] for _ in os.urandom(10))
_discoro_node_auth = hashlib.sha1(_discoro_node_auth.encode()).hexdigest()
class _discoro_Struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __setattr__(self, name, value):
if hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError('Invalid attribute "%s"' % name)
_discoro_spawn_proc = None
_discoro_busy_time = multiprocessing.Value('I', 0)
_discoro_mp_queue = multiprocessing.Queue()
_discoro_servers = [None] * _discoro_cpus
for _discoro_server_id in range(1, _discoro_cpus + 1):
_discoro_server = _discoro_Struct(id=_discoro_server_id, proc=None, coro=None,
name='%s_proc-%s' % (_discoro_name, _discoro_server_id))
_discoro_servers[_discoro_server_id - 1] = _discoro_server
def _discoro_node_proc(coro=None):
from asyncoro.discoro import DiscoroNodeAvailInfo, DiscoroNodeInfo, MaxPulseInterval
global _discoro_servers, _discoro_config, _discoro_spawn_proc
coro.register('discoro_node')
coro_scheduler = asyncoro.AsynCoro.instance()
last_pulse = last_ping = time.time()
scheduler_coro = cur_computation_auth = None
interval = _discoro_config['max_pulse_interval']
ping_interval = _discoro_config.pop('ping_interval')
msg_timeout = _discoro_config['msg_timeout']
zombie_period = _discoro_config['zombie_period']
disk_path = coro_scheduler.dest_path
_discoro_config['node_coro'] = asyncoro.serialize(coro)
_discoro_config['node'] = coro.location.addr
def monitor_peers(coro=None):
coro.set_daemon()
while 1:
msg = yield coro.receive()
if not isinstance(msg, asyncoro.PeerStatus):
continue
if msg.status == asyncoro.PeerStatus.Offline:
if (scheduler_coro and scheduler_coro.location == msg.location):
_discoro_node_coro.send({'req': 'release', 'auth': cur_computation_auth})
def mp_queue_server():
global _discoro_tcp_ports
while 1:
proc_id, proc_coro = _discoro_mp_queue.get(block=True)
server = _discoro_servers[proc_id - 1]
if proc_coro:
server.coro = asyncoro.deserialize(proc_coro)
# if not _discoro_tcp_ports[server.id - 1]:
# _discoro_tcp_ports[server.id - 1] = server.coro.location.port
else:
server.coro = None
if _discoro_config['serve']:
if scheduler_coro and service_available(now):
asyncoro.logger.warning('Server %s terminated', server.name)
# _discoro_start_server(server)
elif all(not server.coro for server in _discoro_servers):
_discoro_node_coro.send({'req': 'quit', 'auth': _discoro_node_auth})
break
def service_available(now):
if not _discoro_config['serve']:
return False
if not _discoro_service_start:
return True
if _discoro_service_stop:
if (_discoro_service_start <= now < _discoro_service_stop):
return True
else:
if (_discoro_service_start <= now < _discoro_service_end):
return True
return False
def service_times_proc(coro=None):
global _discoro_service_start, _discoro_service_stop, _discoro_service_end
coro.set_daemon()
while 1:
if _discoro_service_stop:
now = int(time.time())
yield coro.sleep(_discoro_service_stop - now)
for server in _discoro_servers:
if server.coro:
server.coro.send({'req': 'quit', 'node_auth': _discoro_node_auth})
if _discoro_service_end:
now = int(time.time())
yield coro.sleep(_discoro_service_end - now)
for server in _discoro_servers:
if server.coro:
server.coro.send({'req': 'terminate', 'node_auth': _discoro_node_auth})
# advance times for next day
_discoro_service_start += 24 * 3600
if _discoro_service_stop:
_discoro_service_stop += 24 * 3600
if _discoro_service_end:
_discoro_service_end += 24 * 3600
# disable service till next start
coro_scheduler.ignore_peers(True)
now = int(time.time())
yield coro.sleep(_discoro_service_start - now)
coro_scheduler.ignore_peers(False)
coro_scheduler.discover_peers(port=_discoro_config['scheduler_port'])
def close_computation():
global _discoro_spawn_proc
for server in _discoro_servers:
if server.coro:
server.coro.send({'req': 'quit', 'node_auth': _discoro_node_auth})
if _discoro_spawn_proc:
_discoro_send_pipe.send({'req': 'quit'})
if _discoro_send_pipe.poll(5) and _discoro_send_pipe.recv() == 'closed':
_discoro_spawn_proc = None
else:
_discoro_spawn_proc.terminate()
_discoro_spawn_proc = None
while _discoro_send_pipe.poll(): # clear pipe
_discoro_send_pipe.recv()
while _discoro_recv_pipe.poll(): # clear pipe
_discoro_recv_pipe.recv()
for name in os.listdir(_discoro_config['dest_path']):
if name.startswith('discoroproc-') or name == 'discoroscheduler':
continue
name = os.path.join(_discoro_config['dest_path'], name)
if os.path.isdir(name):
shutil.rmtree(name, ignore_errors=True)
elif os.path.isfile(name):
try:
os.remove(name)
except:
pass
coro_scheduler.discover_peers(port=_discoro_config['scheduler_port'])
if _discoro_service_start:
asyncoro.Coro(service_times_proc)
qserver = threading.Thread(target=mp_queue_server)
qserver.daemon = True
qserver.start()
coro_scheduler.peer_status(asyncoro.Coro(monitor_peers))
coro_scheduler.discover_peers(port=_discoro_config['scheduler_port'])
for peer in _discoro_config['peers']:
asyncoro.Coro(coro_scheduler.peer, asyncoro.deserialize(peer))
# TODO: create new pipe for each computation instead?
_discoro_recv_pipe, _discoro_send_pipe = multiprocessing.Pipe(duplex=True)
while 1:
msg = yield coro.receive(timeout=interval)
now = time.time()
if msg:
try:
req = msg['req']
except:
req = ''
if req == 'server_setup':
try:
server = _discoro_servers[msg['id'] - 1]
assert msg['auth'] == cur_computation_auth
except:
pass
else:
if not server.coro:
server.coro = msg['coro']
last_pulse = now
elif req == 'discoro_node_info':
# request from scheduler
client = msg.get('client', None)
if isinstance(client, asyncoro.Coro):
if psutil:
info = DiscoroNodeAvailInfo(coro.location, 100.0 - psutil.cpu_percent(),
psutil.virtual_memory().available,
psutil.disk_usage(disk_path).free,
100.0 - psutil.swap_memory().percent)
else:
info = DiscoroNodeAvailInfo(coro.location, None, None, None, None)
info = DiscoroNodeInfo(_discoro_name, coro.location.addr,
len(_discoro_servers), platform.platform(), info)
client.send(info)
elif req == 'reserve':
# request from scheduler
client = msg.get('client', None)
cpus = msg.get('cpus', -1)
auth = msg.get('auth', None)
if (isinstance(client, asyncoro.Coro) and isinstance(cpus, int) and
cpus >= 0 and not cur_computation_auth and not scheduler_coro and
service_available(now) and (len(_discoro_servers) >= cpus) and auth and
isinstance(msg.get('status_coro', None), asyncoro.Coro) and
isinstance(msg.get('computation_location', None), asyncoro.Location)):
if (yield coro_scheduler.peer(msg['computation_location'])):
cpus = 0
else:
close_computation()
for server in _discoro_servers:
if server.coro:
yield coro.sleep(0.1)
if not cpus:
cpus = len(_discoro_servers)
if ((yield client.deliver(cpus, timeout=min(msg_timeout, interval))) == 1 and
cpus):
cur_computation_auth = auth
last_pulse = now
_discoro_busy_time.value = int(time.time())
scheduler_coro = msg['status_coro']
elif isinstance(client, asyncoro.Coro):
client.send(0)
elif req == 'computation':
client = msg.get('client', None)
computation = msg.get('computation', None)
if (cur_computation_auth == msg.get('auth', None) and
isinstance(client, asyncoro.Coro) and isinstance(computation, Computation)):
interval = computation._pulse_interval
last_pulse = now
if interval:
interval = min(interval, _discoro_config['max_pulse_interval'])
else:
interval = _discoro_config['max_pulse_interval']
_discoro_busy_time.value = int(time.time())
_discoro_config['scheduler_coro'] = asyncoro.serialize(scheduler_coro)
_discoro_config['computation_auth'] = computation._auth
id_ports = [(server.id, _discoro_tcp_ports[server.id - 1])
for server in _discoro_servers if not server.coro]
args = (_discoro_config, id_ports, _discoro_mp_queue, _discoro_recv_pipe,
asyncoro.serialize(computation) if os.name == 'nt'
else computation, msg.get('setup_args', ()))
_discoro_spawn_proc = multiprocessing.Process(target=_discoro_spawn,
args=args)
_discoro_spawn_proc.start()
if _discoro_send_pipe.poll(10):
cpus = _discoro_send_pipe.recv()
else:
cpus = 0
if ((yield client.deliver(cpus)) == 1) and cpus:
pass
else:
close_computation()
elif req == 'release':
auth = msg.get('auth', None)
if cur_computation_auth and auth == cur_computation_auth:
close_computation()
cur_computation_auth = None
scheduler_coro = None
interval = MaxPulseInterval
released = 'released'
else:
released = 'invalid'
client = msg.get('client', None)
if isinstance(client, asyncoro.Coro):
client.send(released)
if released == 'released' and _discoro_config['serve'] > 0:
_discoro_config['serve'] -= 1
if not _discoro_config['serve']:
break
elif req == 'close' or req == 'quit' or req == 'terminate':
auth = msg.get('auth', None)
if auth == _discoro_node_auth:
close_computation()
cur_computation_auth = None
scheduler_coro = None
interval = MaxPulseInterval
if req == 'quit' or req == 'terminate':
_discoro_config['serve'] = 0
if all(not server.coro for server in _discoro_servers):
# _discoro_mp_queue.close()
_discoro_send_pipe.close()
_discoro_recv_pipe.close()
break
else:
asyncoro.logger.warning('Invalid message %s ignored',
str(msg) if isinstance(msg, dict) else '')
if scheduler_coro:
scoro = scheduler_coro # copy in case scheduler closes meanwhile
msg = {'status': 'pulse', 'location': coro.location}
if psutil:
msg['node_status'] = DiscoroNodeAvailInfo(
coro.location, 100.0 - psutil.cpu_percent(),
psutil.virtual_memory().available, psutil.disk_usage(disk_path).free,
100.0 - psutil.swap_memory().percent)
sent = yield scoro.deliver(msg, timeout=msg_timeout)
if sent == 1:
last_pulse = now
elif (now - last_pulse) > (5 * interval):
asyncoro.logger.warning('Scheduler is not reachable; closing computation "%s"',
cur_computation_auth)
close_computation()
cur_computation_auth = None
scheduler_coro = None
interval = MaxPulseInterval
asyncoro.Coro(coro_scheduler.close_peer, scoro.location)
if _discoro_config['serve'] > 0:
_discoro_config['serve'] -= 1
if not _discoro_config['serve']:
break
if (zombie_period and ((now - _discoro_busy_time.value) > zombie_period) and
cur_computation_auth):
asyncoro.logger.warning('Closing zombie computation "%s"', cur_computation_auth)
close_computation()
cur_computation_auth = None
scheduler_coro = None
interval = MaxPulseInterval
if _discoro_config['serve'] > 0:
_discoro_config['serve'] -= 1
if not _discoro_config['serve']:
break
if ping_interval and (now - last_ping) > ping_interval and service_available(now):
coro_scheduler.discover_peers(port=_discoro_config['scheduler_port'])
try:
os.remove(_discoro_node_pid_file)
except:
pass
os.kill(os.getpid(), signal.SIGINT)
_discoro_server_config = {}
for _discoro_var in ['udp_port', 'tcp_port', 'node', 'ext_ip_addr', 'name',
'discover_peers', 'secret', 'certfile', 'keyfile', 'dest_path',
'max_file_size']:
_discoro_server_config[_discoro_var] = _discoro_config.get(_discoro_var, None)
_discoro_server_config['name'] = '%s_proc-0' % _discoro_name
_discoro_server_config['tcp_port'] = _discoro_tcp_ports.pop(0)
if _discoro_config['loglevel']:
asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# asyncoro.logger.show_ms(True)
else:
asyncoro.logger.setLevel(asyncoro.Logger.INFO)
_discoro_scheduler = asyncoro.AsynCoro(**_discoro_server_config)
_discoro_scheduler.dest_path = os.path.join(_discoro_scheduler.dest_path, 'discoro')
_discoro_node_pid_file = os.path.join(_discoro_scheduler.dest_path, 'discoroproc-0.pid')
if _discoro_config['clean']:
try:
os.remove(_discoro_node_pid_file)
except:
pass
try:
_discoro_var = os.open(_discoro_node_pid_file, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
os.write(_discoro_var, str(os.getpid()).encode())
os.close(_discoro_var)
except:
raise Exception('Another discoronode seems to be running; '
'check that no discoronode or servers are running and '
'remove *.pid files in %s' % _discoro_scheduler.dest_path)
_discoro_config['name'] = _discoro_name
_discoro_config['dest_path'] = _discoro_scheduler.dest_path
_discoro_config['node_auth'] = _discoro_node_auth
_discoro_config['busy_time'] = _discoro_busy_time
_discoro_node_coro = asyncoro.Coro(_discoro_node_proc)
del _discoro_server_config, _discoro_var
def sighandler(signum, frame):
if os.path.isfile(_discoro_node_pid_file):
_discoro_node_coro.send({'req': 'quit', 'auth': _discoro_node_auth})
else:
raise KeyboardInterrupt
try:
signal.signal(signal.SIGHUP, sighandler)
signal.signal(signal.SIGQUIT, sighandler)
except:
pass
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGABRT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
del sighandler
if _discoro_daemon:
while 1:
try:
time.sleep(3600)
except:
if os.path.exists(_discoro_node_pid_file):
_discoro_node_coro.send({'req': 'quit', 'auth': _discoro_node_auth})
break
else:
while 1:
# wait a bit for any output from the previous command to finish
time.sleep(0.2)
try:
_discoro_cmd = input(
'\nEnter\n'
' "status" to get status\n'
' "close" to stop accepting new jobs and\n'
' close current computation when current jobs are finished\n'
' "quit" to "close" current computation and exit discoronode\n'
' "terminate" to kill current jobs and "quit": ')
except:
if os.path.exists(_discoro_node_pid_file):
_discoro_node_coro.send({'req': 'quit', 'auth': _discoro_node_auth})
break
else:
_discoro_cmd = _discoro_cmd.strip().lower()
if not _discoro_cmd:
_discoro_cmd = 'status'
print('')
if _discoro_cmd == 'status':
for _discoro_server in _discoro_servers:
if _discoro_server.coro:
_discoro_server.coro.send({'req': _discoro_cmd,
'node_auth': _discoro_node_auth})
else:
print(' discoro server "%s" is not currently used' %
_discoro_server.name)
elif _discoro_cmd in ('close', 'quit', 'terminate'):
_discoro_node_coro.send({'req': _discoro_cmd, 'auth': _discoro_node_auth})
break
try:
_discoro_node_coro.value()
except:
pass
exit(0)
| pgiri/asyncoro | py3/asyncoro/discoronode.py | Python | mit | 55,226 | 0.00344 |
# -*- coding: utf-8 -*-
"""Remove team domain
Revision ID: 07f975f81f03
Revises: 4e206c5ddabd
Create Date: 2017-08-04 15:12:11.992856
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '07f975f81f03'
down_revision = '4e206c5ddabd'
branch_labels = None
depends_on = None
def upgrade():
op.drop_index('ix_team_domain', table_name='team')
op.drop_column('team', 'domain')
def downgrade():
op.add_column(
'team',
sa.Column('domain', sa.VARCHAR(length=253), autoincrement=False, nullable=True),
)
op.create_index('ix_team_domain', 'team', ['domain'], unique=False)
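# Usage sketch (assumes the project's standard Alembic setup; the commands are
# the stock Alembic CLI):
#   alembic upgrade 07f975f81f03    # drops team.domain and ix_team_domain
#   alembic downgrade 4e206c5ddabd  # restores the column and its index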
| hasgeek/lastuser | migrations/versions/07f975f81f03_remove_team_domain.py | Python | bsd-2-clause | 655 | 0.001527 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from measurements import skpicture_printer
class SkpicturePrinterUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._skp_outdir = tempfile.mkdtemp('_skp_test')
def tearDown(self):
shutil.rmtree(self._skp_outdir)
@decorators.Disabled('android')
def testSkpicturePrinter(self):
ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
measurement = skpicture_printer.SkpicturePrinter(self._skp_outdir)
results = self.RunMeasurement(measurement, ps, options=self._options)
# Picture printing is not supported on all platforms.
if results.failures:
assert 'not supported' in results.failures[0].exc_info[1].message
return
saved_picture_count = results.FindAllPageSpecificValuesNamed(
'saved_picture_count')
self.assertEquals(len(saved_picture_count), 1)
self.assertGreater(saved_picture_count[0].GetRepresentativeNumber(), 0)
| axinging/chromium-crosswalk | tools/perf/measurements/skpicture_printer_unittest.py | Python | bsd-3-clause | 1,305 | 0.004598 |
# -*- coding: utf-8 -*-
import pandas as pd
import os
from flutype.data_management.fill_master import Master
import numpy as np
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def extract_peptide_batch(ma):
gal_lig_fix = ma.read_gal_ligand("170725_N13", index=False)
unique_peptides = gal_lig_fix[0].drop_duplicates(subset=["ID"])
unique_peptides = unique_peptides[unique_peptides.ID != "Empty"]
unique_peptides.ID = unique_peptides.ID.astype(int)
unique_peptides.sort_values(by = "ID", inplace=True)
unique_peptides.Name = unique_peptides.Name.str.replace('FAIL_', "")
unique_peptides['Concentration'] = unique_peptides.Name.str.rpartition('_')[0]
unique_peptides['Concentration'] = unique_peptides.Concentration.str.partition('_')[0]
peptide_batch = pd.DataFrame(unique_peptides[["Name","Concentration"]].values,columns=["sid","concentration"])
peptide_batch["labeling"] = ""
peptide_batch["buffer"] = ""
peptide_batch["ph"] = ""
peptide_batch["purity"] = ""
peptide_batch["produced_by"] = ""
peptide_batch["comment"] = ""
peptide_batch["ligand"] = ""
peptide_batch["ligand"] = unique_peptides.Name.str.partition('_')[2].values
return peptide_batch
def gal_reformat(ma):
gal_lig_fix = ma.read_gal_ligand("170725_N15", index= False)
gal_lig_fix_new = pd.DataFrame(gal_lig_fix[0][["Block","Row","Column","Name"]])
mapping = {"Empty":"NO",
"Panama":"Pan3",
"California":"Cal2",
"Aichi":"Ach1",
"1.0_Kloe_Amid":"KLOA025",
"0.5_Kloe_Amid":"KLOA050",
"0.25_Kloe_Amid":"KLOA025",
"1.0_pep_Nenad":"NEN100",
"0.5_pep_Nenad":"NEN050",
"0.25_pep_Nenad":"NEN025",
"1.0_Fetuin":"P012-1",
"0.5_Fetuin":"P012-05",
"0.25_Fetuin":"P012-025",
"1.0_Leuchtefix":"DYE100",
"0.5_Leuchtefix":"DYE050",
"0.25_Leuchtefix":"DYE025",
'FAIL_': ""
}
for key in mapping:
gal_lig_fix_new.Name = gal_lig_fix_new.Name.str.replace(key, mapping[key])
mapping = {"1.0_Kloe_S":"KLOS100",
"0.5_Kloe_S":"KLOS050",
"0.25_Kloe_S":"KLOS025"
}
for key in mapping:
gal_lig_fix_new.loc[gal_lig_fix_new["Name"].str.contains(key), "Name"] = mapping[key]
return gal_lig_fix_new
def peptide_batches_not_in_master(ma,gal_lig_fix):
s_gal = set(gal_lig_fix["Name"].values)
data_dic = ma.read_data_tables()
s_pb = set(data_dic["peptide_batch"]["sid"].values)
s_ab = set(data_dic["antibody_batch"]["sid"].values)
s_vb = set(data_dic["virus_batch"]["sid"].values)
s_b = s_pb
s_b.update(s_ab)
s_b.update(s_vb)
return(s_gal - s_b)
def reshape_gal_file(shape, gal_file):
a = []
b = []
for i in range(shape[1]):
for ii in range(shape[0]):
a.append(i )
b.append(ii )
gal_file["row_factor"] = 0
gal_file["column_factor"] = 0
print(a)
print(b)
for block_num,block_factor in enumerate(a):
gal_file.loc[gal_file["Block"] == block_num+1, "row_factor"] = block_factor
for block_num, block_factor in enumerate(b):
gal_file.loc[gal_file["Block"] == block_num+1, "column_factor"] = block_factor
gal_file["Row"]=gal_file["Row"]+(gal_file["Row"].max()*gal_file["row_factor"])
gal_file["Column"]=gal_file["Column"]+(gal_file["Column"].max()*gal_file["column_factor"])
return gal_file
def three_viruses_gal(gal_file):
virus_map = {}
for i in range(1,33):
if i <= 12:
virus_map[i] = "Ach1"
elif 12 < i <= 24:
virus_map[i] = "Cal2"
elif 24 < i:
virus_map[i] = "Pan3"
for key in virus_map.keys():
gal_file.loc[gal_file["Block"]== key , "Name"] =virus_map[key]
return gal_file
####################################################################
if __name__ == "__main__":
ma_path = "../master_uncomplete/"
ma = Master(ma_path)
#peptide_batch = extract_peptide_batch(ma)
# print_full(peptide_batch)
#fp = os.path.join(ma.collections_path,"170725_N13","peptides_batch.csv")
# peptide_batch.to_csv(fp)
ma_path_standard = "../master/"
ma_standard = Master(ma_path_standard)
gal_lig_fix = gal_reformat(ma)
#subset = peptide_batches_not_in_master(ma_standard,gal_lig_fix)
gal_lig_fix= reshape_gal_file((4,8), gal_lig_fix)
gal_lig_fix = gal_lig_fix.reset_index(drop=True)
fp = os.path.join(ma.collections_path,"170725_P7","lig_fix_012.txt")
gal_lig_fix.to_csv(fp, sep='\t',index=True , index_label="ID")
#gal_lig_fix = three_viruses_gal(gal_lig_fix)
gal_lig_fix["Name"] = "Ach1"
fp2 = os.path.join(ma.collections_path,"170725_P7","lig_mob_016.txt")
gal_lig_fix.to_csv(fp2, sep='\t', index=True,index_label="ID")
| janekg89/flutype_webapp | flutype/gal-file.py | Python | lgpl-3.0 | 5,034 | 0.0147 |
# -*- coding: utf-8 -*-
import datetime
from datetime import date, datetime as dt
from email import Encoders
from email.header import Header
import os
import pickle
import random
import smtplib
import traceback
import calendar
from decimal import Decimal
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE
from email.Utils import formatdate
from tg import flash
from tg import redirect
from tg import response
from tg import session
from tg import config
from vatsystem.model import *
from vatsystem.util import const
from sqlalchemy import *
from sqlalchemy.sql import and_
DISPLAY_DATE_FORMAT="%Y-%m-%d"
__all__=['getCompanyCode', "tabFocus", "Date2Text", "getOr404",
"sendEmail", "number2alphabet", "serveFile", 'alphabet2number',
'gerRandomStr', 'allAlpha', 'comp', '_get_params_from_args_and_obj', '_orderly_dict', '_get_lastday_of_month', 'CRef']
def comp(obj,compObj):
if type(obj) == type(1):compObj = int(compObj)
if type(obj) == type(Decimal("1.00")):
obj = float(str(obj))
if compObj:
compObj = float(str(compObj))
if type(obj) == type(u'') or type(compObj) == type(u''):
obj = obj.encode("utf-8").replace("\r\n",'') if obj else None
if compObj:
compObj = compObj.encode("utf-8")
return [True,str(obj),str(compObj)] if not obj==compObj else [False]
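# Illustrative behaviour of comp() based on the code above: it returns [False]
# when the two values match after coercion, otherwise [True, str(a), str(b)]:
#   comp(1, '1') -> [False]
#   comp(u'abc', u'abd') -> [True, 'abc', 'abd']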
def getCompanyCode(type = None):
company_code = [session['company_code']]
if type == 1: company_code = "('%s')" % session['company_code']
return company_code
def tabFocus(tab_type=""):
def decorator(fun):
def returnFun(*args, ** keywordArgs):
returnVal=fun(*args, ** keywordArgs)
if type(returnVal)==dict and "tab_focus" not in returnVal:
returnVal["tab_focus"]=tab_type
return returnVal
return returnFun
return decorator
def Date2Text(value=None, dateTimeFormat=DISPLAY_DATE_FORMAT, defaultNow=False):
if not value and defaultNow: value=datetime.now()
format=dateTimeFormat
result=value
if isinstance(value, date):
try:
result=value.strftime(format)
except:
traceback.print_exc()
elif hasattr(value, "strftime"):
try:
result=value.strftime(format)
except:
traceback.print_exc()
if not result:
result=""
return result
def getOr404(obj, id, redirect_url="/index", message="The record doesn't exist!"):
try:
v=DBSession.query(obj).get(id)
if v: return v
else: raise "No such obj"
except:
traceback.print_exc()
flash(message)
redirect(redirect_url)
def number2alphabet(n):
result=[]
while n>=0:
if n>26:
result.insert(0, n%26)
n/=26
else:
result.insert(0, n)
break
return "".join([chr(r+64) for r in result]) if result else None
def alphabet2number(str):
if not str or not isinstance(str, basestring): raise TypeError
if not str.isalpha(): raise ValueError
return reduce(lambda a, b: (a*26)+ord(b)-ord("a")+1, str.lower(), 0)
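# A few illustrative values for the two helpers above, assuming the usual
# spreadsheet-style column naming:
#   number2alphabet(1)   -> 'A'
#   number2alphabet(28)  -> 'AB'
#   alphabet2number('A')  -> 1
#   alphabet2number('ab') -> 28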
def sendEmail(send_from, send_to, subject, text, cc_to=[], files=[], server="192.168.42.13"):
assert type(send_to)==list
assert type(files)==list
msg=MIMEMultipart()
msg.set_charset("utf-8")
msg['From']=send_from
msg['To']=COMMASPACE.join(send_to)
if cc_to:
assert type(cc_to)==list
msg['cc']=COMMASPACE.join(cc_to)
send_to.extend(cc_to)
msg['Date']=formatdate(localtime=True)
msg['Subject']=subject
msg.attach(MIMEText(text))
for f in files:
part=MIMEBase('application', "octet-stream")
part.set_payload(open(f, "rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"'%Header(os.path.basename(f), 'utf-8'))
msg.attach(part)
smtp=smtplib.SMTP(server)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
def serveFile(fileName, contentType="application/x-download", contentDisposition="attachment", charset="utf-8"):
response.headers['Content-type']='application/x-download' if not contentType else contentType
#response.headers['Content-Disposition']="%s;filename=%s"%(contentDisposition, Header(os.path.basename(fileName), charset))
response.headers['Content-Disposition']="%s;filename=%s"%(contentDisposition, os.path.basename(fileName).encode('utf-8'))
f=open(fileName, 'rb')
content="".join(f.readlines())
f.close()
return content
def defaultIfNone(blackList=[None, ], default=""):
def returnFun(value):
defaultValue=default() if callable(default) else default
if value in blackList:
return defaultValue
else:
try:
return str(value)
except:
try:
return repr(value)
except:
pass
return defaultValue
return returnFun
def _get_params_from_args_and_obj(keys, obj, ** args):
params = {}
for i in keys:
if type(i) == dict:
params.update(i)
else:
i, j = i if ((type(i) == list or type(i) == tuple) and len(i) == 2) else (i, i)
if args.get(j) != None:
params[i] = args.get(j)
elif obj.__dict__.get(j) != None:
params[i] = obj.__dict__[j]
return params
def _orderly_dict(list, coor):
new_list = {}
for i in list:
if new_list.get(i.get(coor)):
new_list[i.get(coor)].append(i)
else:
new_list.update({i.get(coor):[i]})
new_dict = []
for key, value in new_list.iteritems():
new_dict.extend(value)
return new_dict
def _get_lastday_of_month(date_str):
date_str = date_str.split(".")
last_day = calendar.monthrange(int(date_str[0]), int(date_str[1]))[1]
return datetime.datetime.strptime("%s.%s.%s" % (date_str[0], date_str[1], last_day if len(date_str) < 3 else date_str[2]), "%Y.%m.%d")
class CRef(object):
def __init__(self):
self.file = os.path.join(os.path.abspath(os.path.curdir), 'data', "ref.pickle")
def save(self, **kwargs):
pickle.dump(kwargs, open(self.file, "w"))
def get(self, head_type):
refTime = dt.now().strftime('%Y%m')[2:]
if os.path.isfile(self.file):
obj = pickle.load(open(self.file, 'r'))
r = obj.get(head_type, 0)
if r and r != 0 and str(r)[:4] != refTime:
r = 0
else:
r = int(r[4:]) if isinstance(r, str) else 0
r = "%s%06d" % (refTime, r + 1)
obj.update({head_type:r})
self.save(**obj)
return r
else:
r = "%s%06d" % (refTime, 1)
self.save(**{head_type:r})
return r
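# Illustrative use of CRef (assumes a writable ./data directory for ref.pickle):
#   ref = CRef()
#   ref.get('INV')   # e.g. '1708000001' in August 2017, then '1708000002', ...
# Counters are kept per head_type and restart when the YYMM prefix changes.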
null2blank=defaultIfNone(blackList=[None, "NULL", "null", "None"])
numberAlpha=[str(a) for a in range(10)]
lowerAlpha=[chr(a) for a in range(ord("a"), ord("z")+1)]
upperAlpha=[chr(a) for a in range(ord("A"), ord("Z")+1)]
allAlpha=numberAlpha+lowerAlpha+upperAlpha
gerRandomStr=lambda str_length, randomRange=numberAlpha : "".join(random.sample(randomRange, str_length))
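# gerRandomStr samples characters without replacement (random.sample), so
# str_length must not exceed len(randomRange). Illustrative calls:
#   gerRandomStr(6)                        # six distinct digits, e.g. '390174'
#   gerRandomStr(8, randomRange=allAlpha)  # eight distinct alphanumeric characters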
| LamCiuLoeng/vat | vatsystem/util/common.py | Python | mit | 7,482 | 0.014969 |
# Copyright (C) 2017 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import requests
class FakeResponse:
"""A fake version of a requests response object, just about suitable for
mocking a server response. Not usually used directly. See the
MockAsyncServerResponse* helpers below."""
def __init__( self, response, exception ):
self._json = response
self._exception = exception
self.status_code = requests.codes.ok
self.text = not exception
def json( self ):
if self._exception:
return None
return self._json
def raise_for_status( self ):
if self._exception:
raise self._exception
class FakeFuture:
"""A fake version of a future response object, just about suitable for
mocking a server response as generated by PostDataToHandlerAsync.
Not usually used directly. See MockAsyncServerResponse* methods"""
def __init__( self, done, response = None, exception = None ):
self._done = done
if not done:
self._result = None
else:
self._result = FakeResponse( response, exception )
def done( self ):
return self._done
def result( self ):
return self._result
def MockAsyncServerResponseDone( response ):
"""Return a MessagePoll containing a fake future object that is complete with
the supplied response message. Suitable for mocking a response future within
a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseDone( response )
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( True, response ) )
def MockAsyncServerResponseInProgress():
"""Return a fake future object that is incomplete. Suitable for mocking a
response future within a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseInProgress()
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( False ) )
def MockAsyncServerResponseException( exception ):
"""Return a fake future object that is complete, but raises an exception.
Suitable for mocking a response future within a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseException( exception )
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( True, None, exception ) )
# TODO: In future, implement MockServerResponse and MockServerResponseException
# for synchronous cases when such test cases are needed.
| Valloric/YouCompleteMe | python/ycm/tests/mock_utils.py | Python | gpl-3.0 | 4,496 | 0.013568 |
ACCESS_KEY = 'twitter_access_token'
REQUEST_KEY = 'twitter_request_token'
SUCCESS_URL_KEY = 'twitter_success_url'
USERINFO_KEY = 'twitter_user_info'
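# These constants appear to be Django session/state keys used by
# django_oauth_twitter (an assumption based on their names), e.g.:
#   request.session[ACCESS_KEY] = access_token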
| callowayproject/django-tweeter | django_oauth_twitter/__init__.py | Python | mit | 149 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-13 15:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='category name', max_length=200, unique=True, verbose_name='name')),
('desc', models.TextField(help_text='category description', verbose_name='description')),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Category',
'ordering': ['name'],
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('desc', models.TextField(blank=True, help_text="order's description", verbose_name='description')),
('status', models.PositiveIntegerField(choices=[(0, 'preparation'), (1, 'sent'), (2, 'received')], default=0, help_text="order's status", verbose_name='status')),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Customer', verbose_name='customer')),
],
options={
'verbose_name': 'Order',
'ordering': ['-modified', '-created'],
'verbose_name_plural': 'Orders',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='product name', max_length=200, verbose_name='name')),
('price', models.FloatField(db_index=True, help_text="product's price", verbose_name='price')),
('image', models.ImageField(help_text="product's image", upload_to='images/', verbose_name='image')),
('desc', models.TextField(help_text="product's description", verbose_name='description')),
('modified', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(help_text="product's category", on_delete=django.db.models.deletion.CASCADE, to='sales.Category', verbose_name='category')),
],
options={
'verbose_name': 'Product',
'ordering': ['name'],
'verbose_name_plural': 'Products',
},
),
migrations.CreateModel(
name='ProductSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveIntegerField(default=1, verbose_name='number')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.Order', verbose_name='Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.Product', verbose_name='Product')),
],
options={
'verbose_name': 'ProductSet',
'ordering': ['id'],
'verbose_name_plural': 'ProductSets',
},
),
migrations.AddField(
model_name='order',
name='product',
field=models.ManyToManyField(blank=True, through='sales.ProductSet', to='sales.Product', verbose_name='Product'),
),
]
| z0rr0/eshop | shop/sales/migrations/0001_initial.py | Python | mit | 4,179 | 0.004786 |
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
sql, params = self.as_sql()
return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
def as_sql(self):
if self.connection.features.update_can_self_select or self.single_alias:
return super().as_sql()
        # MySQL and MariaDB < 10.3.2 don't support deletion with a subquery,
        # which is what the default implementation of SQLDeleteCompiler uses
        # when multiple tables are involved. Use the MySQL/MariaDB-specific
        # DELETE table FROM table syntax instead to avoid performing the
        # operation in two queries.
result = [
'DELETE %s FROM' % self.quote_name_unless_alias(
self.query.get_initial_alias()
)
]
from_sql, from_params = self.get_from_clause()
result.extend(from_sql)
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(from_params) + tuple(params)
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
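# Illustrative sketch (not part of the original module): for a delete that
# spans two tables, SQLDeleteCompiler above emits SQL shaped roughly like
#
#     DELETE `myapp_book` FROM `myapp_book`
#     INNER JOIN `myapp_author` ON (`myapp_book`.`author_id` = `myapp_author`.`id`)
#     WHERE `myapp_author`.`name` = %s
#
# instead of the subquery form used on backends where update_can_self_select
# is True. The table and column names here are hypothetical.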
| georgemarshall/django | django/db/backends/mysql/compiler.py | Python | bsd-3-clause | 1,599 | 0.001251 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tracing_project
from py_vulcanize import generate
from tracing_build import render_histograms_viewer
def VulcanizeHistogramsViewer():
"""Vulcanizes Histograms viewer with its dependencies.
Args:
path: destination to write the vulcanized viewer HTML.
"""
vulcanizer = tracing_project.TracingProject().CreateVulcanizer()
load_sequence = vulcanizer.CalcLoadSequenceForModuleNames(
['tracing_build.histograms_viewer'])
return generate.GenerateStandaloneHTMLAsString(load_sequence)
def VulcanizeAndRenderHistogramsViewer(
histogram_dicts, output_stream, reset_results=False):
render_histograms_viewer.RenderHistogramsViewer(
histogram_dicts, output_stream, reset_results,
VulcanizeHistogramsViewer())
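# Minimal usage sketch (an assumption, not part of the original module):
# render an empty histogram set into a standalone HTML page. The output
# file name is hypothetical.
if __name__ == '__main__':
  with open('histograms.html', 'w') as output_stream:
    VulcanizeAndRenderHistogramsViewer([], output_stream, reset_results=True)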
| catapult-project/catapult | tracing/tracing_build/vulcanize_histograms_viewer.py | Python | bsd-3-clause | 912 | 0.006579 |
"""
See LICENSE file for copyright and license details.
"""
from app import app
from flask import render_template, flash, redirect
#from app.forms import LoginForm
from app.modules.constant import *
@app.route("/")
@app.route("/index")
@app.route("/index/")
@app.route("/<app_profile>/index")
@app.route("/<app_profile>/index/")
@app.route("/<app_profile>")
@app.route("/<app_profile>/")
def index(app_profile = AppProfile.PERSONAL):
"""
Index page
"""
user = { 'login': 'rockwolf' } # fake user
if app_profile == '':
app_profile = 'personal'
return render_template("index.html",
title = 'Central command entity',
user = user,
app_profile = app_profile.lower())
@app.route("/report_finance")
@app.route("/report_finance/")
@app.route("/<app_profile>/report_finance")
@app.route("/<app_profile>/report_finance/")
def report_finance(app_profile = AppProfile.PERSONAL):
"""
Financial reports.
"""
# Make reports per year in pdf (gnucash) and put links to them here.
    return 'TBD'
@app.route("/trading_journal")
@app.route("/trading_journal/")
@app.route("/<app_profile>/trading_journal")
@app.route("/<app_profile>/trading_journal/")
def trading_journal(app_profile = AppProfile.PERSONAL):
"""
Trading Journal
"""
if app_profile == AppProfile.ZIVLE:
return render_template("trading_journal.html",
title = 'Trading Journal',
user = user,
app_profile = app_profile.lower())
else:
return render_template("404.html",
title = '404')
@app.route("/contact")
@app.route("/contact/")
@app.route("/<app_profile>/contact")
@app.route("/<app_profile>/contact/")
def contact(app_profile = AppProfile.PERSONAL):
"""
Address book.
"""
# Try to sync this with abook? Can abook export them?
    return 'TBD'
@app.route("/task")
@app.route("/task/")
@app.route("/<app_profile>/task")
@app.route("/<app_profile>/task/")
def task(app_profile = AppProfile.PERSONAL):
"""
Task and schedule information.
"""
# TODO: generate output of reminders and put it in a new text-file,
# e.g. remind ~/.reminders -c etc.
# TODO: where to schedule the reminders.txt generation?
if app_profile == AppProfile.ZIVLE:
task_file = TaskFile.ZIVLE
reminder_file = ReminderFile.ZIVLE
elif app_profile == AppProfile.PERSONAL:
task_file = TaskFile.PERSONAL
reminder_file = ReminderFile.PERSONAL
else:
error = true
if not error:
return render_template("task.html",
title = 'Tasks',
user = user,
app_profile = app_profile.lower(),
tasks = load_lines(task_file),
reminders = load_lines(reminder_file)
)
else:
return render_template("404.html",
title = '404')
@app.route('/login', methods = ['GET', 'POST'])
@app.route('/login/', methods = ['GET', 'POST'])
def login():
form = LoginForm()
return render_template('login.html',
title = 'Sign In',
form = form)
@app.route("/links")
@app.route("/links/")
@app.route("/<app_profile>/links")
@app.route("/<app_profile>/links/")
def links(app_profile = AppProfile.PERSONAL):
"""
Link bookmarks.
"""
user = { 'login': 'rockwolf' } # fake user
# Try to read from text-files and build links dynamically
# Format: data/<profile>/links.txt
# Textfile format: <url>;<name>;<description>
#TODO: put links_file in constant.py
#or find a more general way to configure files?
#links_file = 'C:\\Users\\AN\\home\\other\\Dropbox\\cece\\app\\data\\' + app_profile + '\\links.txt'
links_file = '/home/rockwolf/Dropbox/cece/app/data/' + app_profile + '/links.txt'
links_full = load_lines(links_file)
links = []
for link_full in links_full:
links.append(link_full.split(';'))
links.sort(key=lambda k: k[1])
categories = []
for link in links:
if link[1] not in categories:
categories.append(link[1])
return render_template("links.html",
title = 'Bookmarks',
user = user,
app_profile = app_profile.lower(),
categories = categories,
total = len(links),
links = links
)
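# Illustrative example (an assumption, not part of the original file): a line
# in data/<profile>/links.txt such as
#
#     https://www.python.org;programming;The Python homepage
#
# is split on ';' into three fields; the second field is used to group and
# sort the links into categories on the page. The URL and labels above are
# hypothetical.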
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html',
title = '404'), 404
def load_lines(text_file):
"""
Reads the text file and returns a list of lines.
"""
lines = []
with open(text_file, encoding='utf-8') as text:
for line in text:
lines.append(line.strip())
return lines
| rockwolf/python | cece/app/views.py | Python | bsd-3-clause | 4,840 | 0.022727 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2016 Onestein (<http://www.onestein.eu>).
| hip-odoo/odoo | addons/l10n_nl/__init__.py | Python | agpl-3.0 | 158 | 0 |
__author__ = 'robswift'
__project__ = 'blastnfilter'
import os
from BlastNFilter.PreRelease import ParsePreRelease
from BlastNFilter.Blast import ParseAlignment
import OutPut
def run(options):
non_polymer = options.non_polymer
polymer = options.polymer
out = options.out
blast_dir = os.path.abspath(options.blast_db)
pdb_db = os.path.join(blast_dir, 'pdb_db')
fasta = os.path.join(blast_dir, 'pdb_seqres.txt')
target_list = ParsePreRelease.add_ligands(non_polymer)
target_list = ParsePreRelease.add_sequences(polymer, target_list)
#new = [x for x in target_list if x.get_pdb_id().lower() == '2n02']
target_list = ParseAlignment.blast_the_targets(target_list, pdb_db, fasta)
target_list = ParseAlignment.remove_multiple_dockers(target_list)
OutPut.write_csv(target_list, out)
| rvswift/BlastNFilter | build/lib/BlastNFilter/Utilities/Run.py | Python | bsd-3-clause | 827 | 0.001209 |
def extractBijinsans(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Benkyou no Kamisama wa Hitomishiri' in item['tags']:
return buildReleaseMessageWithType(item, 'Benkyou no Kamisama wa Hitomishiri', vol, chp, frag=frag, postfix=postfix)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractBijinsans.py | Python | bsd-3-clause | 387 | 0.023256 |
'''
Kivy framework
==============
Kivy is an open source library for developing multi-touch applications. It is
completely cross-platform (Linux/OSX/Win) and released under the terms of the
MIT License.
It comes with native support for many multi-touch input devices, a growing
library of multi-touch aware widgets and hardware accelerated OpenGL drawing.
Kivy is designed to let you focus on building custom and highly interactive
applications as quickly and easily as possible.
With Kivy, you can take full advantage of the dynamic nature of Python. There
are thousands of high-quality, free libraries that can be integrated in your
application. At the same time, performance-critical parts are implemented
in the C language.
See http://kivy.org for more information.
'''
__all__ = (
'require',
'kivy_configure', 'kivy_register_post_configuration',
'kivy_options', 'kivy_base_dir',
'kivy_modules_dir', 'kivy_data_dir', 'kivy_shader_dir',
'kivy_icons_dir', 'kivy_home_dir', 'kivy_userexts_dir',
'kivy_config_fn', 'kivy_usermodules_dir',
)
__version__ = '1.9.1-dev'
import sys
import shutil
from getopt import getopt, GetoptError
from os import environ, mkdir, pathsep
from os.path import dirname, join, basename, exists, expanduser, isdir
from kivy.logger import Logger, LOG_LEVELS
from kivy.utils import platform
# internals for post-configuration
__kivy_post_configuration = []
if platform == 'macosx' and sys.maxsize < 9223372036854775807:
r = '''Unsupported Python version detected!:
Kivy requires a 64 bit version of Python to run on OS X. We strongly
advise you to use the version of Python that is provided by Apple
(don't use ports, fink or homebrew unless you know what you're
doing).
See http://kivy.org/docs/installation/installation-macosx.html for
details.
'''
Logger.critical(r)
def require(version):
'''Require can be used to check the minimum version required to run a Kivy
application. For example, you can start your application code like this::
import kivy
kivy.require('1.0.1')
If a user attempts to run your application with a version of Kivy that is
older than the specified version, an Exception is raised.
The Kivy version string is built like this::
X.Y.Z[-tag[-tagrevision]]
X is the major version
Y is the minor version
Z is the bugfixes revision
The tag is optional, but may be one of 'dev', 'alpha', or 'beta'.
The tagrevision is the revision of the tag.
.. warning::
You must not ask for a version with a tag, except -dev. Asking for a
'dev' version will just warn the user if the current Kivy
version is not a -dev, but it will never raise an exception.
You must not ask for a version with a tagrevision.
'''
def parse_version(version):
# check for tag
tag = None
tagrev = None
if '-' in version:
l = version.split('-')
if len(l) == 2:
version, tag = l
elif len(l) == 3:
version, tag, tagrev = l
else:
raise Exception('Revision format must be X.Y.Z[-tag]')
# check x y z
l = version.split('.')
if len(l) != 3:
raise Exception('Revision format must be X.Y.Z[-tag]')
return [int(x) for x in l], tag, tagrev
# user version
revision, tag, tagrev = parse_version(version)
# current version
sysrevision, systag, systagrev = parse_version(__version__)
# ensure that the required version don't contain tag, except dev
if tag not in (None, 'dev'):
raise Exception('Revision format must not have any tag except "dev"')
if tag == 'dev' and systag != 'dev':
Logger.warning('Application requested a -dev version of Kivy. '
'(You have %s, but the application requires %s)' % (
__version__, version))
# not tag rev (-alpha-1, -beta-x) allowed.
if tagrev is not None:
raise Exception('Revision format must not contain any tagrevision')
# finally, checking revision
if sysrevision < revision:
raise Exception('The version of Kivy installed on this system '
'is too old. '
'(You have %s, but the application requires %s)' % (
__version__, version))
def kivy_configure():
'''Call post-configuration of Kivy.
This function must be called if you create the window yourself.
'''
for callback in __kivy_post_configuration:
callback()
def kivy_register_post_configuration(callback):
'''Register a function to be called when kivy_configure() is called.
.. warning::
Internal use only.
'''
__kivy_post_configuration.append(callback)
def kivy_usage():
'''Kivy Usage: %s [OPTION...]::
-h, --help
Prints this help message.
-d, --debug
Shows debug log.
-a, --auto-fullscreen
Force 'auto' fullscreen mode (no resolution change).
Uses your display's resolution. This is most likely what you want.
-c, --config section:key[:value]
Set a custom [section] key=value in the configuration object.
-f, --fullscreen
Force running in fullscreen mode.
-k, --fake-fullscreen
Force 'fake' fullscreen mode (no window border/decoration).
Uses the resolution specified by width and height in your config.
-w, --windowed
Force running in a window.
-p, --provider id:provider[,options]
Add an input provider (eg: ccvtable1:tuio,192.168.0.1:3333).
-m mod, --module=mod
Activate a module (use "list" to get a list of available modules).
-r, --rotation
Rotate the window's contents (0, 90, 180, 270).
-s, --save
Save current Kivy configuration.
--size=640x480
Size of window geometry.
--dpi=96
Manually overload the Window DPI (for testing only.)
'''
print(kivy_usage.__doc__ % (basename(sys.argv[0])))
#: Global settings options for kivy
kivy_options = {
'window': ('egl_rpi', 'sdl2', 'pygame', 'sdl', 'x11'),
'text': ('pil', 'sdl2', 'pygame', 'sdlttf'),
'video': (
'gstplayer', 'ffmpeg', 'ffpyplayer', 'gi', 'pygst', 'pyglet',
'null'),
'audio': (
'gstplayer', 'pygame', 'gi', 'pygst', 'ffpyplayer', 'sdl2',
'avplayer'),
'image': ('tex', 'imageio', 'dds', 'gif', 'sdl2', 'pygame', 'pil', 'ffpy'),
'camera': ('opencv', 'gi', 'pygst', 'videocapture', 'avfoundation', 'android'),
'spelling': ('enchant', 'osxappkit', ),
'clipboard': (
'android', 'winctypes', 'xsel', 'xclip', 'dbusklipper', 'nspaste',
'sdl2', 'pygame', 'dummy', 'gtk3', )}
# Read environment
for option in kivy_options:
key = 'KIVY_%s' % option.upper()
if key in environ:
try:
if type(kivy_options[option]) in (list, tuple):
kivy_options[option] = environ[key].split(',')
else:
kivy_options[option] = environ[key].lower() in \
('true', '1', 'yes', 'yup')
except Exception:
Logger.warning('Core: Wrong value for %s environment key' % key)
Logger.exception('')
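# Illustrative example (an assumption, not part of the original module):
# setting the environment variable
#
#     KIVY_TEXT=pil,sdl2
#
# before importing kivy restricts the text providers to 'pil' and 'sdl2',
# in that order, because list-valued options are split on ',' above.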
# Extract all needed path in kivy
#: Kivy directory
kivy_base_dir = dirname(sys.modules[__name__].__file__)
#: Kivy modules directory
kivy_modules_dir = environ.get('KIVY_MODULES_DIR',
join(kivy_base_dir, 'modules'))
#: Kivy extension directory
kivy_exts_dir = environ.get('KIVY_EXTS_DIR',
join(kivy_base_dir, 'extensions'))
#: Kivy data directory
kivy_data_dir = environ.get('KIVY_DATA_DIR',
join(kivy_base_dir, 'data'))
#: Kivy binary deps directory
kivy_binary_deps_dir = environ.get('KIVY_BINARY_DEPS',
join(kivy_base_dir, 'binary_deps'))
#: Kivy glsl shader directory
kivy_shader_dir = join(kivy_data_dir, 'glsl')
#: Kivy icons config path (don't remove the last '')
kivy_icons_dir = join(kivy_data_dir, 'icons', '')
#: Kivy user-home storage directory
kivy_home_dir = ''
#: Kivy configuration filename
kivy_config_fn = ''
#: Kivy user modules directory
kivy_usermodules_dir = ''
#: Kivy user extensions directory
kivy_userexts_dir = ''
# Don't go further if we generate documentation
if any(name in sys.argv[0] for name in ('sphinx-build', 'autobuild.py')):
environ['KIVY_DOC'] = '1'
if 'sphinx-build' in sys.argv[0]:
environ['KIVY_DOC_INCLUDE'] = '1'
if any('nosetests' in arg for arg in sys.argv):
environ['KIVY_UNITTEST'] = '1'
if any('pyinstaller' in arg for arg in sys.argv):
environ['KIVY_PACKAGING'] = '1'
if not environ.get('KIVY_DOC_INCLUDE'):
# Configuration management
if 'KIVY_HOME' in environ:
kivy_home_dir = expanduser(environ['KIVY_HOME'])
else:
user_home_dir = expanduser('~')
if platform == 'android':
user_home_dir = environ['ANDROID_APP_PATH']
elif platform == 'ios':
user_home_dir = join(expanduser('~'), 'Documents')
kivy_home_dir = join(user_home_dir, '.kivy')
kivy_config_fn = join(kivy_home_dir, 'config.ini')
kivy_usermodules_dir = join(kivy_home_dir, 'mods')
kivy_userexts_dir = join(kivy_home_dir, 'extensions')
icon_dir = join(kivy_home_dir, 'icon')
if 'KIVY_NO_CONFIG' not in environ:
if not exists(kivy_home_dir):
mkdir(kivy_home_dir)
if not exists(kivy_usermodules_dir):
mkdir(kivy_usermodules_dir)
if not exists(kivy_userexts_dir):
mkdir(kivy_userexts_dir)
if not exists(icon_dir):
try:
shutil.copytree(join(kivy_data_dir, 'logo'), icon_dir)
except:
Logger.exception('Error when copying logo directory')
# configuration
from kivy.config import Config
# Set level of logger
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
# Can be overrided in command line
if ('KIVY_UNITTEST' not in environ and
'KIVY_PACKAGING' not in environ and
'KIVY_NO_ARGS' not in environ):
        # save sys.argv; otherwise gstreamer uses it and displays help.
sys_argv = sys.argv
sys.argv = sys.argv[:1]
try:
opts, args = getopt(sys_argv[1:], 'hp:fkawFem:sr:dc:', [
'help', 'fullscreen', 'windowed', 'fps', 'event',
'module=', 'save', 'fake-fullscreen', 'auto-fullscreen',
'display=', 'size=', 'rotate=', 'config=', 'debug',
'dpi='])
except GetoptError as err:
Logger.error('Core: %s' % str(err))
kivy_usage()
sys.exit(2)
# set argv to the non-read args
sys.argv = sys_argv[0:1] + args
else:
opts = []
args = []
need_save = False
for opt, arg in opts:
if opt in ('-h', '--help'):
kivy_usage()
sys.exit(0)
elif opt in ('-p', '--provider'):
try:
pid, args = arg.split(':', 1)
Config.set('input', pid, args)
except ValueError:
                # When building an executable on macosx with
                # pyinstaller, information is also passed with -p, which
                # conflicts with our own -p option. Since the format is
                # not the same, just ignore it.
pass
elif opt in ('-a', '--auto-fullscreen'):
Config.set('graphics', 'fullscreen', 'auto')
elif opt in ('-c', '--config'):
l = arg.split(':', 2)
if len(l) == 2:
Config.set(l[0], l[1], '')
elif len(l) == 3:
Config.set(l[0], l[1], l[2])
else:
raise Exception('Invalid --config value')
if l[0] == 'kivy' and l[1] == 'log_level':
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
elif opt in ('-k', '--fake-fullscreen'):
Config.set('graphics', 'fullscreen', 'fake')
elif opt in ('-f', '--fullscreen'):
Config.set('graphics', 'fullscreen', '1')
elif opt in ('-w', '--windowed'):
Config.set('graphics', 'fullscreen', '0')
elif opt in ('--size', ):
w, h = str(arg).split('x')
Config.set('graphics', 'width', w)
Config.set('graphics', 'height', h)
elif opt in ('--display', ):
Config.set('graphics', 'display', str(arg))
elif opt in ('-m', '--module'):
if str(arg) == 'list':
from kivy.modules import Modules
Modules.usage_list()
sys.exit(0)
args = arg.split(':', 1)
if len(args) == 1:
args += ['']
Config.set('modules', args[0], args[1])
elif opt in ('-s', '--save'):
need_save = True
elif opt in ('-r', '--rotation'):
Config.set('graphics', 'rotation', arg)
elif opt in ('-d', '--debug'):
level = LOG_LEVELS.get('debug')
Logger.setLevel(level=level)
elif opt == '--dpi':
environ['KIVY_DPI'] = arg
if need_save and 'KIVY_NO_CONFIG' not in environ:
try:
with open(kivy_config_fn, 'w') as fd:
Config.write(fd)
except Exception as e:
            Logger.exception('Core: error while saving default '
                             'configuration file: %s', str(e))
Logger.info('Core: Kivy configuration saved.')
sys.exit(0)
# add kivy_binary_deps_dir if it exists
if exists(kivy_binary_deps_dir):
environ["PATH"] = kivy_binary_deps_dir + pathsep + environ["PATH"]
# configure all activated modules
from kivy.modules import Modules
Modules.configure()
# android hooks: force fullscreen and add android touch input provider
if platform in ('android', 'ios'):
from kivy.config import Config
Config.set('graphics', 'fullscreen', 'auto')
Config.remove_section('input')
Config.add_section('input')
if platform == 'android':
Config.set('input', 'androidtouch', 'android')
Logger.info('Kivy: v%s' % (__version__))
Logger.info('Python: v{}'.format(sys.version))
| arcticshores/kivy | kivy/__init__.py | Python | mit | 14,620 | 0.000547 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.image_resizer
if meta_architecture == "ssd":
return model_config.ssd.image_resizer
raise ValueError("Unknown model type: {}".format(meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
    set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField("identity_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Value are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Value are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.num_classes
if meta_architecture == "ssd":
return model_config.ssd.num_classes
raise ValueError("Expected the model to be one of 'faster_rcnn' or 'ssd'.")
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
def check_and_parse_input_config_key(configs, key):
"""Checks key and returns specific fields if key is valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicate whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case key can be one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
2. Generic key/value, which update a specific parameter based on namespaced
configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
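# Illustrative usage sketch (an assumption, not part of the original module):
# given configs loaded from a pipeline file for an SSD model, the overrides
# below would update the training batch size, the input path of the eval
# input config named 'eval_coco', and a namespaced model field. File names
# and values are hypothetical.
#
#   configs = get_configs_from_pipeline_file('pipeline.config')
#   configs = merge_external_params_with_configs(
#       configs,
#       kwargs_dict={
#           'batch_size': 64,
#           'eval_input_configs:eval_coco:input_path': 'eval_coco.record',
#           'model.ssd.num_classes': 20,
#       })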
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
else:
return False
return True
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
input_config: A input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
input_name: String name used to identify input config to update with. Should
be either None or value of the 'name' field in one of the input reader
configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
ValueError: when input_name is None and number of eval_input_readers does
not equal to 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
all other defined learning rates in the pipeline config are scaled to maintain
their same ratio with the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
batch_size: Batch size to use for training (Ideally a power of 2). Inputs
are rounded, and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
e.g. "model.ssd.train_config.batch_size"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
ValueError if the message key does not match the existing proto fields.
TypeError the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
  Momentum is only supported for RMSPropOptimizer and MomentumOptimizer; any
  other optimizer raises a TypeError. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
Detection models usually define a loss weight for both classification and
objectness. This function updates the weights such that the ratio between
classification weight to localization weight is the ratio provided.
Arbitrarily, localization weight is set to 1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_eval_steps(configs, eval_steps):
"""Updates `configs` to reflect new number of eval steps per evaluation."""
configs["eval_config"].num_examples = int(eval_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
eval_config: A eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
| derekjchow/models | research/object_detection/utils/config_util.py | Python | apache-2.0 | 36,470 | 0.006581 |
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
This package contains all Python modules implementing the DROP
Manager concepts, including their external interface, a web UI and a client.
"""
| steve-ord/daliuge | daliuge-engine/dlg/manager/__init__.py | Python | lgpl-2.1 | 1,112 | 0 |
#!/usr/bin/env python3
'''A simple implementation of a sorting algorithm, meant to allow
people to manually rank a list of items using whatever subjective or
objective criteria they want.
This program can be called as a script and used interactively. You
can provide the list of things to sort as command line arguments, or
if there are no arguments provided, you can provide the list in stdin,
one item per line.
Example run:
$ ./sort.py 'ice cream' falafel hamburgers pizza
Which is greater, falafel or ice cream (<, =, or >)? <
Which is greater, hamburgers or ice cream (<, =, or >)? <
Which is greater, hamburgers or falafel (<, =, or >)? >
Which is greater, pizza or hamburgers (<, =, or >)? >
Which is greater, pizza or ice cream (<, =, or >)? <
* ice cream
* pizza
* hamburgers
* falafel
Author: Adam Mesha <adam@mesha.org>
License: MIT
'''
from functools import cmp_to_key
class memoize:
'''We really want to be sure that we don't ask people to compare the
same two items twice, so we cache the result.
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
key = tuple(args)
if key not in self.cache:
self.cache[key] = self.func(*args)
return self.cache[key]
@memoize
def cmpfunc(a, b):
result = None
s = 'Which is greater, {a} or {b} (<, =, or >)? '.format(a=a, b=b)
while result is None or result not in '<=>':
result = input(s).strip()
return '<=>'.index(result) - 1
keyfunc = cmp_to_key(cmpfunc)
def binary_insertion_sort(seq, keyfunc):
'''Insertion sort, using binary search to insert each element. Runs
in O(n**2) time, but the use case is when a human is manually
deciding on the ordering, so the most important thing is to reduce
the number of comparisons.
'''
def mv(srcidx, dstidx):
while srcidx > dstidx:
seq[srcidx], seq[srcidx - 1] = seq[srcidx - 1], seq[srcidx]
srcidx -= 1
i = 1
while i < len(seq):
lower = 0; upper = i
while lower < upper:
j = (upper + lower) // 2
key1, key2 = keyfunc(seq[i]), keyfunc(seq[j])
if key1 == key2:
mv(i, j+1) # XXX this is not stable
i += 1
break
if key1 < key2:
upper = j
else: # >
lower = j + 1
else:
mv(i, upper)
i += 1
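# Example (illustrative, not part of the original script): the same insertion
# logic works with any key function; with keyfunc=len no prompts are needed.
#
#     items = ['pear', 'fig', 'apricot']
#     binary_insertion_sort(items, keyfunc=len)
#     # items is now ['fig', 'pear', 'apricot']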
class SortableWithHeuristic:
def __init__(self, val, heur):
self.val = val
self.heur = heur
def __str__(self):
return '{val}: {heur}'.format(val=self.val, heur=self.heur)
def __repr__(self):
return '{}(val={}, heur={})'.format(self.__class__.__name__,
repr(self.val),
repr(self.heur))
def get_heuristic_func(val):
result = None
s = 'Give an approximate numeric score to item {}: '.format(val)
while result is None:
try:
result = float(input(s).strip())
except ValueError:
pass
return result
def heuristic_sort(seq, get_heuristic_func, cmpfunc):
    '''Sort seq in place. Each new item is first slotted in according to a
    user-supplied numeric heuristic score and then nudged into its exact
    position with pairwise comparisons, which keeps the number of questions
    asked of the user small.
    '''
def swap(a, b):
seq[a], seq[b] = seq[b], seq[a]
idx = 0
while idx < len(seq):
val = seq[idx]
heur = get_heuristic_func(val)
seq[idx] = SortableWithHeuristic(val, heur)
# find the current location
j = idx
while j > 0 and seq[j].heur < seq[j-1].heur:
swap(j, j-1)
j -= 1
moved = False
while j < idx and cmpfunc(seq[j].val, seq[j+1].val) == 1:
swap(j, j+1)
j += 1
moved = True
if not moved:
while j > 0 and cmpfunc(seq[j].val, seq[j-1].val) == -1:
swap(j, j-1)
j -= 1
if 0 < j < idx:
seq[j].heur = (seq[j-1].heur + seq[j+1].heur) / 2
elif idx > 0:
if j == 0 and seq[j].heur > seq[j+1].heur:
seq[j].heur = seq[j+1].heur - 1
elif j == idx and seq[j].heur < seq[j-1].heur:
seq[j].heur = seq[j-1].heur + 1
idx += 1
def main():
import sys
seq = []
if len(sys.argv) > 1:
seq.extend(sys.argv[1:])
if not seq:
seq.extend(x.strip() for x in sys.stdin.readlines())
heuristic_sort(seq, get_heuristic_func, cmpfunc)
print('\n'.join('* {}'.format(item) for item in reversed(seq)))
if __name__ == '__main__':
main()
| sagittarian/personal-sort | sort.py | Python | mit | 4,539 | 0.003745 |
#
# MLDB-1594-aggregator-empty-row.py
# mldb.ai inc, 2016
# this file is part of mldb. copyright 2016 mldb.ai inc. all rights reserved.
#
import unittest
from mldb import mldb, MldbUnitTest, ResponseException
class Mldb1594(MldbUnitTest):
def test_simple(self):
res1 = mldb.query("select {}")
res2 = mldb.query("select sum({*}) named 'result' from (select {})")
self.assertEqual(res1,res2)
def test_multi_row(self):
dataset_config = {
'type' : 'tabular',
'id' : 'toy'
}
dataset = mldb.create_dataset(dataset_config)
dataset.record_row("rowA", [["txt", "hoho things are great!", 0]])
dataset.record_row("rowB", [["txt", "! ", 0]])
dataset.record_row("rowC", [["txt", "things are great, great", 0]])
dataset.commit()
expected = [
["_rowName", "are", "great", "hoho", "things"],
["pwet", 2, 3, 1, 2]
]
# skipping the null row
self.assertTableResultEquals(
mldb.query("""
select sum({*}) as *
named 'pwet'
from (
SELECT tokenize(lower(txt), {splitChars: ' ,.!;:"?', minTokenLength: 2}) as *
from toy
where rowName() != 'rowB'
)
"""),
expected)
# passing the empty row (rowB) to sum
self.assertTableResultEquals(
mldb.query("""
select sum({*}) as *
named 'pwet'
from (
SELECT tokenize(lower(txt), {splitChars: ' ,.!;:"?', minTokenLength: 2}) as *
from toy
)
"""),
expected)
mldb.run_tests()
| mldbai/mldb | testing/MLDB-1594-aggregator-empty-row.py | Python | apache-2.0 | 1,755 | 0.005698 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
import os
from export_tools import *
from osv import osv, fields
class wiz_sneldev_categories_import(osv.osv_memory):
_name = 'sneldev.categories.import'
_description = 'Import categories'
_columns = {
}
_defaults = {
}
def do_categories_import(self, cr, uid, ids, context=None):
if (self.pool.get('sneldev.magento').import_categories(cr, uid) < 0):
raise osv.except_osv(('Warning'), ('Import failed, please refer to log file for failure details.'))
return {'type': 'ir.actions.act_window_close'}
wiz_sneldev_categories_import()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| sysadminmatmoz/odoo-clearcorp | TODO-7.0/sneldev_magento/wizard/sneldev_magento_categories_import.py | Python | agpl-3.0 | 1,660 | 0.00241 |
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from actstream.exceptions import check_actionable_model
def follow(user, obj, send_action=True, actor_only=False, request=None):
"""
Creates a relationship allowing the object's activities to appear in the
user's stream.
Returns the created ``Follow`` instance.
If ``send_action`` is ``True`` (the default) then a
``<user> started following <object>`` action signal is sent.
If ``actor_only`` is ``True`` (the default) then only actions where the
object is the actor will appear in the user's activity stream. Set to
``False`` to also include actions where this object is the action_object or
the target.
Example::
follow(request.user, group, actor_only=False)
"""
from actstream.models import Follow, action
check_actionable_model(obj)
follow, created = Follow.objects.get_or_create(user=user,
object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj),
actor_only=actor_only)
if send_action and created:
if request:
from django.utils import simplejson as json
from agora_site.misc.utils import geolocate_ip
action.send(user, verb=_('started following'), target=obj,
ipaddr=request.META.get('REMOTE_ADDR'),
geolocation=json.dumps(geolocate_ip(request.META.get('REMOTE_ADDR'))))
else:
action.send(user, verb=_('started following'), target=obj)
return follow
def unfollow(user, obj, send_action=False, request=None):
"""
Removes a "follow" relationship.
    Set ``send_action`` to ``True`` (``False`` is the default) to also send a
``<user> stopped following <object>`` action signal.
Example::
unfollow(request.user, other_user)
"""
from actstream.models import Follow, action
check_actionable_model(obj)
Follow.objects.filter(user=user, object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)).delete()
if send_action:
if request:
from django.utils import simplejson as json
from agora_site.misc.utils import geolocate_ip
action.send(user, verb=_('stopped following'), target=obj,
ipaddr=request.META.get('REMOTE_ADDR'),
geolocation=json.dumps(geolocate_ip(request.META.get('REMOTE_ADDR'))))
else:
action.send(user, verb=_('stopped following'), target=obj)
def is_following(user, obj):
"""
Checks if a "follow" relationship exists.
Returns True if exists, False otherwise.
Example::
is_following(request.user, group)
"""
from actstream.models import Follow
check_actionable_model(obj)
return bool(Follow.objects.filter(user=user, object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)).count())
def action_handler(verb, **kwargs):
"""
Handler function to create Action instance upon action signal call.
"""
from actstream.models import Action
kwargs.pop('signal', None)
actor = kwargs.pop('sender')
check_actionable_model(actor)
newaction = Action(
actor_content_type=ContentType.objects.get_for_model(actor),
actor_object_id=actor.pk,
verb=unicode(verb),
public=bool(kwargs.pop('public', True)),
description=kwargs.pop('description', None),
timestamp=kwargs.pop('timestamp', timezone.now()),
geolocation=kwargs.pop('geolocation', None)
)
for opt in ('target', 'action_object'):
obj = kwargs.pop(opt, None)
if not obj is None:
check_actionable_model(obj)
setattr(newaction, '%s_object_id' % opt, obj.pk)
setattr(newaction, '%s_content_type' % opt,
ContentType.objects.get_for_model(obj))
newaction.save()
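# Wiring sketch (assumed, not shown in this module): django-activity-stream
# normally connects action_handler to the ``action`` signal, so that the
# action.send(...) calls above end up creating Action rows, e.g.:
#
#     from actstream.models import action
#     action.connect(action_handler, dispatch_uid='actstream.models')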
| pirata-cat/agora-ciudadana | actstream/actions.py | Python | agpl-3.0 | 3,984 | 0.003012 |
from django.conf.urls import patterns, url
from application import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^(?P<application_id>\d+)/$', views.detail, name='detail'),
url(r'^klogin/(?P<username>\w+)/(?P<password>\w+)/$', views.klogin, name='klogin'),
)
| davidegalletti/koa-proof-of-concept | kag/application/urls.py | Python | agpl-3.0 | 305 | 0.009836 |
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
# pylint: disable-msg=E0611
from hashlib import md5
from metashare.settings import LOG_HANDLER
from metashare import settings
from os import mkdir
from os.path import exists
import os.path
from uuid import uuid1, uuid4
from xml.etree import ElementTree as etree
from datetime import datetime, timedelta
import logging
import re
from json import dumps, loads
from django.core.serializers.json import DjangoJSONEncoder
import zipfile
from zipfile import ZIP_DEFLATED
from django.db.models.query_utils import Q
import glob
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
# ALLOWED_ARCHIVE_EXTENSIONS = ('zip', 'tar.gz', 'gz', 'tgz', 'tar', 'bzip2')
ALLOWED_ARCHIVE_EXTENSIONS = ('zip',)
MAXIMUM_MD5_BLOCK_SIZE = 1024
XML_DECL = re.compile(r'\s*<\?xml version=".+" encoding=".+"\?>\s*\n?',
re.I|re.S|re.U)
# Publication status constants and choice:
INTERNAL = 'i'
INGESTED = 'g'
PUBLISHED = 'p'
STATUS_CHOICES = (
(INTERNAL, 'internal'),
(INGESTED, 'ingested'),
(PUBLISHED, 'published'),
)
# Copy status constants and choice:
MASTER = 'm'
REMOTE = 'r'
PROXY = 'p'
COPY_CHOICES = (
(MASTER, 'master copy'),
(REMOTE, 'remote copy'),
(PROXY, 'proxy copy'))
# attributes to by serialized in the global JSON of the storage object
GLOBAL_STORAGE_ATTS = ['source_url', 'identifier', 'created', 'modified',
'revision', 'publication_status', 'metashare_version', 'deleted']
# attributes to be serialized in the local JSON of the storage object
LOCAL_STORAGE_ATTS = ['digest_checksum', 'digest_modified',
'digest_last_checked', 'copy_status', 'source_node']
def _validate_valid_xml(value):
"""
Checks whether the given value is well-formed and valid XML.
"""
try:
# Try to create an XML tree from the given String value.
_value = XML_DECL.sub(u'', value)
_ = etree.fromstring(_value.encode('utf-8'))
return True
except etree.ParseError, parse_error:
# In case of an exception, we raise a ValidationError.
raise ValidationError(parse_error)
# cfedermann: in case of other exceptions, raise a ValidationError with
# the corresponding error message. This will prevent the exception
# page handler to be shown and is hence more acceptable for end users.
except Exception, error:
raise ValidationError(error)
def _create_uuid():
"""
Creates a unique id from a UUID-1 and a UUID-4, checks for collisions.
"""
# Create new identifier based on a UUID-1 and a UUID-4.
new_id = '{0}{1}'.format(uuid1().hex, uuid4().hex)
# Check for collisions; in case of a collision, create new identifier.
while StorageObject.objects.filter(identifier=new_id):
new_id = '{0}{1}'.format(uuid1().hex, uuid4().hex)
return new_id
# pylint: disable-msg=R0902
class StorageObject(models.Model):
"""
Models an object inside the persistent storage layer.
"""
__schema_name__ = "STORAGEOJBECT"
class Meta:
permissions = (
('can_sync', 'Can synchronize'),
)
source_url = models.URLField(verify_exists=False, editable=False,
default=settings.DJANGO_URL,
help_text="(Read-only) base URL for the server where the master copy of " \
"the associated language resource is located.")
identifier = models.CharField(max_length=64, default=_create_uuid,
editable=False, unique=True, help_text="(Read-only) unique " \
"identifier for this storage object instance.")
created = models.DateTimeField(auto_now_add=True, editable=False,
help_text="(Read-only) creation date for this storage object instance.")
    # use the callable (not a call) so the default is evaluated on each save
    # rather than being frozen at import time
    modified = models.DateTimeField(editable=False, default=datetime.now,
help_text="(Read-only) last modification date of the metadata XML " \
"for this storage object instance.")
checksum = models.CharField(blank=True, null=True, max_length=32,
help_text="(Read-only) MD5 checksum of the binary data for this " \
"storage object instance.")
digest_checksum = models.CharField(blank=True, null=True, max_length=32,
help_text="(Read-only) MD5 checksum of the digest zip file containing the " \
"global serialized storage object and the metadata XML for this " \
"storage object instance.")
digest_modified = models.DateTimeField(editable=False, null=True, blank=True,
help_text="(Read-only) last modification date of digest zip " \
"for this storage object instance.")
digest_last_checked = models.DateTimeField(editable=False, null=True, blank=True,
help_text="(Read-only) last update check date of digest zip " \
"for this storage object instance.")
revision = models.PositiveIntegerField(default=1, help_text="Revision " \
"or version information for this storage object instance.")
metashare_version = models.CharField(max_length=32, editable=False,
default=settings.METASHARE_VERSION,
help_text="(Read-only) META-SHARE version used with the storage object instance.")
def _get_master_copy(self):
return self.copy_status == MASTER
def _set_master_copy(self, value):
if value == True:
self.copy_status = MASTER
else:
self.copy_status = REMOTE
master_copy = property(_get_master_copy, _set_master_copy)
copy_status = models.CharField(default=MASTER, max_length=1, editable=False, choices=COPY_CHOICES,
help_text="Generalized copy status flag for this storage object instance.")
def _get_published(self):
return self.publication_status == PUBLISHED
def _set_published(self, value):
if value == True:
self.publication_status = PUBLISHED
else:
# request to unpublish depends on current state:
# if we are currently published, set to ingested;
# else don't change
if self.publication_status == PUBLISHED:
self.publication_status = INGESTED
published = property(_get_published, _set_published)
publication_status = models.CharField(default=INTERNAL, max_length=1, choices=STATUS_CHOICES,
help_text="Generalized publication status flag for this " \
"storage object instance.")
source_node = models.CharField(blank=True, null=True, max_length=32, editable=False,
help_text="(Read-only) id of source node from which the resource " \
"originally stems as set in local_settings.py in CORE_NODES and " \
"PROXIED_NODES; empty if resource stems from this local node")
deleted = models.BooleanField(default=False, help_text="Deletion " \
"status flag for this storage object instance.")
metadata = models.TextField(validators=[_validate_valid_xml],
help_text="XML containing the metadata description for this storage " \
"object instance.")
global_storage = models.TextField(default='not set yet',
help_text="text containing the JSON serialization of global attributes " \
"for this storage object instance.")
local_storage = models.TextField(default='not set yet',
help_text="text containing the JSON serialization of local attributes " \
"for this storage object instance.")
def get_digest_checksum(self):
"""
        Checks if the current digest is still up-to-date, recreates it if
        required, and returns the up-to-date digest checksum.
"""
_expiration_date = _get_expiration_date()
if _expiration_date > self.digest_modified \
and _expiration_date > self.digest_last_checked:
self.update_storage()
return self.digest_checksum
def __unicode__(self):
"""
Returns the Unicode representation for this storage object instance.
"""
return u'<StorageObject id="{0}">'.format(self.id)
def _storage_folder(self):
"""
Returns the path to the local folder for this storage object instance.
"""
return '{0}/{1}'.format(settings.STORAGE_PATH, self.identifier)
def compute_checksum(self):
"""
Computes the MD5 hash checksum for the binary archive which may be
attached to this storage object instance and sets it in `self.checksum`.
Returns whether `self.checksum` was changed in this method.
"""
if not self.master_copy or not self.get_download():
return False
_old_checksum = self.checksum
self.checksum = compute_checksum(self.get_download())
return _old_checksum != self.checksum
def get_download(self):
"""
Returns the local path to the downloadable data or None if there is no
download data.
"""
_path = '{0}/archive'.format(self._storage_folder())
for _ext in ALLOWED_ARCHIVE_EXTENSIONS:
_binary_data = '{0}.{1}'.format(_path, _ext)
if exists(_binary_data):
return _binary_data
return None
def save(self, *args, **kwargs):
"""
Overwrites the predefined save() method to ensure that STORAGE_PATH
contains a folder for this storage object instance. We also check
that the object validates.
"""
# Perform a full validation for this storage object instance.
self.full_clean()
# Call save() method from super class with all arguments.
super(StorageObject, self).save(*args, **kwargs)
def update_storage(self, force_digest=False):
"""
Updates the metadata XML if required and serializes it and this storage
object to the storage folder.
force_digest (optional): if True, always recreate the digest zip-archive
"""
# check if the storage folder for this storage object instance exists
if self._storage_folder() and not exists(self._storage_folder()):
# If not, create the storage folder.
mkdir(self._storage_folder())
# make sure that any changes to the DJANGO_URL are also reflected in the
# `source_url` field of master copies
if self.master_copy and self.source_url != settings.DJANGO_URL:
self.source_url = settings.DJANGO_URL
source_url_updated = True
else:
source_url_updated = False
# for internal resources, no serialization is done
if self.publication_status == INTERNAL:
if source_url_updated:
self.save()
return
self.digest_last_checked = datetime.now()
# check metadata serialization
metadata_updated = self.check_metadata()
# check global storage object serialization
global_updated = self.check_global_storage_object()
# create new digest zip-archive if required
if force_digest or metadata_updated or global_updated:
self.create_digest()
# check local storage object serialization
local_updated = self.check_local_storage_object()
# save storage object if required; this should always happen since
# at least self.digest_last_checked in the local storage object
# has changed
if source_url_updated or metadata_updated or global_updated \
or local_updated:
self.save()
def check_metadata(self):
"""
Checks if the metadata of the resource has changed with respect to the
current metadata serialization. If yes, recreates the serialization,
updates it in the storage folder and increases the revision (for master
copies)
Returns a flag indicating if the serialization was updated.
"""
# flag to indicate if rebuilding of metadata.xml is required
update_xml = False
# create current version of metadata XML
from metashare.xml_utils import to_xml_string
try:
_metadata = to_xml_string(
# pylint: disable-msg=E1101
self.resourceinfotype_model_set.all()[0].export_to_elementtree(),
# use ASCII encoding to convert non-ASCII chars to entities
encoding="ASCII")
except:
# pylint: disable-msg=E1101
LOGGER.error('PROBLEMATIC: %s - count: %s', self.identifier,
self.resourceinfotype_model_set.count(), exc_info=True)
raise
if self.metadata != _metadata:
self.metadata = _metadata
LOGGER.debug(u"\nMETADATA: {0}\n".format(self.metadata))
self.modified = datetime.now()
update_xml = True
# increase revision for ingested and published resources whenever
# the metadata XML changes for master copies
if self.publication_status in (INGESTED, PUBLISHED) \
and self.copy_status == MASTER:
self.revision += 1
# check if there exists a metadata XML file; this is not the case if
# the publication status just changed from internal to ingested
# or if the resource was received when syncing
if self.publication_status in (INGESTED, PUBLISHED) \
and not os.path.isfile(
'{0}/metadata-{1:04d}.xml'.format(self._storage_folder(), self.revision)):
update_xml = True
if update_xml:
# serialize metadata
with open('{0}/metadata-{1:04d}.xml'.format(
self._storage_folder(), self.revision), 'wb') as _out:
_out.write(unicode(self.metadata).encode('ASCII'))
return update_xml
def check_global_storage_object(self):
"""
Checks if the global storage object serialization has changed. If yes,
updates it in the storage folder.
Returns a flag indicating if the serialization was updated.
"""
_dict_global = { }
for item in GLOBAL_STORAGE_ATTS:
_dict_global[item] = getattr(self, item)
_global_storage = \
dumps(_dict_global, cls=DjangoJSONEncoder, sort_keys=True, separators=(',',':'))
if self.global_storage != _global_storage:
self.global_storage = _global_storage
if self.publication_status in (INGESTED, PUBLISHED):
with open('{0}/storage-global.json'.format(
self._storage_folder()), 'wb') as _out:
_out.write(unicode(self.global_storage).encode('utf-8'))
return True
return False
def create_digest(self):
"""
Creates a new digest zip-archive for master and proxy copies.
"""
if self.copy_status in (MASTER, PROXY):
_zf_name = '{0}/resource.zip'.format(self._storage_folder())
_zf = zipfile.ZipFile(_zf_name, mode='w', compression=ZIP_DEFLATED)
try:
_zf.write(
'{0}/metadata-{1:04d}.xml'.format(self._storage_folder(), self.revision),
arcname='metadata.xml')
_zf.write(
'{0}/storage-global.json'.format(self._storage_folder()),
arcname='storage-global.json')
finally:
_zf.close()
# update zip digest checksum
self.digest_checksum = \
compute_digest_checksum(self.metadata, self.global_storage)
# update last modified timestamp
self.digest_modified = datetime.now()
def check_local_storage_object(self):
"""
Checks if the local storage object serialization has changed. If yes,
updates it in the storage folder.
Returns a flag indicating if the serialization was updated.
"""
_dict_local = { }
for item in LOCAL_STORAGE_ATTS:
_dict_local[item] = getattr(self, item)
_local_storage = \
dumps(_dict_local, cls=DjangoJSONEncoder, sort_keys=True, separators=(',',':'))
if self.local_storage != _local_storage:
self.local_storage = _local_storage
if self.publication_status in (INGESTED, PUBLISHED):
with open('{0}/storage-local.json'.format(
self._storage_folder()), 'wb') as _out:
_out.write(unicode(self.local_storage).encode('utf-8'))
return True
return False
def restore_from_folder(storage_id, copy_status=MASTER, \
storage_digest=None, source_node=None, force_digest=False):
"""
Restores the storage object and the associated resource for the given
storage object identifier and makes it persistent in the database.
storage_id: the storage object identifier; it is assumed that this is the
    folder name in the storage folder where the serialized storage object
and metadata XML are located
copy_status (optional): one of MASTER, REMOTE, PROXY; if present, used as
copy status for the restored resource
storage_digest (optional): the digest_checksum to set in the restored
storage object
    source_node (optional): the source node id to set in the restored
storage object
force_digest (optional): if True, always recreate the digest zip-archive
Returns the restored resource with its storage object set.
"""
from metashare.repository.models import resourceInfoType_model
# if a storage object with this id already exists, delete it
try:
_so = StorageObject.objects.get(identifier=storage_id)
_so.delete()
except ObjectDoesNotExist:
_so = None
storage_folder = os.path.join(settings.STORAGE_PATH, storage_id)
# get most current metadata.xml
_files = os.listdir(storage_folder)
_metadata_files = \
sorted(
[f for f in _files if f.startswith('metadata')],
reverse=True)
if not _metadata_files:
raise Exception('no metadata.xml found')
# restore resource from metadata.xml
_metadata_file = open('{0}/{1}'.format(storage_folder, _metadata_files[0]), 'rb')
_xml_string = _metadata_file.read()
_metadata_file.close()
result = resourceInfoType_model.import_from_string(_xml_string, copy_status=copy_status)
if not result[0]:
msg = u''
if len(result) > 2:
msg = u'{}'.format(result[2])
raise Exception(msg)
resource = result[0]
# at this point, a storage object is already created at the resource, so update it
_storage_object = resource.storage_object
_storage_object.metadata = _xml_string
# add global storage object attributes if available
if os.path.isfile('{0}/storage-global.json'.format(storage_folder)):
_global_json = \
_fill_storage_object(_storage_object, '{0}/storage-global.json'.format(storage_folder))
_storage_object.global_storage = _global_json
else:
LOGGER.warn('missing storage-global.json, importing resource as new')
_storage_object.identifier = storage_id
# add local storage object attributes if available
if os.path.isfile('{0}/storage-local.json'.format(storage_folder)):
_local_json = \
_fill_storage_object(_storage_object, '{0}/storage-local.json'.format(storage_folder))
_storage_object.local_storage = _local_json
# always use the provided copy status, even if its different from the
# one in the local storage object
if copy_status:
if _storage_object.copy_status != copy_status:
LOGGER.warn('overwriting copy status from storage-local.json with "{}"'.format(copy_status))
_storage_object.copy_status = copy_status
else:
if copy_status:
_storage_object.copy_status = copy_status
else:
# no copy status and no local storage object is provided, so use
# a default
LOGGER.warn('no copy status provided, using default copy status MASTER')
_storage_object.copy_status = MASTER
# set storage digest if provided (usually for non-local resources)
if storage_digest:
_storage_object.digest_checksum = storage_digest
# set source node id if provided (usually for non-local resources)
if source_node:
_storage_object.source_node = source_node
_storage_object.update_storage(force_digest=force_digest)
# update_storage includes saving
#_storage_object.save()
return resource
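# Illustrative call (hypothetical identifier): re-import a resource whose
# serialization lives under STORAGE_PATH/<storage_id>/ as a master copy.
#
#     resource = restore_from_folder('0123abcd...', copy_status=MASTER)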
def add_or_update_resource(storage_json, resource_xml_string, storage_digest,
copy_status=REMOTE, source_node=None):
'''
For the resource described by storage_json and resource_xml_string,
do the following:
- if it does not exist, import it with the given copy status and
digest_checksum;
- if it exists, delete it from the database, then import it with the given
copy status and digest_checksum.
Raises 'IllegalAccessException' if an attempt is made to overwrite
an existing master-copy resource with a non-master-copy one.
'''
# Local helper functions first:
def write_to_disk(storage_id):
folder = os.path.join(settings.STORAGE_PATH, storage_id)
if not os.path.exists(folder):
os.mkdir(folder)
with open(os.path.join(folder, 'storage-global.json'), 'wb') as out:
out.write(
unicode(
dumps(storage_json, cls=DjangoJSONEncoder, sort_keys=True, separators=(',',':')))
.encode('utf-8'))
with open(os.path.join(folder, 'metadata.xml'), 'wb') as out:
out.write(unicode(resource_xml_string).encode('utf-8'))
def storage_object_exists(storage_id):
return bool(StorageObject.objects.filter(identifier=storage_id).count() > 0)
def remove_files_from_disk(storage_id):
folder = os.path.join(settings.STORAGE_PATH, storage_id)
for _file in ('storage-local.json', 'storage-global.json', 'metadata.xml'):
path = os.path.join(folder, _file)
if os.path.exists(path):
os.remove(path)
if copy_status == PROXY:
# for proxy copies it is sufficient to only store the latest
# revision of metadata.xml file; in order to be robust against
# remote changes without revision number updates, we always recreate
# this latest metadata.xml copy
for _path in glob.glob(os.path.join(folder, 'metadata-*.xml')):
if os.path.exists(_path):
os.remove(_path)
def remove_database_entries(storage_id):
storage_object = StorageObject.objects.get(identifier=storage_id)
try:
resource = storage_object.resourceinfotype_model_set.all()[0]
except:
# pylint: disable-msg=E1101
LOGGER.error('PROBLEMATIC: %s - count: %s', storage_object.identifier,
storage_object.resourceinfotype_model_set.count(), exc_info=True)
raise
# we have to keep the statistics and recommendations for this resource
# since it is only updated
resource.delete_deep(keep_stats=True)
storage_object.delete()
    # Now the actual add_or_update_resource():
storage_id = storage_json['identifier']
if storage_object_exists(storage_id):
if copy_status != MASTER and StorageObject.objects.get(identifier=storage_id).copy_status == MASTER:
raise IllegalAccessException("Attempt to overwrite a master copy with a non-master-copy record; refusing")
remove_files_from_disk(storage_id)
remove_database_entries(storage_id)
write_to_disk(storage_id)
return restore_from_folder(storage_id, copy_status=copy_status,
storage_digest=storage_digest, source_node=source_node, force_digest=True)
def _fill_storage_object(storage_obj, json_file_name):
"""
Fills the given storage object with the entries of the given JSON file.
    The JSON file contains the serialization of a dictionary whose keys are
    assumed to be valid attributes of the storage object.
Returns the content of the JSON file.
"""
with open(json_file_name, 'rb') as _in:
json_string = _in.read()
_dict = loads(json_string)
for _att in _dict.keys():
setattr(storage_obj, _att, _dict[_att])
return json_string
def update_digests():
"""
Re-creates a digest if it is older than MAX_DIGEST_AGE / 2.
    This assumes that this method is called at intervals of MAX_DIGEST_AGE / 2 to
guarantee a maximum digest age of MAX_DIGEST_AGE.
"""
LOGGER.info('Starting to update digests.')
_expiration_date = _get_expiration_date()
# get all master copy storage object of ingested and published resources
for _so in StorageObject.objects.filter(
Q(copy_status=MASTER),
Q(publication_status=INGESTED) | Q(publication_status=PUBLISHED)):
if _expiration_date > _so.digest_modified \
and _expiration_date > _so.digest_last_checked:
LOGGER.info('updating {}'.format(_so.identifier))
_so.update_storage()
else:
LOGGER.info('{} is up to date'.format(_so.identifier))
LOGGER.info('Finished updating digests.')
def repair_storage_folder():
"""
Repairs the storage folder by forcing the recreation of all files.
    Superfluous files are deleted.
"""
for _so in StorageObject.objects.all():
if _so.publication_status == INTERNAL:
# if storage folder is found, delete all files except a possible
# binary
folder = os.path.join(settings.STORAGE_PATH, _so.identifier)
for _file in ('storage-local.json', 'storage-global.json',
'resource.zip', 'metadata.xml', 'metadata-*.xml'):
path = os.path.join(folder, _file)
for _path in glob.glob(path):
if os.path.exists(_path):
os.remove(_path)
else:
_so.metadata = None
_so.global_storage = None
_so.local_storage = None
_so.update_storage()
def repair_storage_objects():
"""
Removes storage objects for which no resourceinfotype_model is set.
"""
for _so in StorageObject.objects.all():
if _so.resourceinfotype_model_set.count() == 0:
LOGGER.info('remove storage object {}'.format(_so.identifier))
_so.delete()
def compute_checksum(infile):
"""
Compute the MD5 checksum of infile, and return it as a hexadecimal string.
infile: either a file-like object instance with a read() method, or
a file path which can be opened using open(infile, 'rb').
"""
checksum = md5()
try:
if hasattr(infile, 'read'):
instream = infile
else:
instream = open(infile, 'rb')
chunk = instream.read(MAXIMUM_MD5_BLOCK_SIZE)
while chunk:
checksum.update(chunk)
chunk = instream.read(MAXIMUM_MD5_BLOCK_SIZE)
finally:
instream.close()
return checksum.hexdigest()
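# Illustrative usage (not part of the original module): both call forms yield
# the same 32-character hex digest for the same content.
#
#     compute_checksum('/path/to/archive.zip')
#     with open('/path/to/archive.zip', 'rb') as stream:
#         compute_checksum(stream)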
def compute_digest_checksum(metadata, global_storage):
"""
Computes the digest checksum for the given metadata and global storage objects.
"""
_cs = md5()
_cs.update(metadata)
_cs.update(global_storage)
return _cs.hexdigest()
class IllegalAccessException(Exception):
pass
def _get_expiration_date():
"""
Returns the expiration date of a digest based on the maximum age.
"""
_half_time = settings.MAX_DIGEST_AGE / 2
_td = timedelta(seconds=_half_time)
_expiration_date = datetime.now() - _td
return _expiration_date
| MiltosD/CEF-ELRC | metashare/storage/models.py | Python | bsd-3-clause | 28,959 | 0.007321 |
from twisted.internet import reactor
from twisted.python import log
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
agent = Agent(reactor)
class LoginHandler(object):
def __init__(self, api_url):
self.api_url = api_url
def handle(self, handler, args):
game, bot = args
def failed(ignored, reason="login failed"):
handler.parent.closeBecause(reason)
def http_succeeded(response):
if response.code == 200:
log.msg("login succeeded")
try:
handler.parent.login_success(game, bot)
except:
log.err()
elif response.code == 401:
failed(response, "Invalid bot key")
elif response.code == 404:
failed(response, "Invalid game")
elif response.code == 410:
failed(response, "Game is full")
else:
failed(response)
url = '{}/api/internal/join/{}?key={}'\
.format(self.api_url, game, bot)
d = agent.request(
'POST', url,
Headers({'User-Agent': ['Plumbing Connector']}), None
)
d.addCallbacks(http_succeeded, failed)
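# Usage sketch (assumed wiring, not part of the original module): a parent
# protocol constructs the handler with the API base URL and dispatches
# (game, bot-key) pairs to it; the object passed in must expose a .parent
# providing login_success() and closeBecause() for the callbacks above.
#
#     login = LoginHandler('http://localhost:8080')
#     login.handle(message_handler, ('game-id', 'bot-key'))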
| gnmerritt/casino | casino/handlers.py | Python | mit | 1,275 | 0.000784 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from types import *
def typer(x, y):
    if type(x) is StringType or type(y) is StringType:
        print u'received a string'
    else:
        if x > y:
            print u'greater'
        elif x < y:
            print u'less'
        else:
            print u'equal'
typer("12", 4)
typer("12","4")
typer(12, 4)
typer(4, 45)
typer(4, 4)
 | pybursa/homeworks | a_karnauh/hw1/6.py | Python | gpl-2.0 | 354 | 0.04644 |
from bottle import run
from bottle_app import application
run(host='localhost', port=8080)
| snowfed/Chessology | run_locally.py | Python | gpl-3.0 | 92 | 0 |
"""station_analytics_update
Revision ID: 1cb4253a8bc6
Revises: 2972360b9a6f
Create Date: 2014-04-27 13:01:26.309272
"""
# revision identifiers, used by Alembic.
revision = '1cb4253a8bc6'
down_revision = '2972360b9a6f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('radio_station', sa.Column('analytic_update_frequency', sa.Float(), nullable=True))
def downgrade():
op.drop_column('radio_station', 'analytic_update_frequency')
| andaluri/rootio_web | alembic/versions/1cb4253a8bc6_station_analytics_up.py | Python | agpl-3.0 | 472 | 0.006356 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="Ente",
version="0.1",
description="place finder on commoncrawl dataset",
author="László Nagy",
author_email="rizsotto@gmail.com",
license='LICENSE',
url='https://github.com/rizsotto/Ente',
long_description=open('README.md').read(),
scripts=['bin/ente']
)
| rizsotto/Ente | setup.py | Python | bsd-3-clause | 383 | 0 |
# Copyright (c) 2018, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
"""Submodule for operating on LVC-format waveform files"""
from .. import __version__
from .file_io import read_from_h5
| moble/scri | scri/LVC/__init__.py | Python | mit | 242 | 0.004132 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Misc. modules."""
| shaggytwodope/qutebrowser | qutebrowser/misc/__init__.py | Python | gpl-3.0 | 820 | 0 |
# Copyright (c) 2013 Daniel Gill
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
Created on Oct 24, 2013
@author: Daniel Gill
'''
import re
import os
token_re = r"(\w+'\w+)|(\w+)"
def tokenize(line):
def has_valid_contraction(tup):
return len(tup[0]) > 0
for matching_tuple in re.findall(token_re, line):
string = ""
if has_valid_contraction(matching_tuple):
string = matching_tuple[0]
else:
string = matching_tuple[1]
if len(string) > 1 or string in ['I', 'a']:
yield string.lower()
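# Illustrative example (not in the original module) of what tokenize yields:
#
#     list(tokenize("Don't stop! I am believin'"))
#     # -> ["don't", 'stop', 'i', 'am', 'believin']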
def process_file(file_path):
with open(name=file_path, mode='r') as open_file:
for line in open_file:
for word in tokenize(line):
yield word
def process_dir(dir_path):
for file_path in os.listdir(dir_path):
for word in process_file(os.path.join(dir_path, file_path)):
yield word
| dwgill/dspell | dspell/corpus.py | Python | mit | 1,947 | 0.007704 |
# Based on cage v1.1.4
# http://www.alcyone.com/software/cage/
# Copyright (C) 2002-2006 Erik Max Francis <max@alcyone.com>
# GPL License
class Topology:
"""Encaptulation of the shape and dimentionality of a cellular automata"""
def get(self, address):
raise NotImplementedError
def set(self, address, state):
raise NotImplementedError
def normalize(self, address):
raise NotImplementedError
class Neighborhood:
"""Abstraction of the set of cells adjacent to any given cell"""
def neighbors(self, address):
"""Returns a list of addresses which are neighbors."""
raise NotImplementedError
def states(self, address):
"""Returns the list of cell values for all neighbors"""
return [self.get(x) for x in self.neighbors(address)]
class GridTopology(Topology):
"""A two dimentional, bounded topology consisting of a rectangular grid
of cells"""
background = 0
border = 0
def __init__(self, size):
self.width, self.height = size
self.buffer = []
for _ in range(self.width):
self.buffer.append([self.background] * self.height)
self.zero = (0, 0)
def normalize(self, address):
x, y = address
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return None
return address
def get(self, address):
addr = self.normalize(address)
if addr:
x, y = addr
return self.buffer[x][y]
else:
return self.border
def set(self, address, state):
addr = self.normalize(address)
if addr:
x, y = addr
self.buffer[x][y] = state
else:
raise IndexError
class ExtendedNeighborhood(Neighborhood):
"""A neighborhood that retrieves a list of states on each direction"""
def states(self, address, max=1):
return [[self.get(i) for i in j] for j in self.neighbors(address, max)]
class Automaton:
"""Abstraction for the actions that can be made over the different cells
and states of a specified map"""
def __init__(self, map):
self.map = map
self.generation = 0
def update(self):
self.generation += 1
class Rule:
"""Definition of rules to follow to change a cell value in an automaton"""
def __init__(self, map, address):
self.populate(map, address)
def populate(self, map, address):
raise NotImplementedError
def apply(self):
raise NotImplementedError
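# Minimal usage sketch (not part of the original module): GridTopology can be
# exercised on its own, while concrete Neighborhood, Automaton and Rule
# subclasses are expected to supply the behaviour of an actual automaton.
if __name__ == '__main__':
    grid = GridTopology((4, 3))   # 4 columns x 3 rows, every cell starts at 0
    grid.set((2, 1), 1)           # flip a single cell
    assert grid.get((2, 1)) == 1
    assert grid.get((10, 10)) == GridTopology.border  # out of bounds -> border value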
| wichovw/tca-gt | server/tca/cellaut.py | Python | mit | 2,703 | 0.009989 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.core.cache import cache
from django.test import TestCase
from django.contrib import admin
from physical.tests.factory import DiskOfferingFactory, EnvironmentFactory
from physical.errors import NoDiskOfferingGreaterError, NoDiskOfferingLesserError
from system.models import Configuration
from ..admin.disk_offering import DiskOfferingAdmin
from ..forms.disk_offerring import DiskOfferingForm
from ..models import DiskOffering
LOG = logging.getLogger(__name__)
SEARCH_FIELDS = ('name', )
LIST_FIELDS = ('name', 'size_gb', 'selected_environments')
SAVE_ON_TOP = True
UNICODE_FORMAT = '{}'
class DiskOfferingTestCase(TestCase):
def create_basic_disks(self):
for disk_offering in DiskOffering.objects.all():
for plan in disk_offering.plans.all():
plan.databaseinfras.all().delete()
disk_offering.plans.all().delete()
disk_offering.delete()
cache.clear()
self.bigger = DiskOfferingFactory()
self.bigger.size_kb *= 30
self.bigger.environments.add(self.environment)
self.bigger.save()
self.medium = DiskOfferingFactory()
self.medium.size_kb *= 20
self.medium.environments.add(self.environment)
self.medium.save()
self.smaller = DiskOfferingFactory()
self.smaller.size_kb *= 10
self.smaller.environments.add(self.environment)
self.smaller.save()
def setUp(self):
self.admin = DiskOfferingAdmin(DiskOffering, admin.sites.AdminSite())
self.auto_resize_max_size_in_gb = Configuration(
name='auto_resize_max_size_in_gb', value=100
)
self.auto_resize_max_size_in_gb.save()
self.environment = EnvironmentFactory()
def tearDown(self):
if self.auto_resize_max_size_in_gb.id:
self.auto_resize_max_size_in_gb.delete()
def test_search_fields(self):
self.assertEqual(SEARCH_FIELDS, self.admin.search_fields)
def test_list_fields(self):
self.assertEqual(LIST_FIELDS, self.admin.list_display)
def test_save_position(self):
self.assertEqual(SAVE_ON_TOP, self.admin.save_on_top)
def test_adding_gb_to_kb(self):
disk_offering_form = DiskOfferingForm(
data={
'name': 'disk_offering_small',
'size_gb': 0.5,
'environments': [self.environment.id]
}
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering_form.instance,
form=disk_offering_form, change=None
)
disk_offering = DiskOffering.objects.get(name='disk_offering_small')
self.assertEqual(disk_offering.size_gb(), 0.5)
self.assertEqual(disk_offering.size_kb, 524288)
def test_editing_gb_to_kb(self):
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
self.assertEqual(disk_offering.size_gb(), 1)
self.assertEqual(disk_offering.size_kb, 1048576)
disk_offering_form = DiskOfferingForm(
data={
'name': disk_offering.name,
'size_gb': 1.5,
'environments': [self.environment.id]
},
instance=disk_offering
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering,
form=disk_offering_form, change=None
)
self.assertEqual(disk_offering.size_gb(), 1.5)
self.assertEqual(disk_offering.size_kb, 1572864)
def test_edit_initial_values(self):
disk_offering_form = DiskOfferingForm()
self.assertNotIn('name', disk_offering_form.initial)
self.assertIn('size_gb', disk_offering_form.initial)
self.assertIsNone(disk_offering_form.initial['size_gb'])
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
disk_offering_form = DiskOfferingForm(instance=disk_offering)
self.assertEqual(
disk_offering_form.initial['name'], disk_offering.name
)
self.assertEqual(
disk_offering_form.initial['size_gb'], disk_offering.size_gb()
)
def test_model_sizes(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.size_kb, 1048576)
self.assertEqual(disk_factory.size_gb(), 1.0)
self.assertEqual(disk_factory.size_bytes(), 1073741824)
disk_offering = DiskOffering()
self.assertIsNone(disk_offering.size_kb)
self.assertIsNone(disk_offering.size_gb())
self.assertIsNone(disk_offering.size_bytes())
def test_model_converter(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.converter_kb_to_gb(1572864), 1.5)
self.assertEqual(disk_factory.converter_kb_to_bytes(524288), 536870912)
self.assertEqual(disk_factory.converter_gb_to_kb(0.75), 786432)
self.assertIsNone(disk_factory.converter_kb_to_gb(0))
self.assertIsNone(disk_factory.converter_kb_to_bytes(0))
self.assertIsNone(disk_factory.converter_gb_to_kb(0))
def test_unicode(self):
disk_offering = DiskOffering()
expected_unicode = UNICODE_FORMAT.format(disk_offering.name)
self.assertEqual(expected_unicode, str(disk_offering))
def test_disk_offering_is_in_admin(self):
self.assertIn(DiskOffering, admin.site._registry)
admin_class = admin.site._registry[DiskOffering]
self.assertIsInstance(admin_class, DiskOfferingAdmin)
def test_can_found_greater_disk(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment)
self.assertEqual(self.medium, found)
found = DiskOffering.first_greater_than(
self.medium.size_kb, self.environment)
self.assertEqual(self.bigger, found)
def test_cannot_found_greater_disk(self):
self.create_basic_disks()
self.assertRaises(
NoDiskOfferingGreaterError,
DiskOffering.first_greater_than, self.bigger.size_kb, self.environment
)
def test_can_found_greater_disk_with_exclude(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment, exclude_id=self.medium.id
)
self.assertEqual(self.bigger, found)
def test_can_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb())
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.bigger, found)
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.medium, found)
def test_cannot_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.smaller.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
self.assertRaises(
NoDiskOfferingLesserError,
DiskOffering.last_offering_available_for_auto_resize, self.environment
)
def test_compare_disks(self):
self.create_basic_disks()
self.assertGreater(self.bigger, self.smaller)
self.assertLess(self.smaller, self.bigger)
self.medium_twice = DiskOfferingFactory()
self.medium_twice.size_kb *= 20
self.medium_twice.save()
self.assertEqual(self.medium, self.medium)
self.assertNotEqual(self.medium, self.medium_twice)
self.medium_twice.delete()
def test_disk_is_last_offering(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.medium.size_gb()) + 1
self.auto_resize_max_size_in_gb.save()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.bigger.is_last_auto_resize_offering(self.environment)
)
def test_disk_is_last_offering_without_param(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.delete()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.bigger.is_last_auto_resize_offering(self.environment)
)
| globocom/database-as-a-service | dbaas/physical/tests/test_disk_offering.py | Python | bsd-3-clause | 9,097 | 0.00033 |
#!BPY
"""
Name: 'TerasologyBlockShapeExport'
Blender: 260
Group: 'Export'
Tooltip: 'Export a Terasology Block Shape'
"""
bl_info = {
"name": "Terasology Block Shape Export",
"description": "Exporter for producing Terasology Block Shape files",
"author": "Immortius",
"version": (1, 1),
"blender": (2, 6, 0),
"location": "File > Import-Export",
"category": "Import-Export"}
import bpy
import os
import bpy_extras.io_utils
from bpy.props import StringProperty, BoolProperty
class ExportBlockShape(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
bl_idname = "export_mesh.terasology_block_shape"
bl_label = "Export Terasology Block Shape"
filename_ext = ".groovy"
filter_glob = StringProperty(default="*.groovy", options={'HIDDEN'})
apply_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply Modifiers to the exported mesh",
default=True)
for_embed = BoolProperty(
name="For Embed in Jar",
description="Adds the default package to the groovy file",
default=False)
@classmethod
def poll(cls, context):
return context.active_object != None
def execute(self, context):
filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
from . import export_block_shape
keywords = self.as_keywords(ignore=("filter_glob","check_existing"))
return export_block_shape.save(self, context, **keywords)
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self, "apply_modifiers")
row = layout.row()
row.prop(self, "for_embed")
#UI Panel
bpy.types.Object.teraFullSide = BoolProperty(
name="Full Side",
description="Is this side of the block complete",
default = False)
bpy.types.Object.teraAABB = BoolProperty(
name="Is AABB Collider",
description="Is this object used to describe an AABB collider",
default = False)
bpy.types.Scene.teraAuthor = StringProperty(
name="Author",
description="Is this side of the block complete",
default = "")
bpy.types.Scene.teraAutoCollider = BoolProperty(
name="Auto-generate Collider",
description="Automatically generate an AABB collider that encapulates the block",
default = False)
class UIPanel(bpy.types.Panel):
bl_label = "Terasology Properties"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self, context):
layout = self.layout
scene = context.scene
if not scene:
return
layout.prop(scene, 'teraAuthor')
layout.prop(scene, 'teraAutoCollider')
ob = context.object
if not ob:
return
if not ob.type == 'MESH':
return
layout.prop(ob, 'teraFullSide')
layout.prop(ob, 'teraAABB')
def menu_export(self, context):
self.layout.operator(ExportBlockShape.bl_idname, text="Terasology Block Shape (.groovy)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_export)
if __name__ == "__main__":
    register()
 | rapodaca/Terasology | blender_addons/io_mesh_terasology/__init__.py | Python | apache-2.0 | 3,098 | 0.037121 |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipes for NativeClient toolchain packages.
The real entry plumbing is in toolchain_main.py.
"""
import collections
import fnmatch
import platform
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.gsd_storage
import pynacl.platform
import command
import toolchain_main
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_DIR = os.path.dirname(SCRIPT_DIR)
# See command.GenerateGitPatches for the schema of entries in this dict.
# Additionally, each may contain a 'repo' key whose value is the name
# to use in place of the package name when calling GitUrl (below).
GIT_REVISIONS = {
'binutils': {
'rev': '7deface59753c3b249ac08d854d471951796343f',
'upstream-branch': 'upstream/binutils-2_24-branch',
'upstream-name': 'binutils-2.24',
# This is tag binutils-2_24, but Gerrit won't let us push
# non-annotated tags, and the upstream tag is not annotated.
'upstream-base': '237df3fa4a1d939e6fd1af0c3e5029a25a137310',
},
'gcc': {
'rev': 'faa3cdd0473b7fb82be937e32fd2b474fa0299e6',
'upstream-branch': 'upstream/gcc-4_8-branch',
'upstream-name': 'gcc-4.8.3',
# Upstream tag gcc-4_8_3-release:
'upstream-base': '6bbf0dec66c0e719b06cd2fe67559fda6df09000',
},
'newlib': {
'rev': 'e7b1ccd4b5eec14e840f6bf875f4d6fa4cff045c',
'upstream-branch': 'upstream/master',
'upstream-name': 'newlib-2.1.0',
# Upstream tag newlib_2_1_0:
'upstream-base': '99fc6c167467b41466ec90e8260e9c49cbe3d13c',
},
'gdb': {
'rev': '5deb4793a5e3f2f48d7899f424bb4484686020f8',
'repo': 'binutils',
'upstream-branch': 'upstream/gdb-7.7-branch',
'upstream-name': 'gdb-7.7.1',
# Upstream tag gdb-7.7-release:
'upstream-base': '4bd8fc3a1362970d9800a263987af8093798338b',
},
}
TAR_FILES = {
'gmp': command.path.join('gmp', 'gmp-6.0.0a.tar.bz2'),
'mpfr': command.path.join('mpfr', 'mpfr-3.1.2.tar.bz2'),
'mpc': command.path.join('mpc', 'mpc-1.0.2.tar.gz'),
'isl': command.path.join('cloog', 'isl-0.12.2.tar.bz2'),
'cloog': command.path.join('cloog', 'cloog-0.18.1.tar.gz'),
'expat': command.path.join('expat', 'expat-2.1.0.tar.gz'),
}
GIT_BASE_URL = 'https://chromium.googlesource.com/native_client'
GIT_PUSH_URL = 'ssh://gerrit.chromium.org/native_client'
KNOWN_MIRRORS = [('http://git.chromium.org/native_client', GIT_BASE_URL)]
PUSH_MIRRORS = [('http://git.chromium.org/native_client', GIT_PUSH_URL),
(GIT_BASE_URL, GIT_PUSH_URL)]
def GitUrl(package, push_url=False):
repo = GIT_REVISIONS[package].get('repo', package)
if push_url:
base_url = GIT_PUSH_URL
else:
base_url = GIT_BASE_URL
return '%s/nacl-%s.git' % (base_url, repo)
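# Example (illustrative): the optional 'repo' key redirects a package to a
# shared repository, so GitUrl('gdb') resolves to
# 'https://chromium.googlesource.com/native_client/nacl-binutils.git'.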
def CollectSources():
sources = {}
for package in TAR_FILES:
tar_file = TAR_FILES[package]
if fnmatch.fnmatch(tar_file, '*.bz2'):
extract = EXTRACT_STRIP_TBZ2
elif fnmatch.fnmatch(tar_file, '*.gz'):
extract = EXTRACT_STRIP_TGZ
else:
raise Exception('unexpected file name pattern in TAR_FILES[%r]' % package)
sources[package] = {
'type': 'source',
'commands': [
command.Command(extract + [command.path.join('%(abs_top_srcdir)s',
'..', 'third_party',
tar_file)],
cwd='%(output)s'),
],
}
patch_packages = []
patch_commands = []
for package, info in GIT_REVISIONS.iteritems():
sources[package] = {
'type': 'source',
'commands': command.SyncGitRepoCmds(GitUrl(package), '%(output)s',
info['rev'],
git_cache='%(git_cache_dir)s',
push_url=GitUrl(package, True),
known_mirrors=KNOWN_MIRRORS,
push_mirrors=PUSH_MIRRORS),
}
patch_packages.append(package)
patch_info = {'name': package}
patch_info.update(info)
patch_commands.append(
command.GenerateGitPatches('%(' + package + ')s/.git', patch_info))
sources['patches'] = {
'type': 'build',
'dependencies': patch_packages,
'commands': patch_commands,
}
# The gcc_libs component gets the whole GCC source tree.
sources['gcc_libs'] = sources['gcc']
# The gcc component omits all the source directories that are used solely
# for building target libraries. We don't want those included in the
# input hash calculation so that we don't rebuild the compiler when the
# the only things that have changed are target libraries.
sources['gcc'] = {
'type': 'source',
'dependencies': ['gcc_libs'],
'commands': [command.CopyTree('%(gcc_libs)s', '%(output)s', [
'boehm-gc',
'libada',
'libatomic',
'libffi',
'libgcc',
'libgfortran',
'libgo',
'libgomp',
'libitm',
'libjava',
'libmudflap',
'libobjc',
'libquadmath',
'libsanitizer',
'libssp',
'libstdc++-v3',
])]
}
# We have to populate the newlib source tree with the "exported" form of
# some headers from the native_client source tree. The newlib build
# needs these to be in the expected place. By doing this in the source
# target, these files will be part of the input hash and so we don't need
# to do anything else to keep track of when they might have changed in
# the native_client source tree.
newlib_sys_nacl = command.path.join('%(output)s',
'newlib', 'libc', 'sys', 'nacl')
newlib_unpack = [command.RemoveDirectory(command.path.join(newlib_sys_nacl,
dirname))
for dirname in ['bits', 'sys', 'machine']]
newlib_unpack.append(command.Command([
'python',
command.path.join('%(top_srcdir)s', 'src',
'trusted', 'service_runtime', 'export_header.py'),
command.path.join('%(top_srcdir)s', 'src',
'trusted', 'service_runtime', 'include'),
newlib_sys_nacl,
]))
sources['newlib']['commands'] += newlib_unpack
return sources
# Canonical tuples we use for hosts.
WINDOWS_HOST_TUPLE = pynacl.platform.PlatformTriple('win', 'x86-32')
MAC_HOST_TUPLE = pynacl.platform.PlatformTriple('darwin', 'x86-64')
ARM_HOST_TUPLE = pynacl.platform.PlatformTriple('linux', 'arm')
LINUX_X86_32_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-32')
LINUX_X86_64_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-64')
# Map of native host tuple to extra tuples that it cross-builds for.
EXTRA_HOSTS_MAP = {
LINUX_X86_64_TUPLE: [
LINUX_X86_32_TUPLE,
ARM_HOST_TUPLE,
WINDOWS_HOST_TUPLE,
],
}
# Map of native host tuple to host tuples that are "native enough".
# For these hosts, we will do a native-style build even though it's
# not the native tuple, just passing some extra compiler flags.
NATIVE_ENOUGH_MAP = {
LINUX_X86_64_TUPLE: {
LINUX_X86_32_TUPLE: ['-m32'],
},
}
# The list of targets to build toolchains for.
TARGET_LIST = ['arm', 'i686']
# List upload targets for each host we want to upload packages for.
TARGET = collections.namedtuple('TARGET', ['name', 'pkg_prefix'])
HOST_TARGET = collections.namedtuple('HOST_TARGET',
['os', 'arch', 'differ3264', 'targets'])
STANDARD_TARGETS = [TARGET('arm', '')]
LINUX_X86_64_TARGETS = [TARGET('arm', ''), TARGET('i686', 'ng_')]
UPLOAD_HOST_TARGETS = [
HOST_TARGET('win', 'x86-32', False, STANDARD_TARGETS),
HOST_TARGET('darwin', 'x86-64', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'arm', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'x86-32', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'x86-64', True, LINUX_X86_64_TARGETS),
]
# GDB is built by toolchain_build but injected into package targets built by
# other means. The lists below name the package targets, packages, and tar
# files we inject it on top of.
GDB_INJECT_HOSTS = [
('win', 'x86-32'),
('darwin', 'x86-64'),
('linux', 'x86-32'),
]
GDB_INJECT_PACKAGES = [
('nacl_x86_newlib', ['naclsdk.tgz']),
('nacl_x86_glibc', ['toolchain.tar.bz2']),
]
# These are extra arguments to pass gcc's configure that vary by target.
TARGET_GCC_CONFIG = {
# TODO(mcgrathr): Disabled tuning for now, tickling a constant-pool layout bug.
# 'arm': ['--with-tune=cortex-a15'],
}
PACKAGE_NAME = 'Native Client SDK [%(build_signature)s]'
BUG_URL = 'http://gonacl.com/reportissue'
TAR_XV = ['tar', '-x', '-v']
EXTRACT_STRIP_TGZ = TAR_XV + ['--gzip', '--strip-components=1', '-f']
EXTRACT_STRIP_TBZ2 = TAR_XV + ['--bzip2', '--strip-components=1', '-f']
CONFIGURE_CMD = ['sh', '%(src)s/configure']
MAKE_PARALLEL_CMD = ['make', '-j%(cores)s']
MAKE_CHECK_CMD = MAKE_PARALLEL_CMD + ['check']
MAKE_DESTDIR_CMD = ['make', 'DESTDIR=%(abs_output)s']
# This file gets installed by multiple packages' install steps, but it is
# never useful when installed in isolation. So we remove it from the
# installation directories before packaging up.
REMOVE_INFO_DIR = command.Remove(command.path.join('%(output)s',
'share', 'info', 'dir'))
def ConfigureHostArch(host):
configure_args = []
is_cross = CrossCompiling(host)
if is_cross:
extra_cc_args = []
configure_args.append('--host=' + host)
else:
extra_cc_args = NATIVE_ENOUGH_MAP.get(NATIVE_TUPLE, {}).get(host, [])
if extra_cc_args:
# The host we've chosen is "native enough", such as x86-32 on x86-64.
# But it's not what config.guess will yield, so we need to supply
# a --build switch to ensure things build correctly.
configure_args.append('--build=' + host)
extra_cxx_args = list(extra_cc_args)
if fnmatch.fnmatch(host, '*-linux*'):
# Avoid shipping binaries with a runtime dependency on
# a particular version of the libstdc++ shared library.
# TODO(mcgrathr): Do we want this for MinGW and/or Mac too?
extra_cxx_args.append('-static-libstdc++')
if extra_cc_args:
# These are the defaults when there is no setting, but we will add
# additional switches, so we must supply the command name too.
if is_cross:
cc = host + '-gcc'
else:
cc = 'gcc'
configure_args.append('CC=' + ' '.join([cc] + extra_cc_args))
if extra_cxx_args:
# These are the defaults when there is no setting, but we will add
# additional switches, so we must supply the command name too.
if is_cross:
cxx = host + '-g++'
else:
cxx = 'g++'
configure_args.append('CXX=' + ' '.join([cxx] + extra_cxx_args))
if HostIsWindows(host):
# The i18n support brings in runtime dependencies on MinGW DLLs
# that we don't want to have to distribute alongside our binaries.
# So just disable it, and compiler messages will always be in US English.
configure_args.append('--disable-nls')
return configure_args
def ConfigureHostCommon(host):
return ConfigureHostArch(host) + [
'--prefix=',
'--disable-silent-rules',
'--without-gcc-arch',
]
def ConfigureHostLib(host):
return ConfigureHostCommon(host) + [
'--disable-shared',
]
def ConfigureHostTool(host):
return ConfigureHostCommon(host) + [
'--with-pkgversion=' + PACKAGE_NAME,
'--with-bugurl=' + BUG_URL,
'--without-zlib',
]
def MakeCommand(host, extra_args=[]):
if HostIsWindows(host):
# There appears to be nothing we can pass at top-level configure time
# that will prevent the configure scripts from finding MinGW's libiconv
# and using it. We have to force this variable into the environment
# of the sub-configure runs, which are run via make.
make_command = MAKE_PARALLEL_CMD + ['HAVE_LIBICONV=no']
else:
make_command = MAKE_PARALLEL_CMD
return make_command + extra_args
# Return the 'make check' command to run.
# When cross-compiling, don't try to run test suites.
def MakeCheckCommand(host):
if CrossCompiling(host):
return ['true']
return MAKE_CHECK_CMD
def InstallDocFiles(subdir, files):
doc_dir = command.path.join('%(output)s', 'share', 'doc', subdir)
dirs = sorted(set([command.path.dirname(command.path.join(doc_dir, file))
for file in files]))
commands = ([command.Mkdir(dir, parents=True) for dir in dirs] +
[command.Copy(command.path.join('%(' + subdir + ')s', file),
command.path.join(doc_dir, file))
for file in files])
return commands
def NewlibLibcScript(arch):
template = """/*
* This is a linker script that gets installed as libc.a for the
* newlib-based NaCl toolchain. It brings in the constituent
* libraries that make up what -lc means semantically.
*/
OUTPUT_FORMAT(%s)
GROUP ( libnacl.a libcrt_common.a )
"""
if arch == 'arm':
# Listing three formats instead of one makes -EL/-EB switches work
# for the endian-switchable ARM backend.
format_list = ['elf32-littlearm-nacl',
'elf32-bigarm-nacl',
'elf32-littlearm-nacl']
elif arch == 'i686':
format_list = ['elf32-i386-nacl']
elif arch == 'x86_64':
format_list = ['elf32-x86_64-nacl']
else:
raise Exception('TODO(mcgrathr): OUTPUT_FORMAT for %s' % arch)
return template % ', '.join(['"' + fmt + '"' for fmt in format_list])
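# Example (illustrative): for arch == 'i686' the function above returns a
# linker script roughly like
#   OUTPUT_FORMAT("elf32-i386-nacl")
#   GROUP ( libnacl.a libcrt_common.a )
# so that -lc resolves to this GROUP of constituent libraries.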
# The default strip behavior removes debugging and symbol table
# sections, but it leaves the .comment section. This contains the
# compiler version string, and so it changes when the compiler changes
# even if the actual machine code it produces is completely identical.
# Hence, the target library packages will always change when the
# compiler changes unless these sections are removed. Doing this
# requires somehow teaching the makefile rules to pass the
# --remove-section=.comment switch to TARGET-strip. For the GCC
# target libraries, setting STRIP_FOR_TARGET is sufficient. But
# quoting nightmares make it difficult to pass a command with a space
# in it as the STRIP_FOR_TARGET value. So the build writes a little
# script that can be invoked with a simple name.
#
# Though the gcc target libraries' makefiles are smart enough to obey
# STRIP_FOR_TARGET for library files, the newlib makefiles just
# blindly use $(INSTALL_DATA) for both header (text) files and library
# files. Hence it's necessary to override its INSTALL_DATA setting to
# one that will do stripping using this script, and thus the script
# must silently do nothing to non-binary files.
def ConfigureTargetPrep(arch):
script_file = 'strip_for_target'
config_target = arch + '-nacl'
script_contents = """\
#!/bin/sh
mode=--strip-all
for arg; do
case "$arg" in
-*) ;;
*)
type=`file --brief --mime-type "$arg"`
case "$type" in
application/x-executable|application/x-sharedlib) ;;
application/x-archive|application/x-object) mode=--strip-debug ;;
*) exit 0 ;;
esac
;;
esac
done
exec %s-strip $mode --remove-section=.comment "$@"
""" % config_target
return [
command.WriteData(script_contents, script_file),
command.Command(['chmod', '+x', script_file]),
]
def ConfigureTargetArgs(arch):
config_target = arch + '-nacl'
return [
'--target=' + config_target,
'--with-sysroot=/' + config_target,
'STRIP_FOR_TARGET=%(cwd)s/strip_for_target',
]
def CommandsInBuild(command_lines):
return [
command.RemoveDirectory('build'),
command.Mkdir('build'),
] + [command.Command(cmd, cwd='build')
for cmd in command_lines]
def PopulateDeps(dep_dirs):
commands = [command.RemoveDirectory('all_deps'),
command.Mkdir('all_deps')]
commands += [command.Command('cp -r "%s/"* all_deps' % dirname, shell=True)
for dirname in dep_dirs]
return commands
def WithDepsOptions(options, component=None):
if component is None:
directory = command.path.join('%(cwd)s', 'all_deps')
else:
directory = '%(abs_' + component + ')s'
return ['--with-' + option + '=' + directory
for option in options]
# Return the component name we'll use for a base component name and
# a host tuple. The component names cannot contain dashes or other
# non-identifier characters, because the names of the files uploaded
# to Google Storage are constrained. GNU configuration tuples contain
# dashes, which we translate to underscores.
def ForHost(component_name, host):
return component_name + '_' + pynacl.gsd_storage.LegalizeName(host)
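# Example (illustrative): assuming LegalizeName() simply replaces the dashes in
# a configuration tuple with underscores, ForHost('binutils_arm',
# 'x86_64-linux-gnu') would yield something like 'binutils_arm_x86_64_linux_gnu'.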
# These are libraries that go into building the compiler itself.
def HostGccLibs(host):
def H(component_name):
return ForHost(component_name, host)
host_gcc_libs = {
H('gmp'): {
'type': 'build',
'dependencies': ['gmp'],
'commands': [
command.Command(ConfigureCommand('gmp') +
ConfigureHostLib(host) + [
'--with-sysroot=%(abs_output)s',
'--enable-cxx',
# Without this, the built library will
# assume the instruction set details
# available on the build machine. With
# this, it dynamically chooses what code
# to use based on the details of the
# actual host CPU at runtime.
'--enable-fat',
]),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
],
},
H('mpfr'): {
'type': 'build',
'dependencies': ['mpfr', H('gmp')],
'commands': [
command.Command(ConfigureCommand('mpfr') +
ConfigureHostLib(host) +
WithDepsOptions(['sysroot', 'gmp'], H('gmp'))),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
],
},
H('mpc'): {
'type': 'build',
'dependencies': ['mpc', H('gmp'), H('mpfr')],
'commands': PopulateDeps(['%(' + H('gmp') + ')s',
'%(' + H('mpfr') + ')s']) + [
command.Command(ConfigureCommand('mpc') +
ConfigureHostLib(host) +
WithDepsOptions(['sysroot', 'gmp', 'mpfr'])),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
],
},
H('isl'): {
'type': 'build',
'dependencies': ['isl', H('gmp')],
'commands': [
command.Command(ConfigureCommand('isl') +
ConfigureHostLib(host) +
WithDepsOptions(['sysroot', 'gmp-prefix'],
H('gmp'))),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
# The .pc files wind up containing some absolute paths
# that make the output depend on the build directory name.
# The dependents' configure scripts don't need them anyway.
command.RemoveDirectory(command.path.join(
'%(output)s', 'lib', 'pkgconfig')),
],
},
H('cloog'): {
'type': 'build',
'dependencies': ['cloog', H('gmp'), H('isl')],
'commands': PopulateDeps(['%(' + H('gmp') + ')s',
'%(' + H('isl') + ')s']) + [
command.Command(ConfigureCommand('cloog') +
ConfigureHostLib(host) + [
'--with-bits=gmp',
'--with-isl=system',
] + WithDepsOptions(['sysroot',
'gmp-prefix',
'isl-prefix'])),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
# The .pc files wind up containing some absolute paths
# that make the output depend on the build directory name.
# The dependents' configure scripts don't need them anyway.
command.RemoveDirectory(command.path.join(
'%(output)s', 'lib', 'pkgconfig')),
],
},
H('expat'): {
'type': 'build',
'dependencies': ['expat'],
'commands': [
command.Command(ConfigureCommand('expat') +
ConfigureHostLib(host)),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + [
# expat does not support the install-strip target.
'installlib',
'INSTALL=%(expat)s/conftools/install-sh -c -s',
'INSTALL_DATA=%(expat)s/conftools/install-sh -c -m 644',
]),
],
},
}
return host_gcc_libs
HOST_GCC_LIBS_DEPS = ['gmp', 'mpfr', 'mpc', 'isl', 'cloog']
def HostGccLibsDeps(host):
return [ForHost(package, host) for package in HOST_GCC_LIBS_DEPS]
def ConfigureCommand(source_component):
return [command % {'src': '%(' + source_component + ')s'}
for command in CONFIGURE_CMD]
# When doing a Canadian cross, we need native-hosted cross components
# to do the GCC build.
def GccDeps(host, target):
components = ['binutils_' + target]
if CrossCompiling(host):
components.append('gcc_' + target)
host = NATIVE_TUPLE
return [ForHost(component, host) for component in components]
def GccCommand(host, target, cmd):
components_for_path = GccDeps(host, target)
return command.Command(
cmd, path_dirs=[command.path.join('%(abs_' + component + ')s', 'bin')
for component in components_for_path])
def ConfigureGccCommand(source_component, host, target, extra_args=[]):
return GccCommand(
host,
target,
ConfigureCommand(source_component) +
ConfigureHostTool(host) +
ConfigureTargetArgs(target) +
TARGET_GCC_CONFIG.get(target, []) + [
'--with-gmp=%(abs_' + ForHost('gmp', host) + ')s',
'--with-mpfr=%(abs_' + ForHost('mpfr', host) + ')s',
'--with-mpc=%(abs_' + ForHost('mpc', host) + ')s',
'--with-isl=%(abs_' + ForHost('isl', host) + ')s',
'--with-cloog=%(abs_' + ForHost('cloog', host) + ')s',
'--enable-cloog-backend=isl',
'--disable-dlopen',
'--disable-shared',
'--with-newlib',
'--with-linker-hash-style=gnu',
'--enable-linker-build-id',
'--enable-languages=c,c++,lto',
] + extra_args)
def HostTools(host, target):
def H(component_name):
return ForHost(component_name, host)
# Return the file name with the appropriate suffix for an executable file.
def Exe(file):
if HostIsWindows(host):
return file + '.exe'
else:
return file
tools = {
H('binutils_' + target): {
'type': 'build',
'dependencies': ['binutils'],
'commands': ConfigureTargetPrep(target) + [
command.Command(
ConfigureCommand('binutils') +
ConfigureHostTool(host) +
ConfigureTargetArgs(target) + [
'--enable-deterministic-archives',
'--enable-gold',
] + ([] if HostIsWindows(host) else [
'--enable-plugins',
])),
command.Command(MakeCommand(host)),
command.Command(MakeCheckCommand(host)),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
REMOVE_INFO_DIR,
] + InstallDocFiles('binutils',
['COPYING3'] +
[command.path.join(subdir, 'NEWS')
for subdir in
['binutils', 'gas', 'ld', 'gold']]) +
# The top-level lib* directories contain host libraries
# that we don't want to include in the distribution.
[command.RemoveDirectory(command.path.join('%(output)s', name))
for name in ['lib', 'lib32', 'lib64']],
},
H('gcc_' + target): {
'type': 'build',
'dependencies': (['gcc'] + HostGccLibsDeps(host) +
GccDeps(host, target)),
'commands': ConfigureTargetPrep(target) + [
ConfigureGccCommand('gcc', host, target),
# GCC's configure step writes configargs.h with some strings
# including the configure command line, which get embedded
# into the gcc driver binary. The build only works if we use
# absolute paths in some of the configure switches, but
# embedding those paths makes the output differ in repeated
# builds done in different directories, which we do not want.
# So force the generation of that file early and then edit it
# in place to replace the absolute paths with something that
# never varies. Note that the 'configure-gcc' target will
# actually build some components before running gcc/configure.
GccCommand(host, target,
MakeCommand(host, ['configure-gcc'])),
command.Command(['sed', '-i', '-e',
';'.join(['s@%%(abs_%s)s@.../%s_install@g' %
(component, component)
for component in
HostGccLibsDeps(host)] +
['s@%(cwd)s@...@g']),
command.path.join('gcc', 'configargs.h')]),
# gcc/Makefile's install rules ordinarily look at the
# installed include directory for a limits.h to decide
# whether the lib/gcc/.../include-fixed/limits.h header
# should be made to expect a libc-supplied limits.h or not.
# Since we're doing this build in a clean environment without
# any libc installed, we need to force its hand here.
GccCommand(host, target,
MakeCommand(host, ['all-gcc', 'LIMITS_H_TEST=true'])),
# gcc/Makefile's install targets populate this directory
# only if it already exists.
command.Mkdir(command.path.join('%(output)s',
target + '-nacl', 'bin'),
True),
GccCommand(host, target,
MAKE_DESTDIR_CMD + ['install-strip-gcc']),
REMOVE_INFO_DIR,
# Note we include COPYING.RUNTIME here and not with gcc_libs.
] + InstallDocFiles('gcc', ['COPYING3', 'COPYING.RUNTIME']),
},
# GDB can support all the targets in one host tool.
H('gdb'): {
'type': 'build',
'dependencies': ['gdb', H('expat')],
'commands': [
command.Command(
ConfigureCommand('gdb') +
ConfigureHostTool(host) + [
'--target=x86_64-nacl',
'--enable-targets=arm-none-eabi-nacl',
'--with-expat',
'CPPFLAGS=-I%(abs_' + H('expat') + ')s/include',
'LDFLAGS=-L%(abs_' + H('expat') + ')s/lib',
] +
(['--without-python'] if HostIsWindows(host) else []) +
# TODO(mcgrathr): The default -Werror only breaks because
# the OSX default compiler is an old front-end that does
# not understand all the GCC options. Maybe switch to
# using clang (system or Chromium-supplied) on Mac.
(['--disable-werror'] if HostIsMac(host) else [])),
command.Command(MakeCommand(host) + ['all-gdb']),
command.Command(MAKE_DESTDIR_CMD + [
'-C', 'gdb', 'install-strip',
]),
REMOVE_INFO_DIR,
] + [command.Command(['ln', '-f',
command.path.join('%(abs_output)s',
'bin',
Exe('x86_64-nacl-gdb')),
command.path.join('%(abs_output)s',
'bin',
Exe(arch + '-nacl-gdb'))])
for arch in ['i686', 'arm']] + InstallDocFiles('gdb', [
'COPYING3',
command.path.join('gdb', 'NEWS'),
]),
},
}
# TODO(mcgrathr): The ARM cross environment does not supply a termcap
# library, so it cannot build GDB.
if host.startswith('arm') and CrossCompiling(host):
del tools[H('gdb')]
return tools
def TargetCommands(host, target, command_list):
# First we have to copy the host tools into a common directory.
# We can't just have both directories in our PATH, because the
# compiler looks for the assembler and linker relative to itself.
commands = PopulateDeps(['%(' + ForHost('binutils_' + target, host) + ')s',
'%(' + ForHost('gcc_' + target, host) + ')s'])
bindir = command.path.join('%(cwd)s', 'all_deps', 'bin')
commands += [command.Command(cmd, path_dirs=[bindir])
for cmd in command_list]
return commands
def TargetLibs(host, target):
lib_deps = [ForHost(component + '_' + target, host)
for component in ['binutils', 'gcc']]
def NewlibFile(subdir, name):
return command.path.join('%(output)s', target + '-nacl', subdir, name)
newlib_sysroot = '%(abs_newlib_' + target + ')s'
newlib_tooldir = '%s/%s-nacl' % (newlib_sysroot, target)
# See the comment at ConfigureTargetPrep, above.
newlib_install_data = ' '.join(['STRIPPROG=%(cwd)s/strip_for_target',
'%(abs_newlib)s/install-sh',
'-c', '-s', '-m', '644'])
iconv_encodings = 'UTF-8,UTF-16LE,UCS-4LE,UTF-16,UCS-4'
newlib_configure_args = [
'--disable-libgloss',
'--enable-newlib-iconv',
'--enable-newlib-iconv-from-encodings=' + iconv_encodings,
'--enable-newlib-iconv-to-encodings=' + iconv_encodings,
'--enable-newlib-io-long-long',
'--enable-newlib-io-long-double',
'--enable-newlib-io-c99-formats',
'--enable-newlib-mb',
'CFLAGS=-O2',
'INSTALL_DATA=' + newlib_install_data,
]
newlib_post_install = [
command.Rename(NewlibFile('lib', 'libc.a'),
NewlibFile('lib', 'libcrt_common.a')),
command.WriteData(NewlibLibcScript(target),
NewlibFile('lib', 'libc.a')),
] + [
command.Copy(
command.path.join('%(pthread_headers)s', header),
NewlibFile('include', header))
for header in ('pthread.h', 'semaphore.h')
]
libs = {
'newlib_' + target: {
'type': 'build',
'dependencies': ['newlib'] + lib_deps,
'inputs': { 'pthread_headers':
os.path.join(NACL_DIR, 'src', 'untrusted',
'pthread') },
'commands': (ConfigureTargetPrep(target) +
TargetCommands(host, target, [
ConfigureCommand('newlib') +
ConfigureHostTool(host) +
ConfigureTargetArgs(target) +
newlib_configure_args,
MakeCommand(host),
MAKE_DESTDIR_CMD + ['install-strip'],
]) +
newlib_post_install +
InstallDocFiles('newlib', ['COPYING.NEWLIB'])),
},
'gcc_libs_' + target: {
'type': 'build',
'dependencies': (['gcc_libs'] + lib_deps + ['newlib_' + target] +
HostGccLibsDeps(host)),
# This actually builds the compiler again and uses that compiler
# to build the target libraries. That's by far the easiest thing
# to get going given the interdependencies of the target
# libraries (especially libgcc) on the gcc subdirectory, and
# building the compiler doesn't really take all that long in the
# grand scheme of things.
# TODO(mcgrathr): If upstream ever cleans up all their
# interdependencies better, unpack the compiler, configure with
# --disable-gcc, and just build all-target.
'commands': ConfigureTargetPrep(target) + [
ConfigureGccCommand('gcc_libs', host, target, [
'--with-build-sysroot=' + newlib_sysroot,
]),
GccCommand(host, target,
MakeCommand(host) + [
'build_tooldir=' + newlib_tooldir,
'all-target',
]),
GccCommand(host, target,
MAKE_DESTDIR_CMD + ['install-strip-target']),
REMOVE_INFO_DIR,
],
},
}
return libs
# Compute it once.
NATIVE_TUPLE = pynacl.platform.PlatformTriple()
# For our purposes, "cross-compiling" means not literally that we are
# targeting a host that does not match NATIVE_TUPLE, but that we are
# targeting a host whose binaries we cannot run locally. So x86-32
# on x86-64 does not count as cross-compiling. See NATIVE_ENOUGH_MAP, above.
def CrossCompiling(host):
return (host != NATIVE_TUPLE and
host not in NATIVE_ENOUGH_MAP.get(NATIVE_TUPLE, {}))
def HostIsWindows(host):
return host == WINDOWS_HOST_TUPLE
def HostIsMac(host):
return host == MAC_HOST_TUPLE
# We build target libraries only on Linux for two reasons:
# 1. We only need to build them once.
# 2. Linux is the fastest to build.
# TODO(mcgrathr): In future set up some scheme whereby non-Linux
# bots can build target libraries but not archive them, only verifying
# that the results came out the same as the ones archived by the
# official builder bot. That will serve as a test of the host tools
# on the other host platforms.
def BuildTargetLibsOn(host):
return host == LINUX_X86_64_TUPLE
def GetPackageTargets():
"""Package Targets describes all the final package targets.
  This build can be spread across many build bots, but eventually everything
  is combined together. This package target dictionary describes the final
  output of the entire build.
"""
package_targets = {}
# Add in standard upload targets.
for host_target in UPLOAD_HOST_TARGETS:
for target in host_target.targets:
target_arch = target.name
package_prefix = target.pkg_prefix
# Each package target contains non-platform specific newlib and gcc libs.
# These packages are added inside of TargetLibs(host, target).
newlib_package = 'newlib_%s' % target_arch
gcc_lib_package = 'gcc_libs_%s' % target_arch
shared_packages = [newlib_package, gcc_lib_package]
# Each package target contains arm binutils and gcc.
# These packages are added inside of HostTools(host, target).
platform_triple = pynacl.platform.PlatformTriple(host_target.os,
host_target.arch)
binutils_package = ForHost('binutils_%s' % target_arch, platform_triple)
gcc_package = ForHost('gcc_%s' % target_arch, platform_triple)
gdb_package = ForHost('gdb', platform_triple)
# Create a list of packages for a target.
platform_packages = [binutils_package, gcc_package, gdb_package]
combined_packages = shared_packages + platform_packages
os_name = pynacl.platform.GetOS(host_target.os)
if host_target.differ3264:
arch_name = pynacl.platform.GetArch3264(host_target.arch)
else:
arch_name = pynacl.platform.GetArch(host_target.arch)
package_target = '%s_%s' % (os_name, arch_name)
package_name = '%snacl_%s_newlib' % (package_prefix,
pynacl.platform.GetArch(target_arch))
package_target_dict = package_targets.setdefault(package_target, {})
package_target_dict.setdefault(package_name, []).extend(combined_packages)
  # GDB is special and shared; we will inject it into various other packages.
for platform, arch in GDB_INJECT_HOSTS:
platform_triple = pynacl.platform.PlatformTriple(platform, arch)
os_name = pynacl.platform.GetOS(platform)
arch_name = pynacl.platform.GetArch(arch)
gdb_packages = [ForHost('gdb', platform_triple)]
package_target = '%s_%s' % (os_name, arch_name)
for package_name, package_archives in GDB_INJECT_PACKAGES:
combined_packages = package_archives + gdb_packages
package_target_dict = package_targets.setdefault(package_target, {})
package_target_dict.setdefault(package_name, []).extend(combined_packages)
return dict(package_targets)
def CollectPackagesForHost(host, targets):
packages = HostGccLibs(host).copy()
for target in targets:
packages.update(HostTools(host, target))
if BuildTargetLibsOn(host):
packages.update(TargetLibs(host, target))
return packages
def CollectPackages(targets):
packages = CollectSources()
packages.update(CollectPackagesForHost(NATIVE_TUPLE, targets))
for host in EXTRA_HOSTS_MAP.get(NATIVE_TUPLE, []):
packages.update(CollectPackagesForHost(host, targets))
return packages
PACKAGES = CollectPackages(TARGET_LIST)
PACKAGE_TARGETS = GetPackageTargets()
if __name__ == '__main__':
tb = toolchain_main.PackageBuilder(PACKAGES, PACKAGE_TARGETS, sys.argv[1:])
# TODO(mcgrathr): The bot ought to run some native_client tests
# using the new toolchain, like the old x86 toolchain bots do.
tb.Main()
| davidbrazdil/nacl | toolchain_build/toolchain_build.py | Python | bsd-3-clause | 39,585 | 0.004825 |
import redis
from app.config import get_config_obj
from app.util.httputil import Http_util
class Component_access_token():
def __init__(self):
self.component_appid = get_config_obj().component_appid
self.component_appsecret = get_config_obj().component_secret
self.r = redis.Redis(host='localhost', port=6379, db=0)
def get_component_verify_ticket(self):
        # TODO: read the saved ticket
component_verify_ticket = self.r.get('component_verify_ticket')
return component_verify_ticket
def get_commponent_access_token(self):
token_json_data = Http_util().post_get_component_access_token(self.get_component_verify_ticket())
        # TODO: save the token
return token_json_data.get("component_access_token")
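# Minimal usage sketch (illustrative; assumes Redis already holds a valid
# component_verify_ticket pushed by the platform's ticket callback):
#     token = Component_access_token().get_commponent_access_token()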
| CoderHito/wx_demo | app/util/component_access_token.py | Python | mit | 772 | 0.001319 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject
class EnergyConsumer(IdentifiedObject):
"""Generic user of energy - a point of consumption on the power system model
"""
def __init__(self, customerCount=0, pfixedPct=0.0, qfixedPct=0.0, qfixed=0.0, pfixed=0.0, LoadResponse=None, *args, **kw_args):
"""Initialises a new 'EnergyConsumer' instance.
@param customerCount: Number of individual customers represented by this Demand
@param pfixedPct: Fixed active power as per cent of load group fixed active power. Load sign convention is used, i.e. positive sign means flow out from a node.
@param qfixedPct: Fixed reactive power as per cent of load group fixed reactive power. Load sign convention is used, i.e. positive sign means flow out from a node.
@param qfixed: Reactive power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
@param pfixed: Active power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
@param LoadResponse: The load response characteristic of this load.
"""
#: Number of individual customers represented by this Demand
self.customerCount = customerCount
#: Fixed active power as per cent of load group fixed active power. Load sign convention is used, i.e. positive sign means flow out from a node.
self.pfixedPct = pfixedPct
#: Fixed reactive power as per cent of load group fixed reactive power. Load sign convention is used, i.e. positive sign means flow out from a node.
self.qfixedPct = qfixedPct
#: Reactive power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
self.qfixed = qfixed
#: Active power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
self.pfixed = pfixed
self._LoadResponse = None
self.LoadResponse = LoadResponse
super(EnergyConsumer, self).__init__(*args, **kw_args)
_attrs = ["customerCount", "pfixedPct", "qfixedPct", "qfixed", "pfixed"]
_attr_types = {"customerCount": int, "pfixedPct": float, "qfixedPct": float, "qfixed": float, "pfixed": float}
_defaults = {"customerCount": 0, "pfixedPct": 0.0, "qfixedPct": 0.0, "qfixed": 0.0, "pfixed": 0.0}
_enums = {}
_refs = ["LoadResponse"]
_many_refs = []
def getLoadResponse(self):
"""The load response characteristic of this load.
"""
return self._LoadResponse
def setLoadResponse(self, value):
if self._LoadResponse is not None:
filtered = [x for x in self.LoadResponse.EnergyConsumer if x != self]
self._LoadResponse._EnergyConsumer = filtered
self._LoadResponse = value
if self._LoadResponse is not None:
if self not in self._LoadResponse._EnergyConsumer:
self._LoadResponse._EnergyConsumer.append(self)
LoadResponse = property(getLoadResponse, setLoadResponse)
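# Example (illustrative): EnergyConsumer(customerCount=10, pfixed=5.0, qfixed=1.5)
# creates a consumer whose active and reactive demand are fixed quantities.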
| rwl/PyCIM | CIM15/CDPSM/Balanced/IEC61970/Wires/EnergyConsumer.py | Python | mit | 4,290 | 0.005128 |
from __future__ import division
import random
import time
from collections import defaultdict, Counter
import gensim
import numpy as np
from numpy import log, pi
from scipy import linalg
from scipy.special import gammaln
import copy
from sklearn.cluster import KMeans
from numba import jit
import cholesky
__author__ = "Michael Mansour, Jared Thompson, Mike Rinehart"
class Wishart(object):
def __init__(self, word_vecs):
self.nu = None
self.kappa = None
self.psi = None
self.mu = None
self.set_params(word_vecs)
# ======================================================================================================================
def set_params(self, word_vecs):
word_vecs = np.vstack(word_vecs.values()) # turn dict of word vectors into a matrix
self.nu = word_vecs.shape[1] # dimensionality of word-vectors
self.kappa = 0.1
# self.psi = word_vecs.T.dot(word_vecs)#, axis=0) # sum of squres -- from Murphy(2012)
# self.psi = np.identity(
# word_vecs.shape[1]) * 3. # changed this to identity matrix as in paper. No intuition here
# self.mu = np.mean(word_vecs, axis=0)
# ======================================================================================================================
class Gauss_LDA(object):
def __init__(self, num_topics, corpus, word_vector_filepath=None,
word_vector_model=None, alpha=0.2, outputfile=None, preprocess=False):
self.doc_topic_CT = None
self.corpus = corpus
self.priors = None
self.word_vecs = {}
self.numtopics = num_topics
self.vocab = set([])
self.topic_params = defaultdict(dict)
self.wordvecFP = word_vector_filepath
self.word_vec_size = None
self.alpha = alpha
self.solver = cholesky.Helper()
self.wvmodel = word_vector_model
self.test_word_topics = defaultdict(list)
self.test_word_topic_count = defaultdict(int)
self.word_topics = {}
self.output_file_name = outputfile
self.preprocess = preprocess
# ======================================================================================================================
def process_corpus(self, documents):
"""
Tokenizes documents into dict of lists of tokens
:param documents: expects list of strings
        :return: dict{document ID: list of tokens}
"""
if not self.preprocess:
temp_corpus = defaultdict(dict)
random.shuffle(documents) # making sure topics are formed semi-randomly
for index, doc in enumerate(documents):
words = doc.split()
temp_corpus[index]['words'] = words
temp_corpus[index]['topics'] = np.empty(len(words)) # Random topic assign
# temp_corpus[index]['topics'] = np.random.randint(0, self.numtopics, size=len(words)) # Random topic assign
for word in words:
self.vocab.add(word)
self.corpus = temp_corpus
print "Done processing corpus with {} documents".format(len(documents))
else: # Docs are tokenized and such, just add it into class
temp_corpus = defaultdict(dict)
for idx, doc in enumerate(documents):
temp_corpus[idx]["words"] = doc
temp_corpus[idx]["topics"] = np.empty(len(doc))
for word in doc:
                    self.vocab.add(word)
self.corpus = temp_corpus
# ======================================================================================================================
def process_wordvectors(self, filepath=None):
"""
Takes a trained Word2Vec model, tests each word in vocab against it, and only keeps word vectors that
are in your document corpus, and that are in the word2vec corpus.
Decreases memory requirements for holding word vector info.
:param filepath: filepath of word-vector file. Requires 2 things at top of .txt document:
number of tokens trained on & dimensionality of word-vectors
:return: None - sets class-variable (self.word_vecs) to be a dict{word: word-vector}
"""
if filepath:
print "Processing word-vectors, this takes a moment"
self.wvmodel = gensim.models.Word2Vec.load_word2vec_format(fname=filepath, binary=False)
useable_vocab = 0
unusable_vocab = 0
self.word_vec_size = self.wvmodel.vector_size
for word in self.vocab:
try:
self.word_vecs[word] = self.wvmodel[word]
useable_vocab += 1
except KeyError:
unusable_vocab += 1
print "There are {0} words that could be converted to word vectors in your corpus \n" \
"There are {1} words that could NOT be converted to word vectors".format(useable_vocab,
unusable_vocab)
else:
useable_vocab = 0
unusable_vocab = 0
self.word_vec_size = self.wvmodel.vector_size
for word in self.vocab:
try:
self.word_vecs[word] = self.wvmodel[word]
useable_vocab += 1
except KeyError:
unusable_vocab += 1
print "There are {0} words that could be converted to word vectors in your corpus \n" \
"There are {1} words that could NOT be converted to word vectors".format(useable_vocab,
unusable_vocab)
# ======================================================================================================================
def clean_docs(self):
print "cleaning out docs of words not in your Word2Vec model"
        approved_words = set(self.word_vecs.keys())
for idx, doc in self.corpus.iteritems():
self.corpus[idx] = [word for word in doc if word in approved_words]
print "Done cleaning out docs of bad words"
# ======================================================================================================================
def fit(self, iterations=1, init=True):
if init:
self.init()
init = False
print "Starting fit"
self.display_results()
for i in range(iterations):
self.sample()
print "{0} iterations complete".format(i)
if self.output_file_name: #TODO: fix such that it prints regardless of outputfilename
            for k in xrange(self.numtopics):
                for name, param in zip(("mean", "cov"),
                                       (self.topic_params[k]["Topic Mean"],
                                        self.topic_params[k]["Topic Covariance"])):
                    # One file per topic and parameter; avoid mutating the base file name.
                    results_file = "{}_{}_{}".format(self.output_file_name, k, name)
                    np.savetxt(results_file, param)
# ======================================================================================================================
def init(self):
self.process_corpus(self.corpus)
self.process_wordvectors(self.wordvecFP)
        self.priors = Wishart(self.word_vecs)  # set Wishart priors
self.doc_topic_CT = np.zeros((len(self.corpus.keys()), self.numtopics)) # Init document-topic counts matrix
centroids, km = self.smart_centroids() # Init topic means with KMeans cluster centroids>>faster convergence
# Prior mean
mu_0 = np.zeros(self.word_vec_size)
count = 0
for docID in self.corpus.keys(): # hard setting word-topic assignments as per cluster membership to help model along
for i, word in enumerate(self.corpus[docID]['words']):
self.corpus[docID]['topics'][i] = self.word_topics[word] # word_topics from KMeans cluster membership
mu_0 += self.word_vecs[word]
count += 1
self.priors.mu = mu_0 / float(count) # trying a different prior mean init
# Prior co-variance
self.priors.psi = .01 * np.identity(self.word_vec_size)
# Sample means
for k in range(self.numtopics):
self.topic_params[k]["Topic Sum"] = np.zeros(self.word_vec_size)
self.topic_params[k]["Topic Mean"] = centroids[k]
self.topic_params[k]["Sample Cov"] = np.zeros((self.word_vec_size, self.word_vec_size))
# Sample co-variances and document-topic counts
co_variances = [np.zeros((self.word_vec_size, self.word_vec_size)) for _ in range(self.numtopics)]
for docID in self.corpus.keys():
for topic, word in zip(self.corpus[docID]['topics'], self.corpus[docID]['words']):
topic = int(topic)
wv = self.word_vecs[word]
sample_mu = self.topic_params[topic]["Topic Mean"]
self.doc_topic_CT[docID, topic] += 1. # Ndk
self.topic_params[topic]['Topic Sum'] += wv # sum of topic vectors
# self.topic_params[topic]["Sample Cov"] += np.outer(wv - sample_mu, wv-sample_mu)
co_variances[topic] += np.outer(wv - sample_mu, wv - sample_mu) #+ self.priors.psi
# another attempt at doing the covariances, closer to the paper
# co_variances = [np.zeros((self.word_vec_size, self.word_vec_size)) for _ in range(self.numtopics)]
# for docID in self.corpus.keys():
# for topic, word in zip(self.corpus[docID]['topics'], self.corpus[docID]['words']):
# topic = int(topic)
# sample_mu = self.topic_params[topic]["Topic Mean"]
# Nk = np.sum(self.doc_topic_CT[:, topic], axis=0)
# scale = (self.priors.kappa * Nk) / (self.priors.kappa + Nk)
# co_variances[topic] = scale * np.outer(sample_mu - self.priors.mu, sample_mu - self.priors.mu)
# co_variances[topic] += self.topic_params[topic]["Sample Cov"] + self.priors.psi
# Normalize the sample co-variance
for k in range(self.numtopics):
co_variances[k] = (co_variances[k] / (np.sum(self.doc_topic_CT[:, k]) - 1.)) + self.priors.psi
# Possible error spot
kappa = self.priors.kappa
nu = self.priors.nu
d = self.word_vec_size
scaleT = (kappa + 1.) / (kappa * (nu - d + 1.)) # Needed to convert L => covariance
for k in range(self.numtopics): # Init parameters for topic distributions
Nk = np.sum(self.doc_topic_CT[:, k], axis=0)
self.topic_params[k]["Lower Triangle"] = linalg.cholesky(co_variances[k], lower=True,
check_finite=True)
self.topic_params[k]["Topic Count"] = Nk
self.topic_params[k]["Topic Kappa"] = self.priors.kappa + Nk
# 2 * sum_m_i(log(L_i,i)) + log(scaleT)
self.topic_params[k]["Chol Det"] = np.sum(np.log(np.diag(self.topic_params[k]["Lower Triangle"]))) * 2 \
+ np.log(scaleT)
print np.sum(self.doc_topic_CT, axis=0)
print "Initialization complete"
# ======================================================================================================================
def smart_centroids(self):
print "getting cluster centroids"
from sklearn.cluster import KMeans
vecs = []
for word in self.vocab:
vecs.append(self.word_vecs[word])
km = KMeans(n_clusters=self.numtopics, n_jobs=1, tol=1e-6, init='k-means++')
km.fit(np.array(vecs))
for idx, word in enumerate(self.vocab):
self.word_topics[word] = km.labels_[idx]
vec_matrix = np.array(vecs)
for k in range(self.numtopics):
idx = np.where(km.labels_ == k)
# covar = np.cov(vec_matrix[idx] - km.cluster_centers_[k], rowvar=0) # Mean centered covariance matrix
# self.topic_params[k]['Topic Covar'] = covar
self.topic_params[k]["Topic Mean"] = km.cluster_centers_[k]
return km.cluster_centers_, km
# ======================================================================================================================
def sample(self):
"""
        Collapsed Gibbs Sampler derived from Steyvers' method, adapted for continuous word-vectors
:return: None. Readjusts topic distribution parameters and topic-counts
"""
ASSIGN_NEW_TOPICS = True
MULTINOMIAL_TOPIC_SELECTION = True
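        # Sketch of the update implemented below (collapsed Gibbs step from the
        # R. Das Gaussian LDA paper, in log form):
        #   log p(z_di = k | rest) ~ log(N_kd + alpha) + log t(v_di | mu_k, Sigma_k)
        # where N_kd is the count of topic k in document d and the Student-t
        # density is evaluated by draw_new_wt_assgns().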
for docID in self.corpus.iterkeys():
for idx in range(len(self.corpus[docID]['words'])):
word = self.corpus[docID]['words'][idx]
current_topic = self.corpus[docID]['topics'][idx]
# former_dists = copy.deepcopy(self.topic_params)
self.recalculate_topic_params(word, current_topic, docID, "-")
log_posterior = np.zeros(self.numtopics)
for k in range(self.numtopics): # Get PDF for each possible word-topic assignment
log_pdf = self.draw_new_wt_assgns(word, k)
Nkd = self.doc_topic_CT[docID, k] # Count of topic in doc, Ndk
log_posterior[k] = log(Nkd + self.alpha) + log_pdf # actual collapsed sampler from R. Das Paper, except in log form
max_log_posterior = np.max(log_posterior)
log_posterior -= max_log_posterior
normalized_post = np.exp(log_posterior - np.log(np.sum(np.exp(log_posterior))))
if MULTINOMIAL_TOPIC_SELECTION:
new_topic = np.argmax(np.random.multinomial(1, pvals=normalized_post))
else:
new_topic = np.argmax(normalized_post)
if not ASSIGN_NEW_TOPICS:
new_topic = current_topic
self.corpus[docID]['topics'][idx] = new_topic
self.recalculate_topic_params(word, new_topic, docID, "+")
# last_word = word
# last_word_current_topic = current_topic
# last_word_new_topic = new_topic
if docID % 20 == 0:
print "{0} docs sampled".format(int(docID))
self.display_results()
# ======================================================================================================================
def recalculate_topic_params(self, word, topic, docID, operation):
"""
        :param word: the word whose topic assignment is being changed
        :param topic: index of the affected topic
        :param docID: ID of the document containing the word
        :param operation: "+" to add the word's contribution, "-" to remove it
:return: None - sets internal class variables
"""
# Update the topic-count table
UPDATE_COUNT = True
if UPDATE_COUNT:
self.update_document_topic_counts(word, topic, docID, operation)
# Update parameters related to the priors
topic_count = np.sum(self.doc_topic_CT[:, topic], axis=0) # N_k
kappa_k = self.priors.kappa + topic_count # K_k
nu_k = self.priors.nu + topic_count # V_k
scaleT = (kappa_k + 1.) / (kappa_k * (nu_k - self.word_vec_size + 1.)) # Needed to convert L => covariance
UPDATE_DISTS = True
if operation == "-": # Remove data point contribution to the topic distribution
# Original equation is:
# \Sigma \leftarrow \Sigma - (k_0 + N + 1)/(k_0 + N)(X_{n} - \mu_{n-1})(X_{n} - \mu_{n-1})^T
if UPDATE_DISTS:
L = self.topic_params[topic]["Lower Triangle"]
centered = self.word_vecs[word] - self.topic_params[topic]["Topic Mean"] # Get rank-1 matrix from point
# centered = (self.topic_params[topic]["Topic Mean"] - self.word_vecs[word]) # paper says this way
centered *= np.sqrt((kappa_k + 1.) / kappa_k) # Scale for recursive downdate
L = self.solver.chol_downdate(L, centered) # Choleksy downdate
self.topic_params[topic]["Lower Triangle"] = L
# Correct the mean for the removed point
sample_mean_K = self.topic_sample_mean(topic, topic_count) # V-Bar_k
# topic_sum = self.topic_params[topic]["Topic Sum"]
topic_mean = ((self.priors.kappa * self.priors.mu) + (topic_count * sample_mean_K)) / kappa_k # Mu_k
# topic_mean = self.topic_params[topic]["Topic Mean"]
# topic_mean *= kappa_k+1
# topic_mean -= self.word_vecs[word]
# topic_mean /= kappa_k
else: # operation == "+": # Add data point contribution to the topic distribution
# Correct the mean for the added point
# Trying a new method of calculating the Mean
# topic_mean = self.topic_params[topic]["Topic Mean"]
# topic_mean *= kappa_k-1
# topic_mean += self.word_vecs[word]
# topic_mean /= kappa_k
sample_mean_K = self.topic_sample_mean(topic, topic_count) # V-Bar_k
# topic_sum = self.topic_params[topic]["Topic Sum"]
topic_mean = ((self.priors.kappa * self.priors.mu) + (topic_count * sample_mean_K)) / kappa_k # Mu_k
# topic_mean = ((self.priors.kappa * self.priors.mu) + (sample_mean_K)) / kappa_k # Mu_k
# Original equation is:
# \Sigma \leftarrow \Sigma + (k_0 + N + 1)/(k_0 + N)(X_{n} - \mu_{n-1})(X_{n} - \mu_{n-1})^T
if UPDATE_DISTS:
L = self.topic_params[topic]["Lower Triangle"]
centered = (self.word_vecs[word] - topic_mean)
# centered = (topic_mean - self.word_vecs[word])# Get rank-1 matrix from point
# centered = centered.dot(centered.T)
centered *= np.sqrt(kappa_k / (kappa_k - 1.)) # Scale for recursive update
L = self.solver.chol_update(L, centered) # Choleksy update
self.topic_params[topic]["Lower Triangle"] = L
L = self.topic_params[topic]["Lower Triangle"]
self.topic_params[topic]["Chol Det"] = (np.sum(np.log(np.diag(L))) * 2) + np.log(scaleT) # 2 * sum_m_i(log(L_i,i))
self.topic_params[topic]["Topic Count"] = topic_count
self.topic_params[topic]["Topic Kappa"] = kappa_k
self.topic_params[topic]["Topic Nu"] = nu_k
if UPDATE_DISTS:
self.topic_params[topic]["Topic Mean"] = topic_mean
# ======================================================================================================================
def topic_sample_mean(self, topic, topic_count):
"""
For a given topic, method calculates scaled topic Mean and Covariance (V-bar_k and C_k in R. Das Paper)
\sum_d \sum_z=i (V_di) / N_k
^^ =
wordvec_sum = array[zero] > shape(word-vec dimensionality)
for each doc:
for each word that has topic assignment i:
wordvec_sum + word
wordvec_sum / count of topic
        N_k = count of topic occurrences across all documents
:param topic_id: The topic ID, integer
:param topic_count: A copy of the document-topic counts table, numpy array
:return: mean and covariance matrix. Mean will be of shape (1 X word-vector dimension).
Covariance will be matrix of size (word-vector dim X word-vector dim)
"""
scaled_topic_mean = self.topic_params[topic]["Topic Sum"] / \
float(topic_count) if topic_count > 0 else np.zeros(self.word_vec_size)
return scaled_topic_mean
# ======================================================================================================================
# noinspection PyStatementEffect
def update_document_topic_counts(self, word, topic, docID, operation):
if operation == "-":
self.topic_params[topic]["Topic Sum"] -= self.word_vecs[word]
self.doc_topic_CT[docID, topic] -= 1.
if operation == "+":
self.topic_params[topic]["Topic Sum"] += self.word_vecs[word]
self.doc_topic_CT[docID, topic] += 1.
# ======================================================================================================================
def draw_new_wt_assgns(self, word, topic, new_doc=False, wvmodel=None):
"""
        Log of the probability density function for the Student-T Distribution
        Provides a PDF for a word (really a word-vector) in a given topic distribution.
        :param word: string of the word to find the probability of word-topic assignment
        :param topic: Integer, a topic id to reference a topic distribution and its params
:param new_doc: False (default), optional. True if predicting topics from unseen document/not currently training
:param wvmodel: None by default. If predicting topics from an unseen document, requires a loaded word2vec model
from GenSim
:type wvmodel: gensim.models.word2vec.Word2Vec
:return: log of PDF from t-distribution for a given word. Type: Float
"""
cov_det = self.topic_params[topic]["Chol Det"]
Nk = self.topic_params[topic]["Topic Count"]
# (V_di - Mu)
centered = self.word_vecs[word] - self.topic_params[topic]["Topic Mean"]
d = self.word_vec_size # dimensionality of word vector
kappa_k = self.topic_params[topic]["Topic Kappa"]
scaleT = np.sqrt((kappa_k + 1.) / kappa_k * (self.priors.nu - d + 1.)) # Covariance = chol / sqrt(scaleT)
nu = self.priors.nu + Nk - d + 1.
L = self.topic_params[topic]["Lower Triangle"]
# linalg.cho_solve((L, True), centered, overwrite_b=True,
# check_finite=False)
# inv = centered.T.dot(centered) # (L^-1b)^T(L^-1b)
#
# # Log Multivariate T - PDF
# return gammaln((nu + d) / 2.) - \
# (gammaln(nu / 2.) + (d / 2.) * (log(nu) + log(pi))
# + (0.5 * cov_det) + ((nu + d) / 2.) * log(1. + inv/nu))
return self.multivariate_t_pdf(nu, cov_det, d, scaleT, centered, L)
@jit
def multivariate_t_pdf(self, nu, cov_det, d, scaleT, centered, L):
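        # Computes the log multivariate Student-t density (sketch of the formula
        # implemented by the return expression below):
        #   ln t_nu(x | mu, Sigma) = ln Gamma((nu + d) / 2) - ln Gamma(nu / 2)
        #       - (d / 2) * ln(nu * pi) - (1 / 2) * ln|Sigma|
        #       - ((nu + d) / 2) * ln(1 + (x - mu)^T Sigma^-1 (x - mu) / nu)
        # with the quadratic form obtained via the Cholesky factor L instead of
        # an explicit matrix inverse.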
L *= scaleT
linalg.cho_solve((L, True), centered, overwrite_b=True,
check_finite=False)
inv = centered.T.dot(centered) # (L^-1b)^T(L^-1b)
# Log Multivariate T - PDF
return gammaln((nu + d) / 2.) - \
(gammaln(nu / 2.) + (d / 2.) * (log(nu) + log(pi))
+ (0.5 * cov_det) + ((nu + d) / 2.) * log(1. + inv/nu))
# ======================================================================================================================
def bin_search(self, pdf, key, start, end): # Not using
if start > end:
return start
mid = int((start + end) / 2)
if key == pdf[mid]:
return mid + 1
if key < pdf[mid]:
return self.bin_search(pdf, key, start, mid-1)
if key > pdf[mid]:
return self.bin_search(pdf, key, mid + 1, end)
else:
return None
# ======================================================================================================================
def extract_topics_new_doc(self, doc, wv_model):
"""
:type wv_model: gensim.models.word2vec.Word2Vec
        :param doc: Document to extract topics from. Should be one string
:param wv_model: a loaded word2vec model with same dimensionality as training one. Use GenSim Word2Vec
:return: List of tuples (word, topic)
Method removes words in doc that are not in the Word2Vec corpus, and extracts word-topic assignments for each
word by drawing densities from the multivariate student-T distribution. Uses MLE method.
"""
        assert wv_model.vector_size == self.word_vec_size, "word-vector dimensionality does not match trained topic " \
                                                           "distribution dimensions ({0})".format(self.word_vec_size)
filtered_doc = []
nkd = defaultdict(float)
for word in doc.split():
try:
wv_model[word]
filtered_doc.append(word) # Remove words from doc that are not in word-vec model
nkd[self.word_topics[word]] += 1.
except KeyError:
continue
        print "{} words removed from doc".format(len(doc.split()) - len(filtered_doc))
word_topics = []
c = Counter(self.word_topics.values())
for word in filtered_doc:
posterior = []
for k in range(self.numtopics):
# print nkd[k]
prob = self.draw_new_wt_assgns(word, k, wvmodel=wv_model, new_doc=True) * log(self.alpha + c[k])
print "probablity of {0} for word {1} assigned to topic {2}".format(prob, word, k)
posterior.append(prob)
            posterior = np.array(posterior) / np.sum(posterior)
word_topics.append((word, np.argmax(posterior)))
return word_topics
# ======================================================================================================================
def display_results(self):
print 'print topic means'
for k in range(self.numtopics):
print "TOPIC {0}:".format(k), \
zip(*self.wvmodel.most_similar(positive=[self.topic_params[k]["Topic Mean"]], topn=9))[0]
if k == max(range(self.numtopics)):
print "\n"
print "Document-Topic Counts:,", np.sum(self.doc_topic_CT, axis=0).astype(int)
# ======================================================================================================================
def explore_topics(self):
self.word_counts = {word: np.zeros(self.numtopics, dtype=int) for word in self.vocab}
for docID in self.corpus.keys():
for topic, word in zip(self.corpus[docID]['topics'], self.corpus[docID]['words']):
self.word_counts[word][int(topic)] += 1
counts = np.array(self.word_counts.values())
ranked = np.argsort(counts, axis=0)[::-1][:20, :]
words = np.array(self.word_counts.keys())
for k in range(self.numtopics):
print words[ranked[:, k]]
if __name__ == "__main__":
f = '/Users/michael/Documents/GaussianLDA/data/cleannips.txt'
with open(f, 'r') as fi:
docs = fi.read().splitlines() # These are all cleaned out
fi.close()
wordvec_fileapth = "/Users/michael/Documents/Gaussian_LDA-master/data/glove.wiki/glove.6B.50d.txt"
start = time.time()
g = Gauss_LDA(50, docs, word_vector_filepath=wordvec_fileapth, alpha=0.7)
g.fit(3)
print time.time() - start | mansweet/GaussianLDA | FastGaussianLDA2.py | Python | apache-2.0 | 27,002 | 0.004037 |
import org.openlca.core.database.CategoryDao as CategoryDao
dbUtil.createTable("tbl_dq_systems",
"CREATE TABLE tbl_dq_systems ( "
+ "id BIGINT NOT NULL, "
+ "name VARCHAR(255), "
+ "ref_id VARCHAR(36), "
+ "version BIGINT, "
+ "last_change BIGINT, "
+ "f_category BIGINT, "
+ "f_source BIGINT, "
+ "description CLOB(64 K), "
+ "has_uncertainties SMALLINT default 0, "
+ "PRIMARY KEY (id)) ")
dbUtil.createTable("tbl_dq_indicators",
"CREATE TABLE tbl_dq_indicators ( "
+ "id BIGINT NOT NULL, "
+ "name VARCHAR(255), "
+ "position INTEGER NOT NULL, "
+ "f_dq_system BIGINT, "
+ "PRIMARY KEY (id)) ")
dbUtil.createTable("tbl_dq_scores", "CREATE TABLE tbl_dq_scores ( "
+ "id BIGINT NOT NULL, "
+ "position INTEGER NOT NULL, "
+ "description CLOB(64 K), "
+ "label VARCHAR(255), "
+ "uncertainty DOUBLE default 0, "
+ "f_dq_indicator BIGINT, "
+ "PRIMARY KEY (id)) ")
dbUtil.createColumn("tbl_processes", "dq_entry", "dq_entry VARCHAR(50)")
dbUtil.createColumn("tbl_processes", "f_dq_system", "f_dq_system BIGINT")
dbUtil.createColumn("tbl_processes", "f_exchange_dq_system", "f_exchange_dq_system BIGINT")
dbUtil.createColumn("tbl_processes", "f_social_dq_system", "f_social_dq_system BIGINT")
dbUtil.renameColumn("tbl_exchanges", "pedigree_uncertainty", "dq_entry", "VARCHAR(50)")
dbUtil.createColumn("tbl_product_systems", "cutoff", "cutoff DOUBLE")
dao = CategoryDao(db)
roots = dao.getRootCategories()
for category in roots:
dao.update(category)
dbUtil.setVersion(6) | GreenDelta/olca-updates | update-src/001_db_schema_update_v6_f0..b7/script.py | Python | mpl-2.0 | 1,555 | 0.036656 |
from django.conf.urls.defaults import *
from django.contrib import admin
# Set up the admin site
admin.autodiscover()
urlpatterns = patterns('',
(r'^knux/', include(admin.site.urls)),
# Viewing and adding tips/locations
(r'^locations/add/$', 'drinkkit.redditors.views.add_location'),
(r'^locations/search/$', 'drinkkit.redditors.views.find_locations'),
(r'^locations/nearby/$', 'drinkkit.redditors.views.nearby_locations'),
(r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/add_tip/$', 'drinkkit.redditors.views.add_tip'),
(r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/checkin/$', 'drinkkit.redditors.views.checkin_location'),
(r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/$', 'drinkkit.redditors.views.view_location'),
# Query and see who's getting into what
(r'^redditor/(?P<redditor_name>[a-zA-Z0-9_.-]+)/$', 'drinkkit.redditors.views.view_redditor'),
# Registration
(r'^register/$', 'drinkkit.redditors.views.register'),
# User forms - password, logout, login, etc.
(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
(r'^unauth/$', 'django.contrib.auth.views.logout_then_login'),
(r'^auth/$', 'django.contrib.auth.views.login'),
(r'^/*', 'drinkkit.redditors.views.home'),
)
| ryanmcgrath/drinkkitcom | urls.py | Python | mit | 1,223 | 0.023712 |
def import_once(modulenames, silent=1):
## import_once
## Fedmich Last modified: 3:38 PM 5/15/2006
## version 1.1
## Usage:
## import_once('os')
## import_once( ["os", 'sys'] )
    if not isinstance(modulenames, (list, tuple)):
        modulenames = [modulenames]
imported = 0
for modulename in modulenames:
print modulename
if globals().has_key(modulename):
if not silent: print """Already imported module "%s"...""" % modulename
imported +=1
else:
try:
if not silent: print """%s is not yet imported so import it now...""" % modulename
globals()[modulename] = __import__(modulename, globals(), locals(), [])
imported += 1
except:
if not silent: print """Error while importing "%s"...""" % modulename
    return imported == len(modulenames)  # return True if all modules were successfully imported
print import_once( ("os", "sys") )
import_once( "sys")
import_once("oyster")
import_once("psyco", silent=0) #silent is used for debugging...
print os
print sys
print os.path.basename(r"c:\WINNT")
| ActiveState/code | recipes/Python/496703_importonce/recipe-496703.py | Python | mit | 1,231 | 0.02762 |
"""
Image pipeline viewer.
Note: currently under heavy construction.
"""
import tkinter
import tkinter.ttk
import dh.gui.tk
import dh.image
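# Minimal usage sketch (illustrative; assumes `I` is an image that dh.image can
# display):
#     viewer = Viewer()
#     viewer.view(I)  # adds the image and opens the Tk window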
##
## basic classes
##
class Viewer():
def __init__(self):
self.images = []
self.n = None
self.pipeline = Pipeline()
self.pipeline.add("core.convert")
self.pipeline.add("core.asgray")
#self.pipeline.add("core.invert")
#self.pipeline.add("core.normalize")
self.pipeline.add("core.shift")
#self.pipeline.add("core.fft")
#self.pipeline.add("core.normalize")
self.pipeline.add("core.log")
#self.pipeline.add("core.gamma")
#self.pipeline.add("core.threshold")
#self.pipeline.add("core.rotate")
def select(self, n):
N = len(self.images)
if N == 0:
self.n = None
else:
self.n = n % N
return self.n
def first(self):
self.select(0)
def prev(self):
try:
self.select(self.n - 1)
except TypeError:
pass
def next(self):
try:
self.select(self.n + 1)
except TypeError:
pass
def last(self):
self.select(-1)
def add(self, I):
self.images.append(I.copy())
self.last()
def clear(self):
self.images = []
self.first()
def show(self):
window = _ViewerWindow(self)
window.run()
def view(self, I):
self.add(I)
self.show()
def selectedImage(self):
return self.images[self.n]
def applyPipeline(self):
return self.pipeline(self.selectedImage())
class _ViewerWindow(dh.gui.tk.Application):
def __init__(self, viewer):
super(_ViewerWindow, self).__init__(
title="Viewer",
minSize=(250, 250),
)
self.viewer = viewer
self.updateFilterFrame()
self.updateImage()
def initWidgets(self):
# key bindings
self.bind("<Escape>", lambda _: self.close())
self.bind("<q>", lambda _: self.close())
self.bind("<Left>", lambda _: (self.viewer.prev(), self.updateImage()))
self.bind("<Right>", lambda _: (self.viewer.next(), self.updateImage()))
# main frame
self.mainFrame = tkinter.ttk.Frame(self)
self.mainFrame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=tkinter.YES)
# filter frame
self.filterFrame = tkinter.ttk.Frame(self.mainFrame)
self.filterFrame.pack(side=tkinter.LEFT, anchor=tkinter.N, padx=2, pady=2)
# image canvas
self.imageCanvas = dh.gui.tk.ImageCanvas(self.mainFrame)
self.imageCanvas.pack(side=tkinter.LEFT, anchor=tkinter.N, fill=tkinter.BOTH, expand=tkinter.YES)
# status bar
self.statusBar = dh.gui.tk.StatusBar(self)
self.statusBar.pack(side=tkinter.BOTTOM, fill=tkinter.X, expand=tkinter.NO)
def updateFilterFrame(self):
for node in self.viewer.pipeline.nodes:
node.gui(parent=self.filterFrame, onChangeCallback=self.updateImage).pack(fill="x", padx=1, pady=1, expand=True)
def updateImage(self, *args, **kwargs):
with dh.utils.Timer() as t:
I = self.viewer.applyPipeline()
self.imageCanvas.setImage(I)
self.updateStatusBar("{shape}, {dtype}, {time}ms".format(
shape=I.shape,
dtype=I.dtype,
time=dh.utils.around(t() * 1000.0),
))
def updateStatusBar(self, text):
self.statusBar.setText(text)
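# Minimal usage sketch (added for illustration; `I` stands for any image array
# that dh.image can handle -- the variable itself is hypothetical):
#
#   viewer = Viewer()
#   viewer.view(I)    # stores the image and opens the Tk window (add + show)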
##
## pipeline framework
##
class Pipeline():
def __init__(self):
# nodes
self.nodes = []
self.add("core.source")
def __call__(self, I):
J = I.copy()
for node in self.nodes:
J = node(J)
return J
def add(self, node, position=None):
"""
Inserts processing before the `position`-th slot of the pipeline.
"""
if position is None:
position = len(self.nodes)
if isinstance(node, str):
uid = node
node = Node.instances[uid]
self.nodes.insert(position, node)
def remove(self, position):
del self.nodes[position]
def save(self, filename):
raise NotImplementedError()
def load(self, filename):
raise NotImplementedError()
class Node():
"""
Class for a processing pipeline element (node), which automatically
registers its instances.
"""
# keeps references to all instances of this class
instances = {}
def __init__(self, uid, description=None, tags=None, f=None, parameters=(), cache=False):
# register this instance
if uid not in type(self).instances:
type(self).instances[uid] = self
else:
raise ValueError("Node with uid '{uid}' is already registered".format(uid=uid))
# other properties
self.uid = uid
self.description = description
self.tags = tags
self.f = f
self.parameters = list(parameters)
# cache
self.useCache = cache
self.cache = {}
def __call__(self, *args, **kwargs):
kwargs.update(self.parameterValues())
if self.useCache:
key = dh.utils.ohash((args, kwargs), "hex", 64)
if key not in self.cache:
self.cache[key] = self.f(*args, **kwargs)
return self.cache[key]
else:
return self.f(*args, **kwargs)
def parameterValues(self):
return {parameter.name: parameter() for parameter in self.parameters}
def gui(self, parent, onChangeCallback):
"""
Constructs and returns a GUI frame for this filter.
"""
# master frame
frame = tkinter.ttk.Frame(parent, relief="raised")
# usable part of the frame
innerFrame = tkinter.ttk.Frame(frame)
innerFrame.pack(fill="x", expand=True, padx=6, pady=3)
# header line
header = tkinter.ttk.Frame(innerFrame)
header.pack(side = tkinter.TOP, fill = "x", expand = True)
tkinter.ttk.Label(header, text=self.uid, font="Sans 10 bold", anchor = tkinter.W, justify = tkinter.LEFT).pack(side = tkinter.LEFT, fill = "x", expand = True)
# description line
if self.description is not None:
details = tkinter.ttk.Frame(innerFrame)
details.pack(side = tkinter.TOP, fill = "x", expand = True)
tkinter.ttk.Label(details, text=self.description, font="Sans 8 italic", anchor = tkinter.W, justify = tkinter.LEFT).pack(side = tkinter.LEFT, fill = "x", expand = True)
# parameter frame
parameterFrame = tkinter.ttk.Frame(innerFrame)
parameterFrame.pack(side = tkinter.TOP, fill = "x", expand = True)
for (row, parameter) in enumerate(self.parameters):
(labelFrame, valueFrame) = parameter.gui(parent=parameterFrame, onChangeCallback=onChangeCallback)
labelFrame.grid(row = row, column = 0, padx = 0, sticky = tkinter.W)
valueFrame.grid(row = row, column = 1, padx = 10, sticky = tkinter.W)
#tkinter.ttk.Scale(parameterFrame, from_=0, to=100).grid(row = n, column = 1)
return frame
class SwitchableNode(Node):
"""
Processing node which automatically has one bool parameter to enable or
disable the processing.
"""
def __init__(self, *args, **kwargs):
# parent initialization
super().__init__(*args, **kwargs)
# add "enabled" parameter
self.parameters = [
BoolNodeParameter(
name="enabled",
default=True,
)
] + self.parameters
# wrap function
self.g = self.f
def f(I, enabled, **kwargs):
if enabled:
return self.g(I=I, **kwargs)
else:
return I
self.f = f
class NodeParameter():
def __init__(self, name, label=None):
self.name = name
if label is not None:
self.label = label
else:
self.label = name
def guiLabelFrame(self, parent):
return tkinter.ttk.Label(parent, text=self.label, font="Sans 8", anchor = tkinter.W, justify = tkinter.LEFT)
def guiValueFrame(self, parent, onChangeCallback):
raise NotImplementedError("Use a subclass of 'NodeParameter'")
def gui(self, parent, onChangeCallback):
return (
self.guiLabelFrame(parent=parent),
self.guiValueFrame(parent=parent, onChangeCallback=onChangeCallback),
)
def __call__(self):
raise NotImplementedError("Use a subclass of 'NodeParameter'")
class BoolNodeParameter(NodeParameter):
def __init__(self, name, label=None, default=True):
super().__init__(name=name, label=label)
self.default = default
self.variable = None
def guiValueFrame(self, parent, onChangeCallback):
self.variable = tkinter.IntVar()
self.variable.set(self.default)
return tkinter.ttk.Checkbutton(parent, text="", variable=self.variable, command=onChangeCallback, takefocus=tkinter.NO)
def __call__(self):
if self.variable is not None:
return bool(self.variable.get())
else:
return None
class RangeNodeParameter(NodeParameter):
def __init__(self, name, label=None, start=0.0, end=1.0, step=0.01, default=0.0):
super().__init__(name=name, label=label)
self.start = start
self.end = end
self.step = step
self.default = default
self.slider = None
def guiValueFrame(self, parent, onChangeCallback):
self.slider = tkinter.ttk.Scale(parent, from_=self.start, to=self.end, command=onChangeCallback)
self.slider.set(self.default)
return self.slider
def __call__(self):
if self.slider is not None:
return self.slider.get()
else:
return None
class SelectionNodeParameter(NodeParameter):
"""
The parameter value can be chosen from a list of possible values.
"""
def __init__(self, name, label=None, values=(), default=None):
super().__init__(name=name, label=label)
        self.labels = tuple(str(value) for value in values)  # materialize once so the labels survive repeated GUI builds
self.values = {str(value): value for value in values}
if (default is not None) and (default in values):
self.default = str(default)
elif len(values) > 0:
self.default = str(values[0])
else:
self.default = None
self.variable = None
def guiValueFrame(self, parent, onChangeCallback):
# create variable and set default value
self.variable = tkinter.StringVar()
if self.default is not None:
self.variable.set(self.default)
# create dropdown menu
select = tkinter.OptionMenu(parent, self.variable, *self.labels, command=onChangeCallback)
select.config(width = 10)
return select
def __call__(self):
if self.variable is not None:
return self.values[self.variable.get()]
else:
return None
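# Usage sketch added for illustration (not part of the original module). It is
# meant to be read after the whole module has been imported, so that the
# "core.*" nodes defined below are already registered. The uid "example.blur"
# and the blur() helper are hypothetical stand-ins for any image -> image
# function; note that parameters resolve to None until their GUI widgets have
# been built.
#
#   def blur(I, radius):
#       ...  # placeholder for an image -> image operation
#
#   Node(
#       uid="example.blur",
#       description="Hypothetical smoothing step",
#       f=blur,
#       parameters=[
#           RangeNodeParameter(name="radius", start=0.0, end=5.0, default=1.0),
#       ],
#   )
#
#   pipeline = Pipeline()           # a fresh pipeline always starts with "core.source"
#   pipeline.add("example.blur")    # registered nodes can be added by their uid
#   J = pipeline(I)                 # run every node over a copy of the input image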
##
## pipeline processing nodes
##
Node(
uid="core.source",
description="Original image",
f=dh.image.identity,
)
SwitchableNode(
uid="core.convert",
f=dh.image.convert,
parameters=[
SelectionNodeParameter(
name="dtype",
values=("uint8", "uint16", "float"),
default="uint8",
),
],
)
SwitchableNode(
uid="core.asgray",
f=dh.image.asgray,
)
SwitchableNode(
uid="core.invert",
f=dh.image.invert,
)
Node(
uid="core.normalize",
f=dh.image.normalize,
parameters=[
SelectionNodeParameter(
name="mode",
values=("none", "minmax", "percentile"),
default="percentile",
),
RangeNodeParameter(
name="q",
start=0.0,
end=50.0,
step=0.1,
default=2.0,
),
],
)
SwitchableNode(
uid="core.log",
f=dh.image.log,
)
SwitchableNode(
uid="core.gamma",
description="Power-law transformation",
f=dh.image.gamma,
parameters=[
RangeNodeParameter(
name="gamma",
start=1.0,
end=10.0,
step=0.01,
default=1.0,
),
BoolNodeParameter(
name="inverse",
default=False,
),
],
cache=True,
)
SwitchableNode(
uid="core.threshold",
description="Global threshold",
f=lambda I, theta: dh.image.threshold(I=I, theta=theta, relative=True),
parameters=[
RangeNodeParameter(
name="theta",
start=0.0,
end=1.0,
step=0.01,
default=0.5,
),
],
)
SwitchableNode(
uid="core.shift",
f=dh.image.shift,
parameters=[
RangeNodeParameter(
name="dx",
start=0.0,
end=1.0,
step=0.01,
default=0.0,
),
RangeNodeParameter(
name="dy",
start=0.0,
end=1.0,
step=0.01,
default=0.0,
),
],
)
SwitchableNode(
uid="core.rotate",
f=dh.image.rotate,
parameters=[
SelectionNodeParameter(
name="degree",
values=(0, 90, 180, 270),
default=90,
)
],
)
#SwitchableNode(
# uid="core.fft",
# f=dh.image.fft,
#)
| dhaase-de/dh-python-dh | dh/image/pipeline.py | Python | mit | 13,563 | 0.007299 |
from termcolor import colored
def test_Browser_logs(app):
wd = app.wd
sidebar = wd.find_element_by_xpath("//td[@id='sidebar']")
sidebar.find_element_by_xpath(".//span[normalize-space(.)='Catalog']").click()
wd.find_element_by_xpath("//i[@class='fa fa-folder']/../a").click()
products_qty = len(wd.find_elements_by_xpath(("//input[contains(@name,'products')]/../..")))
wd.get_log("browser")
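    # Note added for clarity (assumption about intent): reading the browser log
    # here discards entries accumulated during navigation, so each per-product
    # click below starts from an empty buffer -- webdriver's get_log() returns
    # and clears the pending entries.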
for i in range(1, products_qty + 1):
wd.find_element_by_xpath("//input[contains(@name,'products[%s]')]/../..//a[not(contains(@title,'Edit'))]" % i).click()
log = wd.get_log("browser")
if len(log) != 0:
print(colored("WARNING! LOG EXIST(S)", "red"))
for entry in log:
print(entry)
wd.find_element_by_name("cancel").click()
| Dob3r/python_seleniumwebdriver | Tests/test_Browser_logs.py | Python | apache-2.0 | 819 | 0.004884 |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cdn.manager import base
JSON_HOME = {
"resources": {
"rel/cdn": {
"href-template": "services{?marker,limit}",
"href-vars": {
"marker": "param/marker",
"limit": "param/limit"
},
"hints": {
"allow": [
"GET"
],
"formats": {
"application/json": {}
}
}
}
}
}
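# Illustrative note (not from the original source): "href-template" follows the
# URI Template syntax used by JSON Home documents, so a client expanding it with
# both variables would request e.g. "services?marker=<last-seen-id>&limit=20";
# the example values are made up.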
class DefaultV1Controller(base.V1Controller):
def __init__(self, manager):
super(DefaultV1Controller, self).__init__(manager)
self.JSON_HOME = JSON_HOME
def get(self):
return self.JSON_HOME
| obulpathi/cdn1 | cdn/manager/default/v1.py | Python | apache-2.0 | 1,291 | 0 |
# ----------------------------------------------------------------------------------
# Electrum plugin for the Digital Bitbox hardware wallet by Shift Devices AG
# digitalbitbox.com
#
import base64
import binascii
import hashlib
import hmac
import json
import math
import os
import re
import struct
import sys
import time
from electrum.crypto import sha256d, EncodeAES_base64, EncodeAES_bytes, DecodeAES_bytes, hmac_oneshot
from electrum.bitcoin import (TYPE_ADDRESS, push_script, var_int, public_key_to_p2pkh,
is_address)
from electrum.bip32 import BIP32Node
from electrum import ecc
from electrum.ecc import msg_magic
from electrum.wallet import Standard_Wallet
from electrum import constants
from electrum.transaction import Transaction
from electrum.i18n import _
from electrum.keystore import Hardware_KeyStore
from ..hw_wallet import HW_PluginBase
from electrum.util import to_string, UserCancelled, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.network import Network
from electrum.logging import get_logger
_logger = get_logger(__name__)
try:
import hid
DIGIBOX = True
except ImportError as e:
DIGIBOX = False
# ----------------------------------------------------------------------------------
# USB HID interface
#
def to_hexstr(s):
return binascii.hexlify(s).decode('ascii')
def derive_keys(x):
h = sha256d(x)
h = hashlib.sha512(h).digest()
return (h[:32],h[32:])
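# Note added for clarity: derive_keys() stretches the secret into a 64-byte
# digest and splits it in half; hid_send_encrypt() below uses the first half as
# the AES encryption key and the second half as the HMAC-SHA256 authentication
# key (encrypt-then-MAC).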
MIN_MAJOR_VERSION = 5
ENCRYPTION_PRIVKEY_KEY = 'encryptionprivkey'
CHANNEL_ID_KEY = 'comserverchannelid'
class DigitalBitbox_Client():
def __init__(self, plugin, hidDevice):
self.plugin = plugin
self.dbb_hid = hidDevice
self.opened = True
self.password = None
self.isInitialized = False
self.setupRunning = False
self.usbReportSize = 64 # firmware > v2.0.0
def close(self):
if self.opened:
try:
self.dbb_hid.close()
except:
pass
self.opened = False
def timeout(self, cutoff):
pass
def label(self):
return " "
def is_pairable(self):
return True
def is_initialized(self):
return self.dbb_has_password()
def is_paired(self):
return self.password is not None
def has_usable_connection_with_device(self):
try:
self.dbb_has_password()
except BaseException:
return False
return True
def _get_xpub(self, bip32_path):
if self.check_device_dialog():
return self.hid_send_encrypt(('{"xpub": "%s"}' % bip32_path).encode('utf8'))
def get_xpub(self, bip32_path, xtype):
assert xtype in self.plugin.SUPPORTED_XTYPES
reply = self._get_xpub(bip32_path)
if reply:
xpub = reply['xpub']
# Change type of xpub to the requested type. The firmware
# only ever returns the mainnet standard type, but it is agnostic
# to the type when signing.
if xtype != 'standard' or constants.net.TESTNET:
node = BIP32Node.from_xkey(xpub, net=constants.BitcoinMainnet)
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
else:
raise Exception('no reply')
def dbb_has_password(self):
reply = self.hid_send_plain(b'{"ping":""}')
if 'ping' not in reply:
raise UserFacingException(_('Device communication error. Please unplug and replug your Digital Bitbox.'))
if reply['ping'] == 'password':
return True
return False
def stretch_key(self, key: bytes):
return to_hexstr(hashlib.pbkdf2_hmac('sha512', key, b'Digital Bitbox', iterations = 20480))
def backup_password_dialog(self):
msg = _("Enter the password used when the backup was created:")
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return None
if len(password) < 4:
msg = _("Password must have at least 4 characters.") \
+ "\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") \
+ "\n\n" + _("Enter password:")
else:
return password.encode('utf8')
def password_dialog(self, msg):
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return False
if len(password) < 4:
msg = _("Password must have at least 4 characters.") + \
"\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") + \
"\n\n" + _("Enter password:")
else:
self.password = password.encode('utf8')
return True
def check_device_dialog(self):
        match = re.search(r'v([0-9]+)\.[0-9]+\.[0-9]+', self.dbb_hid.get_serial_number_string())
if match is None:
raise Exception("error detecting firmware version")
major_version = int(match.group(1))
if major_version < MIN_MAJOR_VERSION:
raise Exception("Please upgrade to the newest firmware using the BitBox Desktop app: https://shiftcrypto.ch/start")
# Set password if fresh device
if self.password is None and not self.dbb_has_password():
if not self.setupRunning:
return False # A fresh device cannot connect to an existing wallet
msg = _("An uninitialized Digital Bitbox is detected.") + " " + \
_("Enter a new password below.") + "\n\n" + \
_("REMEMBER THE PASSWORD!") + "\n\n" + \
_("You cannot access your coins or a backup without the password.") + "\n" + \
_("A backup is saved automatically when generating a new wallet.")
if self.password_dialog(msg):
reply = self.hid_send_plain(b'{"password":"' + self.password + b'"}')
else:
return False
# Get password from user if not yet set
msg = _("Enter your Digital Bitbox password:")
while self.password is None:
if not self.password_dialog(msg):
raise UserCancelled()
reply = self.hid_send_encrypt(b'{"led":"blink"}')
if 'error' in reply:
self.password = None
if reply['error']['code'] == 109:
msg = _("Incorrect password entered.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
else:
# Should never occur
msg = _("Unexpected error occurred.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
# Initialize device if not yet initialized
if not self.setupRunning:
self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet
elif not self.isInitialized:
reply = self.hid_send_encrypt(b'{"device":"info"}')
if reply['device']['id'] != "":
self.recover_or_erase_dialog() # Already seeded
else:
self.seed_device_dialog() # Seed if not initialized
self.mobile_pairing_dialog()
return self.isInitialized
def recover_or_erase_dialog(self):
msg = _("The Digital Bitbox is already seeded. Choose an option:") + "\n"
choices = [
(_("Create a wallet using the current seed")),
(_("Load a wallet from the micro SD card (the current seed is overwritten)")),
(_("Erase the Digital Bitbox"))
]
try:
reply = self.handler.win.query_choice(msg, choices)
except Exception:
return # Back button pushed
if reply == 2:
self.dbb_erase()
elif reply == 1:
if not self.dbb_load_backup():
return
else:
if self.hid_send_encrypt(b'{"device":"info"}')['device']['lock']:
raise UserFacingException(_("Full 2FA enabled. This is not supported yet."))
# Use existing seed
self.isInitialized = True
def seed_device_dialog(self):
msg = _("Choose how to initialize your Digital Bitbox:") + "\n"
choices = [
(_("Generate a new random wallet")),
(_("Load a wallet from the micro SD card"))
]
try:
reply = self.handler.win.query_choice(msg, choices)
except Exception:
return # Back button pushed
if reply == 0:
self.dbb_generate_wallet()
else:
if not self.dbb_load_backup(show_msg=False):
return
self.isInitialized = True
def mobile_pairing_dialog(self):
dbb_user_dir = None
if sys.platform == 'darwin':
dbb_user_dir = os.path.join(os.environ.get("HOME", ""), "Library", "Application Support", "DBB")
elif sys.platform == 'win32':
dbb_user_dir = os.path.join(os.environ["APPDATA"], "DBB")
else:
dbb_user_dir = os.path.join(os.environ["HOME"], ".dbb")
if not dbb_user_dir:
return
try:
# Python 3.5+
jsonDecodeError = json.JSONDecodeError
except AttributeError:
jsonDecodeError = ValueError
try:
with open(os.path.join(dbb_user_dir, "config.dat")) as f:
dbb_config = json.load(f)
except (FileNotFoundError, jsonDecodeError):
return
if ENCRYPTION_PRIVKEY_KEY not in dbb_config or CHANNEL_ID_KEY not in dbb_config:
return
choices = [
_('Do not pair'),
_('Import pairing from the Digital Bitbox desktop app'),
]
try:
reply = self.handler.win.query_choice(_('Mobile pairing options'), choices)
except Exception:
return # Back button pushed
if reply == 0:
if self.plugin.is_mobile_paired():
del self.plugin.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY]
del self.plugin.digitalbitbox_config[CHANNEL_ID_KEY]
elif reply == 1:
# import pairing from dbb app
self.plugin.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY] = dbb_config[ENCRYPTION_PRIVKEY_KEY]
self.plugin.digitalbitbox_config[CHANNEL_ID_KEY] = dbb_config[CHANNEL_ID_KEY]
self.plugin.config.set_key('digitalbitbox', self.plugin.digitalbitbox_config)
def dbb_generate_wallet(self):
key = self.stretch_key(self.password)
filename = ("Electrum-" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".pdf")
msg = ('{"seed":{"source": "create", "key": "%s", "filename": "%s", "entropy": "%s"}}' % (key, filename, to_hexstr(os.urandom(32)))).encode('utf8')
reply = self.hid_send_encrypt(msg)
if 'error' in reply:
raise UserFacingException(reply['error']['message'])
def dbb_erase(self):
self.handler.show_message(_("Are you sure you want to erase the Digital Bitbox?") + "\n\n" +
_("To continue, touch the Digital Bitbox's light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the light or wait for the timeout."))
hid_reply = self.hid_send_encrypt(b'{"reset":"__ERASE__"}')
self.handler.finished()
if 'error' in hid_reply:
raise UserFacingException(hid_reply['error']['message'])
else:
self.password = None
raise UserFacingException('Device erased')
def dbb_load_backup(self, show_msg=True):
backups = self.hid_send_encrypt(b'{"backup":"list"}')
if 'error' in backups:
raise UserFacingException(backups['error']['message'])
try:
f = self.handler.win.query_choice(_("Choose a backup file:"), backups['backup'])
except Exception:
return False # Back button pushed
key = self.backup_password_dialog()
if key is None:
raise Exception('Canceled by user')
key = self.stretch_key(key)
if show_msg:
self.handler.show_message(_("Loading backup...") + "\n\n" +
_("To continue, touch the Digital Bitbox's light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the light or wait for the timeout."))
msg = ('{"seed":{"source": "backup", "key": "%s", "filename": "%s"}}' % (key, backups['backup'][f])).encode('utf8')
hid_reply = self.hid_send_encrypt(msg)
self.handler.finished()
if 'error' in hid_reply:
raise UserFacingException(hid_reply['error']['message'])
return True
def hid_send_frame(self, data):
HWW_CID = 0xFF000000
HWW_CMD = 0x80 + 0x40 + 0x01
data_len = len(data)
        seq = 0
        idx = 0
write = []
while idx < data_len:
if idx == 0:
# INIT frame
write = data[idx : idx + min(data_len, self.usbReportSize - 7)]
self.dbb_hid.write(b'\0' + struct.pack(">IBH", HWW_CID, HWW_CMD, data_len & 0xFFFF) + write + b'\xEE' * (self.usbReportSize - 7 - len(write)))
else:
# CONT frame
write = data[idx : idx + min(data_len, self.usbReportSize - 5)]
self.dbb_hid.write(b'\0' + struct.pack(">IB", HWW_CID, seq) + write + b'\xEE' * (self.usbReportSize - 5 - len(write)))
seq += 1
idx += len(write)
def hid_read_frame(self):
# INIT response
read = bytearray(self.dbb_hid.read(self.usbReportSize))
cid = ((read[0] * 256 + read[1]) * 256 + read[2]) * 256 + read[3]
cmd = read[4]
data_len = read[5] * 256 + read[6]
data = read[7:]
        idx = len(read) - 7
while idx < data_len:
# CONT response
read = bytearray(self.dbb_hid.read(self.usbReportSize))
data += read[5:]
idx += len(read) - 5
return data
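    # Framing summary, derived from the two methods above: every USB report is
    # self.usbReportSize bytes. The INIT frame packs ">IBH" -- the 4-byte
    # channel id HWW_CID, the 1-byte command HWW_CMD and a 2-byte big-endian
    # payload length -- followed by payload bytes and 0xEE padding; CONT frames
    # pack ">IB" (channel id plus a 1-byte sequence counter) before the
    # remaining payload.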
def hid_send_plain(self, msg):
reply = ""
try:
serial_number = self.dbb_hid.get_serial_number_string()
if "v2.0." in serial_number or "v1." in serial_number:
hidBufSize = 4096
self.dbb_hid.write('\0' + msg + '\0' * (hidBufSize - len(msg)))
r = bytearray()
while len(r) < hidBufSize:
r += bytearray(self.dbb_hid.read(hidBufSize))
else:
self.hid_send_frame(msg)
r = self.hid_read_frame()
r = r.rstrip(b' \t\r\n\0')
r = r.replace(b"\0", b'')
r = to_string(r, 'utf8')
reply = json.loads(r)
except Exception as e:
_logger.info(f'Exception caught {repr(e)}')
return reply
def hid_send_encrypt(self, msg):
sha256_byte_len = 32
reply = ""
try:
encryption_key, authentication_key = derive_keys(self.password)
msg = EncodeAES_bytes(encryption_key, msg)
hmac_digest = hmac_oneshot(authentication_key, msg, hashlib.sha256)
authenticated_msg = base64.b64encode(msg + hmac_digest)
reply = self.hid_send_plain(authenticated_msg)
if 'ciphertext' in reply:
b64_unencoded = bytes(base64.b64decode(''.join(reply["ciphertext"])))
reply_hmac = b64_unencoded[-sha256_byte_len:]
hmac_calculated = hmac_oneshot(authentication_key, b64_unencoded[:-sha256_byte_len], hashlib.sha256)
if not hmac.compare_digest(reply_hmac, hmac_calculated):
raise Exception("Failed to validate HMAC")
reply = DecodeAES_bytes(encryption_key, b64_unencoded[:-sha256_byte_len])
reply = to_string(reply, 'utf8')
reply = json.loads(reply)
if 'error' in reply:
self.password = None
except Exception as e:
_logger.info(f'Exception caught {repr(e)}')
return reply
# ----------------------------------------------------------------------------------
#
#
class DigitalBitbox_KeyStore(Hardware_KeyStore):
hw_type = 'digitalbitbox'
device = 'DigitalBitbox'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
self.force_watching_only = False
self.maxInputs = 14 # maximum inputs per single sign command
def get_derivation(self):
return str(self.derivation)
def is_p2pkh(self):
return self.derivation.startswith("m/44'/")
def give_error(self, message, clear_client = False):
if clear_client:
self.client = None
raise Exception(message)
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))
def sign_message(self, sequence, message, password):
sig = None
try:
message = message.encode('utf8')
inputPath = self.get_derivation() + "/%d/%d" % sequence
msg_hash = sha256d(msg_magic(message))
inputHash = to_hexstr(msg_hash)
hasharray = []
hasharray.append({'hash': inputHash, 'keypath': inputPath})
hasharray = json.dumps(hasharray)
msg = ('{"sign":{"meta":"sign message", "data":%s}}' % hasharray).encode('utf8')
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception(_("Could not sign message."))
reply = dbb_client.hid_send_encrypt(msg)
self.handler.show_message(_("Signing message ...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.finished()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception(_("Could not sign message."))
if 'recid' in reply['sign'][0]:
# firmware > v2.1.1
sig_string = binascii.unhexlify(reply['sign'][0]['sig'])
recid = int(reply['sign'][0]['recid'], 16)
sig = ecc.construct_sig65(sig_string, recid, True)
pubkey, compressed = ecc.ECPubkey.from_signature65(sig, msg_hash)
addr = public_key_to_p2pkh(pubkey.get_public_key_bytes(compressed=compressed))
if ecc.verify_message_with_address(addr, sig, message) is False:
raise Exception(_("Could not sign message"))
elif 'pubkey' in reply['sign'][0]:
# firmware <= v2.1.1
for recid in range(4):
sig_string = binascii.unhexlify(reply['sign'][0]['sig'])
sig = ecc.construct_sig65(sig_string, recid, True)
try:
addr = public_key_to_p2pkh(binascii.unhexlify(reply['sign'][0]['pubkey']))
if ecc.verify_message_with_address(addr, sig, message):
break
except Exception:
continue
else:
raise Exception(_("Could not sign message"))
except BaseException as e:
self.give_error(e)
return sig
def sign_transaction(self, tx, password):
if tx.is_complete():
return
try:
p2pkhTransaction = True
derivations = self.get_tx_derivations(tx)
inputhasharray = []
hasharray = []
pubkeyarray = []
# Build hasharray from inputs
for i, txin in enumerate(tx.inputs()):
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] != 'p2pkh':
p2pkhTransaction = False
for x_pubkey in txin['x_pubkeys']:
if x_pubkey in derivations:
index = derivations.get(x_pubkey)
inputPath = "%s/%d/%d" % (self.get_derivation(), index[0], index[1])
inputHash = sha256d(binascii.unhexlify(tx.serialize_preimage(i)))
hasharray_i = {'hash': to_hexstr(inputHash), 'keypath': inputPath}
hasharray.append(hasharray_i)
inputhasharray.append(inputHash)
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
# Build pubkeyarray from outputs
for o in tx.outputs():
assert o.type == TYPE_ADDRESS
info = tx.output_info.get(o.address)
if info is not None:
index = info.address_index
changePath = self.get_derivation() + "/%d/%d" % index
changePubkey = self.derive_pubkey(index[0], index[1])
pubkeyarray_i = {'pubkey': changePubkey, 'keypath': changePath}
pubkeyarray.append(pubkeyarray_i)
# Special serialization of the unsigned transaction for
# the mobile verification app.
# At the moment, verification only works for p2pkh transactions.
if p2pkhTransaction:
class CustomTXSerialization(Transaction):
@classmethod
def input_script(self, txin, estimate_size=False):
if txin['type'] == 'p2pkh':
return Transaction.get_preimage_script(txin)
if txin['type'] == 'p2sh':
# Multisig verification has partial support, but is disabled. This is the
# expected serialization though, so we leave it here until we activate it.
return '00' + push_script(Transaction.get_preimage_script(txin))
raise Exception("unsupported type %s" % txin['type'])
tx_dbb_serialized = CustomTXSerialization(tx.serialize()).serialize_to_network()
else:
# We only need this for the signing echo / verification.
tx_dbb_serialized = None
# Build sign command
dbb_signatures = []
steps = math.ceil(1.0 * len(hasharray) / self.maxInputs)
for step in range(int(steps)):
hashes = hasharray[step * self.maxInputs : (step + 1) * self.maxInputs]
msg = {
"sign": {
"data": hashes,
"checkpub": pubkeyarray,
},
}
if tx_dbb_serialized is not None:
msg["sign"]["meta"] = to_hexstr(sha256d(tx_dbb_serialized))
msg = json.dumps(msg).encode('ascii')
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign transaction.")
reply = dbb_client.hid_send_encrypt(msg)
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'echo' not in reply:
raise Exception("Could not sign transaction.")
if self.plugin.is_mobile_paired() and tx_dbb_serialized is not None:
reply['tx'] = tx_dbb_serialized
self.plugin.comserver_post_notification(reply)
if steps > 1:
self.handler.show_message(_("Signing large transaction. Please be patient ...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + " " +
_("(Touch {} of {})").format((step + 1), steps) + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout.") + "\n\n")
else:
self.handler.show_message(_("Signing transaction...") + "\n\n" +
_("To continue, touch the Digital Bitbox's blinking light for 3 seconds.") + "\n\n" +
_("To cancel, briefly touch the blinking light or wait for the timeout."))
# Send twice, first returns an echo for smart verification
reply = dbb_client.hid_send_encrypt(msg)
self.handler.finished()
if 'error' in reply:
if reply["error"].get('code') in (600, 601):
# aborted via LED short touch or timeout
raise UserCancelled()
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign transaction.")
dbb_signatures.extend(reply['sign'])
# Fill signatures
if len(dbb_signatures) != len(tx.inputs()):
raise Exception("Incorrect number of transactions signed.") # Should never occur
for i, txin in enumerate(tx.inputs()):
num = txin['num_sig']
for pubkey in txin['pubkeys']:
signatures = list(filter(None, txin['signatures']))
if len(signatures) == num:
break # txin is complete
ii = txin['pubkeys'].index(pubkey)
signed = dbb_signatures[i]
if 'recid' in signed:
# firmware > v2.1.1
recid = int(signed['recid'], 16)
s = binascii.unhexlify(signed['sig'])
h = inputhasharray[i]
pk = ecc.ECPubkey.from_sig_string(s, recid, h)
pk = pk.get_public_key_hex(compressed=True)
elif 'pubkey' in signed:
# firmware <= v2.1.1
pk = signed['pubkey']
if pk != pubkey:
continue
sig_r = int(signed['sig'][:64], 16)
sig_s = int(signed['sig'][64:], 16)
sig = ecc.der_sig_from_r_and_s(sig_r, sig_s)
sig = to_hexstr(sig) + '01'
tx.add_signature_to_txin(i, ii, sig)
except UserCancelled:
raise
except BaseException as e:
self.give_error(e, True)
else:
            _logger.info(f"Transaction is_complete {tx.is_complete()}")
tx.raw = tx.serialize()
class DigitalBitboxPlugin(HW_PluginBase):
libraries_available = DIGIBOX
keystore_class = DigitalBitbox_KeyStore
client = None
DEVICE_IDS = [
(0x03eb, 0x2402) # Digital Bitbox
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
self.digitalbitbox_config = self.config.get('digitalbitbox', {})
def get_dbb_device(self, device):
dev = hid.device()
dev.open_path(device.path)
return dev
def create_client(self, device, handler):
if device.interface_number == 0 or device.usage_page == 0xffff:
if handler:
self.handler = handler
client = self.get_dbb_device(device)
if client is not None:
client = DigitalBitbox_Client(self, client)
return client
else:
return None
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
if purpose == HWD_SETUP_NEW_WALLET:
client.setupRunning = True
client.get_xpub("m/44'/0'", 'standard')
def is_mobile_paired(self):
return ENCRYPTION_PRIVKEY_KEY in self.digitalbitbox_config
def comserver_post_notification(self, payload):
assert self.is_mobile_paired(), "unexpected mobile pairing error"
url = 'https://digitalbitbox.com/smartverification/index.php'
key_s = base64.b64decode(self.digitalbitbox_config[ENCRYPTION_PRIVKEY_KEY])
args = 'c=data&s=0&dt=0&uuid=%s&pl=%s' % (
self.digitalbitbox_config[CHANNEL_ID_KEY],
EncodeAES_base64(key_s, json.dumps(payload).encode('ascii')).decode('ascii'),
)
try:
text = Network.send_http_on_proxy('post', url, body=args.encode('ascii'), headers={'content-type': 'application/x-www-form-urlencoded'})
_logger.info(f'digitalbitbox reply from server {text}')
except Exception as e:
self.handler.show_error(repr(e)) # repr because str(Exception()) == ''
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.check_device_dialog()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
if client is not None:
client.check_device_dialog()
return client
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
if not self.is_mobile_paired():
keystore.handler.show_error(_('This function is only available after pairing your {} with a mobile device.').format(self.device))
return
if not keystore.is_p2pkh():
keystore.handler.show_error(_('This function is only available for p2pkh keystores when using {}.').format(self.device))
return
change, index = wallet.get_address_index(address)
keypath = '%s/%d/%d' % (keystore.derivation, change, index)
xpub = self.get_client(keystore)._get_xpub(keypath)
verify_request_payload = {
"type": 'p2pkh',
"echo": xpub['echo'],
}
self.comserver_post_notification(verify_request_payload)
| neocogent/electrum | electrum/plugins/digitalbitbox/digitalbitbox.py | Python | mit | 32,370 | 0.004541 |
#Written by Timothy Seabrook
#timothy.seabrook@cs.ox.ac.uk
#This script is used to split the LOLA_DEM South Pole Large Tiles into smaller tiles for ingestion.
import glob, os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from osgeo import gdal
full_size = [30400, 30400]
p_size = [3800, 3800]
cut_size = [32,32]
stride = np.divide(cut_size, 2)
thisDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.join(thisDir, os.pardir, os.pardir)
dataDir = os.path.join(rootDir, 'Data')
DEMDir = os.path.join(dataDir, 'LOLA_DEM', 'South_Pole')
DEMLargeDir = os.path.join(DEMDir, 'Large_Tiles')
DEMSmallDir = os.path.join(DEMDir, 'Small_Tiles')
base_filename = "hs-45-45_lola20sp"
#https://stackoverflow.com/questions/273946/how-do-i-resize-an-image-using-pil-and-maintain-its-aspect-ratio
v_pieces = np.floor_divide(full_size[0], p_size[0]) #Number of vertical divisions for large tiles
h_pieces = np.floor_divide(full_size[1], p_size[1]) #Number of horizontal divisions for large tiles
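# With the sizes above this yields an 8 x 8 grid of 3800 px large tiles
# (30400 / 3800 = 8), each of which is cut into overlapping 32 px windows on a
# 16 px stride.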
for n in (25,39,43,57,64):
if not os.path.isdir(os.path.join(DEMSmallDir,'P'+str(n+1),'')):
os.mkdir(os.path.join(DEMSmallDir,'P'+str(n+1),''))
curr_filename = os.path.join(DEMLargeDir,base_filename+'_p'+str(n+1)+'.tif')
ds = gdal.Open(curr_filename)
im = np.array(ds.GetRasterBand(1).ReadAsArray())
width = im.shape[1]
height = im.shape[0]
y_ind, x_ind = np.floor_divide(n, v_pieces), np.mod(n, v_pieces)
y_pos, x_pos = [0] * 2, [0] * 2
y_pos[0], x_pos[0] = np.multiply(p_size[0], y_ind), np.multiply(p_size[1], x_ind)
y_pos[1], x_pos[1] = y_pos[0] + p_size[0], x_pos[0] + p_size[1]
h_cuts = np.floor_divide(p_size[1], stride[1]) - (np.floor_divide(cut_size[1], stride[1])) + 1
v_cuts = np.floor_divide(p_size[0], stride[0]) - (np.floor_divide(cut_size[0], stride[0])) + 1
    # The formulas below are what was used to generate the tiles found in this
    # GitHub repository; however, they are incorrect. The correct formulas are
    # given above. Once the provided data has been regenerated, the formulas
    # below will be replaced.
w_cuts = np.multiply(np.floor_divide(width, cut_size[1]), np.divide(cut_size[1], stride[1]))
h_cuts = np.multiply(np.floor_divide(height, cut_size[0]), np.divide(cut_size[0], stride[0]))
for i in range(w_cuts+1):
for j in range(h_cuts+1):
x_off = np.multiply(i, stride[1])
y_off = np.multiply(j, stride[0])
#image = np.asarray(im)
image = im[y_off:y_off+cut_size[0], x_off:x_off+cut_size[1]]
ind = (i*w_cuts + j)
#x = i*cut_size[1]+x_pos[0]
#y = j*cut_size[0]+y_pos[0]
#filename = os.path.join(DEMSmallDir,'P'+str(n+1),base_filename+'_x'+str(x)+'_y'+str(y))
# Once existing data names have been refactored, the below filename will be replaced with the above.
filename = os.path.join(DEMSmallDir,'P'+str(n+1),base_filename+'_cut'+str(ind))
im2 = Image.fromarray(image)
im2.save(filename + '.tif')
| Arcanewinds/FDL-LunarResources | DataPreparation/LOLA_DEM/img_split.py | Python | gpl-3.0 | 3,075 | 0.014634 |
#!/usr/bin/env python
"""This utility installs an engage extension into a deployment home.
"""
import os
import os.path
import sys
from optparse import OptionParser
import shutil
import re
import logging
logger = logging.getLogger(__name__)
# enable importing from the python_pkg sub-directory
base_src_dir=os.path.abspath(os.path.dirname(__file__))
python_pkg_dir = os.path.join(base_src_dir, "python_pkg")
assert os.path.exists(python_pkg_dir), "Python package directory %s does not exist" % python_pkg_dir
sys.path.append(python_pkg_dir)
from engage.extensions import installed_extensions, extension_versions
dist_root = os.path.abspath(os.path.dirname(__file__))
dist_root_parent = os.path.abspath(os.path.join(dist_root, ".."))
class EngageExtension(object):
def __init__(self, path, name, version, update):
self.path = path
self.name = name
self.version = version
self.update = update
def _copy_dir(self, src_dirname, target, dry_run=False):
src_dir = os.path.join(self.path, src_dirname)
dest_dir = os.path.join(os.path.join(target, src_dirname),
self.name)
if os.path.exists(src_dir):
logger.info("Copying %s to %s" % (src_dirname, dest_dir))
if os.path.exists(dest_dir):
if self.update:
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
else:
raise Exception("Target directory %s already exists" % dest_dir)
if not dry_run:
shutil.copytree(src_dir, dest_dir)
elif self.update and os.path.exists(dest_dir):
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
def install(self, dist_root, dry_run=False):
if not dry_run:
logger.info("Running install of %s to %s" % (self.name, dist_root))
else:
logger.info("Dry run install of %s to %s" % (self.name, dist_root))
self._copy_dir("metadata", dist_root, dry_run=dry_run)
dest_engage_pkg_dir = os.path.join(os.path.join(dist_root, "python_pkg"),
"engage")
self._copy_dir("drivers", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("tests", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("mgt_backends", dest_engage_pkg_dir, dry_run=dry_run)
# For the software packages we copy the individual files to the main package
# cache.
src_cache_dir = os.path.join(self.path, "sw_packages")
dest_cache_dir = os.path.join(dist_root, "sw_packages")
if os.path.exists(src_cache_dir):
logger.info("Copying software packages from %s to %s" %
(src_cache_dir, dest_cache_dir))
for fname in os.listdir(src_cache_dir):
src_file = os.path.join(src_cache_dir, fname)
dest_file = os.path.join(dest_cache_dir, fname)
logger.debug("Copying %s to %s" % (fname, dest_file))
shutil.copyfile(src_file, dest_file)
# update the extension file
if self.name not in installed_extensions:
installed_extensions.append(self.name)
extension_versions[self.name] = self.version
extns_file = os.path.join(dest_engage_pkg_dir, "extensions.py")
logger.info("Updating extensions file %s" % extns_file)
with open(extns_file, "rb") as ef:
lines = ef.read().split("\n")
updated_list = False
updated_versions = False
if not dry_run:
with open(extns_file, "wb") as ef:
for line in lines:
if re.match("^installed_extensions = ", line):
ef.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
ef.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
ef.write(line + "\n")
else:
for line in lines:
if re.match("^installed_extensions = ", line):
sys.stdout.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
sys.stdout.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
sys.stdout.write(line + "\n")
if ((not updated_list) or (not updated_versions)):
raise Exception("Extension registration file %s did not have correct format, unable to complete update" % extns_file)
logger.info("Successfully installed extension %s" % self.name)
def process_args(argv):
usage = "usage: %prog [options] path_to_extension"
parser = OptionParser(usage=usage)
parser.add_option("--dry-run", action="store_true",
help="If specified, don't make changes, just log what would be done",
default=False)
parser.add_option("--update", "-u", action="store_true",
help="If specified, override any existing version of the extension",
default=False)
(options, args) = parser.parse_args(args=argv)
if len(args)==0:
parser.print_help()
sys.exit(0)
elif len(args) > 1:
parser.error("Expecting exactly one argument, path to extension directory")
extension_path = os.path.abspath(args[0])
if not os.path.exists(extension_path):
parser.error("Extension directory %s does not exist" % extension_path)
extension_name = os.path.basename(extension_path)
if os.path.basename(dist_root_parent)=="src":
parser.error("Cannot install extension into source tree %s, run from distribution tree" % dist_root)
if extension_name in installed_extensions and not options.update:
parser.error("Extension %s already installed" % extension_name)
version_file = os.path.join(extension_path, "version.txt")
if not os.path.exists(version_file):
parser.error("Missing version file %s" % version_file)
with open(version_file, "rb") as vf:
extension_version = vf.read().rstrip()
ext = EngageExtension(extension_path, extension_name,
extension_version, options.update)
return (ext, options)
def main(argv=sys.argv[1:]):
(ext, opts) = process_args(argv)
ext.install(dist_root, dry_run=opts.dry_run)
return 0
if __name__ == "__main__":
#formatter = logging.Formatter("[%(levelname)s][%(name)s] %(message)s")
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
#console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
sys.exit(main())
| quaddra/engage | install_extension.py | Python | apache-2.0 | 7,602 | 0.002894 |