| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6 to 947k | stringlengths 5 to 100 | stringlengths 4 to 231 | stringclasses (1 value) | stringclasses (15 values) | int64 6 to 947k | float64 0 to 0.34 |
from spectre.syntax import *
class SMU2P(Netlist):
__name__ = "SMU2P"
__type__ = "netlist"
def __init__(self, name='SMU2P', nodes=('1', '2'), V1=0, V2=0):
Netlist.__init__(self)
self.name = name
self.nodes = nodes
self.V1 = V1; self.V2 = V2
save = dict(V1=nodes[0], V2=nodes[1])
self.append( Vsource(name='V1', nodes=(nodes[0], '0'), dc=V1, type='dc') )
save['I1'] = 'V1:2'
self.append( Vsource(name='V2', nodes=(nodes[1], '0'), dc=V2, type='dc') )
save['I2'] = 'V2:2'
self.append( Save(**save) )
class SMU4P(Netlist):
__name__ = "SMU4P"
__type__ = "netlist"
def __init__(self, name='SMU4P', nodes=('1', '2', '3', '4'), V1=0, V2=0, V3=0, V4=0):
Netlist.__init__(self)
self.name = name
self.nodes = nodes
self.V1 = V1; self.V2 = V2; self.V3 = V3; self.V4 = V4
self.append( Vsource(name='V1', nodes=(nodes[0], '0'), dc=V1, type='dc') )
self.append( Vsource(name='V2', nodes=(nodes[1], '0'), dc=V2, type='dc') )
self.append( Vsource(name='V3', nodes=(nodes[2], '0'), dc=V3, type='dc') )
self.append( Vsource(name='V4', nodes=(nodes[3], '0'), dc=V4, type='dc'))
self.append( Save(I1='V1:2', I2="V2:2", I3="V3:2", I4="V4:2", V1=nodes[0], V2=nodes[1], V3=nodes[2], V4=nodes[3]) )
class SMU5P(Netlist):
__name__ = "SMU5P"
__type__ = "netlist"
def __init__(self, name='SMU5P', nodes=('1', '2', '3', '4', '5'), V1=0, V2=0, V3=0, V4=0, V5=0):
Netlist.__init__(self)
self.name = name
self.nodes = nodes
self.V1 = V1; self.V2 = V2; self.V3 = V3; self.V4 = V4; self.V5 = V5
self.append( Vsource(name='V1', nodes=(nodes[0], '0'), dc=V1, type='dc') )
self.append( Vsource(name='V2', nodes=(nodes[1], '0'), dc=V2, type='dc') )
self.append( Vsource(name='V3', nodes=(nodes[2], '0'), dc=V3, type='dc') )
self.append( Vsource(name='V4', nodes=(nodes[3], '0'), dc=V4, type='dc'))
self.append( Vsource(name='V5', nodes=(nodes[4], '0'), dc=V5, type='dc'))
self.append( Save(I1='V1:2', I2="V2:2", I3="V3:2", I4="V4:2", I5="V5:2", V1=nodes[0], V2=nodes[1], V3=nodes[2], V4=nodes[3], V5=nodes[4]) )
class SMU3P(Netlist):
__name__ = "SMU3P"
__type__ = "netlist"
def __init__(self, name='SMU3P', nodes=('1', '2', '3'), V1=0, V2=0, V3=0, I1=None, I2=None, I3=None):
Netlist.__init__(self)
self.name = name
self.nodes = nodes
self.V1 = V1; self.V2 = V2; self.V3 = V3
save = { 'V1':nodes[0], 'V2':nodes[1], 'V3':nodes[2] }
if I1 is None:
self.append( Vsource(name='V1', nodes=(nodes[0], '0'), dc=V1, type='dc') )
save['I1'] = 'V1:2'
else:
self.append( Isource(name='I1', nodes=(nodes[0], '0'), dc=I1, type='dc') )
save['I1'] = 'I1:2'
if I2 is None:
self.append( Vsource(name='V2', nodes=(nodes[1], '0'), dc=V2, type='dc') )
save['I2'] = 'V2:2'
else:
self.append( Isource(name='I2', nodes=(nodes[1], '0'), dc=I2, type='dc') )
save['I2'] = 'I2:2'
if I3 is None:
self.append( Vsource(name='V3', nodes=(nodes[2], '0'), dc=V3, type='dc') )
save['I3'] = 'V3:2'
else:
self.append( Isource(name='I3', nodes=(nodes[2], '0'), dc=I3, type='dc') )
save['I3'] = 'I3:2'
self.append( Save(**save) )
class SMU(Netlist):
__name__ = "SMU"
__type__ = "netlist"
def __init__(self, name='SMU', nodes=(), **parameters):
Netlist.__init__(self)
self.name = name
self.nodes = nodes
save = {}
for node in nodes:
if 'V{node}'.format(node=node) in parameters:
dc = parameters['V{node}'.format(node=node)]
self.append( Vsource(name='V{node}'.format(node=node), nodes=(node, '0'), dc=dc, type='dc') )
save['I{node}'.format(node=node)] = 'V{node}:2'.format(node=node)
save['V{node}'.format(node=node)] = node
elif 'I{node}'.format(node=node) in parameters:
dc = parameters['I{node}'.format(node=node)]
self.append( Isource(name='I{node}'.format(node=node), nodes=(node, '0'), dc=dc, type='dc') )
save['I{node}'.format(node=node)] = 'I{node}:2'.format(node=node)
save['V{node}'.format(node=node)] = node
else:
self.append( Vsource(name='V{node}'.format(node=node), nodes=(node, '0'), dc=0, type='dc') )
save['I{node}'.format(node=node)] = 'V{node}:2'.format(node=node)
save['V{node}'.format(node=node)] = node
if len(save):
self.append( Save(**save) )
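A minimal usage sketch, assuming the classes above are importable from spectre.syntax; the instance names, node labels and bias values below are hypothetical:
# Hypothetical example: a 3-pin SMU with two voltage-forced pins and one current-forced pin.
bias3 = SMU3P(name='bias3', nodes=('d', 'g', 's'), V1=1.2, V2=0.8, I3=1e-6)
# The generic SMU class picks a Vsource or Isource per node from matching keyword names.
biasN = SMU(name='biasN', nodes=('1', '2'), V1=0.5, I2=1e-3)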
| raphaelvalentin/Utils | spectre/syntax/smu.py | Python | gpl-2.0 | 4,809 | 0.021418 |
# -*- encoding: utf-8 -*-
# from django.test import TestCase
# from block.tests.helper import check_content
# from compose.tests.factories import HeaderFactory
# class TestHeader(TestCase):
#
# def test_content_methods(self):
# c = HeaderFactory()
# check_content(c)
| pkimber/compose | compose/tests/test_header.py | Python | apache-2.0 | 291 | 0 |
# -*- coding: utf-8 -*-
import json
import urllib2
def get_authkey(zabbix_server, zabbix_user, zabbix_pass, head):
url = "http://" + zabbix_server + "/zabbix/api_jsonrpc.php"
pdata = json.dumps({"jsonrpc" : "2.0",
"method" : "user.login",
"params" : {
"user" : zabbix_user,
"password" : zabbix_pass},
"auth" : None,
"id" : 1})
result = urllib2.urlopen(urllib2.Request(url, pdata, head)).read()
try:
return json.loads(result)['result']
except (KeyError, ValueError):
return 1
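A minimal call sketch, assuming a reachable Zabbix frontend; the host name, credentials and header value below are placeholders:
# Hypothetical usage of get_authkey; the Zabbix API expects a JSON-RPC Content-Type header.
if __name__ == '__main__':
    headers = {"Content-Type": "application/json-rpc"}
    key = get_authkey("zabbix.example.com", "Admin", "zabbix", headers)
    print(key)  # auth token string on success, 1 on failure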
| Shadow5523/zabbix_api | lib/auth.py | Python | gpl-2.0 | 669 | 0.014948 |
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
class CarControllerParams:
def __init__(self, CP):
if CP.carFingerprint == CAR.IMPREZA_2020:
self.STEER_MAX = 1439
else:
self.STEER_MAX = 2047
self.STEER_STEP = 2 # how often we update the steer cmd
self.STEER_DELTA_UP = 50 # torque increase per refresh, 0.8s to max
self.STEER_DELTA_DOWN = 70 # torque decrease per refresh
self.STEER_DRIVER_ALLOWANCE = 60 # allowed driver torque before start limiting
self.STEER_DRIVER_MULTIPLIER = 10 # weight driver torque heavily
self.STEER_DRIVER_FACTOR = 1 # from dbc
class CAR:
ASCENT = "SUBARU ASCENT LIMITED 2019"
IMPREZA = "SUBARU IMPREZA LIMITED 2019"
IMPREZA_2020 = "SUBARU IMPREZA SPORT 2020"
FORESTER = "SUBARU FORESTER 2019"
FORESTER_PREGLOBAL = "SUBARU FORESTER 2017 - 2018"
LEGACY_PREGLOBAL = "SUBARU LEGACY 2015 - 2018"
OUTBACK_PREGLOBAL = "SUBARU OUTBACK 2015 - 2017"
OUTBACK_PREGLOBAL_2018 = "SUBARU OUTBACK 2018 - 2019"
FINGERPRINTS = {
CAR.IMPREZA_2020: [{
2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 372: 8, 544: 8, 545: 8, 546: 8, 552: 8, 554: 8, 557: 8, 576: 8, 577: 8, 722: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 1617: 8, 1632: 8, 1650: 8, 1677: 8, 1697: 8, 1722: 8, 1743: 8, 1759: 8, 1786: 5, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8, 1968: 8, 1976: 8, 2015: 8, 2016: 8, 2024: 8
},
{
2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 544: 8, 545: 8, 546: 8, 554: 8, 557: 8, 576: 8, 577: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 1614: 8, 1617: 8, 1632: 8, 1657: 8, 1658: 8, 1677: 8, 1697: 8, 1743: 8, 1759: 8, 1786: 5, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8
}],
CAR.FORESTER: [{
2: 8, 64: 8, 65: 8, 72: 8, 73: 8, 280: 8, 281: 8, 282: 8, 290: 8, 312: 8, 313: 8, 314: 8, 315: 8, 316: 8, 326: 8, 372: 8, 544: 8, 545: 8, 546: 8, 552: 8, 554: 8, 557: 8, 576: 8, 577: 8, 722: 8, 801: 8, 802: 8, 803: 8, 805: 8, 808: 8, 811: 8, 816: 8, 826: 8, 837: 8, 838: 8, 839: 8, 842: 8, 912: 8, 915: 8, 940: 8, 961: 8, 984: 8, 1614: 8, 1617: 8, 1632: 8, 1650: 8, 1651: 8, 1657: 8, 1658: 8, 1677: 8, 1697: 8, 1698: 8, 1722: 8, 1743: 8, 1759: 8, 1787: 5, 1788: 8, 1809: 8, 1813: 8, 1817: 8, 1821: 8, 1840: 8, 1848: 8, 1924: 8, 1932: 8, 1952: 8, 1960: 8
}],
}
FW_VERSIONS = {
CAR.ASCENT: {
(Ecu.esp, 0x7b0, None): [
b'\xa5 \x19\x02\x00',
b'\xa5 !\002\000',
b'\xf1\x82\xa5 \x19\x02\x00',
],
(Ecu.eps, 0x746, None): [
b'\x85\xc0\xd0\x00',
b'\005\xc0\xd0\000',
b'\x95\xc0\xd0\x00',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00d\xb9\x1f@ \x10',
b'\000\000e~\037@ \'',
b'\x00\x00e@\x1f@ $',
b'\x00\x00d\xb9\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xbb,\xa0t\a',
b'\xf1\x82\xbb,\xa0t\x87',
b'\xf1\x82\xbb,\xa0t\a',
b'\xf1\x82\xd9,\xa0@\a',
b'\xf1\x82\xd1,\xa0q\x07',
],
(Ecu.transmission, 0x7e1, None): [
b'\x00\xfe\xf7\x00\x00',
b'\001\xfe\xf9\000\000',
b'\x01\xfe\xf7\x00\x00',
b'\xf1\x00\xa4\x10@',
],
},
CAR.IMPREZA: {
(Ecu.esp, 0x7b0, None): [
b'\x7a\x94\x3f\x90\x00',
b'\xa2 \x185\x00',
b'\xa2 \x193\x00',
b'z\x94.\x90\x00',
b'z\x94\b\x90\x01',
b'\xa2 \x19`\x00',
b'z\x94\f\x90\001',
b'z\x9c\x19\x80\x01',
b'z\x94\x08\x90\x00',
b'z\x84\x19\x90\x00',
],
(Ecu.eps, 0x746, None): [
b'\x7a\xc0\x0c\x00',
b'z\xc0\b\x00',
b'\x8a\xc0\x00\x00',
b'z\xc0\x04\x00',
b'z\xc0\x00\x00',
b'\x8a\xc0\x10\x00',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00\x64\xb5\x1f\x40\x20\x0e',
b'\x00\x00d\xdc\x1f@ \x0e',
b'\x00\x00e\x1c\x1f@ \x14',
b'\x00\x00d)\x1f@ \a',
b'\x00\x00e+\x1f@ \x14',
b'\000\000e+\000\000\000\000',
b'\000\000dd\037@ \016',
b'\000\000e\002\037@ \024',
b'\x00\x00d)\x00\x00\x00\x00',
b'\x00\x00c\xf4\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xaa\x61\x66\x73\x07',
b'\xbeacr\a',
b'\xc5!`r\a',
b'\xaa!ds\a',
b'\xaa!`u\a',
b'\xaa!dq\a',
b'\xaa!dt\a',
b'\xf1\x00\xa2\x10\t',
b'\xc5!ar\a',
b'\xbe!as\a',
b'\xc5!ds\a',
b'\xc5!`s\a',
b'\xaa!au\a',
b'\xbe!at\a',
b'\xaa\x00Bu\x07',
b'\xc5!dr\x07',
b'\xaa!aw\x07',
],
(Ecu.transmission, 0x7e1, None): [
b'\xe3\xe5\x46\x31\x00',
b'\xe4\xe5\x061\x00',
b'\xe5\xf5\x04\x00\x00',
b'\xe3\xf5G\x00\x00',
b'\xe3\xf5\a\x00\x00',
b'\xe3\xf5C\x00\x00',
b'\xe5\xf5B\x00\x00',
b'\xe5\xf5$\000\000',
b'\xe4\xf5\a\000\000',
b'\xe3\xf5F\000\000',
b'\xe4\xf5\002\000\000',
b'\xe3\xd0\x081\x00',
b'\xe3\xf5\x06\x00\x00',
b'\xf1\x00\xa4\x10@',
],
},
CAR.IMPREZA_2020: {
(Ecu.esp, 0x7b0, None): [
b'\xa2 \0314\000',
b'\xa2 \0313\000',
b'\xa2 !i\000',
b'\xa2 !`\000',
],
(Ecu.eps, 0x746, None): [
b'\x9a\xc0\000\000',
b'\n\xc0\004\000',
],
(Ecu.fwdCamera, 0x787, None): [
b'\000\000eb\037@ \"',
b'\000\000e\x8f\037@ )',
],
(Ecu.engine, 0x7e0, None): [
b'\xca!ap\a',
b'\xca!`p\a',
b'\xca!`0\a',
b'\xcc\"f0\a',
b'\xcc!fp\a',
],
(Ecu.transmission, 0x7e1, None): [
b'\xe6\xf5\004\000\000',
b'\xe6\xf5$\000\000',
b'\xe7\xf6B0\000',
b'\xe7\xf5D0\000',
],
},
CAR.FORESTER: {
(Ecu.esp, 0x7b0, None): [
b'\xa3 \030\024\000',
b'\xa3 \024\000',
b'\xa3 \031\024\000',
b'\xa3 \024\001',
],
(Ecu.eps, 0x746, None): [
b'\x8d\xc0\004\000',
],
(Ecu.fwdCamera, 0x787, None): [
b'\000\000e!\037@ \021',
b'\000\000e\x97\037@ 0',
b'\000\000e`\037@ ',
b'\xf1\x00\xac\x02\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xb6\"`A\a',
b'\xcf"`0\a',
b'\xcb\"`@\a',
b'\xcb\"`p\a',
b'\xf1\x00\xa2\x10\n',
],
(Ecu.transmission, 0x7e1, None): [
b'\032\xf6B0\000',
b'\032\xf6F`\000',
b'\032\xf6b`\000',
b'\032\xf6B`\000',
b'\xf1\x00\xa4\x10@',
],
},
CAR.FORESTER_PREGLOBAL: {
(Ecu.esp, 0x7b0, None): [
b'\x7d\x97\x14\x40',
b'\xf1\x00\xbb\x0c\x04',
],
(Ecu.eps, 0x746, None): [
b'}\xc0\x10\x00',
b'm\xc0\x10\x00',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00\x64\x35\x1f\x40\x20\x09',
b'\x00\x00c\xe9\x1f@ \x03',
b'\x00\x00d\xd3\x1f@ \t'
],
(Ecu.engine, 0x7e0, None): [
b'\xba"@p\a',
b'\xa7)\xa0q\a',
b'\xf1\x82\xa7)\xa0q\a',
b'\xba"@@\a',
],
(Ecu.transmission, 0x7e1, None): [
b'\xdc\xf2\x60\x60\x00',
b'\xdc\xf2@`\x00',
b'\xda\xfd\xe0\x80\x00',
b'\xdc\xf2`\x81\000',
b'\xdc\xf2`\x80\x00',
],
},
CAR.LEGACY_PREGLOBAL: {
(Ecu.esp, 0x7b0, None): [
b'k\x97D\x00',
b'[\xba\xc4\x03',
b'{\x97D\x00',
b'[\x97D\000',
],
(Ecu.eps, 0x746, None): [
b'[\xb0\x00\x01',
b'K\xb0\x00\x01',
b'k\xb0\x00\x00',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00c\xb7\x1f@\x10\x16',
b'\x00\x00c\x94\x1f@\x10\x08',
b'\x00\x00c\xec\x1f@ \x04',
],
(Ecu.engine, 0x7e0, None): [
b'\xab*@r\a',
b'\xa0+@p\x07',
b'\xb4"@0\x07',
b'\xa0"@q\a',
],
(Ecu.transmission, 0x7e1, None): [
b'\xbe\xf2\x00p\x00',
b'\xbf\xfb\xc0\x80\x00',
b'\xbd\xf2\x00`\x00',
b'\xbf\xf2\000\x80\000',
],
},
CAR.OUTBACK_PREGLOBAL: {
(Ecu.esp, 0x7b0, None): [
b'{\x9a\xac\x00',
b'k\x97\xac\x00',
b'\x5b\xf7\xbc\x03',
b'[\xf7\xac\x03',
b'{\x97\xac\x00',
b'k\x9a\xac\000',
b'[\xba\xac\x03',
b'[\xf7\xac\000',
],
(Ecu.eps, 0x746, None): [
b'k\xb0\x00\x00',
b'[\xb0\x00\x00',
b'\x4b\xb0\x00\x02',
b'K\xb0\x00\x00',
b'{\xb0\x00\x01',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00c\xec\x1f@ \x04',
b'\x00\x00c\xd1\x1f@\x10\x17',
b'\xf1\x00\xf0\xe0\x0e',
b'\x00\x00c\x94\x00\x00\x00\x00',
b'\x00\x00c\x94\x1f@\x10\b',
b'\x00\x00c\xb7\x1f@\x10\x16',
b'\000\000c\x90\037@\020\016',
b'\x00\x00c\xec\x37@\x04',
],
(Ecu.engine, 0x7e0, None): [
b'\xb4+@p\a',
b'\xab\"@@\a',
b'\xa0\x62\x41\x71\x07',
b'\xa0*@q\a',
b'\xab*@@\a',
b'\xb4"@0\a',
b'\xb4"@p\a',
b'\xab"@s\a',
b'\xab+@@\a',
b'\xb4"@r\a',
b'\xa0+@@\x07',
b'\xa0\"@\x80\a',
],
(Ecu.transmission, 0x7e1, None): [
b'\xbd\xfb\xe0\x80\x00',
b'\xbe\xf2@\x80\x00',
b'\xbf\xe2\x40\x80\x00',
b'\xbf\xf2@\x80\x00',
b'\xbe\xf2@p\x00',
b'\xbd\xf2@`\x00',
b'\xbd\xf2@\x81\000',
b'\xbe\xfb\xe0p\000',
b'\xbf\xfb\xe0b\x00',
],
},
CAR.OUTBACK_PREGLOBAL_2018: {
(Ecu.esp, 0x7b0, None): [
b'\x8b\x97\xac\x00',
b'\x8b\x9a\xac\x00',
b'\x9b\x97\xac\x00',
b'\x8b\x97\xbc\x00',
b'\x8b\x99\xac\x00',
b'\x9b\x9a\xac\000',
b'\x9b\x97\xbe\x10',
],
(Ecu.eps, 0x746, None): [
b'{\xb0\x00\x00',
b'{\xb0\x00\x01',
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00df\x1f@ \n',
b'\x00\x00d\xfe\x1f@ \x15',
b'\x00\x00d\x95\x00\x00\x00\x00',
b'\x00\x00d\x95\x1f@ \x0f',
b'\x00\x00d\xfe\x00\x00\x00\x00',
b'\x00\x00e\x19\x1f@ \x15',
],
(Ecu.engine, 0x7e0, None): [
b'\xb5"@p\a',
b'\xb5+@@\a',
b'\xb5"@P\a',
b'\xc4"@0\a',
b'\xb5b@1\x07',
b'\xb5q\xe0@\a',
b'\xc4+@0\a',
b'\xc4b@p\a',
],
(Ecu.transmission, 0x7e1, None): [
b'\xbc\xf2@\x81\x00',
b'\xbc\xfb\xe0\x80\x00',
b'\xbc\xf2@\x80\x00',
b'\xbb\xf2@`\x00',
b'\xbc\xe2@\x80\x00',
b'\xbc\xfb\xe0`\x00',
b'\xbc\xaf\xe0`\x00',
b'\xbb\xfb\xe0`\000',
],
},
}
STEER_THRESHOLD = {
CAR.ASCENT: 80,
CAR.IMPREZA: 80,
CAR.IMPREZA_2020: 80,
CAR.FORESTER: 80,
CAR.FORESTER_PREGLOBAL: 75,
CAR.LEGACY_PREGLOBAL: 75,
CAR.OUTBACK_PREGLOBAL: 75,
CAR.OUTBACK_PREGLOBAL_2018: 75,
}
DBC = {
CAR.ASCENT: dbc_dict('subaru_global_2017_generated', None),
CAR.IMPREZA: dbc_dict('subaru_global_2017_generated', None),
CAR.IMPREZA_2020: dbc_dict('subaru_global_2017_generated', None),
CAR.FORESTER: dbc_dict('subaru_global_2017_generated', None),
CAR.FORESTER_PREGLOBAL: dbc_dict('subaru_forester_2017_generated', None),
CAR.LEGACY_PREGLOBAL: dbc_dict('subaru_outback_2015_generated', None),
CAR.OUTBACK_PREGLOBAL: dbc_dict('subaru_outback_2015_generated', None),
CAR.OUTBACK_PREGLOBAL_2018: dbc_dict('subaru_outback_2019_generated', None),
}
PREGLOBAL_CARS = [CAR.FORESTER_PREGLOBAL, CAR.LEGACY_PREGLOBAL, CAR.OUTBACK_PREGLOBAL, CAR.OUTBACK_PREGLOBAL_2018]
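A small lookup sketch against the tables above; the chosen fingerprint is just an example:
# Hypothetical lookup of per-car parameters from the dictionaries defined above.
candidate = CAR.IMPREZA_2020
print(DBC[candidate])                # DBC mapping built by dbc_dict for the global platform
print(STEER_THRESHOLD[candidate])    # 80
print(candidate in PREGLOBAL_CARS)   # False; the 2020 Impreza is a global-platform car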
| commaai/openpilot | selfdrive/car/subaru/values.py | Python | mit | 11,405 | 0.001841 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
import logging
import re
from django.core.urlresolvers import reverse_lazy # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabView):
tab_group_class = (FirewallTabs)
template_name = 'project/firewalls/details_tabs.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
obj_type = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if obj_type == 'rule':
for obj_id in obj_ids:
try:
api.fwaas.rule_delete(request, obj_id)
messages.success(request, 'Deleted rule %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete rule. %s' % e))
if obj_type == 'policy':
for obj_id in obj_ids:
try:
api.fwaas.policy_delete(request, obj_id)
messages.success(request, 'Deleted policy %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete policy. %s' % e))
if obj_type == 'firewall':
for obj_id in obj_ids:
try:
api.fwaas.firewall_delete(request, obj_id)
messages.success(request, 'Deleted firewall %s' % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete firewall. %s' % e))
return self.get(request, *args, **kwargs)
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
class RuleDetailsView(tabs.TabView):
tab_group_class = (RuleDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class PolicyDetailsView(tabs.TabView):
tab_group_class = (PolicyDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class FirewallDetailsView(tabs.TabView):
tab_group_class = (FirewallDetailsTabs)
template_name = 'project/firewalls/details_tabs.html'
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
rule_id = self.kwargs['rule_id']
try:
self._object = api.fwaas.rule_get(self.request, rule_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
firewall_id = self.kwargs['firewall_id']
try:
self._object = api.fwaas.firewall_get(self.request,
firewall_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
policy_id = self.kwargs['policy_id']
try:
self._object = api.fwaas.policy_get(self.request, policy_id)
self._object.set_id_as_name_if_empty()
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
| neumerance/deploy | openstack_dashboard/dashboards/project/firewalls/views.py | Python | apache-2.0 | 10,365 | 0 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/textobject.py
__version__=''' $Id$ '''
__doc__="""
PDFTextObject is an efficient way to add text to a Canvas. Do not
instantiate directly, obtain one from the Canvas instead.
Progress Reports:
8.83, 2000-01-13, gmcm:
created from pdfgen.py
"""
import string
from types import *
from reportlab.lib import colors
from reportlab.lib.colors import ColorType
from reportlab.lib.utils import fp_str
from reportlab.pdfbase import pdfmetrics
_SeqTypes=(TupleType,ListType)
class PDFTextObject:
"""PDF logically separates text and graphics drawing; text
operations need to be bracketed between BT (Begin text) and
ET operators. This class ensures text operations are
properly encapsulated. Ask the canvas for a text object
with beginText(x, y). Do not construct one directly.
Do not use multiple text objects in parallel; PDF is
not multi-threaded!
It keeps track of x and y coordinates relative to its origin."""
def __init__(self, canvas, x=0,y=0):
self._code = ['BT'] #no point in [] then append RGB
self._canvas = canvas #canvas sets this so it has access to size info
self._fontname = self._canvas._fontname
self._fontsize = self._canvas._fontsize
self._leading = self._canvas._leading
font = pdfmetrics.getFont(self._fontname)
self._dynamicFont = getattr(font, '_dynamicFont', 0)
self._curSubset = -1
self.setTextOrigin(x, y)
def getCode(self):
"pack onto one line; used internally"
self._code.append('ET')
return string.join(self._code, ' ')
def setTextOrigin(self, x, y):
if self._canvas.bottomup:
self._code.append('1 0 0 1 %s Tm' % fp_str(x, y)) #bottom up
else:
self._code.append('1 0 0 -1 %s Tm' % fp_str(x, y)) #top down
# The current cursor position is at the text origin
self._x0 = self._x = x
self._y0 = self._y = y
def setTextTransform(self, a, b, c, d, e, f):
"Like setTextOrigin, but does rotation, scaling etc."
if not self._canvas.bottomup:
c = -c #reverse bottom row of the 2D Transform
d = -d
self._code.append('%s Tm' % fp_str(a, b, c, d, e, f))
# The current cursor position is at the text origin Note that
# we aren't keeping track of all the transform on these
# coordinates: they are relative to the rotations/sheers
# defined in the matrix.
self._x0 = self._x = e
self._y0 = self._y = f
def moveCursor(self, dx, dy):
"""Starts a new line at an offset dx,dy from the start of the
current line. This does not move the cursor relative to the
current position, and it changes the current offset of every
future line drawn (i.e. if you next do a textLine() call, it
will move the cursor to a position one line lower than the
position specified in this call. """
# Check if we have a previous move cursor call, and combine
# them if possible.
if self._code and self._code[-1][-3:]==' Td':
L = string.split(self._code[-1])
if len(L)==3:
del self._code[-1]
else:
self._code[-1] = string.join(L[:-4])
# Work out the last movement
lastDx = float(L[-3])
lastDy = float(L[-2])
# Combine the two movement
dx += lastDx
dy -= lastDy
# We will soon add the movement to the line origin, so if
# we've already done this for lastDx, lastDy, remove it
# first (so it will be right when added back again).
self._x0 -= lastDx
self._y0 -= lastDy
# Output the move text cursor call.
self._code.append('%s Td' % fp_str(dx, -dy))
# Keep track of the new line offsets and the cursor position
self._x0 += dx
self._y0 += dy
self._x = self._x0
self._y = self._y0
def setXPos(self, dx):
"""Starts a new line dx away from the start of the
current line - NOT from the current point! So if
you call it in mid-sentence, watch out."""
self.moveCursor(dx,0)
def getCursor(self):
"""Returns current text position relative to the last origin."""
return (self._x, self._y)
def getStartOfLine(self):
"""Returns a tuple giving the text position of the start of the
current line."""
return (self._x0, self._y0)
def getX(self):
"""Returns current x position relative to the last origin."""
return self._x
def getY(self):
"""Returns current y position relative to the last origin."""
return self._y
def _setFont(self, psfontname, size):
"""Sets the font and fontSize
Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
font = pdfmetrics.getFont(self._fontname)
self._dynamicFont = getattr(font, '_dynamicFont', 0)
if self._dynamicFont:
self._curSubset = -1
else:
pdffontname = self._canvas._doc.getInternalFontName(psfontname)
self._code.append('%s %s Tf' % (pdffontname, fp_str(size)))
def setFont(self, psfontname, size, leading = None):
"""Sets the font. If leading not specified, defaults to 1.2 x
font size. Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
if leading is None:
leading = size * 1.2
self._leading = leading
font = pdfmetrics.getFont(self._fontname)
self._dynamicFont = getattr(font, '_dynamicFont', 0)
if self._dynamicFont:
self._curSubset = -1
else:
pdffontname = self._canvas._doc.getInternalFontName(psfontname)
self._code.append('%s %s Tf %s TL' % (pdffontname, fp_str(size), fp_str(leading)))
def setCharSpace(self, charSpace):
"""Adjusts inter-character spacing"""
self._charSpace = charSpace
self._code.append('%s Tc' % fp_str(charSpace))
def setWordSpace(self, wordSpace):
"""Adjust inter-word spacing. This can be used
to flush-justify text - you get the width of the
words, and add some space between them."""
self._wordSpace = wordSpace
self._code.append('%s Tw' % fp_str(wordSpace))
def setHorizScale(self, horizScale):
"Stretches text out horizontally"
self._horizScale = 100 + horizScale
self._code.append('%s Tz' % fp_str(horizScale))
def setLeading(self, leading):
"How far to move down at the end of a line."
self._leading = leading
self._code.append('%s TL' % fp_str(leading))
def setTextRenderMode(self, mode):
"""Set the text rendering mode.
0 = Fill text
1 = Stroke text
2 = Fill then stroke
3 = Invisible
4 = Fill text and add to clipping path
5 = Stroke text and add to clipping path
6 = Fill then stroke and add to clipping path
7 = Add to clipping path"""
assert mode in (0,1,2,3,4,5,6,7), "mode must be in (0,1,2,3,4,5,6,7)"
self._textRenderMode = mode
self._code.append('%d Tr' % mode)
def setRise(self, rise):
"Move text baseline up or down to allow superscrip/subscripts"
self._rise = rise
self._y = self._y - rise # + ? _textLineMatrix?
self._code.append('%s Ts' % fp_str(rise))
def setStrokeColorRGB(self, r, g, b):
self._strokeColorRGB = (r, g, b)
self._code.append('%s RG' % fp_str(r,g,b))
def setFillColorRGB(self, r, g, b):
self._fillColorRGB = (r, g, b)
self._code.append('%s rg' % fp_str(r,g,b))
def setFillColorCMYK(self, c, m, y, k):
"""Takes 4 arguments between 0.0 and 1.0"""
self._fillColorCMYK = (c, m, y, k)
self._code.append('%s k' % fp_str(c, m, y, k))
def setStrokeColorCMYK(self, c, m, y, k):
"""Takes 4 arguments between 0.0 and 1.0"""
self._strokeColorCMYK = (c, m, y, k)
self._code.append('%s K' % fp_str(c, m, y, k))
def setFillColor(self, aColor):
"""Takes a color object, allowing colors to be referred to by name"""
if type(aColor) == ColorType:
rgb = (aColor.red, aColor.green, aColor.blue)
self._fillColorRGB = rgb
self._code.append('%s rg' % fp_str(rgb) )
elif type(aColor) in _SeqTypes:
l = len(aColor)
if l==3:
self._fillColorRGB = aColor
self._code.append('%s rg' % fp_str(aColor) )
elif l==4:
self.setFillColorCMYK(aColor[0], aColor[1], aColor[2], aColor[3])
else:
raise ValueError('Unknown color: %s' % str(aColor))
else:
raise ValueError('Unknown color: %s' % str(aColor))
def setStrokeColor(self, aColor):
"""Takes a color object, allowing colors to be referred to by name"""
if type(aColor) == ColorType:
rgb = (aColor.red, aColor.green, aColor.blue)
self._strokeColorRGB = rgb
self._code.append('%s RG' % fp_str(rgb) )
elif type(aColor) in _SeqTypes:
l = len(aColor)
if l==3:
self._strokeColorRGB = aColor
self._code.append('%s RG' % fp_str(aColor) )
elif l==4:
self.setStrokeColorCMYK(aColor[0], aColor[1], aColor[2], aColor[3])
else:
raise ValueError('Unknown color: %s' % str(aColor))
else:
raise ValueError('Unknown color: %s' % str(aColor))
def setFillGray(self, gray):
"""Sets the gray level; 0.0=black, 1.0=white"""
self._fillColorRGB = (gray, gray, gray)
self._code.append('%s g' % fp_str(gray))
def setStrokeGray(self, gray):
"""Sets the gray level; 0.0=black, 1.0=white"""
self._strokeColorRGB = (gray, gray, gray)
self._code.append('%s G' % fp_str(gray))
def _formatText(self, text):
"Generates PDF text output operator(s)"
if self._dynamicFont:
#it's a truetype font and should be utf8. If an error is raised, fall back to latin-1 below.
results = []
font = pdfmetrics.getFont(self._fontname)
try: #assume UTF8
stuff = font.splitString(text, self._canvas._doc)
except UnicodeDecodeError:
#assume latin1 as fallback
from reportlab.pdfbase.ttfonts import latin1_to_utf8
from reportlab.lib.logger import warnOnce
warnOnce('non-utf8 data fed to truetype font, assuming latin-1 data')
text = latin1_to_utf8(text)
stuff = font.splitString(text, self._canvas._doc)
for subset, chunk in stuff:
if subset != self._curSubset:
pdffontname = font.getSubsetInternalName(subset, self._canvas._doc)
results.append("%s %s Tf %s TL" % (pdffontname, fp_str(self._fontsize), fp_str(self._leading)))
self._curSubset = subset
chunk = self._canvas._escape(chunk)
results.append("(%s) Tj" % chunk)
return string.join(results, ' ')
else:
text = self._canvas._escape(text)
return "(%s) Tj" % text
def _textOut(self, text, TStar=0):
"prints string at current point, ignores text cursor"
self._code.append('%s%s' % (self._formatText(text), (TStar and ' T*' or '')))
def textOut(self, text):
"""prints string at current point, text cursor moves across."""
self._x = self._x + self._canvas.stringWidth(text, self._fontname, self._fontsize)
self._code.append(self._formatText(text))
def textLine(self, text=''):
"""prints string at current point, text cursor moves down.
Can work with no argument to simply move the cursor down."""
# Update the coordinates of the cursor
self._x = self._x0
if self._canvas.bottomup:
self._y = self._y - self._leading
else:
self._y = self._y + self._leading
# Update the location of the start of the line
# self._x0 is unchanged
self._y0 = self._y
# Output the text followed by a PDF newline command
self._code.append('%s T*' % self._formatText(text))
def textLines(self, stuff, trim=1):
"""prints multi-line or newlined strings, moving down. One
common use is to quote a multi-line block in your Python code;
since this may be indented, by default it trims whitespace
off each line and from the beginning; set trim=0 to preserve
whitespace."""
if type(stuff) == StringType:
lines = string.split(string.strip(stuff), '\n')
if trim==1:
lines = map(string.strip,lines)
elif type(stuff) == ListType:
lines = stuff
elif type(stuff) == TupleType:
lines = stuff
else:
assert 1==0, "argument to textLines must be a string, list or tuple"
# Output each line one at a time. This used to be a long-hand
# copy of the textLine code, now called as a method.
for line in lines:
self.textLine(line)
def __nonzero__(self):
'PDFTextObject is true if it has something done after the init'
return self._code != ['BT']
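A short usage sketch of the canvas-level API this class backs, assuming current reportlab method names; the output file name is arbitrary:
# Hypothetical usage: text objects are obtained from a Canvas, never constructed directly.
from reportlab.pdfgen import canvas

c = canvas.Canvas("demo.pdf")
t = c.beginText(72, 720)      # returns a PDFTextObject positioned at (72, 720)
t.setFont("Helvetica", 12)    # leading defaults to 1.2 * size, as in setFont above
t.textLine("first line")      # cursor moves down by the leading
t.textOut("same line, ")      # cursor moves across, not down
t.textOut("continued")
c.drawText(t)                 # emits the accumulated BT ... ET block into the page
c.save()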
| BackupTheBerlios/pixies-svn | pixies/reportlab/pdfgen/textobject.py | Python | gpl-2.0 | 14,031 | 0.005274 |
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-console-auth-tokens"
extra_extensions_to_load = ["os-remote-consoles", "os-access-ips"]
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(ConsoleAuthTokensSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.consoles.Consoles')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.console_auth_tokens.'
'Console_auth_tokens')
return f
def _get_console_url(self, data):
return jsonutils.loads(data)["console"]["url"]
def _get_console_token(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'get-rdp-console-post-req',
{'action': 'os-getRDPConsole'})
url = self._get_console_url(response.content)
return re.match('.+?token=([^&]+)', url).groups()[0]
def test_get_console_connect_info(self):
self.flags(enabled=True, group='rdp')
uuid = self._post_server()
token = self._get_console_token(uuid)
response = self._do_get('os-console-auth-tokens/%s' % token)
subs = self._get_regexes()
subs["uuid"] = uuid
subs["host"] = r"[\w\.\-]+"
subs["port"] = "[0-9]+"
subs["internal_access_path"] = ".*"
self._verify_response('get-console-connect-info-get-resp', subs,
response, 200)
| takeshineshiro/nova | nova/tests/functional/v3/test_console_auth_tokens.py | Python | apache-2.0 | 2,711 | 0 |
#!/usr/bin/env python
"""
split_file.py [-o <dir>] <path>
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
"""
import getopt
import os
import re
import sys
def usage():
sys.stderr.write(__doc__.strip() + "\n")
sys.exit(1)
fp_out = None
dest_dir = '.'
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:h')
for (opt, arg) in opts:
if opt == '-o':
dest_dir = arg
elif opt == '-h':
usage()
except getopt.GetoptError:
usage()
if len(args) != 1:
usage()
fp_in = open(args[0], 'r')
for line in fp_in:
m = re.match(r'^//\s*BEGIN\s+([^\s]+)\s*$', line)
if m:
if fp_out:
fp_out.close()
fp_out = open(os.path.join(dest_dir, m.group(1)), 'w')
elif fp_out:
fp_out.write(line)
fp_in.close()
if fp_out:
fp_out.close()
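An invocation sketch; the marker lines follow the regex used in the loop above, and the file names are made up:
# Hypothetical run:
#   python split_file.py -o out_dir combined.txt
# where combined.txt contains:
#   // BEGIN first.swift
#   ...lines written to out_dir/first.swift...
#   // BEGIN second.swift
#   ...lines written to out_dir/second.swift...
# Lines before the first "// BEGIN" marker are discarded, because fp_out is still None there.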
| khizkhiz/swift | utils/split_file.py | Python | apache-2.0 | 1,030 | 0.000971 |
# Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests python pretty
# printers.
import gdb  # provided by GDB's embedded Python; imported explicitly for clarity
import re
# Test returning a Value from a printer.
class string_print:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['whybother']['contents']
# Test a class-based printer.
class ContainerPrinter:
class _iterator:
def __init__ (self, pointer, len):
self.start = pointer
self.pointer = pointer
self.end = pointer + len
def __iter__(self):
return self
def next(self):
if self.pointer == self.end:
raise StopIteration
result = self.pointer
self.pointer = self.pointer + 1
return ('[%d]' % int (result - self.start), result.dereference())
def __init__(self, val):
self.val = val
def to_string(self):
return 'container %s with %d elements' % (self.val['name'], self.val['len'])
def children(self):
return self._iterator(self.val['elements'], self.val['len'])
# Test a printer where to_string is None
class NoStringContainerPrinter:
class _iterator:
def __init__ (self, pointer, len):
self.start = pointer
self.pointer = pointer
self.end = pointer + len
def __iter__(self):
return self
def next(self):
if self.pointer == self.end:
raise StopIteration
result = self.pointer
self.pointer = self.pointer + 1
return ('[%d]' % int (result - self.start), result.dereference())
def __init__(self, val):
self.val = val
def to_string(self):
return None
def children(self):
return self._iterator(self.val['elements'], self.val['len'])
class pp_s:
def __init__(self, val):
self.val = val
def to_string(self):
a = self.val["a"]
b = self.val["b"]
if a.address != b:
raise Exception("&a(%s) != b(%s)" % (str(a.address), str(b)))
return " a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
class pp_ss:
def __init__(self, val):
self.val = val
def to_string(self):
return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
class pp_sss:
def __init__(self, val):
self.val = val
def to_string(self):
return "a=<" + str(self.val['a']) + "> b=<" + str(self.val["b"]) + ">"
class pp_multiple_virtual:
def __init__ (self, val):
self.val = val
def to_string (self):
return "pp value variable is: " + str (self.val['value'])
class pp_vbase1:
def __init__ (self, val):
self.val = val
def to_string (self):
return "pp class name: " + self.val.type.tag
class pp_nullstr:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['s'].string(gdb.target_charset())
class pp_ns:
"Print a std::basic_string of some kind"
def __init__(self, val):
self.val = val
def to_string(self):
len = self.val['length']
return self.val['null_str'].string (gdb.target_charset(), length = len)
def display_hint (self):
return 'string'
class pp_ls:
"Print a std::basic_string of some kind"
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['lazy_str'].lazy_string()
def display_hint (self):
return 'string'
class pp_outer:
"Print struct outer"
def __init__ (self, val):
self.val = val
def to_string (self):
return "x = %s" % self.val['x']
def children (self):
yield 's', self.val['s']
yield 'x', self.val['x']
def lookup_function (val):
"Look-up and return a pretty-printer that can print val."
# Get the type.
type = val.type
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
# Get the type name.
typename = type.tag
if typename is None:
return None
# Iterate over local dictionary of types to determine
# if a printer is registered for that type. Return an
# instantiation of the printer if found.
for function in pretty_printers_dict:
if function.match (typename):
return pretty_printers_dict[function] (val)
# Cannot find a pretty printer. Return None.
return None
def disable_lookup_function ():
lookup_function.enabled = False
def enable_lookup_function ():
lookup_function.enabled = True
def register_pretty_printers ():
pretty_printers_dict[re.compile ('^struct s$')] = pp_s
pretty_printers_dict[re.compile ('^s$')] = pp_s
pretty_printers_dict[re.compile ('^S$')] = pp_s
pretty_printers_dict[re.compile ('^struct ss$')] = pp_ss
pretty_printers_dict[re.compile ('^ss$')] = pp_ss
pretty_printers_dict[re.compile ('^const S &$')] = pp_s
pretty_printers_dict[re.compile ('^SSS$')] = pp_sss
pretty_printers_dict[re.compile ('^VirtualTest$')] = pp_multiple_virtual
pretty_printers_dict[re.compile ('^Vbase1$')] = pp_vbase1
pretty_printers_dict[re.compile ('^struct nullstr$')] = pp_nullstr
pretty_printers_dict[re.compile ('^nullstr$')] = pp_nullstr
# Note that we purposely omit the typedef names here.
# Printer lookup is based on canonical name.
# However, we do need both tagged and untagged variants, to handle
# both the C and C++ cases.
pretty_printers_dict[re.compile ('^struct string_repr$')] = string_print
pretty_printers_dict[re.compile ('^struct container$')] = ContainerPrinter
pretty_printers_dict[re.compile ('^struct justchildren$')] = NoStringContainerPrinter
pretty_printers_dict[re.compile ('^string_repr$')] = string_print
pretty_printers_dict[re.compile ('^container$')] = ContainerPrinter
pretty_printers_dict[re.compile ('^justchildren$')] = NoStringContainerPrinter
pretty_printers_dict[re.compile ('^struct ns$')] = pp_ns
pretty_printers_dict[re.compile ('^ns$')] = pp_ns
pretty_printers_dict[re.compile ('^struct lazystring$')] = pp_ls
pretty_printers_dict[re.compile ('^lazystring$')] = pp_ls
pretty_printers_dict[re.compile ('^struct outerstruct$')] = pp_outer
pretty_printers_dict[re.compile ('^outerstruct$')] = pp_outer
pretty_printers_dict = {}
register_pretty_printers ()
gdb.pretty_printers.append (lookup_function)
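A loading sketch; inside a GDB session the script is typically sourced, after which the registered lookup function is consulted for every printed value (the file name is hypothetical):
# Inside GDB (or from a .gdbinit), assuming this file is saved as py-prettyprint.py:
#   (gdb) source py-prettyprint.py
#   (gdb) print s                  # a variable of type 'struct s' is now rendered by pp_s
# The module-level helpers can toggle the printers at runtime:
#   (gdb) python disable_lookup_function()
#   (gdb) python enable_lookup_function()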
| crazyleen/msp430-gdb-7.2a | gdb/testsuite/gdb.python/py-prettyprint.py | Python | gpl-2.0 | 7,344 | 0.012527 |
from datetime import datetime
from parsedatetime import parsedatetime, Constants
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.utils.misc import load_object
class HistoryMiddleware(object):
DATE_FORMAT = '%Y%m%d'
def __init__(self, crawler):
self.stats = crawler.stats
settings = crawler.settings
history = settings.get('HISTORY', None)
if not history:
raise NotConfigured()
# EPOCH:
# == False: don't retrieve historical data
# == True : retrieve most recent version
# == datetime(): retrieve next version after datetime()
self.epoch = self.parse_epoch(settings.get('EPOCH', False))
self.retrieve_if = load_object(history.get(
'RETRIEVE_IF', 'history.logic.RetrieveNever'))(settings)
self.store_if = load_object(history.get(
'STORE_IF', 'history.logic.StoreAlways'))(settings)
self.storage = load_object(history.get(
'BACKEND', 'history.storage.S3CacheStorage'))(settings)
self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
dispatcher.connect(self.spider_closed, signal=signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def spider_opened(self, spider):
self.storage.open_spider(spider)
self.store_if.spider_opened(spider)
self.retrieve_if.spider_opened(spider)
def spider_closed(self, spider):
self.storage.close_spider(spider)
self.store_if.spider_closed(spider)
self.retrieve_if.spider_closed(spider)
def process_request(self, request, spider):
"""
A request is approaching the Downloader.
Decide if we would like to intercept the request and supply a
response ourselves.
"""
if self.epoch and self.retrieve_if(spider, request):
request.meta['epoch'] = self.epoch
response = self.storage.retrieve_response(spider, request)
if response:
response.flags.append('historic')
return response
elif self.ignore_missing:
raise IgnoreRequest("Ignored; request not in history: %s" % request)
def process_response(self, request, response, spider):
"""
A response is leaving the Downloader. It was either retrieved
from the web or from another middleware.
Decide if we would like to store it in the history.
"""
if self.store_if(spider, request, response):
self.storage.store_response(spider, request, response)
self.stats.set_value('history/cached', True, spider=spider)
return response
def parse_epoch(self, epoch):
"""
bool => bool
datetime => datetime
str => datetime
"""
if isinstance(epoch, bool) or isinstance(epoch, datetime):
return epoch
elif epoch == 'True':
return True
elif epoch == 'False':
return False
try:
return datetime.strptime(epoch, self.DATE_FORMAT)
except ValueError:
pass
parser = parsedatetime.Calendar(Constants())
time_tuple = parser.parse(epoch) # 'yesterday' => (time.struct_time, int)
if not time_tuple[1]:
raise NotConfigured('Could not parse epoch: %s' % epoch)
time_struct = time_tuple[0] #=> time.struct_time(tm_year=2012, tm_mon=4, tm_mday=7, tm_hour=22, tm_min=8, tm_sec=6, tm_wday=5, tm_yday=98, tm_isdst=-1)
return datetime(*time_struct[:6]) #=> datetime.datetime(2012, 4, 7, 22, 8, 6)
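A settings sketch for wiring the middleware into a Scrapy project, using only keys the class reads above; the middleware priority and backend paths are assumptions:
# Hypothetical Scrapy settings fragment.
DOWNLOADER_MIDDLEWARES = {
    'history.middleware.HistoryMiddleware': 901,   # priority chosen arbitrarily
}
HISTORY = {
    'BACKEND': 'history.storage.S3CacheStorage',
    'STORE_IF': 'history.logic.StoreAlways',
    'RETRIEVE_IF': 'history.logic.RetrieveNever',
}
EPOCH = 'yesterday'               # True => newest stored version; strings go through parse_epoch
HTTPCACHE_IGNORE_MISSING = False  # when True, requests absent from history raise IgnoreRequest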
| playandbuild/scrapy-history-middleware | history/middleware.py | Python | mit | 3,854 | 0.002076 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-10 18:31
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workshops', '0027_auto_20161110_0311'),
]
operations = [
migrations.AlterField(
model_name='slot',
name='begin',
field=models.DateTimeField(
default=datetime.datetime(2016, 11, 11, 0, 0),
verbose_name='Start'),
),
migrations.AlterField(
model_name='slot',
name='end',
field=models.DateTimeField(
default=datetime.datetime(2016, 11, 11, 0, 0),
verbose_name='Ende'),
),
]
| d120/pyofahrt | workshops/migrations/0028_auto_20161110_1931.py | Python | agpl-3.0 | 795 | 0 |
## @package shesha.constants
## @brief Numerical constants for shesha and config enumerations for safe-typing
## @author COMPASS Team <https://github.com/ANR-COMPASS>
## @version 5.2.1
## @date 2022/01/24
## @copyright GNU Lesser General Public License
#
# This file is part of COMPASS <https://anr-compass.github.io/compass/>
#
# Copyright (C) 2011-2022 COMPASS Team <https://github.com/ANR-COMPASS>
# All rights reserved.
# Distributed under GNU - LGPL
#
# COMPASS is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or any later version.
#
# COMPASS: End-to-end AO simulation tool using GPU acceleration
# The COMPASS platform was designed to meet the need of high-performance for the simulation of AO systems.
#
# The final product includes a software package for simulating all the critical subcomponents of AO,
# particularly in the context of the ELT and a real-time core based on several control approaches,
# with performances consistent with its integration into an instrument. Taking advantage of the specific
# hardware architecture of the GPU, the COMPASS tool allows to achieve adequate execution speeds to
# conduct large simulation campaigns called to the ELT.
#
# The COMPASS platform can be used to carry a wide variety of simulations to both testspecific components
# of AO of the E-ELT (such as wavefront analysis device with a pyramid or elongated Laser star), and
# various systems configurations such as multi-conjugate AO.
#
# COMPASS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with COMPASS.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>.
import numpy as np
from aenum import MultiValueEnum
class CONST:
RAD2ARCSEC = 3600. * 360. / (2 * np.pi)
ARCSEC2RAD = 2. * np.pi / (360. * 3600.)
RAD2DEG = 180. / np.pi
DEG2RAD = np.pi / 180.
def check_enum(cls, name):
"""
Create a safe-type enum instance from bytes contents
"""
if not isinstance(name, str) or \
name not in vars(cls).values():
raise ValueError("Invalid enumeration value for enum %s, value %s" % (cls, name))
return name
class DmType:
"""
Types of deformable mirrors
"""
PZT = 'pzt'
TT = 'tt'
KL = 'kl'
class PatternType:
"""
Types of Piezo DM patterns
"""
SQUARE = 'square'
HEXA = 'hexa'
HEXAM4 = 'hexaM4'
class KLType:
"""
Possible KLs for computations
"""
KOLMO = 'kolmo'
KARMAN = 'karman'
class InfluType:
"""
Influence function types
"""
DEFAULT = 'default'
RADIALSCHWARTZ = 'radialSchwartz'
SQUARESCHWARTZ = 'squareSchwartz'
BLACKNUTT = 'blacknutt'
GAUSSIAN = 'gaussian'
BESSEL = 'bessel'
PETAL = 'petal'
class ControllerType:
"""
Controller types
"""
GENERIC = 'generic'
GENERIC_LINEAR = 'generic_linear'
LS = 'ls'
MV = 'mv'
CURED = 'cured'
GEO = 'geo'
class CommandLawType:
"""
Command law types for generic controller only
"""
INTEGRATOR = 'integrator'
MODAL_INTEGRATOR = 'modal_integrator'
TWO_MATRICES = '2matrices'
class CentroiderType:
"""
Centroider types
"""
COG = 'cog'
TCOG = 'tcog'
WCOG = 'wcog'
BPCOG = 'bpcog'
CORR = 'corr'
PYR = 'pyr'
MASKEDPIX = 'maskedpix'
class CentroiderFctType:
MODEL = 'model'
GAUSS = 'gauss'
class PyrCentroiderMethod:
"""
Pyramid centroider methods
Local flux normalization (eq SH quad-cell, ray optics. Ragazzonni 1996)
Global flux normalization (Verinaud 2004, most > 2010 Pyr applications)
Resulting (A+/-B-/+C-D)/(A+B+C+D) or sin((A+/-B-/+C-D)/(A+B+C+D))
ref. code sutra_centroider_pyr.h
"""
NOSINUSGLOBAL = 0
SINUSGLOBAL = 1
NOSINUSLOCAL = 2
SINUSLOCAL = 3
OTHER = 4
class WFSType:
"""
WFS Types
"""
SH = 'sh'
PYRHR = 'pyrhr'
PYRLR = 'pyrlr'
class TargetImageType:
"""
Target Images
"""
SE = 'se'
LE = 'le'
class ApertureType:
"""
Telescope apertures
"""
GENERIC = 'Generic'
EELT_NOMINAL = 'EELT-Nominal' # Alexis Carlotti method
EELT = 'EELT' # E. Gendron method
EELT_BP1 = 'EELT-BP1'
EELT_BP3 = 'EELT-BP3'
EELT_BP5 = 'EELT-BP5'
EELT_CUSTOM = 'EELT-Custom'
VLT = 'VLT'
KECK = 'keck'
class SpiderType:
"""
Spiders
"""
FOUR = 'four'
SIX = 'six'
class ProfType:
"""
Sodium profile for LGS
"""
GAUSS1 = 'Gauss1'
GAUSS2 = 'Gauss2'
GAUSS3 = 'Gauss3'
EXP = 'Exp'
MULTIPEAK = 'Multipeak'
FILES = dict({
GAUSS1: "allProfileNa_withAltitude_1Gaussian.npy",
GAUSS2: "allProfileNa_withAltitude_2Gaussian.npy",
GAUSS3: "allProfileNa_withAltitude_3Gaussian.npy",
EXP: "allProfileNa_withAltitude.npy",
MULTIPEAK: "multipeakProfileNa_withAltitude.npy"
})
class FieldStopType:
"""
WFS field stop
"""
SQUARE = 'square'
ROUND = 'round'
class PupilType(MultiValueEnum):
"""Compass pupil enumeration
"""
SPUPIL = "spupil", "s"
MPUPIL = "mpupil", "m"
IPUPIL = "ipupil", "i"
| ANR-COMPASS/shesha | shesha/constants.py | Python | gpl-3.0 | 5,648 | 0.003718 |
#!/usr/bin/python
from __future__ import division
## Math
import numpy as np
import math
## Display
import pygame
import time
## Ros
import rospy
import os, sys
from geometry_msgs.msg import Twist, Pose, PoseStamped, Point, Quaternion
from tf import transformations as tf_trans
from std_msgs.msg import Header
from ieee2015_msgs.msg import Mecanum
from xmega_connector.srv import *
#constants
SCREEN_DIM = (700, 350)
fpath = os.path.dirname(os.path.realpath(__file__))
background = pygame.image.load(os.path.join(fpath, "stage.jpg"))
background = pygame.transform.scale(background, SCREEN_DIM)
rect = background.get_rect()
PXL_PER_METER = 50
#3779.527559055 #Change the number to the correct value!
"""Dimensions of the IEEE Competition
Course:
4 ft. x 8 ft.
1.2192 m x 1.8288 m
Robot:
1 ft. x 1 ft.
0.3048 m x 0.3048 m"""
dt = .5
radius = 10
ORIGIN = np.array([SCREEN_DIM[0]/2.0, SCREEN_DIM[1]/2.0])
## Function to calculate the cross-torque of an array of velocities (top left, top right, bottom left, bottom right)
def crosstorque(velocity):
forcex = 0.0
forcey = 0.0
forcex += velocity[0] * np.sin(np.pi / 4)
forcex += velocity[1] * np.sin(np.pi / 4)
forcex += velocity[2] * np.sin(np.pi / 4)
forcex += velocity[3] * np.sin(np.pi / 4)
# N = kg m/s^2 = m/s * (r)
# M_i =
forcey += velocity[0] * np.sin(np.pi / 4)
forcey -= velocity[1] * np.sin(np.pi / 4)
forcey += velocity[2] * np.sin(np.pi / 4)
forcey -= velocity[3] * np.sin(np.pi / 4)
return np.array([forcex, forcey])
## Class to define a robot object that moves with velocity, acceleration, and force
class Robot(object):
def __init__(self, (x, y), height, width):
self.position = np.array([x, y], dtype=np.float32)
self.position += [0, (150 - height)]
self.velocity = np.array([0, 0], dtype=np.float32)
self.acceleration = np.array([0, 0], dtype=np.float32)
self.force = np.array([0, 0], dtype=np.float32)
# Implementation for rotation of object
self.angle = 0
self.omega = 0
self.alpha = 0
self.mecanum = np.array([0, 0, 0, 0], dtype=np.float32)
rospy.init_node('simulation', anonymous=True)
self.pose_pub = rospy.Publisher('pose', PoseStamped, queue_size=10)
self.height = height
self.width = width
a = +(self.width / 2)
b = -(self.width / 2)
c = +(self.height / 2)
d = -(self.height / 2)
self.pointlist = map(lambda vector: np.array(vector, dtype=np.float32), [[b, d], [a, d], [a, c], [b, c]])
rospy.Service('/xmega_connector/set_wheel_speeds', SetWheelSpeeds, self.set_wheel_speed_service)
def set_wheel_speed_service(self, ws_req):
if abs(ws_req.wheel1) > .00000000001 and abs(ws_req.wheel2) > .00000000000001 and abs(ws_req.wheel3) > .00000000000001 and abs(ws_req.wheel4) > .00000000000001:
self.mecanum[0] = ws_req.wheel1
self.mecanum[1] = ws_req.wheel2
self.mecanum[2] = ws_req.wheel3
self.mecanum[3] = ws_req.wheel4
else:
self.mecanum[0] = 0
self.mecanum[1] = 0
self.mecanum[2] = 0
self.mecanum[3] = 0
#print('Wheel speeds set')
return SetWheelSpeedsResponse()
def update(self):
self.publish_pose()
# Update velocity and position
#self.position[0] += self.velocity[0] * dt
#self.position[1] += self.velocity[1] * dt
#self.velocity += self.acceleration * dt
self.position += self.force * dt
#self.acceleration = self.force
# Update rotation of object
self.angle += self.omega * dt
self.omega += self.alpha * dt
self.force = crosstorque(self.mecanum)
# Makes sure the object stays in the window
if self.position[0] + (self.width / 2) >= 700 or self.position[0] - (self.width / 2) <= 0:
self.velocity[0] *= -1
if self.position[1] + (self.height / 2) >= 350 or self.position[1] - (self.height / 2) <= 0:
self.velocity[1] *= -1
def rotate(self):
# Rotates the object by some angle, in degrees, clockwise
radangle = math.radians(self.angle)
        rotmatrix = np.matrix([[math.cos(radangle), -math.sin(radangle)], [math.sin(radangle), math.cos(radangle)]])
templist = []
"""one = np.matrix([
[1, 0, 50],
[0, 1, 0],
[0, 0, 1]])
two = np.matrix([[math.cos(radangle), -math.sin(radangle), 0], [math.sin(radangle), math.cos(radangle), 0], [0, 0, 1]])
three = np.matrix([
[1, 0, -50],
[0, 1, 0],
[0, 0, 1]])
rotmatrix = one * two * three"""
for point in self.pointlist:
matpoint = np.matrix(point).T
matpoint = rotmatrix * matpoint
point = np.array(matpoint.T)[0]
templist.append(point)
self.pointlist = templist
def publish_pose(self):
'''Publish Pose'''
_orientation = tf_trans.quaternion_from_euler(0, 0, self.angle)
self.pose_pub.publish(
PoseStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/world',
),
pose = Pose(
position = Point(self.position[0], self.position[1], 0.0),
orientation = Quaternion(*_orientation) #Radians
)
)
)
def draw(self, display):
# polygon(Surface, color, pointlist, width=0) -> Rect
# pointlist = [(1, 2), (7, 9), (21, 50)]
"""roundedlist = []
for point in self.pointlist:
roundedlist.append(round_point(point + self.position))
print roundedlist"""
"""Change position coordinates from meters to pixels."""
listInMeters = (self.position + self.pointlist)
display.blit(background, (0,0))
pygame.draw.polygon(display, (0,255,0), listInMeters, 0)
class World:
def __init__(self, Robot, waypoints):
self.Robot = Robot
self.waypoints = waypoints
def main():
pygame.init()
display = pygame.display.set_mode(SCREEN_DIM)
background.convert()
dimensions = (background.get_width(), background.get_height())
clock = pygame.time.Clock()
Rob = Robot((50, 50), 50, 50)
while not rospy.is_shutdown():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.MOUSEBUTTONDOWN:
Rob.desired_pose_pub = rospy.Publisher('desired_pose', PoseStamped, queue_size=10)
pt = pygame.mouse.get_pos()
# Publish this coordinate in meters as the desired pose
print 'publishing desired pose'
# _orientation = tf_trans.quaternion_from_euler(0, 0, Rob.angle)
_orientation = tf_trans.quaternion_from_euler(0.0, 0.0, 0.0)
Rob.desired_pose_pub.publish(
PoseStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/world'
),
pose = Pose(
# Update the position to reflect meters per second, not pixels
position = Point(pt[0], pt[1], 0.0),
orientation = Quaternion(*_orientation) #Radians
)
)
)
Rob.rotate()
Rob.draw(display)
Rob.update()
pygame.display.update()
clock.tick(20)
display.fill((0, 0, 0))
if __name__ == '__main__':
main()
| ufieeehw/IEEE2015 | ros/ieee2015_simulator/nodes/mecanum_simulation.py | Python | gpl-2.0 | 7,968 | 0.009538 |
import sys
from cliff import app
from cliff import commandmanager as cm
from conf import default
import tacoclient
class TacoClientApp(app.App):
def __init__(self, **kwargs):
super(TacoClientApp, self).__init__(
description='tacoclient - CLI client for TACO(SKT All Container \
Openstack)',
version=tacoclient.__version__,
command_manager=cm.CommandManager('tacoclient'),
**kwargs)
def build_option_parser(self, description, version, argparse_kwargs=None):
parser = super(TacoClientApp, self).build_option_parser(
description, version, argparse_kwargs)
return parser
def configure_logging(self):
super(TacoClientApp, self).configure_logging()
default.register_opts()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
return TacoClientApp().run(argv)
| powerds/python-tacoclient | tacoclient/shell.py | Python | apache-2.0 | 904 | 0.002212 |
# -*- coding: utf-8 -*-
#
# 2016-05-04 Cornelius Kölbel <cornelius.koelbel@netknights.it>
#            Initial writeup
#
# License: AGPLv3
# (c) 2016. Cornelius Kölbel
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
__doc__ = """This is the base class for an event handler module.
The event handler module is bound to an event together with
* a condition and
* an action
* optional options ;-)
"""
import logging
log = logging.getLogger(__name__)
class BaseEventHandler(object):
"""
    An event handler needs to return a list of actions which it can handle.
    It also returns a list of allowed actions and conditions.
    It returns an identifier, which can be used in the event handling definitions.
"""
identifier = "BaseEventHandler"
description = "This is the base class of an EventHandler with no " \
"functionality"
def __init__(self):
pass
@property
def actions(cls):
"""
        This method returns a list of available actions that are provided
by this event handler.
:return: list of actions
"""
actions = ["sample_action_1", "sample_action_2"]
return actions
@property
def events(cls):
"""
        This method returns a list of allowed events that this event handler
        can be bound to and which it can handle with the corresponding actions.
        An event handler may return an asterisk ["*"] indicating that it can
be used in all events.
:return: list of events
"""
events = ["*"]
return events
def check_condition(self):
"""
TODO
:return:
"""
# TODO
return True
def do(self, action, options=None):
"""
This method executes the defined action in the given event.
:param action:
:param options:
:return:
"""
log.info("In fact we are doing nothing, be we presume we are doing"
"{0!s}".format(action))
return True
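# --- Hedged illustration (not part of privacyidea itself): a minimal subclass
# sketch showing how the interface above is meant to be filled in. The action
# name "sample_notify" is invented for this example and is not a real action.
class _ExampleEventHandler(BaseEventHandler):

    identifier = "ExampleEventHandler"
    description = "Toy handler that only logs the requested action"

    @property
    def actions(cls):
        return ["sample_notify"]

    def do(self, action, options=None):
        log.info("would run action {0!s} with options {1!r}".format(action,
                                                                     options))
        return True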
| jalr/privacyidea | privacyidea/lib/eventhandler/base.py | Python | agpl-3.0 | 2,637 | 0 |
from common import common_global
from common import common_isbn
from common import common_logging_elasticsearch_httpx
from common import common_pagination_bootstrap
from sanic import Blueprint
blueprint_user_metadata_periodical = Blueprint('name_blueprint_user_metadata_periodical',
url_prefix='/user')
@blueprint_user_metadata_periodical.route('/user_meta_periodical', methods=['GET', 'POST'])
@common_global.jinja_template.template('bss_user/metadata/bss_user_metadata_periodical.html')
@common_global.auth.login_required
async def url_bp_user_metadata_periodical(request):
"""
Display periodical list page
"""
page, offset = common_pagination_bootstrap.com_pagination_page_calc(request)
item_list = []
db_connection = await request.app.db_pool.acquire()
for item_data in await request.app.db_functions.db_meta_periodical_list(offset,
int(request.ctx.session[
'per_page']),
request.ctx.session[
'search_text'],
db_connection=db_connection):
await common_logging_elasticsearch_httpx.com_es_httpx_post_async(message_type='info',
message_text={
'person data': item_data})
item_image = "img/missing_icon.jpg"
item_list.append((item_data['mm_metadata_book_guid'],
item_data['mm_metadata_book_name'], item_image))
request.ctx.session['search_page'] = 'meta_periodical'
pagination = common_pagination_bootstrap.com_pagination_boot_html(page,
url='/user/user_meta_periodical',
item_count=await request.app.db_functions.db_meta_periodical_list_count(
request.ctx.session[
'search_text'],
db_connection=db_connection),
client_items_per_page=
int(request.ctx.session[
'per_page']),
format_number=True)
await request.app.db_pool.release(db_connection)
return {
'media_person': item_list,
'pagination_links': pagination,
}
@blueprint_user_metadata_periodical.route('/user_meta_periodical_detail/<guid>')
@common_global.jinja_template.template('bss_user/metadata/bss_user_metadata_periodical_detail.html')
@common_global.auth.login_required
async def url_bp_user_metadata_periodical_detail(request, guid):
"""
Display periodical detail page
"""
db_connection = await request.app.db_pool.acquire()
json_metadata = await request.app.db_functions.db_meta_periodical_by_uuid(guid,
db_connection=db_connection)
await request.app.db_pool.release(db_connection)
try:
data_name = json_metadata['mm_metadata_book_json']['title']
except KeyError:
data_name = 'NA'
try:
data_isbn = common_isbn.com_isbn_mask(json_metadata['mm_metadata_book_json']['isbn10'])
except KeyError:
data_isbn = 'NA'
try:
data_overview = json_metadata['mm_metadata_book_json']['summary']
except KeyError:
data_overview = 'NA'
try:
data_author = json_metadata['mm_metadata_book_json']['author_data'][0]['name']
except KeyError:
data_author = 'NA'
try:
data_publisher = json_metadata['mm_metadata_book_json']['publisher_name']
except KeyError:
data_publisher = 'NA'
try:
data_pages = json_metadata['mm_metadata_book_json']['physical_description_text']
except KeyError:
data_pages = 'NA'
return {
'data_name': data_name,
'data_isbn': data_isbn,
'data_overview': data_overview,
'data_author': data_author,
'data_publisher': data_publisher,
'data_pages': data_pages,
'data_item_image': "img/missing_icon.jpg",
}
| MediaKraken/MediaKraken_Deployment | source/web_app_sanic/blueprint/user/bp_user_metadata_periodical.py | Python | gpl-3.0 | 4,831 | 0.006417 |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for different embedders for modalities."""
import abc
import numpy as np
import tensorflow as tf
import preprocessing
from tensorflow.contrib.slim.nets import resnet_v2
slim = tf.contrib.slim
class Embedder(object):
"""Represents the embedder for different modalities.
Modalities can be semantic segmentation, depth channel, object detection and
  so on, which require specific embedders for them.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def build(self, observation):
"""Builds the model to embed the observation modality.
Args:
observation: tensor that contains the raw observation from modality.
Returns:
Embedding tensor for the given observation tensor.
"""
raise NotImplementedError(
'Needs to be implemented as part of Embedder Interface')
class DetectionBoxEmbedder(Embedder):
"""Represents the model that encodes the detection boxes from images."""
def __init__(self, rnn_state_size, scope=None):
self._rnn_state_size = rnn_state_size
self._scope = scope
def build(self, observations):
"""Builds the model to embed object detection observations.
Args:
observations: a tuple of (dets, det_num).
dets is a tensor of BxTxLxE that has the detection boxes in all the
images of the batch. B is the batch size, T is the maximum length of
episode, L is the maximum number of detections per image in the batch
and E is the size of each detection embedding.
det_num is a tensor of BxT that contains the number of detected boxes
each image of each sequence in the batch.
Returns:
For each image in the batch, returns the accumulative embedding of all the
detection boxes in that image.
"""
with tf.variable_scope(self._scope, default_name=''):
shape = observations[0].shape
dets = tf.reshape(observations[0], [-1, shape[-2], shape[-1]])
det_num = tf.reshape(observations[1], [-1])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._rnn_state_size)
batch_size = tf.shape(dets)[0]
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=dets,
sequence_length=det_num,
initial_state=lstm_cell.zero_state(batch_size, dtype=tf.float32),
dtype=tf.float32)
# Gathering the last state of each sequence in the batch.
batch_range = tf.range(batch_size)
indices = tf.stack([batch_range, det_num - 1], axis=1)
last_lstm_outputs = tf.gather_nd(lstm_outputs, indices)
last_lstm_outputs = tf.reshape(last_lstm_outputs,
[-1, shape[1], self._rnn_state_size])
return last_lstm_outputs
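  # Hedged usage sketch (shapes are illustrative, not taken from the original
  # training code): with a batch of 2 episodes of length 10, at most 5 detections
  # per frame and 8-dimensional box embeddings,
  #   dets    = tf.placeholder(tf.float32, [2, 10, 5, 8])
  #   det_num = tf.placeholder(tf.int32, [2, 10])
  #   emb = DetectionBoxEmbedder(rnn_state_size=16).build((dets, det_num))
  # would yield a tensor of shape [2, 10, 16].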
class ResNet(Embedder):
"""Residual net embedder for image data."""
def __init__(self, params, *args, **kwargs):
super(ResNet, self).__init__(*args, **kwargs)
self._params = params
self._extra_train_ops = []
def build(self, images):
shape = images.get_shape().as_list()
if len(shape) == 5:
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
embedding = self._build_model(images)
if len(shape) == 5:
embedding = tf.reshape(embedding, [shape[0], shape[1], -1])
return embedding
@property
def extra_train_ops(self):
return self._extra_train_ops
def _build_model(self, images):
"""Builds the model."""
# Convert images to floats and normalize them.
images = tf.to_float(images)
bs = images.get_shape().as_list()[0]
images = [
tf.image.per_image_standardization(tf.squeeze(i))
for i in tf.split(images, bs)
]
images = tf.concat([tf.expand_dims(i, axis=0) for i in images], axis=0)
with tf.variable_scope('init'):
x = self._conv('init_conv', images, 3, 3, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
if self._params.use_bottleneck:
res_func = self._bottleneck_residual
filters = [16, 64, 128, 256]
else:
res_func = self._residual
filters = [16, 16, 32, 128]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
for i in xrange(1, self._params.num_residual_units):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('pool_logit'):
x = self._global_avg_pooling(x)
return x
def _stride_arr(self, stride):
return [1, stride, stride, 1]
def _batch_norm(self, name, x):
"""batch norm implementation."""
with tf.variable_scope(name):
params_shape = [x.shape[-1]]
beta = tf.get_variable(
'beta',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32))
gamma = tf.get_variable(
'gamma',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32))
if self._params.is_train:
mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
moving_mean = tf.get_variable(
'moving_mean',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32),
trainable=False)
moving_variance = tf.get_variable(
'moving_variance',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32),
trainable=False)
self._extra_train_ops.append(
tf.assign_moving_average(moving_mean, mean, 0.9))
self._extra_train_ops.append(
tf.assign_moving_average(moving_variance, variance, 0.9))
else:
mean = tf.get_variable(
'moving_mean',
params_shape,
tf.float32,
initializer=tf.constant_initializer(0.0, tf.float32),
trainable=False)
variance = tf.get_variable(
'moving_variance',
params_shape,
tf.float32,
initializer=tf.constant_initializer(1.0, tf.float32),
trainable=False)
tf.summary.histogram(mean.op.name, mean)
tf.summary.histogram(variance.op.name, variance)
      # epsilon used to be 1e-5. Maybe 0.001 solves the NaN problem in deeper nets.
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
y.set_shape(x.shape)
return y
def _residual(self,
x,
in_filter,
out_filter,
stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0], [(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
x += orig_x
return x
def _bottleneck_residual(self,
x,
in_filter,
out_filter,
stride,
activate_before_residual=False):
"""A residual convolutional layer with a bottleneck.
The layer is a composite of three convolutional layers with a ReLU non-
    linearity and batch normalization after each linear convolution. The depth
    of the second and third layers is out_filter / 4 (hence it is a bottleneck).
Args:
x: a float 4 rank Tensor representing the input to the layer.
in_filter: a python integer representing depth of the input.
out_filter: a python integer representing depth of the output.
stride: a python integer denoting the stride of the layer applied before
the first convolution.
activate_before_residual: a python boolean. If True, then a ReLU is
applied as a first operation on the input x before everything else.
Returns:
A 4 rank Tensor with batch_size = batch size of input, width and height =
width / stride and height / stride of the input and depth = out_filter.
"""
if activate_before_residual:
with tf.variable_scope('common_bn_relu'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_bn_relu'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, self._params.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4,
[1, 1, 1, 1])
with tf.variable_scope('sub3'):
x = self._batch_norm('bn3', x)
x = self._relu(x, self._params.relu_leakiness)
x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
x += orig_x
return x
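  # Hedged shape example: for an input of shape [N, H, W, 64], calling
  # _bottleneck_residual(x, 64, 256, self._stride_arr(2)) applies 1x1/64 (stride 2),
  # 3x3/64 and 1x1/256 convolutions, projects the shortcut with a strided 1x1 conv,
  # and returns a tensor of shape [N, H/2, W/2, 256].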
def _decay(self):
costs = []
for var in tf.trainable_variables():
if var.op.name.find(r'DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.mul(self._params.weight_decay_rate, tf.add_n(costs))
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32,
initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
x = tf.reshape(x, [self._params.batch_size, -1])
w = tf.get_variable(
'DW', [x.get_shape()[1], out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable(
'biases', [out_dim], initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pooling(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class MLPEmbedder(Embedder):
"""Embedder of vectorial data.
The net is a multi-layer perceptron, with ReLU nonlinearities in all layers
except the last one.
"""
def __init__(self, layers, *args, **kwargs):
"""Constructs MLPEmbedder.
Args:
layers: a list of python integers representing layer sizes.
*args: arguments for super constructor.
**kwargs: keyed arguments for super constructor.
"""
super(MLPEmbedder, self).__init__(*args, **kwargs)
self._layers = layers
def build(self, features):
shape = features.get_shape().as_list()
if len(shape) == 3:
features = tf.reshape(features, [shape[0] * shape[1], shape[2]])
x = features
for i, dim in enumerate(self._layers):
with tf.variable_scope('layer_%i' % i):
x = self._fully_connected(x, dim)
if i < len(self._layers) - 1:
x = self._relu(x)
if len(shape) == 3:
x = tf.reshape(x, shape[:-1] + [self._layers[-1]])
return x
def _fully_connected(self, x, out_dim):
w = tf.get_variable(
'DW', [x.get_shape()[1], out_dim],
initializer=tf.variance_scaling_initializer(distribution='uniform'))
b = tf.get_variable(
'biases', [out_dim], initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
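# Hedged usage sketch (layer sizes are illustrative): applied to a
# [batch, time, feature] tensor, e.g.
#   features = tf.placeholder(tf.float32, [4, 10, 8])
#   emb = MLPEmbedder(layers=[64, 32]).build(features)
# the input is flattened to [40, 8], passed through fc(64) + ReLU and fc(32),
# and reshaped back to [4, 10, 32].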
class SmallNetworkEmbedder(Embedder):
"""Embedder for image like observations.
The network is comprised of multiple conv layers and a fully connected layer
at the end. The number of conv layers and the parameters are configured from
params.
"""
def __init__(self, params, *args, **kwargs):
"""Constructs the small network.
Args:
params: params should be tf.hparams type. params need to have a list of
conv_sizes, conv_strides, conv_channels. The length of these lists
should be equal to each other and to the number of conv layers in the
network. Plus, it also needs to have boolean variable named to_one_hot
which indicates whether the input should be converted to one hot or not.
The size of the fully connected layer is specified by
params.embedding_size.
*args: The rest of the parameters.
      **kwargs: the rest of the parameters.
Raises:
ValueError: If the length of params.conv_strides, params.conv_sizes, and
params.conv_channels are not equal.
"""
super(SmallNetworkEmbedder, self).__init__(*args, **kwargs)
self._params = params
if len(self._params.conv_sizes) != len(self._params.conv_strides):
raise ValueError(
'Conv sizes and strides should have the same length: {} != {}'.format(
len(self._params.conv_sizes), len(self._params.conv_strides)))
if len(self._params.conv_sizes) != len(self._params.conv_channels):
raise ValueError(
'Conv sizes and channels should have the same length: {} != {}'.
format(len(self._params.conv_sizes), len(self._params.conv_channels)))
def build(self, images):
"""Builds the embedder with the given speicifcation.
Args:
images: a tensor that contains the input images which has the shape of
NxTxHxWxC where N is the batch size, T is the maximum length of the
sequence, H and W are the height and width of the images and C is the
number of channels.
Returns:
A tensor that is the embedding of the images.
"""
shape = images.get_shape().as_list()
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(self._params.weight_decay_rate),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
# convert the image to one hot if needed.
if self._params.to_one_hot:
net = tf.one_hot(
tf.squeeze(tf.to_int32(images), axis=[-1]),
self._params.one_hot_length)
else:
net = images
p = self._params
# Adding conv layers with the specified configurations.
for conv_id, kernel_stride_channel in enumerate(
zip(p.conv_sizes, p.conv_strides, p.conv_channels)):
kernel_size, stride, channels = kernel_stride_channel
net = slim.conv2d(
net,
channels, [kernel_size, kernel_size],
stride,
scope='conv_{}'.format(conv_id + 1))
net = slim.flatten(net)
net = slim.fully_connected(net, self._params.embedding_size, scope='fc')
output = tf.reshape(net, [shape[0], shape[1], -1])
return output
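# Hedged configuration sketch: the params argument is assumed to be an
# HParams-like namespace; the field values below are illustrative only.
#   params = tf.contrib.training.HParams(
#       conv_sizes=[3, 3], conv_strides=[2, 2], conv_channels=[16, 32],
#       embedding_size=128, weight_decay_rate=1e-4,
#       to_one_hot=False, one_hot_length=0)
#   embedder = SmallNetworkEmbedder(params)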
class ResNet50Embedder(Embedder):
"""Uses ResNet50 to embed input images."""
def build(self, images):
"""Builds a ResNet50 embedder for the input images.
It assumes that the range of the pixel values in the images tensor is
[0,255] and should be castable to tf.uint8.
Args:
images: a tensor that contains the input images which has the shape of
NxTxHxWx3 where N is the batch size, T is the maximum length of the
sequence, H and W are the height and width of the images and C is the
number of channels.
Returns:
The embedding of the input image with the shape of NxTxL where L is the
embedding size of the output.
Raises:
ValueError: if the shape of the input does not agree with the expected
shape explained in the Args section.
"""
shape = images.get_shape().as_list()
if len(shape) != 5:
raise ValueError(
'The tensor shape should have 5 elements, {} is provided'.format(
len(shape)))
if shape[4] != 3:
raise ValueError('Three channels are expected for the input image')
images = tf.cast(images, tf.uint8)
images = tf.reshape(images,
[shape[0] * shape[1], shape[2], shape[3], shape[4]])
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
def preprocess_fn(x):
x = tf.expand_dims(x, 0)
x = tf.image.resize_bilinear(x, [299, 299],
align_corners=False)
return(tf.squeeze(x, [0]))
images = tf.map_fn(preprocess_fn, images, dtype=tf.float32)
net, _ = resnet_v2.resnet_v2_50(
images, is_training=False, global_pool=True)
output = tf.reshape(net, [shape[0], shape[1], -1])
return output
class IdentityEmbedder(Embedder):
"""This embedder just returns the input as the output.
  Used for modalities where the embedding of the modality is the same as the
modality itself. For example, it can be used for one_hot goal.
"""
def build(self, images):
return images
| cshallue/models | research/cognitive_planning/embedders.py | Python | apache-2.0 | 19,820 | 0.006105 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteTask
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_DeleteTask_sync]
from google.cloud import dataplex_v1
def sample_delete_task():
# Create a client
client = dataplex_v1.DataplexServiceClient()
# Initialize request argument(s)
request = dataplex_v1.DeleteTaskRequest(
name="name_value",
)
# Make the request
operation = client.delete_task(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dataplex_v1_generated_DataplexService_DeleteTask_sync]
| googleapis/python-dataplex | samples/generated_samples/dataplex_v1_generated_dataplex_service_delete_task_sync.py | Python | apache-2.0 | 1,521 | 0.000657 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.vcard_enabled'
db.add_column(u'aldryn_people_person', 'vcard_enabled',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.vcard_enabled'
db.delete_column(u'aldryn_people_person', 'vcard_enabled')
models = {
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
u'aldryn_people.grouptranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'GroupTranslation', 'db_table': "u'aldryn_people_group_translation'"},
'company_description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Group']"})
},
u'aldryn_people.peopleplugin': {
'Meta': {'object_name': 'PeoplePlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'group_by_group': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'people': ('sortedm2m.fields.SortedManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'show_links': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "'standard'", 'max_length': '50'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'aldryn_people.persontranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'PersonTranslation', 'db_table': "u'aldryn_people_person_translation'"},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Person']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_people']
| Venturi/cms | env/lib/python2.7/site-packages/aldryn_people/south_migrations/0013_auto__add_field_person_vcard_enabled.py | Python | gpl-2.0 | 14,400 | 0.007917 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from .base import Product
from .variants import (ProductVariant, PhysicalProduct, ColoredVariant,
StockedProduct)
class Bag(PhysicalProduct, Product, ColoredVariant):
class Meta:
app_label = 'product'
class Shirt(PhysicalProduct, Product, ColoredVariant):
class Meta:
app_label = 'product'
class BagVariant(ProductVariant, StockedProduct):
product = models.ForeignKey(Bag, related_name='variants')
class Meta:
app_label = 'product'
@python_2_unicode_compatible
class ShirtVariant(ProductVariant, StockedProduct):
SIZE_CHOICES = (
('xs', pgettext_lazy('Variant size', 'XS')),
('s', pgettext_lazy('Variant size', 'S')),
('m', pgettext_lazy('Variant size', 'M')),
('l', pgettext_lazy('Variant size', 'L')),
('xl', pgettext_lazy('Variant size', 'XL')),
('xxl', pgettext_lazy('Variant size', 'XXL')))
product = models.ForeignKey(Shirt, related_name='variants')
size = models.CharField(
pgettext_lazy('Variant field', 'size'), choices=SIZE_CHOICES,
max_length=3)
class Meta:
app_label = 'product'
def __str__(self):
return '%s (%s)' % (self.product.name, self.size)
| hongquan/saleor | saleor/product/models/products.py | Python | bsd-3-clause | 1,423 | 0 |
#!/usr/bin/env python
'''
Set of analytics based on ssdeep hash.
- compare
        Simple implementation of ssdeep comparisons using a few optimizations
described at the links below
https://www.virusbulletin.com/virusbulletin/2015/11/optimizing-ssdeep-use-scale
http://www.intezer.com/intezer-community-tip-ssdeep-comparisons-with-elasticsearch/
Designed to be run on a regular basis (e.g., nightly).
For each sample that has not run ssdeep analytic, search for samples where
ssdeep.compare > 0 based on chunksize, chunk 7grams, and double-chunk
7grams. Update sample with any matches and mark ssdeep analytic as having
run.
- group
Returns SHA256 hashes of samples grouped based on ssdeep hash.
'''
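# Hedged background note: ssdeep.compare() returns an integer score in the range
# 0-100, where 0 means no measurable similarity and higher values indicate closer
# hashes; the analytics below store these pairwise scores under ssdeep.matches.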
import argparse
import configparser
import json
import os
import sys
from pprint import pprint
import ssdeep
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.join(MS_WD, 'storage') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'storage'))
if MS_WD not in sys.path:
sys.path.insert(0, os.path.join(MS_WD))
import common
import elasticsearch_storage
import multiscanner
class SSDeepAnalytic:
def __init__(self, debug=False):
storage_conf = multiscanner.common.get_config_path(multiscanner.CONFIG, 'storage')
config_object = configparser.SafeConfigParser()
config_object.optionxform = str
config_object.read(storage_conf)
conf = common.parse_config(config_object)
storage_handler = multiscanner.storage.StorageHandler(configfile=storage_conf)
es_handler = None
for handler in storage_handler.loaded_storage:
if isinstance(handler, elasticsearch_storage.ElasticSearchStorage):
es_handler = handler
break
if not es_handler:
            print('[!] ERROR: This analytic only works with ES storage module.')
sys.exit(0)
# probably not ideal...
self.es = es_handler.es
self.index = conf['ElasticSearchStorage']['index']
self.doc_type = 'sample'
self.debug = debug
def ssdeep_compare(self):
# get all of the samples where ssdeep_compare has not been run
# e.g., ssdeepmeta.analyzed == false
query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'bool': {
'must': [
{'match': {'ssdeep.analyzed': 'false'}}
]
}
}
}
page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=query)
records_list = []
while len(page['hits']['hits']) > 0:
for hit in page['hits']['hits']:
records_list.append(hit)
sid = page['_scroll_id']
page = self.es.scroll(scroll_id=sid, scroll='2m')
for new_ssdeep_hit in records_list:
new_ssdeep_hit_src = new_ssdeep_hit.get('_source')
chunksize = new_ssdeep_hit_src.get('ssdeep').get('chunksize')
chunk = new_ssdeep_hit_src.get('ssdeep').get('chunk')
double_chunk = new_ssdeep_hit_src.get('ssdeep').get('double_chunk')
new_sha256 = new_ssdeep_hit_src.get('SHA256')
# build new query for docs that match our optimizations
# https://github.com/intezer/ssdeep-elastic/blob/master/ssdeep_elastic/ssdeep_querying.py#L35
opti_query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'bool': {
'must': [
{
'terms': {
'ssdeep.chunksize': [chunksize, chunksize / 2, chunksize * 2]
}
},
{
'bool': {
'should': [
{
'match': {
'ssdeep.chunk': {
'query': chunk
}
}
},
{
'match': {
'ssdeep.double_chunk': {
'query': double_chunk
}
}
}
],
'minimum_should_match': 1
}
},
{
'bool': {
'must_not': {
'match': {
'SHA256': new_sha256
}
}
}
}
]
}
}
}
# this bool condition isn't working how I expect
# if we have already updated the match dictionary to
# include a hit, don't rerun it for the inverse
# {
# 'bool': {
# 'must_not': {
# 'exists': {
# 'field': 'ssdeep.matches.' + new_sha256
# }
# }
# }
# }
opti_page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=opti_query)
while len(opti_page['hits']['hits']) > 0:
# for each hit, ssdeep.compare != 0; update the matches
for opti_hit in opti_page['hits']['hits']:
opti_hit_src = opti_hit.get('_source')
opti_sha256 = opti_hit_src.get('SHA256')
result = ssdeep.compare(
new_ssdeep_hit_src.get('ssdeep').get('ssdeep_hash'),
opti_hit_src.get('ssdeep').get('ssdeep_hash'))
if self.debug:
print(
new_ssdeep_hit_src.get('SHA256'),
opti_hit_src.get('SHA256'),
result)
msg = {'doc': {'ssdeep': {'matches': {opti_sha256: result}}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=new_ssdeep_hit.get('_id'),
body=json.dumps(msg))
msg = {'doc': {'ssdeep': {'matches': {new_sha256: result}}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=opti_hit.get('_id'),
body=json.dumps(msg))
opti_sid = opti_page['_scroll_id']
opti_page = self.es.scroll(scroll_id=opti_sid, scroll='2m')
# analytic has run against sample, set ssdeep.analyzed = true
msg = {'doc': {'ssdeep': {'analyzed': 'true'}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=new_ssdeep_hit.get('_id'),
body=json.dumps(msg))
def ssdeep_group(self):
        # get all of the samples that already have ssdeep matches recorded,
        # e.g., where the ssdeep.matches field exists
query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'exists': {
'field': 'ssdeep.matches'
}
}
}
page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=query)
records = {}
while len(page['hits']['hits']) > 0:
for hit in page['hits']['hits']:
hit_src = hit.get('_source')
records[hit_src.get('SHA256')] = hit_src.get('ssdeep', {}) \
.get('matches', {})
sid = page['_scroll_id']
page = self.es.scroll(scroll_id=sid, scroll='2m')
# inspired by ssdc
groups = []
for sha256_, matches_dict in records.items():
in_group = False
for i in range(len(groups)):
                if sha256_ in groups[i]:
in_group = True
continue
should_add = True
for match_hash in groups[i]:
if match_hash not in records.get(sha256_):
should_add = False
if should_add:
groups[i].append(sha256_)
in_group = True
if not in_group:
groups.append([sha256_])
return groups
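    # Hedged illustration of the return value (hashes shortened for readability):
    #   [['aaaa...', 'bbbb...'], ['cccc...']]
    # where each inner list is one group of SHA256 hashes whose ssdeep hashes
    # were found to match each other.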
def main():
parser = argparse.ArgumentParser(description='Script to interact with '
'Multiscanner\'s Elasticsearch datastore to run analytics based on '
'ssdeep hash.')
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Increase output to stdout')
group.add_argument('-c', '--compare', dest='compare', action='store_true',
help='Run ssdeep.compare using a few optimizations based on ssdeep'
' hash structure.')
group.add_argument('-g', '--group', dest='group', action='store_true',
help='Returns group of samples based on ssdeep hash.')
args = parser.parse_args()
ssdeep_analytic = SSDeepAnalytic(debug=args.verbose)
if args.compare:
ssdeep_analytic.ssdeep_compare()
print('[*] Success')
elif args.group:
pprint(ssdeep_analytic.ssdeep_group())
print('[*] Success')
if __name__ == '__main__':
main()
| jmlong1027/multiscanner | analytics/ssdeep_analytics.py | Python | mpl-2.0 | 10,551 | 0.001516 |
# -*- coding: utf-8 -*-
"""
Managing Vocab Caching.
@summary: RDFa parser (distiller)
@requires: U{RDFLib<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
import os, sys, datetime, re
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
import urllib, urlparse, urllib2
from pyRdfa import HTTPError, RDFaError
from pyRdfa.host import MediaTypes, HostLanguage
from pyRdfa.utils import create_file_name, URIOpener, quote_URI
from pyRdfa.options import Options
from pyRdfa import ns_rdfa
from pyRdfa.rdfs import err_outdated_cache
from pyRdfa.rdfs import err_unreachable_vocab
from pyRdfa.rdfs import err_unparsable_Turtle_vocab
from pyRdfa.rdfs import err_unparsable_xml_vocab
from pyRdfa.rdfs import err_unparsable_ntriples_vocab
from pyRdfa.rdfs import err_unparsable_rdfa_vocab
from pyRdfa.rdfs import err_unrecognised_vocab_type
from pyRdfa.rdfs import VocabCachingInfo
# Regular expression object for a general XML application media type
xml_application_media_type = re.compile("application/[a-zA-Z0-9]+\+xml")
from pyRdfa.utils import URIOpener
#===========================================================================================
import cPickle as pickle
# Protocol to be used for pickle files. 0 is good for debug, it stores the data in ASCII; 1 is better for deployment,
# it stores data in binary format. Care should be taken for consistency; when changing from 0 to 1 or back, all
# cached data should be removed/regenerated, otherwise mess may occur...
_Pickle_Protocol = 1
# If I could rely on python 2.5 or 2.6 (or higher) I could use the with...as... idiom for what is below, it
# is indeed nicer. But I cannot...
def _load(fname) :
"""
Load a cached file and return the resulting object
@param fname: file name
"""
	f = None
	try :
		f = open(fname)
		return pickle.load(f)
	finally :
		if f is not None :
			f.close()
def _dump(obj, fname) :
"""
Dump an object into cached file
@param obj: Python object to store
@param fname: file name
"""
	f = None
	try :
		f = open(fname, "w")
		pickle.dump(obj, f, _Pickle_Protocol)
		f.flush()
	finally :
		if f is not None :
			f.close()
#===========================================================================================
class CachedVocabIndex :
"""
Class to manage the cache index. Takes care of finding the vocab directory, and manages the index
to the individual vocab data.
The vocab directory is set to a platform specific area, unless an environment variable
sets it explicitly. The environment variable is "PyRdfaCacheDir"
Every time the index is changed, the index is put back (via pickle) to the directory.
@ivar app_data_dir: directory for the vocabulary cache directory
@ivar index_fname: the full path of the index file on the disc
	@ivar indeces: the in-memory version of the index (a dictionary mapping URI-s to tuples)
@ivar options: the error handler (option) object to send warnings to
@type options: L{options.Options}
@ivar report: whether details on the caching should be reported
@type report: Boolean
@cvar vocabs: File name used for the index in the cache directory
@cvar preference_path: Cache directories for the three major platforms (ie, mac, windows, unix)
	@type preference_path: dictionary, keyed by "mac", "win", and "unix"
@cvar architectures: Various 'architectures' as returned by the python call, and their mapping on one of the major platforms. If an architecture is missing, it is considered to be "unix"
	@type architectures: dictionary, mapping architectures to "mac", "win", or "unix"
"""
# File Name used for the index in the cache directory
vocabs = "cache_index"
# Cache directories for the three major platforms...
preference_path = {
"mac" : "Library/Application Support/pyRdfa-cache",
"win" : "pyRdfa-cache",
"unix" : ".pyRdfa-cache"
}
	# various architectures as returned by the python call, and their mapping on platform. If an architecture is not here, it is considered as unix
architectures = {
"darwin" : "mac",
"nt" : "win",
"win32" : "win",
"cygwin" : "win"
}
def __init__(self, options = None) :
"""
@param options: the error handler (option) object to send warnings to
@type options: L{options.Options}
"""
self.options = options
self.report = (options != None) and options.vocab_cache_report
# This is where the cache files should be
self.app_data_dir = self._give_preference_path()
self.index_fname = os.path.join(self.app_data_dir, self.vocabs)
self.indeces = {}
# Check whether that directory exists.
if not os.path.isdir(self.app_data_dir) :
try :
os.mkdir(self.app_data_dir)
except Exception, e:
(type,value,traceback) = sys.exc_info()
if self.report: options.add_info("Could not create the vocab cache area %s" % value, VocabCachingInfo)
return
else :
# check whether it is at least readable
if not os.access(self.app_data_dir, os.R_OK) :
if self.report: options.add_info("Vocab cache directory is not readable", VocabCachingInfo)
return
if not os.access(self.app_data_dir, os.W_OK) :
if self.report: options.add_info("Vocab cache directory is not writeable, but readable", VocabCachingInfo)
return
if os.path.exists(self.index_fname) :
if os.access(self.index_fname, os.R_OK) :
self.indeces = _load(self.index_fname)
else :
if self.report: options.add_info("Vocab cache index not readable", VocabCachingInfo)
else :
# This is the very initial phase, creation
# of a a new index
if os.access(self.app_data_dir, os.W_OK) :
# This is then put into a pickle file to put the stake in the ground...
try :
_dump(self.indeces, self.index_fname)
except Exception, e:
if self.report: options.add_info("Could not create the vocabulary index %s" % e.msg, VocabCachingInfo)
else :
if self.report: options.add_info("Vocabulary cache directory is not writeable", VocabCachingInfo)
self.cache_writeable = False
def add_ref(self, uri, vocab_reference) :
"""
Add a new entry to the index, possibly removing the previous one.
@param uri: the URI that serves as a key in the index directory
@param vocab_reference: tuple consisting of file name, modification date, and expiration date
"""
# Store the index right away
self.indeces[uri] = vocab_reference
try :
_dump(self.indeces, self.index_fname)
except Exception, e:
(type,value,traceback) = sys.exc_info()
if self.report: self.options.add_info("Could not store the cache index %s" % value, VocabCachingInfo)
def get_ref(self, uri) :
"""
Get an index entry, if available, None otherwise.
The return value is a tuple: file name, modification date, and expiration date
@param uri: the URI that serves as a key in the index directory
"""
if uri in self.indeces :
return tuple(self.indeces[uri])
else :
return None
def _give_preference_path(self) :
"""
Find the vocab cache directory.
"""
from pyRdfa import CACHE_DIR_VAR
if CACHE_DIR_VAR in os.environ :
return os.environ[CACHE_DIR_VAR]
else :
# find the preference path on the architecture
platform = sys.platform
if platform in self.architectures :
system = self.architectures[platform]
else :
system = "unix"
if system == "win" :
# there is a user variable set for that purpose
app_data = os.path.expandvars("%APPDATA%")
return os.path.join(app_data,self.preference_path[system])
else :
return os.path.join(os.path.expanduser('~'),self.preference_path[system])
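	# Hedged worked example: with no PyRdfaCacheDir environment variable set, a
	# user "alice" would typically end up with
	#   mac : /Users/alice/Library/Application Support/pyRdfa-cache
	#   unix: /home/alice/.pyRdfa-cache
	#   win : %APPDATA%\pyRdfa-cache
	# (the exact prefix comes from os.path.expanduser / %APPDATA% at run time).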
#===========================================================================================
class CachedVocab(CachedVocabIndex) :
"""
Cache for a specific vocab. The content of the cache is the graph. These are also the data that are stored
on the disc (in pickled form)
@ivar graph: the RDF graph
@ivar URI: vocabulary URI
@ivar filename: file name (not the complete path) of the cached version
@ivar creation_date: creation date of the cache
@type creation_date: datetime
@ivar expiration_date: expiration date of the cache
@type expiration_date: datetime
@cvar runtime_cache : a run time cache for already 'seen' vocabulary files. Apart from (marginally) speeding up processing, this also prevents recursion
@type runtime_cache : dictionary
"""
def __init__(self, URI, options = None) :
"""
@param URI: real URI for the vocabulary file
@param options: the error handler (option) object to send warnings to
@type options: L{options.Options}
"""
# First see if this particular vocab has been handled before. If yes, it is extracted and everything
# else can be forgotten.
self.uri = URI
(self.filename, self.creation_date, self.expiration_date) = ("",None,None)
self.graph = Graph()
try :
CachedVocabIndex.__init__(self, options)
vocab_reference = self.get_ref(URI)
self.caching = True
except Exception, e :
# what this means is that the caching becomes impossible through some system error...
(type,value,traceback) = sys.exc_info()
if self.report: options.add_info("Could not access the vocabulary cache area %s" % value, VocabCachingInfo, URI)
vocab_reference = None
self.caching = False
if vocab_reference == None :
# This has never been cached before
if self.report: options.add_info("No cache exists for %s, generating one" % URI, VocabCachingInfo)
self._get_vocab_data(newCache = True)
# Store all the cache data unless caching proves to be impossible
if self.caching :
self.filename = create_file_name(self.uri)
self._store_caches()
if self.report:
options.add_info("Generated a cache for %s, with an expiration date of %s" % (URI,self.expiration_date), VocabCachingInfo, URI)
else :
(self.filename, self.creation_date, self.expiration_date) = vocab_reference
if self.report: options.add_info("Found a cache for %s, expiring on %s" % (URI,self.expiration_date), VocabCachingInfo)
# Check if the expiration date is still away
if options.bypass_vocab_cache == False and datetime.datetime.utcnow() <= self.expiration_date :
# We are fine, we can just extract the data from the cache and we're done
if self.report: options.add_info("Cache for %s is still valid; extracting the data" % URI, VocabCachingInfo)
fname = os.path.join(self.app_data_dir, self.filename)
try :
self.graph = _load(fname)
except Exception, e :
                    # what this means is that reading the cache failed through some system error...
(type,value,traceback) = sys.exc_info()
sys.excepthook(type,value,traceback)
if self.report: options.add_info("Could not access the vocab cache %s (%s)" % (value,fname), VocabCachingInfo, URI)
else :
if self.report :
if options.bypass_vocab_cache == True :
options.add_info("Time check is bypassed; refreshing the cache for %s" % URI, VocabCachingInfo)
else :
options.add_info("Cache timeout; refreshing the cache for %s" % URI, VocabCachingInfo)
# we have to refresh the graph
if self._get_vocab_data(newCache = False) == False :
# bugger; the cache could not be refreshed, using the current one, and setting the cache artificially
# to be valid for the coming hour, hoping that the access issues will be resolved by then...
if self.report:
options.add_info("Could not refresh vocabulary cache for %s, using the old cache, extended its expiration time by an hour (network problems?)" % URI, VocabCachingInfo, URI)
fname = os.path.join(self.app_data_dir, self.filename)
try :
self.graph = _load(fname)
self.expiration_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
except Exception, e :
                    # what this means is that reading the cache failed through some system error...
(type,value,traceback) = sys.exc_info()
sys.excepthook(type,value,traceback)
if self.report: options.add_info("Could not access the vocabulary cache %s (%s)" % (value,fname), VocabCachingInfo, URI)
self.creation_date = datetime.datetime.utcnow()
if self.report:
options.add_info("Generated a new cache for %s, with an expiration date of %s" % (URI,self.expiration_date), VocabCachingInfo, URI)
self._store_caches()
def _get_vocab_data(self, newCache = True) :
"""Just a macro like function to get the data to be cached"""
from pyRdfa.rdfs.process import return_graph
(self.graph, self.expiration_date) = return_graph(self.uri, self.options, newCache)
def _store_caches(self) :
"""Called if the creation date, etc, have been refreshed or new, and
all content must be put into a cache file
"""
# Store the cached version of the vocabulary file
fname = os.path.join(self.app_data_dir, self.filename)
try :
_dump(self.graph, fname)
except Exception, e :
(type,value,traceback) = sys.exc_info()
if self.report : self.options.add_info("Could not write cache file %s (%s)", (fname,value), VocabCachingInfo, self.uri)
# Update the index
self.add_ref(self.uri,(self.filename, self.creation_date, self.expiration_date))
#########################################################################################################################################
def offline_cache_generation(args) :
"""Generate a cache for the vocabulary in args.
@param args: array of vocabulary URIs.
"""
class LocalOption :
def __init__(self) :
self.vocab_cache_report = True
def pr(self, wae, txt, warning_type, context) :
print "===="
if warning_type != None : print warning_type
print wae + ": " + txt
if context != None: print context
print "===="
def add_warning(self, txt, warning_type=None, context=None) :
"""Add a warning to the processor graph.
@param txt: the warning text.
@keyword warning_type: Warning Class
@type warning_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
"""
self.pr("Warning",txt,warning_type,context)
def add_info(self, txt, info_type=None, context=None) :
"""Add an informational comment to the processor graph.
@param txt: the information text.
@keyword info_type: Info Class
@type info_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
"""
self.pr("Info",txt,info_type,context)
def add_error(self, txt, err_type=None, context=None) :
"""Add an error to the processor graph.
@param txt: the information text.
@keyword err_type: Error Class
@type err_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
"""
self.pr("Error",txt,err_type,context)
for uri in args :
# This should write the cache
# print ">>>>> Writing Cache <<<<<"
        writ = CachedVocab(uri, options=LocalOption())
# Now read it back and print the content for tracing
# print ">>>>> Reading Cache <<<<<"
        rd = CachedVocab(uri, options=LocalOption())
# print "URI: " + uri
# print "default vocab: " + rd.vocabulary
# print "terms: ",
# print rd.terms
# print "prefixes: ",
# print rd.ns
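# Illustrative invocation of the helper above (the vocabulary URI is hypothetical):
#   offline_cache_generation(["http://www.w3.org/1999/xhtml/vocab"])
# builds the cache once and then re-reads it, exercising both the write and read paths.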
|
RDFLib/PyRDFa
|
pyRdfa/rdfs/cache.py
|
Python
|
bsd-3-clause
| 15,733 | 0.038075 |
import moe
from moe.easy_interface.experiment import Experiment
from moe.easy_interface.simple_endpoint import gp_next_points, gp_hyper_opt
import pymbar
import seaborn as sns
import scipy.interpolate
import pymc
import sklearn.gaussian_process
import os
import pandas as pd
import glob
import numpy as np
keys = ["q0", "sigma0"]
data = pd.read_hdf("./symmetric.h5", 'data')
indexed_data = data.set_index(keys + ["temperature"])
q0 = pymc.Uniform("q0", 0.4, 0.8)
sigma0 = pymc.Uniform("sigma0", 0.2, 0.3)
temperatures = [280, 300, 320]
measurements = np.array([1.000998, 1.043560, 1.084166])
relative_error = 0.001
def objective(q0_val, sigma0_val):
variables = []
q0.value = q0_val
sigma0.value = sigma0_val
print(q0.value)
print(sigma0.value)
for k, temperature in enumerate(temperatures):
observed = measurements[k]
predicted = indexed_data.density.ix[(q0_val, sigma0_val, temperature)]
tau = (observed * relative_error) ** -2.
var = pymc.Normal("obs_%d" % k, mu=predicted, tau=tau, observed=True, value=observed)
print(predicted, observed, tau, var.logp)
variables.append(var)
model = pymc.MCMC(variables)
return model.logp
a, b = data[keys].iloc[0].values
logp = objective(a, b)
get_bounds = lambda variable: (variable.parents["lower"], variable.parents["upper"])
experiment_bounds = [get_bounds(q0), get_bounds(sigma0)]
exp = Experiment(experiment_bounds)
for (q0_val, sigma0_val) in data.set_index(keys).index:
value = objective(q0_val, sigma0_val)
print(q0_val, sigma0_val, value)
error = 0.001
exp.historical_data.append_sample_points([[(q0_val, sigma0_val), value, error]])
covariance_info = gp_hyper_opt(exp.historical_data.to_list_of_sample_points())
next_point_to_sample = gp_next_points(exp, covariance_info=covariance_info)
print(next_point_to_sample)
|
kyleabeauchamp/DBayes
|
dbayes/analysis/test_moe.py
|
Python
|
gpl-2.0
| 1,864 | 0.004292 |
from keras.models import Sequential
from keras.layers import convolutional
from keras.layers.core import Dense, Flatten
from SGD_exponential_decay import SGD_exponential_decay as SGD
### Parameters obtained from paper ###
K = 152 # depth of convolutional layers
LEARNING_RATE = .003 # initial learning rate
DECAY = 8.664339379294006e-08 # rate of exponential learning_rate decay
class value_trainer:
def __init__(self):
self.model = Sequential()
self.model.add(convolutional.Convolution2D(input_shape=(49, 19, 19), nb_filter=K, nb_row=5, nb_col=5,
init='uniform', activation='relu', border_mode='same'))
for i in range(2,13):
self.model.add(convolutional.Convolution2D(nb_filter=K, nb_row=3, nb_col=3,
init='uniform', activation='relu', border_mode='same'))
self.model.add(convolutional.Convolution2D(nb_filter=1, nb_row=1, nb_col=1,
init='uniform', activation='linear', border_mode='same'))
self.model.add(Flatten())
self.model.add(Dense(256,init='uniform'))
self.model.add(Dense(1,init='uniform',activation="tanh"))
sgd = SGD(lr=LEARNING_RATE, decay=DECAY)
self.model.compile(loss='mean_squared_error', optimizer=sgd)
def get_samples(self):
# TODO non-terminating loop that draws training samples uniformly at random
pass
def train(self):
# TODO use self.model.fit_generator to train from data source
pass
if __name__ == '__main__':
trainer = value_trainer()
# TODO command line instantiation
|
wrongu/AlphaGo
|
AlphaGo/models/value.py
|
Python
|
mit
| 1,711 | 0.01052 |
#!/usr/bin/env python
import os
import argparse
from subprocess import call
admin_username = 'admin'
admin_password = os.environ['MONGO_ADMIN_PASSWORD']
parser=argparse.ArgumentParser()
parser.add_argument("-d", "--db-name", help="the DB to create the user in", required=True)
parser.add_argument("-c", "--collection", help="the collection to index", required=True)
parser.add_argument("-i", "--index-definition", help="the index definition", required=True)
args = parser.parse_args()
reindex_js = "db.getSiblingDB('" + args.db_name + "').getCollection('" + args.collection + "').ensureIndex( " + args.index_definition + " );"
print 'Creating index'
call(["/usr/bin/mongo","admin","-u",admin_username,"-p",admin_password,"--authenticationDatabase","admin","--eval",reindex_js])
|
ministryofjustice/opg-docker
|
mongodb/docker/opt/reindex_database.py
|
Python
|
mit
| 784 | 0.019133 |
import os
from clang.cindex import Config
if 'CLANG_LIBRARY_PATH' in os.environ:
Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
from clang.cindex import LinkageKind
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
import unittest
class TestLinkage(unittest.TestCase):
def test_linkage(self):
"""Ensure that linkage specifers are available on cursors"""
tu = get_tu("""
void foo() { int no_linkage; }
static int internal;
namespace { struct unique_external_type {} }
unique_external_type unique_external;
extern int external;
""", lang = 'cpp')
no_linkage = get_cursor(tu.cursor, 'no_linkage')
self.assertEqual(no_linkage.linkage, LinkageKind.NO_LINKAGE)
internal = get_cursor(tu.cursor, 'internal')
self.assertEqual(internal.linkage, LinkageKind.INTERNAL)
unique_external = get_cursor(tu.cursor, 'unique_external')
self.assertEqual(unique_external.linkage, LinkageKind.UNIQUE_EXTERNAL)
external = get_cursor(tu.cursor, 'external')
self.assertEqual(external.linkage, LinkageKind.EXTERNAL)
|
endlessm/chromium-browser
|
third_party/llvm/clang/bindings/python/tests/cindex/test_linkage.py
|
Python
|
bsd-3-clause
| 1,175 | 0.001702 |
"""
Classes for char-to-int mapping and int-to-int mapping.
:Author: James Taylor (james@bx.psu.edu)
The char-to-int mapping can be used to translate a list of strings
over some alphabet to a single int array (example for encoding a multiple
sequence alignment).
The int-to-int mapping is particularly useful for creating partitions,
and provides methods to merge/split symbols in the output mapping.
The two forms of mapping can be combined, for example to encode a
multiple sequence alignment in a reduced alphabet defined by a partition
of alignment columns. Many of the helper methods provided are for
solving such alignment oriented problems.
This code was originally written for the `ESPERR`_ project which includes
software for searching for alignment encodings that work well for specific
classification problems using various Markov chain classifiers over the
reduced encodings.
Most of the core implementation is in the pyrex/C extension
"_seqmapping.pyx" for performance reasons (specifically to avoid the
excessive bounds checking that would make a sequence/array lookup heavy
problem like this slow in pure python).
.. _ESPERR: http://www.bx.psu.edu/projects/esperr/
"""
from ._seqmapping import (
CharToIntArrayMapping,
IntToIntMapping,
)
# Char->Int mapping for DNA characters with missing data
DNA = CharToIntArrayMapping()
DNA.set_mapping("a", 0)
DNA.set_mapping("A", 0)
DNA.set_mapping("c", 1)
DNA.set_mapping("C", 1)
DNA.set_mapping("g", 2)
DNA.set_mapping("G", 2)
DNA.set_mapping("t", 3)
DNA.set_mapping("T", 3)
DNA.set_mapping("-", 4)
DNA.set_mapping("*", 5)
# Creating mappings
def alignment_mapping_from_file(f, char_mapping=DNA):
"""
Create a mapping from a file of alignment columns.
"""
columns, symbols = [], []
for line in f:
column, symbol = line.split()
columns.append(column)
symbols.append(int(symbol))
align_count = len(columns[0])
mapping = IntToIntMapping(char_mapping.get_out_size() ** align_count)
for column, symbol in zip(columns, symbols):
index = char_mapping.translate_list(list(column))[0]
mapping.set_mapping(index, symbol)
return align_count, mapping
def second_mapping_from_file(f, first_mapping, char_mapping=DNA):
columns, symbols = [], []
for line in f:
column, symbol = line.split()
columns.append(column)
symbols.append(int(symbol))
mapping = IntToIntMapping(first_mapping.get_out_size())
for column, symbol in zip(columns, symbols):
index = char_mapping.translate_list(list(column))[0]
if first_mapping[index] >= 0:
mapping.set_mapping(first_mapping[index], symbol)
return mapping
def identity_mapping(size):
mapping = IntToIntMapping(size)
for i in range(size):
mapping.set_mapping(i, i)
return mapping
|
bxlab/bx-python
|
lib/bx/seqmapping.py
|
Python
|
mit
| 2,856 | 0 |
from distutils.core import setup
import yavd
setup (
name = 'YAVD',
version = yavd.__version__,
description= 'Download videos from Youtube and others.',
author = '10n1z3d',
author_email = '10n1z3d@w.cn',
url = '',
license = 'GPLv3',
packages = ['YAVD'],
data_files = [('YAVD/', ['README', 'COPYING', 'TODO'])]
)
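# Typical distutils usage from the project root (illustrative):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install the YAVD package and its data files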
|
10n1z3d/YAVD
|
setup.py
|
Python
|
gpl-3.0
| 391 | 0.046036 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Place', fields ['place_id']
db.create_index('storybase_geo_place', ['place_id'])
# Adding index on 'Location', fields ['location_id']
db.create_index('storybase_geo_location', ['location_id'])
def backwards(self, orm):
# Removing index on 'Location', fields ['location_id']
db.delete_index('storybase_geo_location', ['location_id'])
# Removing index on 'Place', fields ['place_id']
db.delete_index('storybase_geo_place', ['place_id'])
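    # Applied or reverted with South in the usual way, e.g. (hypothetical project setup):
    #   python manage.py migrate storybase_geo 0004_auto
    #   python manage.py migrate storybase_geo 0003   # rolls the new indexes back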
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
}
}
complete_apps = ['storybase_geo']
|
denverfoundation/storybase
|
apps/storybase_geo/migrations/0004_auto.py
|
Python
|
mit
| 7,705 | 0.007787 |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
from oslo_config import cfg
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
constants as agent_consts)
from neutron.services.trunk.drivers.openvswitch import driver
from neutron.tests import base
GEN_TRUNK_BR_NAME_PATCH = (
'neutron.services.trunk.drivers.openvswitch.utils.gen_trunk_br_name')
class OVSDriverTestCase(base.BaseTestCase):
def test_driver_creation(self):
ovs_driver = driver.OVSDriver.create()
self.assertFalse(ovs_driver.is_loaded)
self.assertEqual(driver.NAME, ovs_driver.name)
self.assertEqual(driver.SUPPORTED_INTERFACES, ovs_driver.interfaces)
self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES,
ovs_driver.segmentation_types)
self.assertEqual(constants.AGENT_TYPE_OVS, ovs_driver.agent_type)
self.assertFalse(ovs_driver.can_trunk_bound_port)
self.assertTrue(
ovs_driver.is_agent_compatible(constants.AGENT_TYPE_OVS))
self.assertTrue(
ovs_driver.is_interface_compatible(driver.SUPPORTED_INTERFACES[0]))
def test_driver_is_loaded(self):
cfg.CONF.set_override('mechanism_drivers',
'openvswitch', group='ml2')
ovs_driver = driver.OVSDriver.create()
self.assertTrue(ovs_driver.is_loaded)
def test_driver_is_not_loaded(self):
cfg.CONF.set_override('core_plugin', 'my_foo_plugin')
ovs_driver = driver.OVSDriver.create()
self.assertFalse(ovs_driver.is_loaded)
@mock.patch(GEN_TRUNK_BR_NAME_PATCH)
def test_vif_details_bridge_name_handler_registration(self,
mock_gen_br_name):
driver.register()
mock_gen_br_name.return_value = 'fake-trunk-br-name'
test_trigger = mock.Mock()
registry.notify(agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ,
test_trigger, **{'port': {'trunk_details':
{'trunk_id': 'foo'}}})
test_trigger.assert_called_once_with('fake-trunk-br-name')
|
sebrandon1/neutron
|
neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py
|
Python
|
apache-2.0
| 2,829 | 0 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import finufft
from finufft import interface
import numpy as np
import pytest
__all__ = [
"test_nufft1d1", "test_nufft1d2", "test_nufft1d3",
]
def test_nufft1d1(seed=42, iflag=1):
np.random.seed(seed)
ms = int(1e3)
n = int(2e3)
tol = 1.0e-9
x = np.random.uniform(-np.pi, np.pi, n)
c = np.random.uniform(-1.0, 1.0, n) + 1.0j*np.random.uniform(-1.0, 1.0, n)
f = finufft.nufft1d1(x, c, ms, eps=tol, iflag=iflag)
# Make sure that this also works with other values of 'fftw'
f = finufft.nufft1d1(x, c, ms, eps=tol, iflag=iflag,
fftw=interface.FFTWOptions.measure)
with pytest.raises(TypeError):
f = finufft.nufft1d1(x, c, ms, eps=tol, iflag=iflag, fftw=100)
f0 = interface.dirft1d1(x, c, ms, iflag=iflag)
assert np.all(np.abs((f - f0) / f0) < 1e-6)
def test_nufft1d2(seed=42, iflag=1):
np.random.seed(seed)
ms = int(1e3)
n = int(2e3)
tol = 1.0e-9
x = np.random.uniform(-np.pi, np.pi, n)
c = np.random.uniform(-1.0, 1.0, n) + 1.0j*np.random.uniform(-1.0, 1.0, n)
f = finufft.nufft1d1(x, c, ms, eps=tol, iflag=iflag)
c = finufft.nufft1d2(x, f, eps=tol, iflag=iflag)
c0 = interface.dirft1d2(x, f, iflag=iflag)
assert np.all(np.abs((c - c0) / c0) < 1e-6)
def test_nufft1d3(seed=42, iflag=1):
np.random.seed(seed)
ms = int(1e3)
n = int(2e3)
tol = 1.0e-9
x = np.random.uniform(-np.pi, np.pi, n)
c = np.random.uniform(-1.0, 1.0, n) + 1.0j*np.random.uniform(-1.0, 1.0, n)
s = 0.5 * n * (1.7 + np.random.uniform(-1.0, 1.0, ms))
f = finufft.nufft1d3(x, c, s, eps=tol, iflag=iflag)
f0 = interface.dirft1d3(x, c, s, iflag=iflag)
assert np.all(np.abs((f - f0) / f0) < 1e-6)
|
dfm/python-finufft
|
tests/test_1d.py
|
Python
|
apache-2.0
| 1,814 | 0.000551 |
# Write a function to delete a node (except the tail) in a singly linked list,
# given only access to that node.
#
# Supposed the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node
# with value 3, the linked list should become 1 -> 2 -> 4 after calling your function.
#
# time: O(1)
# space: O(1)
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteNode2(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
curt = node
prev = None
while curt.next is not None:
curt.val = curt.next.val
prev = curt
curt = curt.next
if prev is not None:
prev.next = None
return
def deleteNode1(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
curt = node
while curt.next is not None:
curt.val = curt.next.val
if curt.next.next is None:
curt.next = None
break
curt = curt.next
return
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
return
if __name__ == '__main__':
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n1.next = n2
n2.next = n3
n3.next = n4
sol = Solution()
sol.deleteNode(n1)
print n1.val, n1.next.val, n1.next.next.val
try:
print n1.next.next.next.val
except:
print 'None Type!'
pass
|
RobinCPC/algorithm-practice
|
LinkedList/deleteNode.py
|
Python
|
mit
| 1,861 | 0.005911 |
# vim: set tw=0:
from django.conf.urls import url, include
from django.core.urlresolvers import RegexURLPattern
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.authtoken.views import obtain_auth_token
from . import views
def format_patterns(urlpatterns):
"If a URL pattern ends in a slash, it should be able to be rendered as different types"
suffixes = ['json', 'jsonld', 'jsonld-browse', 'ttl', 'ttl-browse']
ret = []
for urlpattern in urlpatterns:
if isinstance(urlpattern, RegexURLPattern):
pattern = urlpattern.regex.pattern
is_empty = pattern == '^$'
if is_empty or pattern.endswith('/$'):
regex = '^' if is_empty else urlpattern.regex.pattern[:-2]
view = urlpattern._callback or urlpattern._callback_str
kwargs = urlpattern.default_args
name = urlpattern.name
stripped_url = url(regex, view, kwargs, name)
ret.append(format_suffix_patterns([stripped_url], True, suffixes)[0])
ret.append(urlpattern)
return ret
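# Illustrative effect (hypothetical pattern): url(r'^notes/$', ...) is re-emitted so that
# notes/, notes.json, notes.jsonld, notes.jsonld-browse, notes.ttl and notes.ttl-browse
# all resolve to the same view, matching the suffix list defined above.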
project_specific_patterns = [
### Project (general) ###
url(r'^$', views.ProjectDetail.as_view(), name='projects-detail'),
url(r'^vocab$', views.ProjectAPIDocumentation.as_view(), name='projects-api-documentation'),
url(r'^activity/$', views.ActivityView.as_view(), name='projects-activity'),
### Topics ###
url(r'^topics/$', views.TopicList.as_view(), name='topics-list'),
url(r'^topics/(?P<pk>\d+)/$', views.TopicDetail.as_view(), name='topics-detail'),
url(r'^topics/(?P<pk>\d+)/w/$', views.ENTopicDetail.as_view(), name='topics-wn-detail'),
url(r'^topics/(?P<pk>\d+)/p/$', views.TopicLDDetail.as_view(), name='topics-proj-detail'),
url(r'^topics/(?P<pk>\d+)/confirm_delete$', views.TopicConfirmDelete.as_view(), name='topics-confirm-delete'),
### Notes ###
url(r'^notes/$', views.NoteList.as_view(), name='notes-list'),
url(r'^notes/(?P<pk>\d+)/$', views.NoteDetail.as_view(), name='notes-detail'),
url(r'^notes/(?P<pk>\d+)/confirm_delete$', views.NoteConfirmDelete.as_view(), name='notes-confirm-delete'),
### Documents ###
url(r'^documents/$', views.DocumentList.as_view(), name='documents-list'),
url(r'^documents/(?P<pk>\d+)/$', views.DocumentDetail.as_view(), name='documents-detail'),
url(r'^documents/(?P<pk>\d+)/confirm_delete$', views.DocumentConfirmDelete.as_view(), name='documents-confirm-delete'),
url(r'^documents/(?P<document_id>\d+)/scans/$', views.ScanList.as_view(), name='scans-list'),
url(r'^documents/(?P<document_id>\d+)/scans/(?P<scan_id>\d+)/$', views.ScanDetail.as_view(), name='scans-detail'),
url(r'^documents/(?P<document_id>\d+)/transcript/$', views.Transcript.as_view(), name='transcripts-detail'),
]
project_specific_patterns = format_patterns(project_specific_patterns)
urlpatterns = [
url(r'^$', views.browse.root, name='root'),
url(r'^browse/$', views.browse.browse_items, name='browse'),
url(r'^auth-token/$', obtain_auth_token, name='obtain-auth-token'),
url(r'^search/$', views.SearchView.as_view(), name='search'),
url(r'^notes/$', views.AllProjectNoteList.as_view(), name='all-projects-notes-list'),
url(r'^projects/$', views.ProjectList.as_view(), name='projects-list'),
url(r'^projects/(?P<project_slug>[\w\-]+)/', include(project_specific_patterns)),
url(r'^users/(?P<pk>\d+)/$', views.UserDetail.as_view(), name='users-detail'),
url(r'^users/(?P<pk>\d+)/activity/$', views.ActivityView.as_view(), name='users-activity'),
url(r'^me/$', views.SelfUserDetail.as_view(), name='users-detail-self'),
]
urlpatterns = format_patterns(urlpatterns)
|
editorsnotes/editorsnotes
|
editorsnotes/api/urls.py
|
Python
|
agpl-3.0
| 3,744 | 0.006143 |
from bears.java.InferBear import InferBear
from tests.LocalBearTestHelper import verify_local_bear
good_file = """
class InferGood {
int test() {
String s = null;
return s == null ? 0 : s.length();
}
}
"""
bad_file = """
class InferBad {
int test() {
String s = null;
return s.length();
}
}
"""
InferBearTest = verify_local_bear(InferBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
tempfile_kwargs={'suffix': '.java'})
|
sounak98/coala-bears
|
tests/java/InferBearTest.py
|
Python
|
agpl-3.0
| 585 | 0 |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__all__ = [
'build',
    'build_from_document',
'fix_method_name',
'key2param'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownFileType
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
# Python reserved words.
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while' ]
def fix_method_name(name):
"""Fix method names to avoid reserved word conflicts.
Args:
name: string, method name.
Returns:
The name with a '_' prefixed if the name is a reserved word.
"""
if name in RESERVED_WORDS:
return name + '_'
else:
return name
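# For example, fix_method_name('import') returns 'import_' (since 'import' is in
# RESERVED_WORDS above), while fix_method_name('insert') is returned unchanged.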
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
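# For example, _add_query_parameter('http://example.com/path?a=1', 'userIp', '10.0.0.1')
# returns 'http://example.com/path?a=1&userIp=10.0.0.1' (parameter order may vary since
# the query string is rebuilt from a dict).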
def key2param(key):
"""Converts key names into parameter names.
For example, converting "max-results" -> "max_results"
Args:
key: string, the method key name.
Returns:
A safe method name based on the key name.
"""
result = []
key = list(key)
if not key[0].isalpha():
result.append('x')
for c in key:
if c.isalnum():
result.append(c)
else:
result.append('_')
return ''.join(result)
def build(serviceName,
version,
http=None,
discoveryServiceUrl=DISCOVERY_URI,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Construct a Resource for interacting with an API.
Construct a Resource object for interacting with an API. The serviceName and
version are the names from the Discovery service.
Args:
serviceName: string, name of the service.
version: string, the version of the service.
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
discoveryServiceUrl: string, a URI Template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URI to the discovery
document for that service.
developerKey: string, key obtained from
https://code.google.com/apis/console.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
request.
Returns:
A Resource object with methods for interacting with the service.
"""
params = {
'api': serviceName,
'apiVersion': version
}
if http is None:
http = httplib2.Http()
requested_url = uritemplate.expand(discoveryServiceUrl, params)
# REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
# variable that contains the network address of the client sending the
# request. If it exists then add that to the request for the discovery
# document to avoid exceeding the quota on discovery requests.
if 'REMOTE_ADDR' in os.environ:
requested_url = _add_query_parameter(requested_url, 'userIp',
os.environ['REMOTE_ADDR'])
logger.info('URL being requested: %s' % requested_url)
resp, content = http.request(requested_url)
if resp.status == 404:
raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
version))
if resp.status >= 400:
raise HttpError(resp, content, requested_url)
try:
service = simplejson.loads(content)
except ValueError, e:
logger.error('Failed to parse as JSON: ' + content)
raise InvalidJsonError()
return build_from_document(content, discoveryServiceUrl, http=http,
developerKey=developerKey, model=model, requestBuilder=requestBuilder)
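# Illustrative usage of build() (a sketch; the API name, version and method chain are
# assumptions and require network access to the discovery service):
#   service = build('books', 'v1')
#   request = service.volumes().list(q='python')
#   response = request.execute()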
def build_from_document(
service,
base,
future=None,
http=None,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Create a Resource for interacting with an API.
Same as `build()`, but constructs the Resource object from a discovery
document that is it given, as opposed to retrieving one over HTTP.
Args:
service: string, discovery document.
base: string, base URI for all HTTP requests, usually the discovery URI.
future: string, discovery document with future capabilities (deprecated).
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
developerKey: string, Key for controlling API usage, generated
from the API Console.
model: Model class instance that serializes and de-serializes requests and
responses.
requestBuilder: Takes an http request and packages it up to be executed.
Returns:
A Resource object with methods for interacting with the service.
"""
# future is no longer used.
future = {}
service = simplejson.loads(service)
base = urlparse.urljoin(base, service['basePath'])
schema = Schemas(service)
if model is None:
features = service.get('features', [])
model = JsonModel('dataWrapper' in features)
resource = _createResource(http, base, model, requestBuilder, developerKey,
service, service, schema)
return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
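# For example: _cast(10, 'string') -> '10', _cast('3', 'integer') -> '3',
# _cast(True, 'boolean') -> 'true', and _cast(2.5, 'number') -> '2.5'.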
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
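# For example, _media_size_to_long('2MB') == 2 * 2**20 == 2097152, while a bare
# value such as '500' is returned as int('500') because it has no recognized unit suffix.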
def _createResource(http, baseUrl, model, requestBuilder,
developerKey, resourceDesc, rootDesc, schema):
"""Build a Resource from the API description.
Args:
http: httplib2.Http, Object to make http requests with.
baseUrl: string, base URL for the API. All requests are relative to this
URI.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: class or callable that instantiates an
apiclient.HttpRequest object.
developerKey: string, key obtained from
https://code.google.com/apis/console
resourceDesc: object, section of deserialized discovery document that
describes a resource. Note that the top level discovery document
is considered a resource.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
Returns:
An instance of Resource with all the methods attached for interacting with
that resource.
"""
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self):
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
def createMethod(theclass, methodName, methodDesc, rootDesc):
"""Creates a method for attaching to a Resource.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
pathUrl = methodDesc['path']
httpMethod = methodDesc['httpMethod']
methodId = methodDesc['id']
mediaPathUrl = None
accept = []
maxSize = 0
if 'mediaUpload' in methodDesc:
mediaUpload = methodDesc['mediaUpload']
# TODO(user) Use URLs from discovery once it is updated.
parsed = list(urlparse.urlparse(baseUrl))
basePath = parsed[2]
mediaPathUrl = '/upload' + basePath + pathUrl
accept = mediaUpload['accept']
maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))
if 'parameters' not in methodDesc:
methodDesc['parameters'] = {}
# Add in the parameters common to all methods.
for name, desc in rootDesc.get('parameters', {}).iteritems():
methodDesc['parameters'][name] = desc
# Add in undocumented query parameters.
for name in STACK_QUERY_PARAMETERS:
methodDesc['parameters'][name] = {
'type': 'string',
'location': 'query'
}
if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
methodDesc['parameters']['body'] = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
if 'request' in methodDesc:
methodDesc['parameters']['body'].update(methodDesc['request'])
else:
methodDesc['parameters']['body']['type'] = 'object'
if 'mediaUpload' in methodDesc:
methodDesc['parameters']['media_body'] = {
'description': 'The filename of the media request body.',
'type': 'string',
'required': False,
}
if 'body' in methodDesc['parameters']:
methodDesc['parameters']['body']['required'] = False
argmap = {} # Map from method parameter name to query parameter name
required_params = [] # Required parameters
repeated_params = [] # Repeated parameters
pattern_params = {} # Parameters that must match a regex
query_params = [] # Parameters that will be used in the query string
path_params = {} # Parameters that will be used in the base URL
param_type = {} # The type of the parameter
enum_params = {} # Allowable enumeration values for each parameter
if 'parameters' in methodDesc:
for arg, desc in methodDesc['parameters'].iteritems():
param = key2param(arg)
argmap[param] = arg
if desc.get('pattern', ''):
pattern_params[param] = desc['pattern']
if desc.get('enum', ''):
enum_params[param] = desc['enum']
if desc.get('required', False):
required_params.append(param)
if desc.get('repeated', False):
repeated_params.append(param)
if desc.get('location') == 'query':
query_params.append(param)
if desc.get('location') == 'path':
path_params[param] = param
param_type[param] = desc.get('type', 'string')
for match in URITEMPLATE.finditer(pathUrl):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
path_params[name] = name
if name in query_params:
query_params.remove(name)
def method(self, **kwargs):
# Don't bother with doc string, it will be over-written by createMethod.
for name in kwargs.iterkeys():
if name not in argmap:
raise TypeError('Got an unexpected keyword argument "%s"' % name)
# Remove args that have a value of None.
keys = kwargs.keys()
for name in keys:
if kwargs[name] is None:
del kwargs[name]
for name in required_params:
if name not in kwargs:
raise TypeError('Missing required parameter "%s"' % name)
for name, regex in pattern_params.iteritems():
if name in kwargs:
if isinstance(kwargs[name], basestring):
pvalues = [kwargs[name]]
else:
pvalues = kwargs[name]
for pvalue in pvalues:
if re.match(regex, pvalue) is None:
raise TypeError(
'Parameter "%s" value "%s" does not match the pattern "%s"' %
(name, pvalue, regex))
for name, enums in enum_params.iteritems():
if name in kwargs:
# We need to handle the case of a repeated enum
# name differently, since we want to handle both
# arg='value' and arg=['value1', 'value2']
if (name in repeated_params and
not isinstance(kwargs[name], basestring)):
values = kwargs[name]
else:
values = [kwargs[name]]
for value in values:
if value not in enums:
raise TypeError(
'Parameter "%s" value "%s" is not an allowed value in "%s"' %
(name, value, str(enums)))
actual_query_params = {}
actual_path_params = {}
for key, value in kwargs.iteritems():
to_type = param_type.get(key, 'string')
# For repeated parameters we cast each member of the list.
if key in repeated_params and type(value) == type([]):
cast_value = [_cast(x, to_type) for x in value]
else:
cast_value = _cast(value, to_type)
if key in query_params:
actual_query_params[argmap[key]] = cast_value
if key in path_params:
actual_path_params[argmap[key]] = cast_value
body_value = kwargs.get('body', None)
media_filename = kwargs.get('media_body', None)
if self._developerKey:
actual_query_params['key'] = self._developerKey
model = self._model
# If there is no schema for the response then presume a binary blob.
if methodName.endswith('_media'):
model = MediaModel()
elif 'response' not in methodDesc:
model = RawModel()
headers = {}
headers, params, query, body = model.request(headers,
actual_path_params, actual_query_params, body_value)
expanded_url = uritemplate.expand(pathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
resumable = None
multipart_boundary = ''
if media_filename:
# Ensure we end up with a valid MediaUpload object.
if isinstance(media_filename, basestring):
(media_mime_type, encoding) = mimetypes.guess_type(media_filename)
if media_mime_type is None:
raise UnknownFileType(media_filename)
if not mimeparse.best_match([media_mime_type], ','.join(accept)):
raise UnacceptableMimeTypeError(media_mime_type)
media_upload = MediaFileUpload(media_filename, media_mime_type)
elif isinstance(media_filename, MediaUpload):
media_upload = media_filename
else:
raise TypeError('media_filename must be str or MediaUpload.')
# Check the maxSize
if maxSize > 0 and media_upload.size() > maxSize:
raise MediaUploadSizeError("Media larger than: %s" % maxSize)
# Use the media path uri for media uploads
expanded_url = uritemplate.expand(mediaPathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
if media_upload.resumable():
url = _add_query_parameter(url, 'uploadType', 'resumable')
if media_upload.resumable():
# This is all we need to do for resumable, if the body exists it gets
# sent in the first request, otherwise an empty body is sent.
resumable = media_upload
else:
# A non-resumable upload
if body is None:
# This is a simple media upload
headers['content-type'] = media_upload.mimetype()
body = media_upload.getbytes(0, media_upload.size())
url = _add_query_parameter(url, 'uploadType', 'media')
else:
# This is a multipart/related upload.
msgRoot = MIMEMultipart('related')
          # msgRoot should not write out its own headers
setattr(msgRoot, '_write_headers', lambda self: None)
# attach the body as one part
msg = MIMENonMultipart(*headers['content-type'].split('/'))
msg.set_payload(body)
msgRoot.attach(msg)
# attach the media as the second part
msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
payload = media_upload.getbytes(0, media_upload.size())
msg.set_payload(payload)
msgRoot.attach(msg)
body = msgRoot.as_string()
multipart_boundary = msgRoot.get_boundary()
headers['content-type'] = ('multipart/related; '
'boundary="%s"') % multipart_boundary
url = _add_query_parameter(url, 'uploadType', 'multipart')
logger.info('URL being requested: %s' % url)
return self._requestBuilder(self._http,
model.response,
url,
method=httpMethod,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(argmap) > 0:
docs.append('Args:\n')
# Skip undocumented params and params common to all methods.
skip_parameters = rootDesc.get('parameters', {}).keys()
      skip_parameters.extend(STACK_QUERY_PARAMETERS)
for arg in argmap.iterkeys():
if arg in skip_parameters:
continue
repeated = ''
if arg in repeated_params:
repeated = ' (repeated)'
required = ''
if arg in required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
if '$ref' in paramdesc:
docs.append(
(' %s: object, %s%s%s\n The object takes the'
' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
schema.prettyPrintByName(paramdesc['$ref'])))
else:
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
if 'response' in methodDesc:
if methodName.endswith('_media'):
docs.append('\nReturns:\n The media object as a string.\n\n ')
else:
docs.append('\nReturns:\n An object of the form:\n\n ')
docs.append(schema.prettyPrintSchema(methodDesc['response']))
setattr(method, '__doc__', ''.join(docs))
setattr(theclass, methodName, method)
def createNextMethod(theclass, methodName, methodDesc, rootDesc):
"""Creates any _next methods for attaching to a Resource.
The _next methods allow for easy iteration through list() responses.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
methodId = methodDesc['id'] + '.next'
def methodNext(self, previous_request, previous_response):
"""Retrieves the next page of results.
Args:
previous_request: The request for the previous page.
previous_response: The response from the request for the previous page.
Returns:
A request object that you can call 'execute()' on to request the next
page. Returns None if there are no more items in the collection.
"""
# Retrieve nextPageToken from previous_response
# Use as pageToken in previous_request to create new request.
if 'nextPageToken' not in previous_response:
return None
request = copy.copy(previous_request)
pageToken = previous_response['nextPageToken']
parsed = list(urlparse.urlparse(request.uri))
q = parse_qsl(parsed[4])
# Find and remove old 'pageToken' value from URI
newq = [(key, value) for (key, value) in q if key != 'pageToken']
newq.append(('pageToken', pageToken))
parsed[4] = urllib.urlencode(newq)
uri = urlparse.urlunparse(parsed)
request.uri = uri
logger.info('URL being requested: %s' % uri)
return request
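    # Illustrative pagination loop using a generated *_next method (a sketch; the
    # 'files' collection is a hypothetical example of a list method with paging):
    #   request = service.files().list()
    #   while request is not None:
    #       response = request.execute()
    #       ...                       # consume response['items']
    #       request = service.files().list_next(request, response)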
setattr(theclass, methodName, methodNext)
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
createMethod(Resource, methodName, methodDesc, rootDesc)
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
createMethod(Resource, methodName + '_media', methodDesc, rootDesc)
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
"""Create a method on the Resource to access a nested Resource.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
def methodResource(self):
return _createResource(self._http, self._baseUrl, self._model,
self._requestBuilder, self._developerKey,
methodDesc, rootDesc, schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
setattr(theclass, methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
createResourceMethod(Resource, methodName, methodDesc, rootDesc)
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
createNextMethod(Resource, methodName + '_next',
resourceDesc['methods'][methodName],
methodName)
return Resource()
|
palladius/gcloud
|
packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/discovery.py
|
Python
|
gpl-3.0
| 26,209 | 0.008623 |
from threading import Thread
import time
from scapy.all import *
class AttackProcess(Thread):
def __init__(self, main):
Thread.__init__(self)
self.main = main
self.selected_hosts = []
self.is_attacking = False
def run(self):
while True:
while self.is_attacking:
packets = []
for host in self.main.HostMgr.hosts:
if host.is_selected:
packets.append(host.packet)
time.sleep(1)
send(packets)
                time.sleep(5)
            # sleep briefly while idle so the thread does not busy-wait at 100% CPU
            time.sleep(1)
|
GadgeurX/NetworkLiberator
|
Daemon/AttackProcess.py
|
Python
|
gpl-3.0
| 586 | 0.001706 |
"""
WSGI config for circuit project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "circuit.settings")
application = get_wsgi_application()
|
7Pros/circuit
|
circuit/wsgi.py
|
Python
|
gpl-2.0
| 391 | 0 |
def estriangulo(a,b,c):
print("el primer argumento es:",a)
print("el segundo argumento es:",b)
print("el tercer argumento es:",c)
return a+b>c and a+c>b and c+b>a
def espitagorico(a,b,c):
return a**2+b**2==c**2 or a**2+c**2==b**2 or b**2+c**2==a**2
def esisosceles(a,b,c):
return a==b or a==c or b==c
print(estriangulo(int(input("numero? ")),4,5))
print(espitagorico(3,4,5))
print(esisosceles(3,4,5))
|
jabaier/iic1103.20152.s4
|
estriangulo_prueba.py
|
Python
|
unlicense
| 428 | 0.063084 |
#!/usr/bin/python
import RPi.GPIO as GPIO
import subprocess
# Starting up
GPIO.setmode(GPIO.BCM)
GPIO.setup(3, GPIO.IN)
# Wait until power button is off
# Recommended to use GPIO.BOTH for cases with switch
GPIO.wait_for_edge(3, GPIO.BOTH)
# Shutting down
subprocess.call(['shutdown', '-h', 'now'], shell=False)
|
UBayouski/RaspberryPiPowerButton
|
power_button.py
|
Python
|
mit
| 315 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from mock import patch
from apache.aurora.client.cli.client import AuroraCommandLine
from .util import AuroraClientCommandTest, FakeAuroraCommandContext
from gen.apache.aurora.api.ttypes import GetQuotaResult, ResourceAggregate, Result
class TestGetQuotaCommand(AuroraClientCommandTest):
@classmethod
def setup_mock_quota_call_no_consumption(cls, mock_context):
api = mock_context.get_api('west')
response = cls.create_simple_success_response()
response.result = Result(getQuotaResult=GetQuotaResult(
quota=ResourceAggregate(numCpus=5, ramMb=20480, diskMb=40960),
prodSharedConsumption=None,
prodDedicatedConsumption=None,
nonProdSharedConsumption=None,
nonProdDedicatedConsumption=None
))
api.get_quota.return_value = response
@classmethod
def setup_mock_quota_call_with_consumption(cls, mock_context):
api = mock_context.get_api('west')
response = cls.create_simple_success_response()
response.result = Result(getQuotaResult=GetQuotaResult(
quota=ResourceAggregate(numCpus=5, ramMb=20480, diskMb=40960),
prodSharedConsumption=ResourceAggregate(numCpus=1, ramMb=512, diskMb=1024),
prodDedicatedConsumption=ResourceAggregate(numCpus=2, ramMb=1024, diskMb=2048),
nonProdSharedConsumption=ResourceAggregate(numCpus=3, ramMb=2048, diskMb=4096),
nonProdDedicatedConsumption=ResourceAggregate(numCpus=4, ramMb=4096, diskMb=8192),
))
api.get_quota.return_value = response
def test_get_quota_no_consumption(self):
assert ('Allocated:\n CPU: 5\n RAM: 20.000000 GB\n Disk: 40.000000 GB' ==
self._get_quota(False, ['quota', 'get', 'west/bozo']))
def test_get_quota_with_consumption(self):
expected_output = ('Allocated:\n CPU: 5\n RAM: 20.000000 GB\n Disk: 40.000000 GB\n'
'Production shared pool resources consumed:\n'
' CPU: 1\n RAM: 0.500000 GB\n Disk: 1.000000 GB\n'
'Production dedicated pool resources consumed:\n'
' CPU: 2\n RAM: 1.000000 GB\n Disk: 2.000000 GB\n'
'Non-production shared pool resources consumed:\n'
' CPU: 3\n RAM: 2.000000 GB\n Disk: 4.000000 GB\n'
'Non-production dedicated pool resources consumed:\n'
' CPU: 4\n RAM: 4.000000 GB\n Disk: 8.000000 GB')
assert expected_output == self._get_quota(True, ['quota', 'get', 'west/bozo'])
def test_get_quota_with_no_consumption_json(self):
assert (json.loads('{"quota":{"numCpus":5,"ramMb":20480,"diskMb":40960}}') ==
json.loads(self._get_quota(False, ['quota', 'get', '--write-json', 'west/bozo'])))
def test_get_quota_with_consumption_json(self):
expected_response = json.loads(
'{"quota":{"numCpus":5,"ramMb":20480,"diskMb":40960},'
'"prodSharedConsumption":{"numCpus":1,"ramMb":512,"diskMb":1024},'
'"prodDedicatedConsumption":{"numCpus":2,"ramMb":1024,"diskMb":2048},'
'"nonProdSharedConsumption":{"numCpus":3,"ramMb":2048,"diskMb":4096},'
'"nonProdDedicatedConsumption":{"numCpus":4,"ramMb":4096,"diskMb":8192}}')
assert (expected_response ==
json.loads(self._get_quota(True, ['quota', 'get', '--write-json', 'west/bozo'])))
def test_get_quota_failed(self):
fake_context = FakeAuroraCommandContext()
api = fake_context.get_api('')
api.get_quota.return_value = self.create_error_response()
self._call_get_quota(fake_context, ['quota', 'get', 'west/bozo'])
assert fake_context.get_err() == ['Error retrieving quota for role bozo', '\tWhoops']
def _get_quota(self, include_consumption, command_args):
mock_context = FakeAuroraCommandContext()
if include_consumption:
self.setup_mock_quota_call_with_consumption(mock_context)
else:
self.setup_mock_quota_call_no_consumption(mock_context)
return self._call_get_quota(mock_context, command_args)
def _call_get_quota(self, mock_context, command_args):
with patch('apache.aurora.client.cli.quota.Quota.create_context', return_value=mock_context):
cmd = AuroraCommandLine()
cmd.execute(command_args)
out = '\n'.join(mock_context.get_out())
return out
|
protochron/aurora
|
src/test/python/apache/aurora/client/cli/test_quota.py
|
Python
|
apache-2.0
| 4,844 | 0.0064 |
from .base import (ExtensionArray, # noqa
ExtensionScalarOpsMixin)
from .categorical import Categorical # noqa
from .datetimes import DatetimeArrayMixin # noqa
from .interval import IntervalArray # noqa
from .period import PeriodArrayMixin # noqa
from .timedeltas import TimedeltaArrayMixin # noqa
|
kdebrab/pandas
|
pandas/core/arrays/__init__.py
|
Python
|
bsd-3-clause
| 325 | 0 |
# Copyright (c) 2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
class MacFailedException(Exception):
pass
|
vejeshv/main_project
|
knockknock/MacFailedException.py
|
Python
|
gpl-3.0
| 790 | 0 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of gamesun nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from distutils.core import setup
import sys
import py2exe
import os
import glob
from py2exe.build_exe import py2exe as build_exe
import appInfo
if len(sys.argv) == 1:
sys.argv.append("py2exe")
# sys.argv.append("-q")
manifest_template = '''
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="0.6.8.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false"
/>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
CONTENT_DIRS = [ "media" ]
# EXTRA_FILES = [ "./media/icon16.ico", "./media/icon32.ico" ]
EXTRA_FILES = []
class MediaCollector(build_exe):
def addDirectoryToZip(self, folder):
full = os.path.join(self.collect_dir, folder)
if not os.path.exists(full):
self.mkpath(full)
for f in glob.glob("%s/*" % folder):
if os.path.isdir(f):
self.addDirectoryToZip(f)
else:
name = os.path.basename(f)
self.copy_file(f, os.path.join(full, name))
self.compiled_files.append(os.path.join(folder, name))
def copy_extensions(self, extensions):
#super(MediaCollector, self).copy_extensions(extensions)
build_exe.copy_extensions(self, extensions)
for folder in CONTENT_DIRS:
self.addDirectoryToZip(folder)
for fileName in EXTRA_FILES:
name = os.path.basename(fileName)
self.copy_file(fileName, os.path.join(self.collect_dir, name))
self.compiled_files.append(name)
myOptions = {
"py2exe":{
"compressed": 1,
"optimize": 2,
"ascii": 1,
# "includes":,
"dll_excludes": ["MSVCP90.dll","w9xpopen.exe"],
"bundle_files": 2
}
}
RT_MANIFEST = 24
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
MyTerm_windows = Target(
# used for the versioninfo resource
copyright = "Copywrong All Lefts Unreserved.",
name = appInfo.title,
version = appInfo.version,
description = appInfo.file_name,
author = appInfo.author,
url = appInfo.url,
# what to build
script = "main.py",
dest_base = appInfo.file_name,
    icon_resources = [(1, r"icon\icon.ico")],
other_resources= [(RT_MANIFEST, 1, manifest_template % dict(prog = appInfo.title))]
)
setup(
options = myOptions,
cmdclass= {'py2exe': MediaCollector},
data_files = [("", ["COPYING",]),],
windows = [MyTerm_windows]
)
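# Because "py2exe" is appended to sys.argv above, simply running
# `python setup.py` behaves like `python setup.py py2exe` and builds the
# frozen executable (by default into the dist/ directory).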
|
gamesun/MyTerm-for-YellowStone
|
setup.py
|
Python
|
bsd-3-clause
| 4,648 | 0.010327 |
# -*- coding: utf-8 -*-
"""
Dashboard stuff for admin_tools
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from admin_tools.dashboard import modules, Dashboard, AppIndexDashboard
from admin_tools.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for project.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a link list module for "quick links"
self.children.append(modules.LinkList(
_('Quick links'),
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
children=[
[_('Return to site'), '/'],
[_('Change password'),
reverse('%s:password_change' % site_name)],
[_('Log out'), reverse('%s:logout' % site_name)],
]
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('Applications'),
exclude=('django.contrib.*',),
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
_('Administration'),
models=('django.contrib.*',),
))
# append a recent actions module
self.children.append(modules.RecentActions(_('Recent Actions'), 5))
## append a feed module
#self.children.append(modules.Feed(
#_('Latest Django News'),
#feed_url='http://www.djangoproject.com/rss/weblog/',
#limit=5
#))
## append another link list module for "support".
#self.children.append(modules.LinkList(
#_('Support'),
#children=[
#{
#'title': _('Django documentation'),
#'url': 'http://docs.djangoproject.com/',
#'external': True,
#},
#{
#'title': _('Django "django-users" mailing list'),
#'url': 'http://groups.google.com/group/django-users',
#'external': True,
#},
#{
#'title': _('Django irc channel'),
#'url': 'irc://irc.freenode.net/django',
#'external': True,
#},
#]
#))
class CustomAppIndexDashboard(AppIndexDashboard):
"""
Custom app index dashboard for project.
"""
# we disable title because its redundant with the model list module
title = ''
def __init__(self, *args, **kwargs):
AppIndexDashboard.__init__(self, *args, **kwargs)
# append a model list module and a recent actions module
self.children += [
modules.ModelList(self.app_title, self.models),
modules.RecentActions(
_('Recent Actions'),
include_list=self.get_app_content_types(),
limit=5
)
]
def init_with_context(self, context):
"""
Use this method if you need to access the request context.
"""
return super(CustomAppIndexDashboard, self).init_with_context(context)
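# These dashboards are normally activated through the admin_tools settings,
# for example (the dotted module path shown here is only an illustration):
#   ADMIN_TOOLS_INDEX_DASHBOARD = 'project.dashboard.CustomIndexDashboard'
#   ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'project.dashboard.CustomAppIndexDashboard'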
|
emencia/emencia_paste_djangocms_3
|
emencia_paste_djangocms_3/django_buildout/project/mods_available/admin_tools/dashboard.py
|
Python
|
mit
| 3,336 | 0.014688 |
# Class for a network object.
from NetworkPrimitives import Ip, Mac
from Config import config
from Exceptions import *
import Toolbox
import easysnmp
import requests
import json
import time
from datetime import datetime
import uuid
import geocoder
# Disable security warnings.
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class Host:
def __init__(self, network, ip=None, mac=None, hash=None):
self.serial = uuid.uuid4().int
self.network = network
# Set the timestamp for unix epoch, unless it was set during init.
self.network.add_node(self)
self.network.node[self]['updated'] = 0
self.community = None
# If supplied with an IP address or a MAC address, add those.
if ip:
if type(ip) != Ip:
ip = Ip(ip)
self.addAddress(ip)
if mac:
if type(mac) != Mac:
mac = Mac(mac)
self.addAddress(mac)
print(self.ips)
def __str__(self):
return 'Host:' + str(self.serial)
def __hash__(self):
return self.serial
@property
def ips(self):
if self.mgmntip:
# Always return management ips first.
ips = self.network.typedNeighbors(self, Ip)
ips.remove(self.mgmntip)
return [self.mgmntip] + ips
else:
return sorted(self.network.typedNeighbors(self, Ip))
@property
def macs(self):
return sorted(self.network.typedNeighbors(self, Mac))
@property
def community(self):
return self.network.node[self]['community']
@community.setter
def community(self, community):
self.network.node[self]['community'] = community
@property
def addresses(self):
# Aggregation of all MAC and IP addresses
return self.macs + self.ips
@property
def hostname(self):
try:
return self.network.node[self]['hostname']
except KeyError:
return None
@hostname.setter
def hostname(self, hostname):
self.network.node[self]['hostname'] = hostname
@property
def updated(self):
return self.network.node[self]['updated']
def touch(self):
# Update timestamp on host.
self.network.node[self]['updated'] = Toolbox.timestamp()
@property
def vendor(self):
# Take the first recognizable MAC vendor we find.
for mac in self.macs:
if mac.vendor:
return mac.vendor
return None
@property
def location(self):
try:
return self.network.node[self]['location']
except KeyError:
return None
@property
def coords(self):
# Geocoords lookup to get address for host.
return geocoder.google(self.location).latlng
@property
def lat(self):
return self.coords[0]
@property
def lng(self):
return self.coords[1]
@property
def arpNeighbors(self):
return self.network.findAdj(self, ntype=Host, etype='arp')
@property
def mgmntip(self):
# An IP address that is confirmed to work with this host.
try:
for ip in self.network.typedNeighbors(self, Ip):
edge = self.network[self][ip]
if 'mgmnt' in edge and edge['mgmnt'] == 1:
return ip
# Unless we don't know one.
except TypeError:
# Means that there are no IPs.
pass
return False
def setmgmntip(self, ip, isit):
if isit:
self.network[self][ip]['mgmnt'] = 1
else:
self.network[self][ip]['mgmnt'] = 0
def addAddress(self, address, ifnum=None):
# Add an IP or MAC address.
if not address.local:
if address in self.addresses:
# Add the ifnum, in case it's not there.
self.network.node[address]['ifnum'] = ifnum
else:
# This is a new mac, or at least not attached to this host.
self.network.removeSafely(address)
self.network.add_node(address, ifnum=ifnum)
self.network.add_edge(self, address, etype='owns')
# Associate it with any similarly-numbered IPs.
if ifnum:
for a in self.addresses:
if 'ifnum' in self.network.node[a] and \
self.network.node[a]['ifnum'] == ifnum:
self.network.add_edge(address, a, etype='interface')
def snmpInit(self, ip, community):
print(ip, community)
session = easysnmp.Session(hostname=ip, community=community, version=1, timeout=1)
return session
def snmpwalk(self, mib):
# Walks specified mib
ips = self.ips
# Get a list of communities, starting with any that are known to
# work on this host.
communities = self.network.communities.copy()
if self.community:
# Means we have a functional community string. Use that first.
communities.append(self.community)
communities.reverse()
def scanAllCommunities(ip):
for community in communities:
results = scan(ip, community)
if results:
return results
return False
def scan(ip, community):
session = self.snmpInit(ip, community)
try:
responses = session.walk(mib)
self.community = community
self.setmgmntip(ip, True)
print('Response on', ip, 'with', community)
return responses
except easysnmp.exceptions.EasySNMPNoSuchNameError:
# Probably means that you're hitting the wrong kind of device.
self.community = None
self.setmgmntip(ip, False)
raise
except easysnmp.exceptions.EasySNMPTimeoutError:
# Either the community string is wrong, or the address is dead.
print('No response on', ip, 'with', community)
self.community = None
self.setmgmntip(ip, False)
pass
return False
# First, we try using known-good settings for communicating with this
# host.
if self.mgmntip:
if self.community:
results = scan(self.mgmntip, self.community)
if results:
return results
results = scanAllCommunities(self.mgmntip)
if results:
return results
# If we have no known-good settings, we just iterate over everything.
for ip in ips:
if not Toolbox.ipInNetworks(ip, self.network.inaccessiblenets):
results = scanAllCommunities(ip)
if results:
return results
return False
def getStatusPage(self, path, tries=0):
# Negotiates HTTP auth and JSON decoding for getting data from
# the web interface. Functions by requesting IPs until something gives
# a non-empty response, then authenticates until it gives the correct
# response.
# HTTPS redirect is managed automatically in applicable devices.
# Mostly got its own function for exception handling.
def webrequest(verb, session, url, data=None, tries=0):
if tries < 3:
try:
if verb == 'get':
return session.get(url, verify=False, timeout=2)
elif verb == 'post':
return session.post(url, data=data, verify=False,
timeout=2)
# Requests has like a billion error codes...
except (requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
requests.exceptions.ConnectTimeout):
tries += 1
return webrequest(verb, session, url, data, tries)
return False
def iterCreds(url, i=0):
# Iterate through the list of credentials and return either a
# working json dump or False.
with requests.Session() as websess:
# Make an initial request to establish session cookies, get
# redirected URLS, etc.
                a = webrequest('get', websess, url)
try:
credential = self.network.credentials[i]
                except (IndexError, KeyError):
# We've tried all the keys. Fail out.
return False
b = webrequest('post', websess, a.url, data=credential)
try:
return json.loads(b.text)
except ValueError:
# Not valid JSON. Probably not authenticated.
# Recurse
                    return iterCreds(url, i=i + 1)
# The entirety of the actual function.
for ip in self.ips:
statusurl = 'http://' + ip + '/' + path
status = iterCreds(statusurl)
if status:
return status
return False
def getInterfacePage(self):
# Get the list of network interfaces from the web interface.
data = self.getStatusPage('iflist.cgi?_=' + str(Toolbox.timestamp()))
interfaces = {}
        if data:
for ifdata in data['interfaces']:
interface = {}
try:
# The typing is for consistency with the SNMP data.
interfaces[Mac(ifdata['hwaddr'])] = set([Ip(ifdata['ipv4']['addr'])])
except KeyError:
# Some interfaces won't have an address.
pass
return interfaces
def getBridgePage(self):
# Get the bridge page for applicable radios.
# Data is a big JSON.
        data = self.getStatusPage('brmacs.cgi?brmacs=y&_=' +
                                  str(Toolbox.timestamp()))
        brm = data['brmacs']
        bridges = {}
        # The list is null-terminated; walk it in order until the trailing
        # null entry is reached.
        datum = brm.pop(0)
        while datum:
            try:
                # Attempt to look it up from the existing bridges.
                bridge = bridges[datum['bridge']]
                bridge['interfaces'].add(datum['port'])
                bridge['macs'].add(datum['hwaddr'])
            except KeyError:
                # If the bridge is unknown, initialize it.
                bridge = {}
                # Sets for deduplication.
                bridge['interfaces'] = set([datum['port']])
                bridge['macs'] = set([datum['hwaddr']])
                bridges[datum['bridge']] = bridge
            datum = brm.pop(0)
return bridges
def getSingleSNMPValue(self, mib, indexInstead=False):
try:
responses = self.snmpwalk(mib)
except NonResponsiveError:
return None
try:
# Take the first response.
r = responses.pop()
except AttributeError:
# Responses empty
return None
if indexInstead:
return r.oid_index
return r.value
def scanHostname(self):
mib = '1.3.6.1.2.1.1.5'
hostname = self.getSingleSNMPValue(mib)
# Sanitize
if hostname:
hostname = hostname.encode('ascii', 'ignore').decode()
self.network.node[self]['hostname'] = hostname
return hostname
def scanLocation(self):
mib = '1.3.6.1.2.1.1.6'
location = self.getSingleSNMPValue(mib)
if location:
location = location.encode('ascii', 'ignore').decode()
self.network.node[self]['location'] = location
return location
def scanArpTable(self):
mib = 'ipNetToMediaPhysAddress'
if self.vendor == 'ubiquiti':
return False
else:
print('No vendor established for:', self.macs)
responses = self.snmpwalk(mib)
arps = []
for response in responses:
#print(response)
try:
# Validation occurs in the decoding, just move on if they
# throw assertion errors.
mac = Mac(response.value, encoding='utf-16')
ip = Ip(response.oid_index, encoding='snmp')
# We ignore data points that have to do with locally
# administered MAC addresses.
if not mac.local and not ip.local:
# See if we already have this data somewhere.
self.network.addHostByIp(ip, mac=mac)
self.network.add_edge(ip, mac, etype='interface')
except AssertionError:
# Malformed input is to be ignored.
print('malformed input:', response.value, response.oid_index)
return arps
def scanInterfaces(self):
# We scan the mib to mac addresses, which gives us indexing
# information. We then cross-reference that index against the ips
macmib = '1.3.6.1.2.1.2.2.1.6'
macrs = self.snmpwalk(macmib)
# The MAC address tells us the vendor, which determines some logic.
for macr in macrs:
try:
mac = Mac(macr.value, encoding='utf-16')
ifnum = macr.oid_index
self.addAddress(mac, ifnum=ifnum)
except InputError:
# Empty interfaces are of no interest.
if len(macr.value) > 0:
# But if they're actually malformed, I want to know.
print('invalid mac:', macr.value)
if self.vendor == 'ubiquiti':
# Ubiquity devices don't reply to IF-MIB requests for ip addresses,
# but they will give the data through a web portal.
interfaces = self.getInterfacePage()
else:
# Other hosts are mostly compliant with the IF-MIB.
ipmib = '1.3.6.1.2.1.4.20.1.2'
iprs = self.snmpwalk(ipmib)
for ipr in iprs:
try:
ip = Ip(ipr.oid_index)
ifnum = ipr.value
self.addAddress(ip, ifnum=ifnum)
except InputError:
                    print('invalid ip:', ipr.oid_index)
def to_JSON(self):
return json.dumps(self.__hash__())
def print(self):
try:
print('Host ' + self.hostname + ':')
except TypeError:
# Hostname is not set.
pass
class Interface(str):
def __init__(self, host):
self.network = host.network
self.network.add_edge(self, host)
def print(self):
print('\tInterface:')
for mac in self.macs:
print('\t\tMAC:', mac)
for ip in self.ips:
print('\t\tIP:', ip)
def __hash__(self):
try:
return self.__hash
except AttributeError:
self.__hash = hash(uuid.uuid4())
return self.__hash
def __str__(self):
        return 'Interface({0} {1})'.format(self.mac, self.ips)
@property
def ips(self):
return self.network.findAdj(self, ntype=Ip)
def add_ip(self, ip):
self.network.add_edge(self, ip, etype='interface')
@property
def mac(self):
macs = self.network.findAdj(self, ntype=Mac)
try:
return Toolbox.getUnique(macs)
except IndexError:
return None
except Toolbox.NonUniqueError:
# If there are multiples, just give the first one.
return macs[0]
@mac.setter
def mac(self, mac):
self.network.add_edge(self, mac, etype='interface')
@property
def host(self):
hosts = self.network.findAdj(self, ntype=Host)
return Toolbox.getUnique(hosts)
@host.setter
def host(self, host):
self.network.add_edge(self, host, etype='interface')
@property
def addresses(self):
# Provides a list of all addresses associated with this device.
return self.ips + [self.mac]
@property
def label(self):
        return self.network.node[self]['label']
@label.setter
def label(self, label):
        self.network.node[self]['label'] = label
@property
def speed(self):
        return self.network.node[self]['speed']
@speed.setter
def speed(self, speed):
        self.network.node[self]['speed'] = speed
class BridgedInterface(Interface):
# Essentially, makes MAC non-unique for this interface.
@property
def macs(self):
return self.network.findAdj(self, ntype=Mac)
@property
def mac():
raise AttributeError('BridgedInterfaces have macs, not mac.')
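# Minimal usage sketch (illustrative only; assumes a Network object exposing
# add_node/add_edge, typedNeighbors, communities and credentials as used above):
#
#     host = Host(network, ip='192.0.2.10')
#     host.scanHostname()
#     host.scanInterfaces()
#     host.scanArpTable()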
|
mudbungie/NetExplorer
|
Host.py
|
Python
|
mit
| 17,053 | 0.002346 |
"""Upgrade a virtual server."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer.CLI import virt
import click
@click.command(epilog="""Note: SoftLayer automatically reboots the VS once
upgrade request is placed. The VS is halted until the Upgrade transaction is
completed. However for Network, no reboot is required.""")
@click.argument('identifier')
@click.option('--cpu', type=click.INT, help="Number of CPU cores")
@click.option('--private',
is_flag=True,
help="CPU core will be on a dedicated host server.")
@click.option('--memory', type=virt.MEM_TYPE, help="Memory in megabytes")
@click.option('--network', type=click.INT, help="Network port speed in Mbps")
@environment.pass_env
def cli(env, identifier, cpu, private, memory, network):
"""Upgrade a virtual server."""
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
if not (env.skip_confirmations or formatting.confirm(
"This action will incur charges on your account. "
"Continue?")):
raise exceptions.CLIAbort('Aborted')
if not vsi.upgrade(vs_id,
cpus=cpu,
                       memory=memory / 1024 if memory else None,
nic_speed=network,
public=not private):
raise exceptions.CLIAbort('VS Upgrade Failed')
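# Example invocation (illustrative; assumes the standard `slcli` console entry
# point and a placeholder instance identifier):
#   slcli vs upgrade 123456 --cpu 4 --memory 4096 --network 1000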
|
iftekeriba/softlayer-python
|
SoftLayer/CLI/virt/upgrade.py
|
Python
|
mit
| 1,538 | 0 |
import datetime
import itertools
from hashlib import sha256
from collections import defaultdict
from flanker import mime
from sqlalchemy import (Column, Integer, BigInteger, String, DateTime,
Boolean, Enum, ForeignKey, Index)
from sqlalchemy.dialects.mysql import LONGBLOB
from sqlalchemy.orm import relationship, backref, validates
from sqlalchemy.sql.expression import false
from sqlalchemy.ext.associationproxy import association_proxy
from inbox.util.html import plaintext2html, strip_tags
from inbox.sqlalchemy_ext.util import JSON, json_field_too_long
from inbox.util.addr import parse_mimepart_address_header
from inbox.util.misc import parse_references, get_internaldate
from inbox.models.mixins import HasPublicID, HasRevisions
from inbox.models.base import MailSyncBase
from inbox.models.namespace import Namespace
from inbox.models.category import Category
from inbox.security.blobstorage import encode_blob, decode_blob
from inbox.log import get_logger
log = get_logger()
def _trim_filename(s, mid, max_len=64):
if s and len(s) > max_len:
log.warning('filename is too long, truncating',
mid=mid, max_len=max_len, filename=s)
return s[:max_len - 8] + s[-8:] # Keep extension
return s
class Message(MailSyncBase, HasRevisions, HasPublicID):
@property
def API_OBJECT_NAME(self):
return 'message' if not self.is_draft else 'draft'
namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
index=True, nullable=False)
namespace = relationship(
'Namespace',
lazy='joined',
load_on_pending=True)
# Do delete messages if their associated thread is deleted.
thread_id = Column(Integer, ForeignKey('thread.id', ondelete='CASCADE'),
nullable=False)
thread = relationship(
'Thread',
backref=backref('messages', order_by='Message.received_date',
passive_deletes=True, cascade='all, delete-orphan'))
from_addr = Column(JSON, nullable=False, default=lambda: [])
sender_addr = Column(JSON, nullable=True)
reply_to = Column(JSON, nullable=True, default=lambda: [])
to_addr = Column(JSON, nullable=False, default=lambda: [])
cc_addr = Column(JSON, nullable=False, default=lambda: [])
bcc_addr = Column(JSON, nullable=False, default=lambda: [])
in_reply_to = Column(JSON, nullable=True)
# From: http://tools.ietf.org/html/rfc4130, section 5.3.3,
# max message_id_header is 998 characters
message_id_header = Column(String(998), nullable=True)
# There is no hard limit on subject limit in the spec, but 255 is common.
subject = Column(String(255), nullable=True, default='')
received_date = Column(DateTime, nullable=False, index=True)
size = Column(Integer, nullable=False)
data_sha256 = Column(String(255), nullable=True)
is_read = Column(Boolean, server_default=false(), nullable=False)
is_starred = Column(Boolean, server_default=false(), nullable=False)
# For drafts (both Inbox-created and otherwise)
is_draft = Column(Boolean, server_default=false(), nullable=False)
is_sent = Column(Boolean, server_default=false(), nullable=False)
# DEPRECATED
state = Column(Enum('draft', 'sending', 'sending failed', 'sent'))
_compacted_body = Column(LONGBLOB, nullable=True)
snippet = Column(String(191), nullable=False)
SNIPPET_LENGTH = 191
# A reference to the block holding the full contents of the message
full_body_id = Column(ForeignKey('block.id', name='full_body_id_fk'),
nullable=True)
full_body = relationship('Block', cascade='all, delete')
# this might be a mail-parsing bug, or just a message from a bad client
decode_error = Column(Boolean, server_default=false(), nullable=False,
index=True)
# In accordance with JWZ (http://www.jwz.org/doc/threading.html)
references = Column(JSON, nullable=True)
# Only used for drafts.
version = Column(Integer, nullable=False, server_default='0')
# only on messages from Gmail (TODO: use different table)
#
# X-GM-MSGID is guaranteed unique across an account but not globally
# across all Gmail.
#
# Messages between different accounts *may* have the same X-GM-MSGID,
# but it's unlikely.
#
# (Gmail info from
# http://mailman13.u.washington.edu/pipermail/imap-protocol/
# 2014-July/002290.html.)
g_msgid = Column(BigInteger, nullable=True, index=True, unique=False)
g_thrid = Column(BigInteger, nullable=True, index=True, unique=False)
# The uid as set in the X-INBOX-ID header of a sent message we create
inbox_uid = Column(String(64), nullable=True, index=True)
def regenerate_inbox_uid(self):
"""
The value of inbox_uid is simply the draft public_id and version,
concatenated. Because the inbox_uid identifies the draft on the remote
provider, we regenerate it on each draft revision so that we can delete
the old draft and add the new one on the remote."""
from inbox.sendmail.message import generate_message_id_header
self.inbox_uid = '{}-{}'.format(self.public_id, self.version)
self.message_id_header = generate_message_id_header(self.inbox_uid)
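        # For example, a draft with public_id 'abc123' at version 2 gets the
        # inbox_uid 'abc123-2', and the Message-Id header is generated from
        # that value.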
categories = association_proxy(
'messagecategories', 'category',
creator=lambda category: MessageCategory(category=category))
# FOR INBOX-CREATED MESSAGES:
is_created = Column(Boolean, server_default=false(), nullable=False)
# Whether this draft is a reply to an existing thread.
is_reply = Column(Boolean)
reply_to_message_id = Column(Integer, ForeignKey('message.id'),
nullable=True)
reply_to_message = relationship('Message', uselist=False)
def mark_for_deletion(self):
"""
Mark this message to be deleted by an asynchronous delete
handler.
"""
self.deleted_at = datetime.datetime.utcnow()
@validates('subject')
def sanitize_subject(self, key, value):
# Trim overlong subjects, and remove null bytes. The latter can result
# when, for example, UTF-8 text decoded from an RFC2047-encoded header
# contains null bytes.
if value is None:
return
if len(value) > 255:
value = value[:255]
value = value.replace('\0', '')
return value
@classmethod
def create_from_synced(cls, account, mid, folder_name, received_date,
body_string):
"""
Parses message data and writes out db metadata and MIME blocks.
Returns the new Message, which links to the new Part and Block objects
through relationships. All new objects are uncommitted.
Threads are not computed here; you gotta do that separately.
Parameters
----------
mid : int
The account backend-specific message identifier; it's only used for
logging errors.
raw_message : str
The full message including headers (encoded).
"""
_rqd = [account, mid, folder_name, body_string]
if not all([v is not None for v in _rqd]):
raise ValueError(
'Required keyword arguments: account, mid, folder_name, '
'body_string')
# stop trickle-down bugs
assert account.namespace is not None
assert not isinstance(body_string, unicode)
msg = Message()
from inbox.models.block import Block
body_block = Block()
body_block.namespace_id = account.namespace.id
body_block.data = body_string
body_block.content_type = "text/plain"
msg.full_body = body_block
msg.namespace_id = account.namespace.id
try:
parsed = mime.from_string(body_string)
msg._parse_metadata(parsed, body_string, received_date, account.id,
folder_name, mid)
except (mime.DecodingError, AttributeError, RuntimeError,
TypeError) as e:
parsed = None
log.error('Error parsing message metadata',
folder_name=folder_name, account_id=account.id, error=e)
msg._mark_error()
if parsed is not None:
plain_parts = []
html_parts = []
for mimepart in parsed.walk(
with_self=parsed.content_type.is_singlepart()):
try:
if mimepart.content_type.is_multipart():
log.warning('multipart sub-part found',
account_id=account.id,
folder_name=folder_name,
mid=mid)
continue # TODO should we store relations?
msg._parse_mimepart(mid, mimepart, account.namespace.id,
html_parts, plain_parts)
except (mime.DecodingError, AttributeError, RuntimeError,
TypeError) as e:
log.error('Error parsing message MIME parts',
folder_name=folder_name, account_id=account.id,
error=e)
msg._mark_error()
msg.calculate_body(html_parts, plain_parts)
# Occasionally people try to send messages to way too many
# recipients. In such cases, empty the field and treat as a parsing
# error so that we don't break the entire sync.
for field in ('to_addr', 'cc_addr', 'bcc_addr', 'references'):
value = getattr(msg, field)
if json_field_too_long(value):
log.error('Recipient field too long', field=field,
account_id=account.id, folder_name=folder_name,
mid=mid)
setattr(msg, field, [])
msg._mark_error()
return msg
def _parse_metadata(self, parsed, body_string, received_date,
account_id, folder_name, mid):
mime_version = parsed.headers.get('Mime-Version')
# sometimes MIME-Version is '1.0 (1.0)', hence the .startswith()
if mime_version is not None and not mime_version.startswith('1.0'):
log.warning('Unexpected MIME-Version',
account_id=account_id, folder_name=folder_name,
mid=mid, mime_version=mime_version)
self.data_sha256 = sha256(body_string).hexdigest()
self.subject = parsed.subject
self.from_addr = parse_mimepart_address_header(parsed, 'From')
self.sender_addr = parse_mimepart_address_header(parsed, 'Sender')
self.reply_to = parse_mimepart_address_header(parsed, 'Reply-To')
self.to_addr = parse_mimepart_address_header(parsed, 'To')
self.cc_addr = parse_mimepart_address_header(parsed, 'Cc')
self.bcc_addr = parse_mimepart_address_header(parsed, 'Bcc')
self.in_reply_to = parsed.headers.get('In-Reply-To')
self.message_id_header = parsed.headers.get('Message-Id')
self.received_date = received_date if received_date else \
get_internaldate(parsed.headers.get('Date'),
parsed.headers.get('Received'))
# Custom Inbox header
self.inbox_uid = parsed.headers.get('X-INBOX-ID')
# In accordance with JWZ (http://www.jwz.org/doc/threading.html)
self.references = parse_references(
parsed.headers.get('References', ''),
parsed.headers.get('In-Reply-To', ''))
self.size = len(body_string) # includes headers text
def _parse_mimepart(self, mid, mimepart, namespace_id, html_parts,
plain_parts):
disposition, _ = mimepart.content_disposition
content_id = mimepart.headers.get('Content-Id')
content_type, params = mimepart.content_type
filename = params.get('name')
is_text = content_type.startswith('text')
if disposition not in (None, 'inline', 'attachment'):
log.error('Unknown Content-Disposition',
message_public_id=self.public_id,
bad_content_disposition=mimepart.content_disposition)
self._mark_error()
return
if disposition == 'attachment':
self._save_attachment(mimepart, disposition, content_type,
filename, content_id, namespace_id, mid)
return
if (disposition == 'inline' and
not (is_text and filename is None and content_id is None)):
# Some clients set Content-Disposition: inline on text MIME parts
# that we really want to treat as part of the text body. Don't
# treat those as attachments.
self._save_attachment(mimepart, disposition, content_type,
filename, content_id, namespace_id, mid)
return
if is_text:
if mimepart.body is None:
return
normalized_data = mimepart.body.encode('utf-8', 'strict')
normalized_data = normalized_data.replace('\r\n', '\n'). \
replace('\r', '\n')
if content_type == 'text/html':
html_parts.append(normalized_data)
elif content_type == 'text/plain':
plain_parts.append(normalized_data)
else:
log.info('Saving other text MIME part as attachment',
content_type=content_type, mid=mid)
self._save_attachment(mimepart, 'attachment', content_type,
filename, content_id, namespace_id, mid)
return
# Finally, if we get a non-text MIME part without Content-Disposition,
# treat it as an attachment.
self._save_attachment(mimepart, 'attachment', content_type,
filename, content_id, namespace_id, mid)
def _save_attachment(self, mimepart, content_disposition, content_type,
filename, content_id, namespace_id, mid):
from inbox.models import Part, Block
block = Block()
block.namespace_id = namespace_id
block.filename = _trim_filename(filename, mid=mid)
block.content_type = content_type
part = Part(block=block, message=self)
part.content_id = content_id
part.content_disposition = content_disposition
data = mimepart.body or ''
if isinstance(data, unicode):
data = data.encode('utf-8', 'strict')
block.data = data
def _mark_error(self):
""" Mark message as having encountered errors while parsing.
Message parsing can fail for several reasons. Occasionally iconv will
fail via maximum recursion depth. EAS messages may be missing Date and
Received headers. Flanker may fail to handle some out-of-spec messages.
In this case, we keep what metadata we've managed to parse but also
mark the message as having failed to parse properly.
"""
self.decode_error = True
# fill in required attributes with filler data if could not parse them
self.size = 0
if self.received_date is None:
self.received_date = datetime.datetime.utcnow()
if self.body is None:
self.body = ''
if self.snippet is None:
self.snippet = ''
def calculate_body(self, html_parts, plain_parts):
html_body = ''.join(html_parts).decode('utf-8').strip()
plain_body = '\n'.join(plain_parts).decode('utf-8').strip()
if html_body:
self.snippet = self.calculate_html_snippet(html_body)
self.body = html_body
elif plain_body:
self.snippet = self.calculate_plaintext_snippet(plain_body)
self.body = plaintext2html(plain_body, False)
else:
self.body = u''
self.snippet = u''
def calculate_html_snippet(self, text):
text = strip_tags(text)
return self.calculate_plaintext_snippet(text)
def calculate_plaintext_snippet(self, text):
return ' '.join(text.split())[:self.SNIPPET_LENGTH]
@property
def body(self):
if self._compacted_body is None:
return None
return decode_blob(self._compacted_body).decode('utf-8')
@body.setter
def body(self, value):
if value is None:
self._compacted_body = None
else:
self._compacted_body = encode_blob(value.encode('utf-8'))
@property
def participants(self):
"""
Different messages in the thread may reference the same email
address with different phrases. We partially deduplicate: if the same
email address occurs with both empty and nonempty phrase, we don't
separately return the (empty phrase, address) pair.
"""
deduped_participants = defaultdict(set)
chain = []
if self.from_addr:
chain.append(self.from_addr)
if self.to_addr:
chain.append(self.to_addr)
if self.cc_addr:
chain.append(self.cc_addr)
if self.bcc_addr:
chain.append(self.bcc_addr)
for phrase, address in itertools.chain.from_iterable(chain):
deduped_participants[address].add(phrase.strip())
p = []
for address, phrases in deduped_participants.iteritems():
for phrase in phrases:
if phrase != '' or len(phrases) == 1:
p.append((phrase, address))
return p
@property
def attachments(self):
return [part for part in self.parts if part.is_attachment]
@property
def api_attachment_metadata(self):
resp = []
for part in self.parts:
if not part.is_attachment:
continue
k = {'content_type': part.block.content_type,
'size': part.block.size,
'filename': part.block.filename,
'id': part.block.public_id}
content_id = part.content_id
if content_id:
if content_id[0] == '<' and content_id[-1] == '>':
content_id = content_id[1:-1]
k['content_id'] = content_id
resp.append(k)
return resp
@property
def versioned_relationships(self):
return ['parts']
@property
def propagated_attributes(self):
return ['is_read', 'is_starred', 'messagecategories']
@property
def has_attached_events(self):
return 'text/calendar' in [p.block.content_type for p in self.parts]
@property
def attached_event_files(self):
return [part for part in self.parts
if part.block.content_type == 'text/calendar']
@property
def account(self):
return self.namespace.account
def get_header(self, header, mid):
if self.decode_error:
log.warning('Error getting message header', mid=mid)
return
parsed = mime.from_string(self.full_body.data)
return parsed.headers.get(header)
def update_metadata(self, is_draft):
account = self.namespace.account
if account.discriminator == 'easaccount':
uids = self.easuids
else:
uids = self.imapuids
self.is_read = any(i.is_seen for i in uids)
self.is_starred = any(i.is_flagged for i in uids)
self.is_draft = is_draft
categories = set()
for i in uids:
categories.update(i.categories)
if account.category_type == 'folder':
categories = [select_category(categories)]
self.categories = categories
# TODO[k]: Update from pending actions here?
def add_category(self, category):
if category not in self.categories:
self.categories.add(category)
def remove_category(self, category):
if category not in self.categories:
return
self.categories.remove(category)
# Need to explicitly specify the index length for table generation with MySQL
# 5.6 when columns are too long to be fully indexed with utf8mb4 collation.
Index('ix_message_subject', Message.subject, mysql_length=191)
Index('ix_message_data_sha256', Message.data_sha256, mysql_length=191)
# For API querying performance.
Index('ix_message_ns_id_is_draft_received_date', Message.namespace_id,
Message.is_draft, Message.received_date)
# For async deletion.
Index('ix_message_namespace_id_deleted_at', Message.namespace_id,
Message.deleted_at)
# For statistics about messages sent via Nylas
Index('ix_message_namespace_id_is_created', Message.namespace_id,
Message.is_created)
class MessageCategory(MailSyncBase):
""" Mapping between messages and categories. """
message_id = Column(Integer, ForeignKey(Message.id, ondelete='CASCADE'),
nullable=False)
message = relationship(
'Message',
backref=backref('messagecategories',
collection_class=set,
cascade='all, delete-orphan'))
category_id = Column(Integer, ForeignKey(Category.id, ondelete='CASCADE'),
nullable=False)
category = relationship(
Category,
backref=backref('messagecategories',
cascade='all, delete-orphan',
lazy='dynamic'))
@property
def namespace(self):
return self.message.namespace
Index('message_category_ids',
MessageCategory.message_id, MessageCategory.category_id)
def select_category(categories):
# TODO[k]: Implement proper ranking function
return list(categories)[0]
|
wakermahmud/sync-engine
|
inbox/models/message.py
|
Python
|
agpl-3.0
| 22,094 | 0.000181 |
import os
import sys
import msgfmt
from setuptools import setup
from setuptools.command.install_lib import install_lib as _install_lib
from setuptools.command.develop import develop as _develop
from distutils.command.build import build as _build
from setuptools.command.test import test as TestCommand
from distutils.cmd import Command
class compile_translations(Command):
description = 'compile message catalogs to .mo files'
user_options = [('force', 'f', "compile also not updated message catalogs")]
boolean_options = ['force']
def initialize_options(self):
self.force = False
def finalize_options(self):
pass
def run(self):
"""
Compile all message catalogs .mo files into .po files.
Skips not changed file based on source mtime.
"""
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write("\tCompiling {}... ".format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for path, dirs, filenames in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + ".mo")
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print("ok.")
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print("ok.")
else:
print("already up to date.")
print('Finished compiling translation files.')
class build(_build):
sub_commands = [('compile_translations', None)] + _build.sub_commands
class install_lib(_install_lib):
def run(self):
self.run_command('compile_translations')
_install_lib.run(self)
class develop(_develop):
def run(self):
self.run_command('compile_translations')
_develop.run(self)
class NoseTestCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Run nose ensuring that argv simulates running nosetests directly
import nose
nose.run_exit(argv=['nosetests'])
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as buf:
return buf.read()
conf = dict(
name='libreant',
version='0.3',
description='{e,}book archive focused on small grass root archives, distributed search, low assumptions',
long_description=read('README.rst'),
author='insomnialab',
author_email='insomnialab@hacari.org',
url='https://github.com/insomnia-lab/libreant',
license='AGPL',
packages=['libreantdb',
'webant',
'webant.api',
'presets',
'archivant',
'users',
'utils',
'cli',
'conf'],
install_requires=[
'gevent',
'elasticsearch >=1, <2',
'flask-bootstrap',
'Flask-Babel',
'flask-script',
'Flask-Authbone >=0.2',
'Flask',
'opensearch',
'Fsdb',
'click',
'peewee',
        'passlib >=1.6, <1.7' # version 1.7 will drop python2 support
],
package_data = {
# If any package contains *.mo include them
# important! leave all the stars!
'webant': ['translations/*/*/*.mo']
},
include_package_data=True,
tests_require=['nose', 'coverage'],
zip_safe=False,
cmdclass={'build': build,
'test': NoseTestCommand,
'install_lib': install_lib,
'develop': develop,
'compile_translations': compile_translations},
entry_points={'console_scripts': [
'libreant=cli.libreant:libreant',
'agherant=cli.agherant:agherant',
'libreant-users=cli.libreant_users:libreant_users',
'libreant-db=cli.libreant_db:libreant_db'
]},
classifiers=[
"Framework :: Flask",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Development Status :: 4 - Beta"
])
if __name__ == '__main__':
setup(**conf)
|
ael-code/libreant
|
setup.py
|
Python
|
agpl-3.0
| 4,963 | 0.002015 |
"""Polynomial factorization routines in characteristic zero. """
from __future__ import print_function, division
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
from sympy.core.compatibility import range
def dup_trial_division(f, factors, K):
"""
Determine multiplicities of factors for a univariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
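# Illustrative example (dense "dup" lists, highest-degree coefficient first):
# for f = x**3 - 3*x + 2 = (x - 1)**2 * (x + 2) over ZZ, i.e. f = [1, 0, -3, 2],
# dup_trial_division(f, [[1, -1], [1, 2]], ZZ) recovers multiplicities 2 and 1
# for (x - 1) and (x + 2) respectively.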
def dmp_trial_division(f, factors, u, K):
"""
Determine multiplicities of factors for a multivariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n + 1))*2**n*a*b
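# This is the classical Mignotte-style factor bound sqrt(n + 1) * 2**n * A * b,
# where A is the max-norm of f and b = |lc(f)|; it bounds the coefficients of
# any integer factor of f.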
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f = g*h (mod m)
s*g + t*h = 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) = 1
deg(f) = deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f = G*H (mod m**2)
S*G + T*H = 1 (mod m**2)
References
==========
.. [1] [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
References
==========
.. [1] [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
# choose a prime number `p` such that `f` be square free in Z_p
# if there are many factors in Z_p, choose among a few different `p`
# the one with fewer factors
for px in range(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
# lift the constant coefficient of the product `G` of the factors
# in the subset `S`; if it is does not divide `fc`, `G` does
# not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G = dup_primitive(G, K)[1]
q = G[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
def dup_cyclotomic_p(f, K, irreducible=False):
"""
Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in range(n, -1, -2):
g.insert(0, f[i])
for i in range(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polynomial. """
h = [K.one, -K.one]
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
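# For example, dup_zz_cyclotomic_poly(6, ZZ) gives [1, -1, 1], i.e. the sixth
# cyclotomic polynomial x**2 - x + 1.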
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in range(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
Factorization is performed using cyclotomic decomposition of `f`,
which makes this method much faster that any other direct factorization
approach (e.g. Zassenhaus's).
References
==========
.. [1] [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
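# Illustrative sketch of the two branches above: for f = x**4 - 1 the trailing
# coefficient is -1, so the factors are exactly the cyclotomic decomposition
# of n = 4,
#
#   _dup_cyclotomic_decompose(4, ZZ) -> [x - 1, x + 1, x**2 + 1]
#
# For f = x**4 + 1 the decomposition of 2*n = 8 is computed instead and the
# factors of x**4 - 1 are removed from it, leaving only Phi_8 = x**4 + 1.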
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polynomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using
    the Zassenhaus algorithm. Trial division is used to recover the
    multiplicities of factors.
    The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Examples
========
Consider the polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
    As a result we get the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
    Note that this is a complete factorization over the integers;
    over the Gaussian integers, however, the last term factors further.
    By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speed up computations. To
    disable this behaviour set cyclotomic=False.
References
==========
.. [1] [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(range(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any(not j for j in J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n - i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(range(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(range(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(range(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
    which can be factored efficiently using the Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
References
==========
.. [1] [Wang78]_
.. [2] [Geddes92]_
"""
from sympy.utilities.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in range(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in range(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
    Given a multivariate polynomial `f` in `Z[X]` computes its complete
    factorization `f_1, ..., f_n` into irreducibles over integers::
        f = content(f) f_1**k_1 ... f_n**k_n
    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using
    the Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
    is used to recover the multiplicities of factors.
    The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
    As a result we get the following factorization::
f = 2 (x - y) (x + y)
References
==========
.. [1] [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
factors = [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
cont, f = dup_primitive(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dup_max_norm(f, K0)
f = dup_quo_ground(f, max_norm, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff*cont, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
cont, f = dmp_ground_primitive(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dmp_max_norm(f, u, K0)
f = dmp_quo_ground(f, max_norm, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff*cont, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""
Returns ``True`` if a univariate polynomial ``f`` has no factors
over its domain.
"""
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""
Returns ``True`` if a multivariate polynomial ``f`` has no factors
over its domain.
"""
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1
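# Quick sanity examples (illustrative only; dense coefficient lists over ZZ):
#
#   dup_irreducible_p([1, 0, 1], ZZ)    # x**2 + 1     -> True
#   dup_irreducible_p([1, 0, -1], ZZ)   # x**2 - 1     -> False, two factors
#   dup_irreducible_p([1, 2, 1], ZZ)    # (x + 1)**2   -> False, multiplicity 2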
|
kaushik94/sympy
|
sympy/polys/factortools.py
|
Python
|
bsd-3-clause
| 34,338 | 0.00067 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.cloud.vision_v1.proto import geometry_pb2
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.cloud.vision_v1.proto import product_search_pb2
from google.cloud.vision_v1.proto import product_search_service_pb2
from google.cloud.vision_v1.proto import text_annotation_pb2
from google.cloud.vision_v1.proto import web_detection_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
from google.type import color_pb2
from google.type import latlng_pb2
_shared_modules = [
http_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
empty_pb2,
field_mask_pb2,
timestamp_pb2,
wrappers_pb2,
status_pb2,
color_pb2,
latlng_pb2,
]
_local_modules = [
geometry_pb2,
image_annotator_pb2,
product_search_pb2,
product_search_service_pb2,
text_annotation_pb2,
web_detection_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.vision_v1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
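# Usage sketch (illustrative; the exact message fields come from the proto
# definitions aggregated above, not from this module itself):
#
#   from google.cloud.vision_v1 import types
#
#   image = types.Image(source=types.ImageSource(image_uri="gs://bucket/img.png"))
#
# Every protobuf message re-exported here behaves like a normal protobuf
# class; this module only collects them under a single importable namespace
# and, for the local proto modules, rewrites their __module__ so they present
# as vision_v1 types.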
|
dhermes/google-cloud-python
|
vision/google/cloud/vision_v1/types.py
|
Python
|
apache-2.0
| 2,315 | 0 |
"""
KeepNote Extension
new_file
Extension allows adding new filetypes to a notebook
"""
#
# KeepNote
# Copyright (c) 2008-2011 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
import gettext
import os
import re
import shutil
import sys
import time
import xml.etree.cElementTree as etree
#_ = gettext.gettext
import keepnote
from keepnote import unicode_gtk
from keepnote.notebook import NoteBookError
from keepnote import notebook as notebooklib
from keepnote import tasklib
from keepnote import tarfile
from keepnote.gui import extension
from keepnote.gui import dialog_app_options
# pygtk imports
try:
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
except ImportError:
# do not fail on gtk import error,
# extension should be usable for non-graphical uses
pass
class Extension (extension.Extension):
def __init__(self, app):
"""Initialize extension"""
extension.Extension.__init__(self, app)
self.app = app
self._file_types = []
self._default_file_types = [
FileType("Text File (txt)", "untitled.txt", "plain_text.txt"),
FileType("Spreadsheet (xls)", "untitled.xls", "spreadsheet.xls"),
FileType("Word Document (doc)", "untitled.doc", "document.doc")
]
self.enabled.add(self.on_enabled)
def get_filetypes(self):
return self._file_types
def on_enabled(self, enabled):
if enabled:
self.load_config()
def get_depends(self):
return [("keepnote", ">=", (0, 7, 1))]
#===============================
# config handling
def get_config_file(self):
return self.get_data_file("config.xml")
def load_config(self):
config = self.get_config_file()
if not os.path.exists(config):
self.set_default_file_types()
self.save_default_example_files()
self.save_config()
try:
tree = etree.ElementTree(file=config)
# check root
root = tree.getroot()
if root.tag != "file_types":
raise NoteBookError("Root tag is not 'file_types'")
# iterate children
self._file_types = []
for child in root:
if child.tag == "file_type":
filetype = FileType("", "", "")
for child2 in child:
if child2.tag == "name":
filetype.name = child2.text
elif child2.tag == "filename":
filetype.filename = child2.text
elif child2.tag == "example_file":
filetype.example_file = child2.text
self._file_types.append(filetype)
except:
self.app.error("Error reading file type configuration")
self.set_default_file_types()
self.save_config()
def save_config(self):
config = self.get_config_file()
tree = etree.ElementTree(
etree.Element("file_types"))
root = tree.getroot()
for file_type in self._file_types:
elm = etree.SubElement(root, "file_type")
name = etree.SubElement(elm, "name")
name.text = file_type.name
example = etree.SubElement(elm, "example_file")
example.text = file_type.example_file
filename = etree.SubElement(elm, "filename")
filename.text = file_type.filename
tree.write(open(config, "w"), "UTF-8")
def set_default_file_types(self):
self._file_types = list(self._default_file_types)
def save_default_example_files(self):
base = self.get_base_dir()
data_dir = self.get_data_dir()
for file_type in self._default_file_types:
fn = file_type.example_file
shutil.copy(os.path.join(base, fn), os.path.join(data_dir, fn))
def update_all_menus(self):
for window in self.get_windows():
self.set_new_file_menus(window)
#==============================
# UI
def on_add_ui(self, window):
"""Initialize extension for a particular window"""
# add menu options
self.add_action(window, "New File", "New _File")
#("treeview_popup", None, None),
self.add_ui(window,
"""
<ui>
<menubar name="main_menu_bar">
<menu action="File">
<placeholder name="New">
<menuitem action="New File"/>
</placeholder>
</menu>
</menubar>
<!--
<menubar name="popup_menus">
<menu action="treeview_popup">
<placeholder action="New">
<menuitem action="New File"/>
</placeholder>
</menu>
</menubar>
-->
</ui>
""")
self.set_new_file_menus(window)
#=================================
# Options UI setup
def on_add_options_ui(self, dialog):
dialog.add_section(NewFileSection("new_file",
dialog, self._app,
self),
"extensions")
def on_remove_options_ui(self, dialog):
dialog.remove_section("new_file")
#======================================
# callbacks
def on_new_file(self, window, file_type):
"""Callback from gui to add a new file"""
notebook = window.get_notebook()
if notebook is None:
return
nodes = window.get_selected_nodes()
if len(nodes) == 0:
parent = notebook
else:
sibling = nodes[0]
if sibling.get_parent():
parent = sibling.get_parent()
index = sibling.get_attr("order") + 1
else:
parent = sibling
try:
uri = os.path.join(self.get_data_dir(), file_type.example_file)
node = notebooklib.attach_file(uri, parent)
node.rename(file_type.filename)
window.get_viewer().goto_node(node)
except Exception, e:
window.error("Error while attaching file '%s'." % uri, e)
def on_new_file_type(self, window):
"""Callback from gui for adding a new file type"""
self.app.app_options_dialog.show(window, "new_file")
#==========================================
# menu setup
def set_new_file_menus(self, window):
"""Set the recent notebooks in the file menu"""
menu = window.get_uimanager().get_widget("/main_menu_bar/File/New/New File")
if menu:
self.set_new_file_menu(window, menu)
menu = window.get_uimanager().get_widget("/popup_menus/treeview_popup/New/New File")
if menu:
self.set_new_file_menu(window, menu)
def set_new_file_menu(self, window, menu):
"""Set the recent notebooks in the file menu"""
# TODO: perform lookup of filetypes again
# init menu
if menu.get_submenu() is None:
submenu = gtk.Menu()
submenu.show()
menu.set_submenu(submenu)
menu = menu.get_submenu()
# clear menu
menu.foreach(lambda x: menu.remove(x))
def make_func(file_type):
return lambda w: self.on_new_file(window, file_type)
# populate menu
for file_type in self._file_types:
item = gtk.MenuItem(u"New %s" % file_type.name)
item.connect("activate", make_func(file_type))
item.show()
menu.append(item)
item = gtk.SeparatorMenuItem()
item.show()
menu.append(item)
item = gtk.MenuItem(u"Add New File Type")
item.connect("activate", lambda w: self.on_new_file_type(window))
item.show()
menu.append(item)
#===============================
# actions
def install_example_file(self, filename):
"""Installs a new example file into the extension"""
newpath = self.get_data_dir()
newfilename = os.path.basename(filename)
newfilename, ext = os.path.splitext(newfilename)
newfilename = notebooklib.get_unique_filename(newpath, newfilename,
ext=ext, sep=u"",
number=2)
shutil.copy(filename, newfilename)
return os.path.basename(newfilename)
class FileType (object):
"""Class containing information about a filetype"""
def __init__(self, name, filename, example_file):
self.name = name
self.filename = filename
self.example_file = example_file
def copy(self):
return FileType(self.name, self.filename, self.example_file)
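# For reference (illustrative; element order as written by save_config()
# above): the extension's config.xml takes roughly the following shape:
#
#   <file_types>
#     <file_type>
#       <name>Text File (txt)</name>
#       <example_file>plain_text.txt</example_file>
#       <filename>untitled.txt</filename>
#     </file_type>
#     ...
#   </file_types>
#
# load_config() walks these elements back into FileType objects.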
class NewFileSection (dialog_app_options.Section):
"""A Section in the Options Dialog"""
def __init__(self, key, dialog, app, ext,
label=u"New File Types",
icon=None):
dialog_app_options.Section.__init__(self, key, dialog, app, label, icon)
self.ext = ext
self._filetypes = []
self._current_filetype = None
# setup UI
w = self.get_default_widget()
h = gtk.HBox(False, 5)
w.add(h)
# left column (file type list)
v = gtk.VBox(False, 5)
h.pack_start(v, False, True, 0)
self.filetype_store = gtk.ListStore(str, object)
self.filetype_listview = gtk.TreeView(self.filetype_store)
self.filetype_listview.set_headers_visible(False)
self.filetype_listview.get_selection().connect("changed",
self.on_listview_select)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
sw.add(self.filetype_listview)
sw.set_size_request(160, 200)
v.pack_start(sw, False, True, 0)
# create the treeview column
column = gtk.TreeViewColumn()
self.filetype_listview.append_column(column)
cell_text = gtk.CellRendererText()
column.pack_start(cell_text, True)
column.add_attribute(cell_text, 'text', 0)
# add/del buttons
h2 = gtk.HBox(False, 5)
v.pack_start(h2, False, True, 0)
button = gtk.Button("New")
button.connect("clicked", self.on_new_filetype)
h2.pack_start(button, True, True, 0)
button = gtk.Button("Delete")
button.connect("clicked", self.on_delete_filetype)
h2.pack_start(button, True, True, 0)
# right column (file type editor)
v = gtk.VBox(False, 5)
h.pack_start(v, False, True, 0)
table = gtk.Table(3, 2)
self.filetype_editor = table
v.pack_start(table, False, True, 0)
# file type name
label = gtk.Label("File type name:")
table.attach(label, 0, 1, 0, 1,
xoptions=0, yoptions=0,
xpadding=2, ypadding=2)
self.filetype = gtk.Entry()
table.attach(self.filetype, 1, 2, 0, 1,
xoptions=gtk.FILL, yoptions=0,
xpadding=2, ypadding=2)
# default filename
label = gtk.Label("Default filename:")
table.attach(label, 0, 1, 1, 2,
xoptions=0, yoptions=0,
xpadding=2, ypadding=2)
self.filename = gtk.Entry()
table.attach(self.filename, 1, 2, 1, 2,
xoptions=gtk.FILL, yoptions=0,
xpadding=2, ypadding=2)
# example new file
label = gtk.Label("Example new file:")
table.attach(label, 0, 1, 2, 3,
xoptions=0, yoptions=0,
xpadding=2, ypadding=2)
self.example_file = gtk.Entry()
table.attach(self.example_file, 1, 2, 2, 3,
xoptions=gtk.FILL, yoptions=0,
xpadding=2, ypadding=2)
# browse button
button = gtk.Button(_("Browse..."))
button.set_image(
gtk.image_new_from_stock(gtk.STOCK_OPEN,
gtk.ICON_SIZE_SMALL_TOOLBAR))
button.show()
button.connect("clicked", lambda w:
dialog_app_options.on_browse(
w.get_toplevel(), "Choose Example New File", "",
self.example_file))
table.attach(button, 1, 2, 3, 4,
xoptions=gtk.FILL, yoptions=0,
xpadding=2, ypadding=2)
w.show_all()
self.set_filetypes()
self.set_filetype_editor(None)
def load_options(self, app):
"""Load options from app to UI"""
self._filetypes = [x.copy() for x in self.ext.get_filetypes()]
self.set_filetypes()
self.filetype_listview.get_selection().unselect_all()
def save_options(self, app):
"""Save options to the app"""
self.save_current_filetype()
# install example files
bad = []
for filetype in self._filetypes:
if os.path.isabs(filetype.example_file):
# copy new file into extension data dir
try:
filetype.example_file = self.ext.install_example_file(
filetype.example_file)
except Exception, e:
app.error("Cannot install example file '%s'" %
filetype.example_file, e)
bad.append(filetype)
# update extension state
self.ext.get_filetypes()[:] = [x.copy() for x in self._filetypes
if x not in bad]
self.ext.save_config()
self.ext.update_all_menus()
def set_filetypes(self):
"""Initialize the lisview to the loaded filetypes"""
self.filetype_store.clear()
for filetype in self._filetypes:
self.filetype_store.append([filetype.name, filetype])
def set_filetype_editor(self, filetype):
"""Update editor with current filetype"""
if filetype is None:
self._current_filetype = None
self.filetype.set_text("")
self.filename.set_text("")
self.example_file.set_text("")
self.filetype_editor.set_sensitive(False)
else:
self._current_filetype = filetype
self.filetype.set_text(filetype.name)
self.filename.set_text(filetype.filename)
self.example_file.set_text(filetype.example_file)
self.filetype_editor.set_sensitive(True)
def save_current_filetype(self):
"""Save the contents of the editor into the current filetype object"""
if self._current_filetype:
self._current_filetype.name = self.filetype.get_text()
self._current_filetype.filename = self.filename.get_text()
self._current_filetype.example_file = self.example_file.get_text()
# update filetype list
for row in self.filetype_store:
if row[1] == self._current_filetype:
row[0] = self._current_filetype.name
def on_listview_select(self, selection):
"""Callback for when listview selection changes"""
model, it = self.filetype_listview.get_selection().get_selected()
self.save_current_filetype()
# set editor to current selection
if it is not None:
filetype = self.filetype_store[it][1]
self.set_filetype_editor(filetype)
else:
self.set_filetype_editor(None)
def on_new_filetype(self, button):
"""Callback for adding a new filetype"""
self._filetypes.append(FileType(u"New File Type", u"untitled", ""))
self.set_filetypes()
self.filetype_listview.set_cursor((len(self._filetypes)-1,))
def on_delete_filetype(self, button):
model, it = self.filetype_listview.get_selection().get_selected()
if it is not None:
filetype = self.filetype_store[it][1]
self._filetypes.remove(filetype)
self.set_filetypes()
|
reshadh/Keepnote-LaTeX
|
keepnote/extensions/new_file/__init__.py
|
Python
|
gpl-2.0
| 17,272 | 0.004805 |
"""
MagPy
IAGA02 input filter
Written by Roman Leonhardt June 2012
- contains test, read and write function
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open
from magpy.stream import *
#global variables
MISSING_DATA = 99999
NOT_REPORTED = 88888
def isIAGA(filename):
"""
Checks whether a file is ASCII IAGA 2002 format.
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith(' Format'):
return False
if not 'IAGA-2002' in temp:
return False
except:
return False
return True
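# For reference (illustrative): an IAGA-2002 file begins with a header line
# of the form
#
#    Format                 IAGA-2002                                    |
#
# which is exactly what isIAGA() keys on: a leading space, the word "Format"
# and the substring "IAGA-2002" somewhere in the first line.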
def readIAGA(filename, headonly=False, **kwargs):
"""
Reading IAGA2002 format data.
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
debug = kwargs.get('debug')
getfile = True
array = [[] for key in KEYLIST]
fh = open(filename, 'rt')
# read file and split text into channels
stream = DataStream()
    # Check whether header information is already present
headers = {}
data = []
key = None
try:
# get day from filename (platform independent)
theday = extractDateFromString(filename)[0]
day = datetime.strftime(theday,"%Y-%m-%d")
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
except:
logging.warning("Could not identify typical IAGA date for %s. Reading all ...".format(filename))
getfile = True
if getfile:
loggerlib.info('Read: %s Format: %s ' % (filename, "IAGA2002"))
dfpos = KEYLIST.index('df')
for line in fh:
if line.isspace():
# blank line
continue
elif line.startswith(' '):
# data info
infoline = line[:-4]
key = infoline[:23].strip()
val = infoline[23:].strip()
if key.find('Source') > -1:
if not val == '':
stream.header['StationInstitution'] = val
if key.find('Station') > -1:
if not val == '':
stream.header['StationName'] = val
if key.find('IAGA') > -1:
if not val == '':
stream.header['StationIAGAcode'] = val
stream.header['StationID'] = val
if key.find('Latitude') > -1:
if not val == '':
stream.header['DataAcquisitionLatitude'] = val
if key.find('Longitude') > -1:
if not val == '':
stream.header['DataAcquisitionLongitude'] = val
if key.find('Elevation') > -1:
if not val == '':
stream.header['DataElevation'] = val
if key.find('Format') > -1:
if not val == '':
stream.header['DataFormat'] = val
if key.find('Reported') > -1:
if not val == '':
stream.header['DataComponents'] = val
if key.find('Orientation') > -1:
if not val == '':
stream.header['DataSensorOrientation'] = val
if key.find('Digital') > -1:
if not val == '':
stream.header['DataDigitalSampling'] = val
if key.find('Interval') > -1:
if not val == '':
stream.header['DataSamplingFilter'] = val
if key.startswith(' #'):
if key.find('# V-Instrument') > -1:
if not val == '':
stream.header['SensorID'] = val
elif key.find('# PublicationDate') > -1:
if not val == '':
stream.header['DataPublicationDate'] = val
else:
print ("formatIAGA: did not import optional header info {a}".format(a=key))
if key.find('Data Type') > -1:
if not val == '':
if val[0] in ['d','D']:
stream.header['DataPublicationLevel'] = '4'
elif val[0] in ['q','Q']:
stream.header['DataPublicationLevel'] = '3'
elif val[0] in ['p','P']:
stream.header['DataPublicationLevel'] = '2'
else:
stream.header['DataPublicationLevel'] = '1'
if key.find('Publication Date') > -1:
if not val == '':
stream.header['DataPublicationDate'] = val
elif line.startswith('DATE'):
# data header
colsstr = line.lower().split()
varstr = ''
for it, elem in enumerate(colsstr):
if it > 2:
varstr += elem[-1]
varstr = varstr[:4]
stream.header["col-x"] = varstr[0].upper()
stream.header["col-y"] = varstr[1].upper()
stream.header["col-z"] = varstr[2].upper()
stream.header["unit-col-x"] = 'nT'
stream.header["unit-col-y"] = 'nT'
stream.header["unit-col-z"] = 'nT'
stream.header["unit-col-f"] = 'nT'
if varstr.endswith('g'):
stream.header["unit-col-df"] = 'nT'
stream.header["col-df"] = 'G'
stream.header["col-f"] = 'F'
else:
stream.header["col-f"] = 'F'
if varstr in ['dhzf','dhzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'D'
#stream.header["col-z"] = 'Z'
stream.header["unit-col-y"] = 'deg'
stream.header['DataComponents'] = 'HDZF'
elif varstr in ['ehzf','ehzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'E'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'HEZF'
elif varstr in ['dhif','dhig']:
stream.header["col-x"] = 'I'
stream.header["col-y"] = 'D'
stream.header["col-z"] = 'F'
stream.header["unit-col-x"] = 'deg'
stream.header["unit-col-y"] = 'deg'
stream.header['DataComponents'] = 'IDFF'
elif varstr in ['hdzf','hdzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'D'
stream.header["unit-col-y"] = 'deg'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'HDZF'
else:
#stream.header["col-x"] = 'X'
#stream.header["col-y"] = 'Y'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'XYZF'
elif headonly:
# skip data for option headonly
continue
elif line.startswith('%'):
pass
else:
# data entry - may be written in multiple columns
                # row holds the values of one line
row=[]
                # Do not use the last character of "line", i.e. use line[:-1],
                # because it contains the line break "\n"
for val in line[:-1].split():
                    # only add non-empty columns
if val.strip()!="":
row.append(val.strip())
                # build up the two-dimensional array
array[0].append( date2num(datetime.strptime(row[0]+'-'+row[1],"%Y-%m-%d-%H:%M:%S.%f")) )
if float(row[3]) >= NOT_REPORTED:
row[3] = np.nan
if float(row[4]) >= NOT_REPORTED:
row[4] = np.nan
if float(row[5]) >= NOT_REPORTED:
row[5] = np.nan
if varstr in ['dhzf','dhzg']:
array[1].append( float(row[4]) )
array[2].append( float(row[3])/60.0 )
array[3].append( float(row[5]) )
elif varstr in ['ehzf','ehzg']:
array[1].append( float(row[4]) )
array[2].append( float(row[3]) )
array[3].append( float(row[5]) )
elif varstr in ['dhif','dhig']:
array[1].append( float(row[5])/60.0 )
array[2].append( float(row[3])/60.0 )
array[3].append( float(row[6]) )
elif varstr in ['hdzf','hdzg']:
array[1].append( float(row[3]) )
array[2].append( float(row[4])/60.0 )
array[3].append( float(row[5]) )
else:
array[1].append( float(row[3]) )
array[2].append( float(row[4]) )
array[3].append( float(row[5]) )
try:
if float(row[6]) < NOT_REPORTED:
if varstr[-1]=='f':
                            array[4].append(float(row[6]))
elif varstr[-1]=='g' and varstr=='xyzg':
array[4].append(np.sqrt(float(row[3])**2+float(row[4])**2+float(row[5])**2) - float(row[6]))
array[dfpos].append(float(row[6]))
elif varstr[-1]=='g' and varstr in ['hdzg','dhzg','ehzg']:
array[4].append(np.sqrt(float(row[3])**2+float(row[5])**2) - float(row[6]))
array[dfpos].append(float(row[6]))
elif varstr[-1]=='g' and varstr in ['dhig']:
array[4].append(float(row[6]))
array[dfpos].append(float(row[6]))
else:
raise ValueError
else:
array[4].append(float('nan'))
except:
if not float(row[6]) >= NOT_REPORTED:
array[4].append(float(row[6]))
else:
array[4].append(float('nan'))
#data.append(row)
fh.close()
for idx, elem in enumerate(array):
array[idx] = np.asarray(array[idx])
stream = DataStream([LineStruct()],stream.header,np.asarray(array))
sr = stream.samplingrate()
return stream
def writeIAGA(datastream, filename, **kwargs):
"""
Writing IAGA2002 format data.
"""
mode = kwargs.get('mode')
useg = kwargs.get('useg')
def OpenFile(filename, mode='w'):
if sys.version_info >= (3,0,0):
f = open(filename, mode, newline='')
else:
f = open(filename, mode+'b')
return f
if os.path.isfile(filename):
if mode == 'skip': # skip existing inputs
exst = read(path_or_url=filename)
datastream = mergeStreams(exst,datastream,extend=True)
myFile= OpenFile(filename)
elif mode == 'replace': # replace existing inputs
exst = read(path_or_url=filename)
datastream = mergeStreams(datastream,exst,extend=True)
myFile= OpenFile(filename)
elif mode == 'append':
myFile= OpenFile(filename,mode='a')
else: # overwrite mode
#os.remove(filename) ?? necessary ??
myFile= OpenFile(filename)
else:
myFile= OpenFile(filename)
header = datastream.header
datacomp = header.get('DataComponents'," ")
if datacomp in ['hez','HEZ','hezf','HEZF','hezg','HEZG']:
order = [1,0,2]
datacomp = 'EHZ'
elif datacomp in ['hdz','HDZ','hdzf','HDZF','hdzg','HDZG']:
order = [1,0,2]
datacomp = 'DHZ'
elif datacomp in ['idf','IDF','idff','IDFF','idfg','IDFG']:
order = [1,3,0]
datacomp = 'DHI'
elif datacomp in ['xyz','XYZ','xyzf','XYZF','xyzg','XYZG']:
order = [0,1,2]
datacomp = 'XYZ'
elif datacomp in ['ehz','EHZ','ehzf','EHZF','ehzg','EHZG']:
order = [0,1,2]
datacomp = 'EHZ'
elif datacomp in ['dhz','DHZ','dhzf','DHZF','dhzg','DHZG']:
order = [0,1,2]
datacomp = 'DHZ'
elif datacomp in ['dhi','DHI','dhif','DHIF','dhig','DHIG']:
order = [0,1,2]
datacomp = 'DHI'
else:
order = [0,1,2]
datacomp = 'XYZ'
find = KEYLIST.index('f')
findg = KEYLIST.index('df')
if len(datastream.ndarray[findg]) > 0:
useg = True
if len(datastream.ndarray[find]) > 0:
if not useg:
datacomp = datacomp+'F'
else:
datacomp = datacomp+'G'
else:
datacomp = datacomp+'F'
publevel = str(header.get('DataPublicationLevel'," "))
if publevel == '2':
publ = 'Provisional'
elif publevel == '3':
publ = 'Quasi-definitive'
elif publevel == '4':
publ = 'Definitive'
else:
publ = 'Variation'
proj = header.get('DataLocationReference','')
longi = header.get('DataAcquisitionLongitude',' ')
lati = header.get('DataAcquisitionLatitude',' ')
if not longi=='' or lati=='':
if proj == '':
pass
else:
if proj.find('EPSG:') > 0:
epsg = int(proj.split('EPSG:')[1].strip())
if not epsg==4326:
longi,lati = convertGeoCoordinate(float(longi),float(lati),'epsg:'+str(epsg),'epsg:4326')
line = []
if not mode == 'append':
#if header.get('Elevation') > 0:
# print(header)
line.append(' Format %-15s IAGA-2002 %-34s |\n' % (' ',' '))
line.append(' Source of Data %-7s %-44s |\n' % (' ',header.get('StationInstitution'," ")[:44]))
line.append(' Station Name %-9s %-44s |\n' % (' ', header.get('StationName'," ")[:44]))
line.append(' IAGA Code %-12s %-44s |\n' % (' ',header.get('StationIAGAcode'," ")[:44]))
line.append(' Geodetic Latitude %-4s %-44s |\n' % (' ',str(lati)[:44]))
line.append(' Geodetic Longitude %-3s %-44s |\n' % (' ',str(longi)[:44]))
line.append(' Elevation %-12s %-44s |\n' % (' ',str(header.get('DataElevation'," "))[:44]))
line.append(' Reported %-13s %-44s |\n' % (' ',datacomp))
line.append(' Sensor Orientation %-3s %-44s |\n' % (' ',header.get('DataSensorOrientation'," ").upper()[:44]))
line.append(' Digital Sampling %-5s %-44s |\n' % (' ',str(header.get('DataDigitalSampling'," "))[:44]))
line.append(' Data Interval Type %-3s %-44s |\n' % (' ',(str(header.get('DataSamplingRate'," "))+' ('+header.get('DataSamplingFilter'," ")+')')[:44]))
line.append(' Data Type %-12s %-44s |\n' % (' ',publ[:44]))
if not header.get('DataPublicationDate','') == '':
line.append(' {a:<20} {b:<45s}|\n'.format(a='Publication date',b=str(header.get('DataPublicationDate'))[:10]))
# Optional header part:
skipopt = False
if not skipopt:
if not header.get('SensorID','') == '':
line.append(' #{a:<20} {b:<45s}|\n'.format(a='V-Instrument',b=header.get('SensorID')[:44]))
if not header.get('SecondarySensorID','') == '':
line.append(' #{a:<20} {b:<45s}|\n'.format(a='F-Instrument',b=header.get('SecondarySensorID')[:44]))
if not header.get('StationMeans','') == '':
try:
meanlist = header.get('StationMeans') # Assume something like H:xxxx,D:xxx,Z:xxxx
meanlist = meanlist.split(',')
for me in meanlist:
if me.startswith('H'):
hval = me.split(':')
line.append(' #{a:<20} {b:<45s}|\n'.format(a='Approx H',b=hval[1]))
except:
pass
line.append(' #{a:<20} {b:<45s}|\n'.format(a='File created by',b='MagPy '+magpyversion))
iagacode = header.get('StationIAGAcode',"")
line.append('DATE TIME DOY %8s %9s %9s %9s |\n' % (iagacode+datacomp[0],iagacode+datacomp[1],iagacode+datacomp[2],iagacode+datacomp[3]))
try:
myFile.writelines(line) # Write header sequence of strings to a file
except IOError:
pass
try:
line = []
ndtype = False
if len(datastream.ndarray[0]) > 0:
ndtype = True
fulllength = datastream.length()[0]
# Possible types: DHIF, DHZF, XYZF, or DHIG, DHZG, XYZG
#datacomp = 'EHZ'
#datacomp = 'DHZ'
#datacomp = 'DHI'
#datacomp = 'XYZ'
xmult = 1.0
ymult = 1.0
zmult = 1.0
xind = order[0]+1
yind = order[1]+1
zind = order[2]+1
if len(datastream.ndarray[xind]) == 0 or len(datastream.ndarray[yind]) == 0 or len(datastream.ndarray[zind]) == 0:
print("writeIAGA02: WARNING! Data missing in X, Y or Z component! Writing anyway...")
find = KEYLIST.index('f')
if datacomp.startswith('DHZ'):
xmult = 60.0
elif datacomp.startswith('DHI'):
xmult = 60.0
zmult = 60.0
for i in range(fulllength):
if not ndtype:
elem = datastream[i]
xval = elem.x
yval = elem.y
zval = elem.z
fval = elem.f
timeval = elem.time
else:
if len(datastream.ndarray[xind]) > 0:
xval = datastream.ndarray[xind][i]*xmult
else:
xval = NOT_REPORTED
if len(datastream.ndarray[yind]) > 0:
yval = datastream.ndarray[yind][i]
                    if order[1] == 3:
yval = datastream.ndarray[yind][i]*np.cos(datastream.ndarray[zind][i]*np.pi/180.)
else:
yval = NOT_REPORTED
if len(datastream.ndarray[zind]) > 0:
zval = datastream.ndarray[zind][i]*zmult
else:
zval = NOT_REPORTED
if len(datastream.ndarray[find]) > 0:
if not useg:
fval = datastream.ndarray[find][i]
else:
fval = np.sqrt(xval**2+yval**2+zval**2)-datastream.ndarray[find][i]
else:
fval = NOT_REPORTED
timeval = datastream.ndarray[0][i]
row = ''
try:
row = datetime.strftime(num2date(timeval).replace(tzinfo=None),"%Y-%m-%d %H:%M:%S.%f")
row = row[:-3]
doi = datetime.strftime(num2date(timeval).replace(tzinfo=None), "%j")
row += ' %s' % str(doi)
except:
row = ''
pass
if isnan(xval):
row += '%13.2f' % MISSING_DATA
else:
row += '%13.2f' % xval
if isnan(yval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % yval
if isnan(zval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % zval
if isnan(fval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % fval
line.append(row + '\n')
try:
myFile.writelines( line )
pass
finally:
myFile.close()
except IOError:
return False
pass
return True
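# Usage sketch (illustrative file names; in practice these functions are
# normally dispatched through magpy's generic read()/write() machinery rather
# than called directly):
#
#   stream = readIAGA("wic20170515dmin.min")
#   writeIAGA(stream, "/tmp/wic20170515dmin.min", mode="overwrite")
#
# readIAGA() fills stream.header with the IAGA-2002 metadata (station code,
# reported components, sampling filter, ...) and writeIAGA() maps the stream
# back onto the DATE/TIME/DOY + four-component column layout.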
|
hschovanec-usgs/magpy
|
magpy/lib/format_iaga02.py
|
Python
|
gpl-3.0
| 20,820 | 0.011479 |
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/hydro/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/hydro/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/hydro".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/opt/ros/hydro/stacks/qbo_webi/build/devel/env.sh')
output_filename = '/opt/ros/hydro/stacks/qbo_webi/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
HailStorm32/Q.bo_stacks
|
qbo_webi/build/catkin_generated/generate_cached_setup.py
|
Python
|
lgpl-2.1
| 1,266 | 0.004739 |
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.management import call_command
from django.db import models, connections, transaction
from django.urls import reverse
from django_tenants.clone import CloneSchema
from .postgresql_backend.base import _check_schema_name
from .signals import post_schema_sync, schema_needs_to_be_sync
from .utils import get_creation_fakes_migrations, get_tenant_base_schema
from .utils import schema_exists, get_tenant_domain_model, get_public_schema_name, get_tenant_database_alias
class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
domain_subfolder = None
"""
Leave this as None. Stores the subfolder in subfolder routing was used
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
        Syntax sugar that helps with fast tenant switching in the Django shell
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA "%s" CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA "%s"' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
def get_tenant_type(self):
"""
Get the type of tenant. Will only work for multi type tenants
:return: str
"""
return getattr(self, settings.MULTI_TYPE_DATABASE_FIELD)
class DomainMixin(models.Model):
"""
All models that store the domains must inherit this class
"""
domain = models.CharField(max_length=253, unique=True, db_index=True)
tenant = models.ForeignKey(settings.TENANT_MODEL, db_index=True, related_name='domains',
on_delete=models.CASCADE)
# Set this to true if this is the primary domain
is_primary = models.BooleanField(default=True, db_index=True)
@transaction.atomic
def save(self, *args, **kwargs):
# Get all other primary domains with the same tenant
domain_list = self.__class__.objects.filter(tenant=self.tenant, is_primary=True).exclude(pk=self.pk)
# If we have no primary domain yet, set as primary domain by default
self.is_primary = self.is_primary or (not domain_list.exists())
if self.is_primary:
# Remove primary status of existing domains for tenant
domain_list.update(is_primary=False)
super().save(*args, **kwargs)
class Meta:
abstract = True
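# Illustrative sketch (not part of this module): a project using these mixins
# typically defines concrete models along the following lines and points
# settings.TENANT_MODEL / settings.TENANT_DOMAIN_MODEL at them:
#
#   class Client(TenantMixin):
#       name = models.CharField(max_length=100)
#       paid_until = models.DateField(null=True)
#       auto_create_schema = True  # create the schema on first save()
#
#   class Domain(DomainMixin):
#       pass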
|
tomturner/django-tenants
|
django_tenants/models.py
|
Python
|
mit
| 9,732 | 0.001747 |
from sympy.utilities.pytest import XFAIL, raises
from sympy import (symbols, lambdify, sqrt, sin, cos, pi, atan, Rational, Float,
Matrix, Lambda, exp, Integral, oo, I)
from sympy.printing.lambdarepr import LambdaPrinter
from sympy import mpmath
from sympy.utilities.lambdify import implemented_function
import math, sympy
x,y,z = symbols('x,y,z')
#================== Test different arguments ==============
def test_no_args():
f = lambdify([], 1)
try:
f(-1)
assert False
except TypeError:
pass
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x,y], x+y)
assert f(1,2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3,2,1) == (1,2,3)
assert f(1.0,2.0,3.0) == (3.0,2.0,1.0)
# make sure correct number of args required
try:
f(0)
assert False
except TypeError:
pass
def test_own_namespace():
myfunc = lambda x:1
f = lambdify(x, sin(x), {"sin":myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0)==0.0
f = lambdify(x, sympy.ceiling(x), math)
try:
f(4.5)
assert False
except NameError:
pass
def test_bad_args():
try:
# no vargs given
f = lambdify(1)
assert False
except TypeError:
pass
try:
# same with vector exprs
f = lambdify([1,2])
assert False
except TypeError:
pass
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules ================
# the high-precision value of sin(0.2) is used to detect whether precision is unintentionally lost
def test_sympy_lambda():
dps = mpmath.mp.dps
mpmath.mp.dps = 50
try:
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1,5)).evalf() - Float(str(sin02)) < prec
try:
# arctan is in numpy module and should not be available
f = lambdify(x, arctan(x), "sympy")
assert False
except NameError:
pass
finally:
mpmath.mp.dps = dps
def test_math_lambda():
dps = mpmath.mp.dps
mpmath.mp.dps = 50
try:
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
try:
f(x) # if this succeeds, it can't be a python math function
assert False
except ValueError:
pass
finally:
mpmath.mp.dps = dps
def test_mpmath_lambda():
dps = mpmath.mp.dps
mpmath.mp.dps = 50
try:
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
try:
f(x) # if this succeeds, it can't be a mpmath function
assert False
except TypeError:
pass
finally:
mpmath.mp.dps = dps
@XFAIL
def test_number_precision():
dps = mpmath.mp.dps
mpmath.mp.dps = 50
try:
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
finally:
mpmath.mp.dps = dps
#================== Test Translations =====================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.iteritems():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.iteritems():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
#================== Test some functions ===================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
try:
#FIXME-py3k: In Python 3, sqrt(-1) is a ValueError but (-1)**(1/2) isn't
#FIXME-py3k: (previously both were). Change the test, or check Py version?
f(-1)
assert False
except ValueError: pass
def test_trig():
f = lambdify([x], [cos(x),sin(x)])
d = f(pi)
prec = 1e-11
assert -prec < d[0]+1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0]+1 < prec
assert -prec < d[1] < prec
#================== Test vectors ==========================
def test_vector_simple():
f = lambdify((x,y,z), (z,y,x))
assert f(3,2,1) == (1,2,3)
assert f(1.0,2.0,3.0) == (3.0,2.0,1.0)
# make sure correct number of args required
try:
f(0)
assert False
except TypeError: pass
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
try:
f(0)
assert False
except ZeroDivisionError: pass
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x),sin(x)])
d = f(pi)
assert abs(d[0]+1) < 0.0001
assert abs(d[1]-0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x),sin(x)])
d = f(3.14159)
assert abs(d[0]+1) < 0.0001
assert abs(d[1]-0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x,y,z], [z,y,x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x,y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z)+4, x**z]])
sol = Matrix([[1, 2], [sin(3)+4, 1]])
f = lambdify((x,y,z), A, modules="sympy")
assert f(1,2,3) == sol
f = lambdify((x,y,z), (A, [A]), modules="sympy")
assert f(1,2,3) == (sol,[sol])
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#########Test Symbolic###########
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x,y], x + y + z)
assert f(1,2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x:'first f'}
n2 = {'f': lambda x:'second f',
'g': lambda x:'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x : x+101)
raises(ValueError, 'lambdify(x, f(f2(x)))')
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x : x+100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x : x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
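    # e.g. intervalrepr(sympy.S(1)/2) should yield "mpi('1/2')", which mpmath treats as an interval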
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/utilities/tests/test_lambdify.py
|
Python
|
agpl-3.0
| 11,262 | 0.010478 |
from __future__ import unicode_literals
import os
from mopidy import config, exceptions, ext
__version__ = '0.2.2'
class GMusicExtension(ext.Extension):
dist_name = 'Mopidy-GMusic'
ext_name = 'gmusic'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(GMusicExtension, self).get_config_schema()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['deviceid'] = config.String(optional=True)
return schema
def validate_environment(self):
try:
import gmusicapi # noqa
except ImportError as e:
raise exceptions.ExtensionError('gmusicapi library not found', e)
pass
def get_backend_classes(self):
from .actor import GMusicBackend
return [GMusicBackend]
|
jeh/mopidy-gmusic
|
mopidy_gmusic/__init__.py
|
Python
|
apache-2.0
| 976 | 0 |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
# In[2]:
train_df = pd.read_csv('./input/train.csv', index_col=0)
test_df = pd.read_csv('./input/test.csv', index_col=0)
# In[4]:
train_df.head()
# In[6]:
# The label itself is not smooth. To help the classifier learn more accurately, we first "smooth" the label (bring it closer to a normal distribution).
import matplotlib.pyplot as plt
prices = pd.DataFrame({"price":train_df["SalePrice"], "log(price + 1)":np.log1p(train_df["SalePrice"])})
prices.hist()
plt.show()
# In[7]:
y_train = np.log1p(train_df.pop('SalePrice'))
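# Note: log1p(x) = log(1 + x); the inverse transform np.expm1 is applied to the
# predictions further below, before building the submission.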
# In[8]:
all_df = pd.concat((train_df, test_df), axis=0)
# In[19]:
all_df['MSSubClass'].dtypes
all_df['MSSubClass'].value_counts()
all_df['MSSubClass'] = all_df['MSSubClass'].astype(str)
pd.get_dummies(all_df['MSSubClass'], prefix='MSSubClass').head()
# In[20]:
all_dummy_df = pd.get_dummies(all_df)
all_dummy_df.head()
# In[21]:
all_dummy_df.isnull().sum().sort_values(ascending=False).head(10)
# In[22]:
mean_cols = all_dummy_df.mean()
mean_cols.head(10)
all_dummy_df = all_dummy_df.fillna(mean_cols)
all_dummy_df.isnull().sum().sum()
# In[23]:
dummy_train_df = all_dummy_df.loc[train_df.index]
dummy_test_df = all_dummy_df.loc[test_df.index]
# In[24]:
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
X_train = dummy_train_df.values
X_test = dummy_test_df.values
# ### Ridge
# In[25]:
alphas = np.logspace(-3, 2, 50)
test_scores = []
for alpha in alphas:
clf = Ridge(alpha)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[27]:
plt.plot(alphas, test_scores)
plt.title("Alpha vs CV Error");
plt.show()
# alpha = 15 works best
#
# ### RandomForestRegressor
# In[28]:
from sklearn.ensemble import RandomForestRegressor
max_features = [.1, .3, .5, .7, .9, .99]
test_scores = []
for max_feat in max_features:
clf = RandomForestRegressor(n_estimators=200, max_features=max_feat)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=5, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[29]:
plt.plot(max_features, test_scores)
plt.title("Max Features vs CV Error");
plt.show()
# ### xgboost
# In[36]:
from xgboost import XGBRegressor
params = [1,2,3,4,5,6]
test_scores = []
for param in params:
clf = XGBRegressor(max_depth=param)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[ ]:
plt.plot(params, test_scores)
plt.title("max_depth vs CV Error");
plt.show()
# ### bagging
# In[38]:
from sklearn.ensemble import BaggingRegressor
params = [10, 15, 20, 25, 30, 35, 40, 45, 50]
test_scores = []
for param in params:
clf = BaggingRegressor(n_estimators=param)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[39]:
plt.plot(params, test_scores)
plt.title("max_depth vs CV Error");
plt.show()
# ### Essentially Ridge; it performs a bit better
# In[40]:
ridge = Ridge(15)
params = [1, 10, 15, 20, 25, 30, 40]
test_scores = []
for param in params:
clf = BaggingRegressor(n_estimators=param, base_estimator=ridge)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[41]:
plt.plot(params, test_scores)
plt.title("max_depth vs CV Error");
plt.show()
# ### Ensemble
#
# In[30]:
ridge = Ridge(alpha=15)
rf = RandomForestRegressor(n_estimators=500, max_features=.3)
ridge.fit(X_train, y_train)
rf.fit(X_train, y_train)
# In[31]:
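# Simple two-model ensemble: map both predictions back to the price scale with expm1,
# then take their arithmetic mean.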
y_ridge = np.expm1(ridge.predict(X_test))
y_rf = np.expm1(rf.predict(X_test))
y_final = (y_ridge + y_rf) / 2
# In[32]:
submission_df = pd.DataFrame(data= {'Id' : test_df.index, 'SalePrice': y_final})
# In[33]:
submission_df.head()
# In[37]:
submission_df.to_csv('submission20180316.csv',index = False,header = True,columns = ['Id','SalePrice'])
|
muxiaobai/CourseExercises
|
python/kaggle/competition/house-price/house.py
|
Python
|
gpl-2.0
| 4,162 | 0.010591 |
# -*- coding: utf8 -*-
# This file is a part of SMSShell
#
# Copyright (c) 2016-2018 Pierre GINDRAUD
#
# SMSShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SMSShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SMSShell. If not, see <http://www.gnu.org/licenses/>.
"""Help command
This command returns a help string that depends on the given input parameters
* If called without parameters: returns the list of all available commands
* If called with a command name as the first parameter: returns that command's usage string
  In this case, any additional parameters are passed on to the command's usage
"""
from . import AbstractCommand, CommandException
class Help(AbstractCommand):
"""Command class, see module docstring for help
"""
def usage(self, argv):
return 'help [COMMAND] [COMMAND ARGS]'
def description(self, argv):
return 'Show commands usage'
def main(self, argv):
# call usage function of the given command
if argv:
try:
return self.shell.getCommand(self.session, argv[0]).usage(argv[1:])
except CommandException as ex:
self.log.error("error during command execution : " + str(ex))
return 'command not available'
# return the list of availables commands
else:
return ' '.join(self.shell.getAvailableCommands(self.session))
|
Turgon37/SMSShell
|
SMSShell/commands/help.py
|
Python
|
gpl-3.0
| 1,862 | 0.001611 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0004_pollanswer_zaventemtransport'),
]
operations = [
migrations.AlterField(
model_name='zaventemtransport',
name='transport',
field=models.CharField(default=b'group', max_length=5, choices=[(b'group', '.. with the group'), (b'own', '.. by myself')]),
),
]
|
tfiers/arenberg-online
|
polls/migrations/0005_auto_20150428_0016.py
|
Python
|
mit
| 510 | 0.001961 |
from kervi.hal.gpio import IGPIODeviceDriver
class GPIODriver(IGPIODeviceDriver):
def __init__(self, gpio_id="generic_gpio"):
IGPIODeviceDriver.__init__(self, gpio_id)
pass
def _get_channel_type(self, channel):
from kervi.hal.gpio import CHANNEL_TYPE_GPIO, CHANNEL_TYPE_ANALOG_IN, CHANNEL_TYPE_ANALOG_OUT
if channel in ["GPIO1", "GPIO2", "GPIO3"]:
return CHANNEL_TYPE_GPIO
elif channel in ["DAC1", "DAC2"]:
return CHANNEL_TYPE_ANALOG_OUT
elif channel in ["ADC1", "ADC2"]:
return CHANNEL_TYPE_ANALOG_IN
def _get_channel_names(self):
return ["GPIO1", "GPIO2", "GPIO3", "DAC1", "DAC2", "ADC1", "ADC2"]
@property
def name(self):
return "Generic GPIO"
def define_as_input(self, pin, pullup=None, bounce_time=0):
print("define pin in")
def define_as_output(self, pin):
print("define pin out")
def define_as_pwm(self, pin, frequency, duty_cycle):
print("define pwm")
def set(self, pin, state):
print("set pin", state)
def get(self, pin):
print("get pin")
return 0
def pwm_start(self, channel, duty_cycle=None, frequency=None):
print("start pwm")
def pwm_stop(self, pin):
print("stop pwm")
def listen(self, pin, callback, bounce_time=0):
print("listen rising")
def listen_rising(self, pin, callback, bounce_time=0):
print("listen rising")
def listen_falling(self, pin, callback, bounce_time=0):
print("listen falling")
|
kervi/kervi
|
kervi-hal-win/kervi/platforms/windows/gpio.py
|
Python
|
mit
| 1,575 | 0.000635 |
#!/usr/bin/env python3
_version = (0,4,0)
|
arizona-phonological-imaging-lab/autotres
|
a3/constants.py
|
Python
|
apache-2.0
| 43 | 0.046512 |
# -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import uuid
import webob.dec
import webob.exc
from ooi import utils
import ooi.wsgi
application_url = "https://foo.example.org:8774/ooiv1"
tenants = {
"foo": {"id": uuid.uuid4().hex,
"name": "foo"},
"bar": {"id": uuid.uuid4().hex,
"name": "bar"},
"baz": {"id": uuid.uuid4().hex,
"name": "baz"},
}
flavors = {
1: {
"id": 1,
"name": "foo",
"vcpus": 2,
"ram": 256,
"disk": 10,
},
2: {
"id": 2,
"name": "bar",
"vcpus": 4,
"ram": 2014,
"disk": 20,
}
}
images = {
"foo": {
"id": "foo",
"name": "foo",
},
"bar": {
"id": "bar",
"name": "bar",
}
}
volumes = {
tenants["foo"]["id"]: [
{
"id": uuid.uuid4().hex,
"displayName": "foo",
"size": 2,
"status": "available",
"attachments": [],
},
{
"id": uuid.uuid4().hex,
"displayName": "bar",
"size": 3,
"status": "available",
"attachments": [],
},
{
"id": uuid.uuid4().hex,
"displayName": "baz",
"size": 5,
"status": "available",
"attachments": [],
},
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": uuid.uuid4().hex,
"displayName": "volume",
"size": 5,
"status": "in-use",
},
],
}
pools = {
tenants["foo"]["id"]: [
{
"id": "foo",
"name": "foo",
},
{
"id": "bar",
"name": "bar",
}
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": "public",
"name": "public",
},
],
}
linked_vm_id = uuid.uuid4().hex
allocated_ip = "192.168.253.23"
floating_ips = {
tenants["foo"]["id"]: [],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"fixed_ip": "10.0.0.2",
"id": uuid.uuid4().hex,
"instance_id": linked_vm_id,
"ip": "192.168.253.1",
"pool": pools[tenants["baz"]["id"]][0]["name"],
},
{
"fixed_ip": None,
"id": uuid.uuid4().hex,
"instance_id": None,
"ip": "192.168.253.2",
"pool": pools[tenants["baz"]["id"]][0]["name"],
},
],
}
servers = {
tenants["foo"]["id"]: [
{
"id": uuid.uuid4().hex,
"name": "foo",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["foo"]["id"]},
"status": "ACTIVE",
},
{
"id": uuid.uuid4().hex,
"name": "bar",
"flavor": {"id": flavors[2]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "SHUTOFF",
},
{
"id": uuid.uuid4().hex,
"name": "baz",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "ERROR",
},
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": linked_vm_id,
"name": "withvolume",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "ACTIVE",
"os-extended-volumes:volumes_attached": [
{"id": volumes[tenants["baz"]["id"]][0]["id"]}
],
"addresses": {
"private": [
{"addr": floating_ips[tenants["baz"]["id"]][0]["fixed_ip"],
"OS-EXT-IPS:type": "fixed",
"OS-EXT-IPS-MAC:mac_addr": "1234"},
{"addr": floating_ips[tenants["baz"]["id"]][0]["ip"],
"OS-EXT-IPS:type": "floating",
"OS-EXT-IPS-MAC:mac_addr": "1234"},
]
}
}
],
}
# avoid circular definition of attachments
volumes[tenants["baz"]["id"]][0]["attachments"] = [{
# how consistent can OpenStack be!
# depending on using /servers/os-volume_attachments
# or /os-volumes it will return different field names
"server_id": servers[tenants["baz"]["id"]][0]["id"],
"serverId": servers[tenants["baz"]["id"]][0]["id"],
"attachment_id": uuid.uuid4().hex,
"volumeId": volumes[tenants["baz"]["id"]][0]["id"],
"volume_id": volumes[tenants["baz"]["id"]][0]["id"],
"device": "/dev/vdb",
"id": volumes[tenants["baz"]["id"]][0]["id"],
}]
def fake_query_results():
cats = []
# OCCI Core
cats.append(
'link; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="link"')
cats.append(
'resource; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="resource"; '
'rel="http://schemas.ogf.org/occi/core#entity"')
cats.append(
'entity; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="entity"')
# OCCI Infrastructure Compute
cats.append(
'compute; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="compute resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'start; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="start compute instance"')
cats.append(
'stop; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="stop compute instance"')
cats.append(
'restart; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="restart compute instance"')
cats.append(
'suspend; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="suspend compute instance"')
# OCCI Templates
cats.append(
'os_tpl; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="mixin"; title="OCCI OS Template"')
cats.append(
'resource_tpl; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="mixin"; title="OCCI Resource Template"')
# OpenStack Images
cats.append(
'bar; '
'scheme="http://schemas.openstack.org/template/os#"; '
'class="mixin"; title="bar"; '
'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"')
cats.append(
'foo; '
'scheme="http://schemas.openstack.org/template/os#"; '
'class="mixin"; title="foo"; '
'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"')
# OpenStack Flavors
cats.append(
'1; '
'scheme="http://schemas.openstack.org/template/resource#"; '
'class="mixin"; title="Flavor: foo"; '
'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"')
cats.append(
'2; '
'scheme="http://schemas.openstack.org/template/resource#"; '
'class="mixin"; title="Flavor: bar"; '
'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"')
# OCCI Infrastructure Network
cats.append(
'network; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="network resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'ipnetwork; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network#"; '
'class="mixin"; title="IP Networking Mixin"')
cats.append(
'up; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network/action#"; '
'class="action"; title="up network instance"')
cats.append(
'down; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network/action#"; '
'class="action"; title="down network instance"')
cats.append(
'networkinterface; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="network link resource"; '
'rel="http://schemas.ogf.org/occi/core#link"')
cats.append(
'ipnetworkinterface; '
'scheme="http://schemas.ogf.org/occi/infrastructure/'
'networkinterface#"; '
'class="mixin"; title="IP Network interface Mixin"')
# OCCI Infrastructure Storage
cats.append(
'storage; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="storage resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'storagelink; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="storage link resource"; '
'rel="http://schemas.ogf.org/occi/core#link"')
cats.append(
'offline; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="offline storage instance"')
cats.append(
'online; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="online storage instance"')
cats.append(
'backup; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="backup storage instance"')
cats.append(
'resize; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="resize storage instance"')
cats.append(
'snapshot; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="snapshot storage instance"')
# OpenStack contextualization
cats.append(
'user_data; '
'scheme="http://schemas.openstack.org/compute/instance#"; '
'class="mixin"; title="Contextualization extension - user_data"')
cats.append(
'public_key; '
'scheme="http://schemas.openstack.org/instance/credentials#"; '
'class="mixin"; title="Contextualization extension - public_key"')
result = []
for c in cats:
result.append(("Category", c))
return result
class FakeOpenStackFault(ooi.wsgi.Fault):
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
406: "notAceptable",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
@webob.dec.wsgify()
def __call__(self, req):
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code)
explanation = self.wrapped_exc.explanation
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
self.wrapped_exc.body = utils.utf8(json.dumps(fault_data))
self.wrapped_exc.content_type = "application/json"
return self.wrapped_exc
class FakeApp(object):
"""Poor man's fake application."""
def __init__(self):
self.routes = {}
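        # self.routes maps a request path (e.g. "/<tenant_id>/servers") to a canned JSON response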
for tenant in tenants.values():
path = "/%s" % tenant["id"]
self._populate(path, "server", servers[tenant["id"]], actions=True)
self._populate(path, "volume", volumes[tenant["id"]], "os-volumes")
self._populate(path, "floating_ip_pool", pools[tenant["id"]],
"os-floating-ip-pools")
self._populate(path, "floating_ip", floating_ips[tenant["id"]],
"os-floating-ips")
            # NOTE(aloga): dict_values in Py3 is not serializable in JSON
self._populate(path, "image", list(images.values()))
self._populate(path, "flavor", list(flavors.values()))
self._populate_attached_volumes(path, servers[tenant["id"]],
volumes[tenant["id"]])
def _populate(self, path_base, obj_name, obj_list,
objs_path=None, actions=[]):
objs_name = "%ss" % obj_name
if objs_path:
path = "%s/%s" % (path_base, objs_path)
else:
path = "%s/%s" % (path_base, objs_name)
objs_details_path = "%s/detail" % path
self.routes[path] = create_fake_json_resp({objs_name: obj_list})
self.routes[objs_details_path] = create_fake_json_resp(
{objs_name: obj_list})
for o in obj_list:
obj_path = "%s/%s" % (path, o["id"])
self.routes[obj_path] = create_fake_json_resp({obj_name: o})
if actions:
action_path = "%s/action" % obj_path
self.routes[action_path] = webob.Response(status=202)
def _populate_attached_volumes(self, path, server_list, vol_list):
for s in server_list:
attachments = []
if "os-extended-volumes:volumes_attached" in s:
for attach in s["os-extended-volumes:volumes_attached"]:
for v in vol_list:
if attach["id"] == v["id"]:
attachments.append(v["attachments"][0])
path_base = "%s/servers/%s/os-volume_attachments" % (path, s["id"])
self.routes[path_base] = create_fake_json_resp(
{"volumeAttachments": attachments}
)
for attach in attachments:
obj_path = "%s/%s" % (path_base, attach["id"])
self.routes[obj_path] = create_fake_json_resp(
{"volumeAttachment": attach})
@webob.dec.wsgify()
def __call__(self, req):
if req.method == "GET":
return self._do_get(req)
elif req.method == "POST":
return self._do_post(req)
elif req.method == "DELETE":
return self._do_delete(req)
def _do_create_server(self, req):
# TODO(enolfc): this should check the json is
# semantically correct
s = {"server": {"id": "foo",
"name": "foo",
"flavor": {"id": "1"},
"image": {"id": "2"},
"status": "ACTIVE"}}
return create_fake_json_resp(s)
def _do_create_volume(self, req):
# TODO(enolfc): this should check the json is
# semantically correct
s = {"volume": {"id": "foo",
"displayName": "foo",
"size": 1,
"status": "on-line"}}
return create_fake_json_resp(s)
def _do_create_attachment(self, req):
v = {"volumeAttachment": {"serverId": "foo",
"volumeId": "bar",
"device": "/dev/vdb"}}
return create_fake_json_resp(v, 202)
def _do_allocate_ip(self, req):
body = req.json_body.copy()
pool = body.popitem()
tenant = req.path_info.split('/')[1]
for p in pools[tenant]:
if p["name"] == pool[1]:
break
else:
exc = webob.exc.HTTPNotFound()
return FakeOpenStackFault(exc)
ip = {"floating_ip": {"ip": allocated_ip, "id": 1}}
return create_fake_json_resp(ip, 202)
def _do_post(self, req):
if req.path_info.endswith("servers"):
return self._do_create_server(req)
if req.path_info.endswith("os-volumes"):
return self._do_create_volume(req)
elif req.path_info.endswith("action"):
body = req.json_body.copy()
action = body.popitem()
if action[0] in ["os-start", "os-stop", "reboot",
"addFloatingIp", "removeFloatingIp"]:
return self._get_from_routes(req)
elif req.path_info.endswith("os-volume_attachments"):
return self._do_create_attachment(req)
elif req.path_info.endswith("os-floating-ips"):
return self._do_allocate_ip(req)
raise Exception
def _do_delete(self, req):
self._do_get(req)
tested_paths = {
r"/[^/]+/servers/[^/]+/os-volume_attachments/[^/]+$": 202,
r"/[^/]+/os-floating-ips/[^/]+$": 202,
r"/[^/]+/servers/[^/]+$": 204,
r"/[^/]+/os-volumes/[^/]+$": 204,
}
for p, st in tested_paths.items():
if re.match(p, req.path_info):
return create_fake_json_resp({}, st)
raise Exception
def _do_get(self, req):
return self._get_from_routes(req)
def _get_from_routes(self, req):
try:
ret = self.routes[req.path_info]
except KeyError:
exc = webob.exc.HTTPNotFound()
ret = FakeOpenStackFault(exc)
return ret
def create_fake_json_resp(data, status=200):
r = webob.Response()
r.headers["Content-Type"] = "application/json"
r.charset = "utf8"
r.body = json.dumps(data).encode("utf8")
r.status_code = status
return r
|
orviz/ooi
|
ooi/tests/fakes.py
|
Python
|
apache-2.0
| 17,912 | 0 |
from tkinter import ttk
from ozopython.colorLanguageTranslator import ColorLanguageTranslator
from .ozopython import *
from tkinter import *
def run(filename):
code = ozopython.compile(filename)
colorcode = ColorLanguageTranslator.translate(code)
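    # load() below flashes one color of the translated program on the canvas every 50 ms,
    # while the progress bar tracks how much of the color code has been shown so far.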
def load(prog, prog_bar):
colormap = {
'K': "#000000",
'R': "#ff0000",
'G': "#00ff00",
'Y': "#ffff00",
'B': "#0000ff",
'M': "#ff00ff",
'C': "#00ffff",
'W': "#ffffff"
}
head, *tail = prog
canvas.itemconfig(circle, fill=colormap[head])
prog = tail
prog_bar["value"] = len(colorcode) - len(prog)
if len(prog) != 0:
canvas.after(50, lambda: load(prog, prog_bar))
window = Tk()
progress = ttk.Progressbar(window, orient="horizontal", length='5c', mode="determinate")
progress["value"] = 0
progress["maximum"] = len(colorcode)
button = Button(window, text="Load", command=lambda: load(colorcode, progress))
button.pack(pady=5)
exit = Button(window, text="Exit", command=lambda: quit())
exit.pack(side="bottom",pady=5)
progress.pack()
canvas = Canvas(window, height='6c', width='6c')
circle = canvas.create_oval('0.5c', '0.5c', '5.5c', '5.5c', fill="white")
canvas.pack()
window.mainloop()
|
Kaarel94/Ozobot-Python
|
ozopython/__init__.py
|
Python
|
mit
| 1,375 | 0.004364 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_batch_history
|
odoousers2014/odoo_addons-2
|
clv_batch/history/__init__.py
|
Python
|
agpl-3.0
| 1,429 | 0.011896 |
#!/usr/bin/env python
# print_needed_variables.py
#
# Copyright (C) 2014, 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
#
import os
import sys
if __name__ == '__main__' and __package__ is None:
dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if dir_path != '/usr':
sys.path.insert(1, dir_path)
from kano_profile.badges import load_badge_rules
from kano.utils import write_json, uniqify_list
all_rules = load_badge_rules()
variables_needed = dict()
for category, subcats in all_rules.iteritems():
for subcat, items in subcats.iteritems():
for item, rules in items.iteritems():
targets = rules['targets']
for target in targets:
app = target[0]
variable = target[1]
variables_needed.setdefault(app, list()).append(variable)
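# variables_needed maps app name -> list of variable names; the lists are de-duplicated
# below and written out as variables_needed.json, roughly {"<app>": ["<variable>", ...], ...}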
for key in variables_needed.iterkeys():
variables_needed[key] = uniqify_list(variables_needed[key])
write_json('variables_needed.json', variables_needed, False)
|
rcocetta/kano-profile
|
tools/print_needed_variables.py
|
Python
|
gpl-2.0
| 1,082 | 0.000924 |
from distutils.core import setup
setup(
name='browser-cookie3',
version='0.13.0',
packages=['browser_cookie3'],
# look for package contents in current directory
package_dir={'browser_cookie3': '.'},
author='Boris Babic',
author_email='boris.ivan.babic@gmail.com',
    description='Loads cookies from your browser into a cookiejar object so you can download with urllib and other libraries the same content you see in the web browser.',
url='https://github.com/borisbabic/browser_cookie3',
install_requires=['pyaes', 'pbkdf2', 'keyring', 'lz4', 'pycryptodome', 'SecretStorage'],
license='lgpl'
)
|
borisbabic/browser_cookie3
|
setup.py
|
Python
|
lgpl-3.0
| 632 | 0.003165 |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
##
# Implements SFA GID. GIDs are based on certificates, and the GID class is a
# descendant of the certificate class.
##
import xmlrpclib
import uuid
from sfa.trust.certificate import Certificate
from sfa.util.faults import *
from sfa.util.sfalogging import logger
from sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
##
# Create a new uuid. Returns the UUID as a string.
def create_uuid():
return str(uuid.uuid4().int)
##
# GID is a tuple:
# (uuid, urn, public_key)
#
# UUID is a unique identifier and is created by the python uuid module
# (or the utility function create_uuid() in gid.py).
#
# HRN is a human readable name. It is a dotted form similar to a backward domain
# name. For example, planetlab.us.arizona.bakers.
#
# URN is a human readable identifier of form:
# "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"
# For example, urn:publicid:IDN+planetlab:us:arizona+user+bakers
#
# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.
# It is a Keypair object as defined in the cert.py module.
#
# It is expected that there is a one-to-one pairing between UUIDs and HRN,
# but it is uncertain how this would be enforced or if it needs to be enforced.
#
# These fields are encoded using xmlrpc into the subjectAltName field of the
# x509 certificate. Note: Call encode() once the fields have been filled in
# to perform this encoding.
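# Example (illustrative values only): a GID built roughly as
#   gid = GID(create=True, subject="test",
#             urn="urn:publicid:IDN+planetlab:us:arizona+user+bakers", uuid=create_uuid())
#   gid.encode()
# stores a subjectAltName string of the form
#   "URI:urn:publicid:IDN+planetlab:us:arizona+user+bakers, URI:urn:uuid:<uuid>"
# which decode() later parses back into the urn, hrn and uuid fields.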
class GID(Certificate):
uuid = None
hrn = None
urn = None
##
# Create a new GID object
#
# @param create If true, create the X509 certificate
# @param subject If subject!=None, create the X509 cert and set the subject name
# @param string If string!=None, load the GID from a string
# @param filename If filename!=None, load the GID from a file
# @param lifeDays life of GID in days - default is 1825==5 years
def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):
Certificate.__init__(self, lifeDays, create, subject, string, filename)
if subject:
logger.debug("Creating GID for subject: %s" % subject)
if uuid:
self.uuid = int(uuid)
if hrn:
self.hrn = hrn
self.urn = hrn_to_urn(hrn, 'unknown')
if urn:
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def set_uuid(self, uuid):
if isinstance(uuid, str):
self.uuid = int(uuid)
else:
self.uuid = uuid
def get_uuid(self):
if not self.uuid:
self.decode()
return self.uuid
def set_hrn(self, hrn):
self.hrn = hrn
def get_hrn(self):
if not self.hrn:
self.decode()
return self.hrn
def set_urn(self, urn):
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def get_urn(self):
if not self.urn:
self.decode()
return self.urn
def get_type(self):
if not self.urn:
self.decode()
_, t = urn_to_hrn(self.urn)
return t
##
# Encode the GID fields and package them into the subject-alt-name field
# of the X509 certificate. This must be called prior to signing the
# certificate. It may only be called once per certificate.
def encode(self):
if self.urn:
urn = self.urn
else:
urn = hrn_to_urn(self.hrn, None)
str = "URI:" + urn
if self.uuid:
str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
self.set_data(str, 'subjectAltName')
##
# Decode the subject-alt-name field of the X509 certificate into the
# fields of the GID. This is automatically called by the various get_*()
# functions in this class.
def decode(self):
data = self.get_data('subjectAltName')
dict = {}
if data:
if data.lower().startswith('uri:http://<params>'):
dict = xmlrpclib.loads(data[11:])[0][0]
else:
spl = data.split(', ')
for val in spl:
if val.lower().startswith('uri:urn:uuid:'):
dict['uuid'] = uuid.UUID(val[4:]).int
elif val.lower().startswith('uri:urn:publicid:idn+'):
dict['urn'] = val[4:]
self.uuid = dict.get("uuid", None)
self.urn = dict.get("urn", None)
self.hrn = dict.get("hrn", None)
if self.urn:
self.hrn = urn_to_hrn(self.urn)[0]
##
# Dump the credential to stdout.
#
# @param indent specifies a number of spaces to indent the output
# @param dump_parents If true, also dump the parents of the GID
def dump(self, *args, **kwargs):
print self.dump_string(*args,**kwargs)
def dump_string(self, indent=0, dump_parents=False):
result=" "*(indent-2) + "GID\n"
result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if self.parent and dump_parents:
result += " "*indent + "parent:\n"
result += self.parent.dump_string(indent+4, dump_parents)
return result
##
# Verify the chain of authenticity of the GID. First perform the checks
# of the certificate class (verifying that each parent signs the child,
# etc). In addition, GIDs also confirm that the parent's HRN is a prefix
# of the child's HRN, and the parent is of type 'authority'.
#
# Verifying these prefixes prevents a rogue authority from signing a GID
# for a principal that is not a member of that authority. For example,
# planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
def verify_chain(self, trusted_certs = None):
# do the normal certificate verification stuff
trusted_root = Certificate.verify_chain(self, trusted_certs)
if self.parent:
# make sure the parent's hrn is a prefix of the child's hrn
if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
# Parent must also be an authority (of some type) to sign a GID
# There are multiple types of authority - accept them all here
if not self.parent.get_type().find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
# Then recurse up the chain - ensure the parent is a trusted
# root or is in the namespace of a trusted root
self.parent.verify_chain(trusted_certs)
else:
# make sure that the trusted root's hrn is a prefix of the child's
trusted_gid = GID(string=trusted_root.save_to_string())
trusted_type = trusted_gid.get_type()
trusted_hrn = trusted_gid.get_hrn()
#if trusted_type == 'authority':
# trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
cur_hrn = self.get_hrn()
if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert %s" % (trusted_hrn, cur_hrn))
# There are multiple types of authority - accept them all here
if not trusted_type.find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
return
|
dana-i2cat/felix
|
ofam/src/src/ext/sfa/trust/gid.py
|
Python
|
apache-2.0
| 9,265 | 0.004425 |
# coding=utf-8
"""Ingest workflow management tool
FileNameSource Class
"""
__copyright__ = "Copyright (C) 2016 University of Maryland"
__license__ = "GNU AFFERO GENERAL PUBLIC LICENSE, Version 3"
import abc
import os
import sys
import psycopg2
class FileNameSource:
def __init__(self): pass
def __iter__(self): return self
@abc.abstractmethod
def next(self): pass
def confirm_completion(self, path):
return True
class FileList(FileNameSource):
def __init__(self, args, cfg):
FileNameSource.__init__(self)
src = args['<source_directory>']
self.fp = sys.stdin if src == '-' else open(src, 'rU')
self.prefix = args['--prefix']
self.offset = len(self.prefix)
def next(self):
v = self.fp.next().strip()
if not v.startswith(self.prefix):
print v, ' not in ', self.prefix, 'ignoring '
return
return decode_str(v[self.offset:])
class DirectoryWalk(FileNameSource):
def __init__(self, args, cfg):
FileNameSource.__init__(self)
src = args['<source_directory>']
if src == '-':
print ' Incompatible mode -- Cannot Walk stdin '
raise ValueError
self.prefix = args['--prefix']
self.offset = len(self.prefix)
self.walker = os.walk(src, topdown=True, followlinks=True)
self.dirname = None
self.files = None
def next(self):
while not self.dirname or not self.files:
self.dirname, _, self.files = self.walker.next()
return os.path.join(self.dirname[self.offset:], self.files.pop())
class DB:
def __init__(self, args, cfg):
defaults = (('user', 'drastic'), ('database', 'drastic'), ('password', 'drastic'), ('host', 'localhost'))
credentials = dict(user=cfg.get('postgres', 'user'),
database=cfg.get('postgres', 'database'),
password=cfg.get('postgres', 'password'),
host=cfg.get('postgres', 'host'))
for k, v in defaults:
if not credentials[k]: credentials[k] = v
self.credentials = credentials
self.cnx = psycopg2.connect(**credentials)
self.cs1 = self.cnx.cursor()
table = args.get('--dataset', 'resource')
if not table: table = 'resource'
self.tablename = table
### Do JIT set up of other queries....
self.update_status = False
self.db_initialized = False
def summary(self):
cmd = '''SELECT status,count(*) from "{0}" group by status order by status '''.format(self.tablename)
try:
self.cs1.execute(cmd)
for v in self.cs1: print '{0:-10s}\t{1:,}'.format(*v)
except Exception as e:
print e
def _setup_db(self, table):
cs = self.cnx.cursor()
# Create the status Enum
try:
cs.execute("CREATE TYPE resource_status AS ENUM ('READY','IN-PROGRESS','DONE','BROKEN','VERIFIED')")
except:
cs.connection.rollback()
#
cmds = [
'''CREATE TABLE IF NOT EXISTS "{0}" (
path TEXT PRIMARY KEY,
status resource_status DEFAULT 'READY',
started timestamp,
fs_sync boolean)''',
'''CREATE INDEX "IDX_{0}_01_status" ON "{0}" (status ) WHERE status <> 'DONE' ''',
'''CREATE INDEX "IDX_{0}_01_fs_sync" ON "{0}" (fs_sync) WHERE fs_sync is not True''']
for cmd in cmds:
try:
cs.execute(cmd.format(table))
cs.connection.commit()
except Exception as e:
cs.connection.rollback()
class DBPrepare(DB):
"""
Class to be used when preparing.
"""
def __init__(self, args, cfg):
DB.__init__(self, args, cfg)
self.prefix = (args['--prefix'])
self.offset = len(self.prefix)
self.cs = self.cnx.cursor('AB1', withhold=True)
self._setup_db(self.tablename)
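        # Prepared statement I1: insert the path with status READY only if that path
        # is not already present in the table (an "insert if not exists").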
cmd = '''PREPARE I1 ( text ) AS insert into "{0}" (path,status)
SELECT $1,'READY'::resource_status WHERE NOT EXISTS (SELECT TRUE FROM "{0}" where path = $1)'''
self.cs1.execute(cmd.format(self.tablename))
def prepare(self, path ):
self.cs1.execute("EXECUTE I1(%s); commit", [path])
return True
class DBQuery(FileNameSource, DB):
"""
Class to be used to get file names when injecting.
"""
def __init__(self, args, cfg):
DB.__init__(self,args,cfg)
FileNameSource.__init__(self)
self.prefix = (args['--prefix'])
self.offset = len(self.prefix)
self.fetch_cs = self.cnx.cursor()
cmd = '''PREPARE F1 (integer) AS SELECT path FROM "{0}" where status = 'READY' LIMIT $1 '''.format(self.tablename)
self.fetch_cs.execute(cmd)
self.fetch_cs.execute('EXECUTE F1 (1000)')
# And prepare the update status cmd
ucmd = '''PREPARE M1 (TEXT,resource_status) AS UPDATE "{0}" SET status='DONE' WHERE path = $1 and status <> $2 '''.format(
self.tablename)
self.cs1.execute(ucmd)
        # And retrieve the values for the status
self.cs1.execute('''SELECT unnest(enum_range(NULL::resource_status))''')
self.status_values = set( ( k[0] for k in self.cs1.fetchall() ))
return
def confirm_completion(self, path, status = 'DONE'):
if status not in self.status_values :
if status == 'FAILED' : status = 'BROKEN'
else : raise ValueError("bad value for enum -- {} : should be {}".format(status,self.status_values) )
####
try:
self.cs1.execute('EXECUTE M1(%s,%s)', [path,status])
updates = self.cs1.rowcount
self.cs1.connection.commit()
return True
except Exception as e:
print 'failed to update status for ', path,'\n',e
self.cs1.connection.rollback()
return False
def next(self):
"""
:return: next path from DB that is ready...
This function will re-issue the Select when the current one is exhausted.
        This attempts to avoid holding too many locks on too many records.
"""
k = self.fetch_cs.fetchone()
#
if not k:
self.fetch_cs.execute('EXECUTE F1 (1000)')
k = self.fetch_cs.fetchone()
#
if k: return k[0].decode('utf-8')
raise StopIteration
def CreateFileNameSource(args, cfg):
"""
use the parameters to prepare an iterator that will deliver all the (suitably normalized) files to be injected
:param args: command line args
:param cfg: global, persistent parameters
:return: iterator
"""
src = args['<source_directory>']
prefix = args['--prefix']
if not prefix:
prefix = '/data'
else:
prefix = prefix.rstrip('/')
if not src.startswith(prefix):
print src, ' must be a subdirectory of the host data directory (--prefix=', prefix, ')'
print 'If you did not specify it, please do so'
sys.exit(1)
#########
## Set up a source that gets list of files from a file
if args['--read'] : return FileList(args, cfg)
if args['--walk']: return DirectoryWalk(args, cfg)
if args['--postgres'] : return DBQuery(args, cfg)
if args['--sqlite3'] :
raise NotImplementedError
def decode_str(s):
"""
:param s: string to be converted to unicode
:return: unicode version
"""
if isinstance(s, unicode): return s
try:
return s.decode('utf8')
except UnicodeDecodeError:
try:
return s.decode('iso8859-1')
except UnicodeDecodeError:
s_ignore = s.decode('utf8', 'ignore')
return s_ignore
|
UMD-DRASTIC/drastic
|
drastic/DrasticLoader/FileNameSource.py
|
Python
|
agpl-3.0
| 7,914 | 0.006444 |
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Various test utility functions.
@author: Kenneth Hoste (Ghent University)
"""
import copy
import fileinput
import os
import re
import shutil
import sys
import tempfile
from vsc.utils import fancylogger
from vsc.utils.patterns import Singleton
from vsc.utils.testing import EnhancedTestCase as _EnhancedTestCase
import easybuild.tools.build_log as eb_build_log
import easybuild.tools.options as eboptions
import easybuild.tools.toolchain.utilities as tc_utils
import easybuild.tools.module_naming_scheme.toolchain as mns_toolchain
from easybuild.framework.easyconfig import easyconfig
from easybuild.framework.easyblock import EasyBlock
from easybuild.main import main
from easybuild.tools import config
from easybuild.tools.config import module_classes, set_tmpdir
from easybuild.tools.environment import modify_env
from easybuild.tools.filetools import mkdir, read_file
from easybuild.tools.module_naming_scheme import GENERAL_CLASS
from easybuild.tools.modules import modules_tool
from easybuild.tools.options import CONFIG_ENV_VAR_PREFIX, EasyBuildOptions
# make sure tests are robust against any non-default configuration settings;
# involves ignoring any existing configuration files that are picked up, and cleaning the environment
# this is tackled here rather than in suite.py, to make sure this is also done when test modules are run separately
# clean up environment from unwanted $EASYBUILD_X env vars
for key in os.environ.keys():
if key.startswith('%s_' % CONFIG_ENV_VAR_PREFIX):
del os.environ[key]
# ignore any existing configuration files
go = EasyBuildOptions(go_useconfigfiles=False)
os.environ['EASYBUILD_IGNORECONFIGFILES'] = ','.join(go.options.configfiles)
# redefine $TEST_EASYBUILD_X env vars as $EASYBUILD_X
test_env_var_prefix = 'TEST_EASYBUILD_'
for key in os.environ.keys():
if key.startswith(test_env_var_prefix):
val = os.environ[key]
del os.environ[key]
newkey = '%s_%s' % (CONFIG_ENV_VAR_PREFIX, key[len(test_env_var_prefix):])
os.environ[newkey] = val
class EnhancedTestCase(_EnhancedTestCase):
"""Enhanced test case, provides extra functionality (e.g. an assertErrorRegex method)."""
def setUp(self):
"""Set up testcase."""
super(EnhancedTestCase, self).setUp()
# keep track of log handlers
log = fancylogger.getLogger(fname=False)
self.orig_log_handlers = log.handlers[:]
self.orig_tmpdir = tempfile.gettempdir()
# use a subdirectory for this test (which we can clean up easily after the test completes)
self.test_prefix = set_tmpdir()
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
fd, self.logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
os.close(fd)
self.cwd = os.getcwd()
# keep track of original environment to restore
self.orig_environ = copy.deepcopy(os.environ)
# keep track of original environment/Python search path to restore
self.orig_sys_path = sys.path[:]
testdir = os.path.dirname(os.path.abspath(__file__))
self.test_sourcepath = os.path.join(testdir, 'sandbox', 'sources')
os.environ['EASYBUILD_SOURCEPATH'] = self.test_sourcepath
os.environ['EASYBUILD_PREFIX'] = self.test_prefix
self.test_buildpath = tempfile.mkdtemp()
os.environ['EASYBUILD_BUILDPATH'] = self.test_buildpath
self.test_installpath = tempfile.mkdtemp()
os.environ['EASYBUILD_INSTALLPATH'] = self.test_installpath
# make sure that the tests only pick up easyconfigs provided with the tests
os.environ['EASYBUILD_ROBOT_PATHS'] = os.path.join(testdir, 'easyconfigs')
# make sure no deprecated behaviour is being triggered (unless intended by the test)
# trip *all* log.deprecated statements by setting deprecation version ridiculously high
self.orig_current_version = eb_build_log.CURRENT_VERSION
os.environ['EASYBUILD_DEPRECATED'] = '10000000'
init_config()
# remove any entries in Python search path that seem to provide easyblocks
for path in sys.path[:]:
if os.path.exists(os.path.join(path, 'easybuild', 'easyblocks', '__init__.py')):
sys.path.remove(path)
# add test easyblocks to Python search path and (re)import and reload easybuild modules
import easybuild
sys.path.append(os.path.join(testdir, 'sandbox'))
reload(easybuild)
import easybuild.easyblocks
reload(easybuild.easyblocks)
import easybuild.easyblocks.generic
reload(easybuild.easyblocks.generic)
reload(easybuild.tools.module_naming_scheme) # required to run options unit tests stand-alone
modtool = modules_tool()
# purge out any loaded modules with original $MODULEPATH before running each test
modtool.purge()
self.reset_modulepath([os.path.join(testdir, 'modules')])
def tearDown(self):
"""Clean up after running testcase."""
super(EnhancedTestCase, self).tearDown()
# go back to where we were before
os.chdir(self.cwd)
# restore original environment
modify_env(os.environ, self.orig_environ)
# restore original Python search path
sys.path = self.orig_sys_path
# remove any log handlers that were added (so that log files can be effectively removed)
log = fancylogger.getLogger(fname=False)
new_log_handlers = [h for h in log.handlers if h not in self.orig_log_handlers]
for log_handler in new_log_handlers:
log_handler.close()
log.removeHandler(log_handler)
# cleanup test tmp dir
try:
shutil.rmtree(self.test_prefix)
except (OSError, IOError):
pass
# restore original 'parent' tmpdir
for var in ['TMPDIR', 'TEMP', 'TMP']:
os.environ[var] = self.orig_tmpdir
# reset to make sure tempfile picks up new temporary directory to use
tempfile.tempdir = None
def reset_modulepath(self, modpaths):
"""Reset $MODULEPATH with specified paths."""
modtool = modules_tool()
for modpath in os.environ.get('MODULEPATH', '').split(os.pathsep):
modtool.remove_module_path(modpath)
# make very sure $MODULEPATH is totally empty
# some paths may be left behind, e.g. when they contain environment variables
# example: "module unuse Modules/$MODULE_VERSION/modulefiles" may not yield the desired result
os.environ['MODULEPATH'] = ''
for modpath in modpaths:
modtool.add_module_path(modpath)
def eb_main(self, args, do_build=False, return_error=False, logfile=None, verbose=False, raise_error=False,
reset_env=True):
"""Helper method to call EasyBuild main function."""
cleanup()
myerr = False
if logfile is None:
logfile = self.logfile
# clear log file
if logfile:
f = open(logfile, 'w')
f.write('')
f.close()
env_before = copy.deepcopy(os.environ)
try:
main((args, logfile, do_build))
except SystemExit:
pass
except Exception, err:
myerr = err
if verbose:
print "err: %s" % err
if logfile:
logtxt = read_file(logfile)
else:
logtxt = None
os.chdir(self.cwd)
# make sure config is reinitialized
init_config()
# restore environment to what it was before running main,
# changes may have been made by eb_main (e.g. $TMPDIR & co)
if reset_env:
modify_env(os.environ, env_before)
tempfile.tempdir = None
if myerr and raise_error:
raise myerr
if return_error:
return logtxt, myerr
else:
return logtxt
def setup_hierarchical_modules(self):
"""Setup hierarchical modules to run tests on."""
mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
# simply copy module files under 'Core' and 'Compiler' to test install path
# EasyBuild is responsible for making sure that the toolchain can be loaded using the short module name
mkdir(mod_prefix, parents=True)
for mod_subdir in ['Core', 'Compiler', 'MPI']:
src_mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules', mod_subdir)
shutil.copytree(src_mod_path, os.path.join(mod_prefix, mod_subdir))
# make sure only modules in a hierarchical scheme are available, mixing modules installed with
        # a flat scheme like EasyBuildMNS and a hierarchical one like HierarchicalMNS doesn't work
self.reset_modulepath([mod_prefix, os.path.join(mod_prefix, 'Core')])
# tweak use statements in modules to ensure correct paths
mpi_pref = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
for modfile in [
os.path.join(mod_prefix, 'Core', 'GCC', '4.7.2'),
os.path.join(mod_prefix, 'Core', 'GCC', '4.8.3'),
os.path.join(mod_prefix, 'Core', 'icc', '2013.5.192-GCC-4.8.3'),
os.path.join(mod_prefix, 'Core', 'ifort', '2013.5.192-GCC-4.8.3'),
os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'),
os.path.join(mod_prefix, 'Compiler', 'intel', '2013.5.192-GCC-4.8.3', 'impi', '4.1.3.049'),
os.path.join(mpi_pref, 'FFTW', '3.3.3'),
os.path.join(mpi_pref, 'OpenBLAS', '0.2.6-LAPACK-3.4.2'),
os.path.join(mpi_pref, 'ScaLAPACK', '2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2'),
]:
for line in fileinput.input(modfile, inplace=1):
line = re.sub(r"(module\s*use\s*)/tmp/modules/all",
r"\1%s/modules/all" % self.test_installpath,
line)
sys.stdout.write(line)
def setup_categorized_hmns_modules(self):
"""Setup categorized hierarchical modules to run tests on."""
mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
# simply copy module files under 'CategorizedHMNS/{Core,Compiler,MPI}' to test install path
# EasyBuild is responsible for making sure that the toolchain can be loaded using the short module name
mkdir(mod_prefix, parents=True)
for mod_subdir in ['Core', 'Compiler', 'MPI']:
src_mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'modules', 'CategorizedHMNS', mod_subdir)
shutil.copytree(src_mod_path, os.path.join(mod_prefix, mod_subdir))
# create empty module file directory to make C/Tcl modules happy
mpi_pref = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
mkdir(os.path.join(mpi_pref, 'base'))
# make sure only modules in the CategorizedHMNS are available
self.reset_modulepath([os.path.join(mod_prefix, 'Core', 'compiler'),
os.path.join(mod_prefix, 'Core', 'toolchain')])
# tweak use statements in modules to ensure correct paths
for modfile in [
os.path.join(mod_prefix, 'Core', 'compiler', 'GCC', '4.7.2'),
os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'mpi', 'OpenMPI', '1.6.4'),
]:
for line in fileinput.input(modfile, inplace=1):
line = re.sub(r"(module\s*use\s*)/tmp/modules/all",
r"\1%s/modules/all" % self.test_installpath,
line)
sys.stdout.write(line)
def cleanup():
"""Perform cleanup of singletons and caches."""
    # clear Singleton instances, to start afresh
Singleton._instances.clear()
# empty caches
tc_utils._initial_toolchain_instances.clear()
easyconfig._easyconfigs_cache.clear()
easyconfig._easyconfig_files_cache.clear()
mns_toolchain._toolchain_details_cache.clear()
def init_config(args=None, build_options=None):
"""(re)initialize configuration"""
cleanup()
# initialize configuration so config.get_modules_tool function works
eb_go = eboptions.parse_options(args=args)
config.init(eb_go.options, eb_go.get_options_by_section('config'))
# initialize build options
if build_options is None:
build_options = {
'valid_module_classes': module_classes(),
'valid_stops': [x[0] for x in EasyBlock.get_steps()],
}
if 'suffix_modules_path' not in build_options:
build_options.update({'suffix_modules_path': GENERAL_CLASS})
config.init_build_options(build_options=build_options)
return eb_go.options
def find_full_path(base_path, trim=(lambda x: x)):
"""
Determine full path for given base path by looking in sys.path and PYTHONPATH.
trim: a function that takes a path and returns a trimmed version of that path
"""
full_path = None
pythonpath = os.getenv('PYTHONPATH')
if pythonpath:
pythonpath = pythonpath.split(':')
else:
pythonpath = []
for path in sys.path + pythonpath:
tmp_path = os.path.join(trim(path), base_path)
if os.path.exists(tmp_path):
full_path = tmp_path
break
return full_path
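# Illustrative note (not part of the original module): e.g. find_full_path('easybuild/easyconfigs')
# returns the first matching location found on sys.path or $PYTHONPATH, or None if there is no match.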
|
ULHPC/modules
|
easybuild/easybuild-framework/test/framework/utilities.py
|
Python
|
mit
| 14,540 | 0.002682 |
import io
import base64
import gevent
from Tkinter import Label
from PIL import ImageTk, Image
class AnimatedImgLabel(Label):
# http://stackoverflow.com/questions/7960600/python-tkinter-display-animated-gif-using-pil
def __init__(self, master, data, encoding='base64', **kwargs):
if encoding == 'base64':
data = base64.b64decode(data)
self.img = Image.open(io.BytesIO(data))
seq = list()
try:
while True:
seq.append(self.img.copy())
self.img.seek(len(seq)) # skip to next frame
except EOFError:
pass # we're done
try:
self.delay = float(self.img.info['duration'])/1000
except KeyError:
self.delay = 0.200
self.frames = list()
for frame in seq:
#frame = frame.convert('RGBA')
self.frames.append(ImageTk.PhotoImage(frame))
self.idx = 0
self.first = self.frames[0]
Label.__init__(self, master, image=self.first, **kwargs)
self.greenlet = gevent.spawn_later(self.delay, self.play)
def destroy(self):
self.greenlet.kill()
Label.destroy(self)
def play(self):
try:
self.config(image=self.frames[self.idx])
self.master.update()
self.idx += 1
if self.idx == len(self.frames):
self.idx = 0
self.greenlet = gevent.spawn_later(self.delay, self.play)
except:
import traceback
traceback.print_exc()
raise
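# Rough usage sketch (illustrative; assumes the surrounding application drives
# gevent itself, since play() re-schedules via gevent.spawn_later and calls
# master.update() rather than relying on Tk's blocking mainloop):
#
#     import Tkinter
#     root = Tkinter.Tk()
#     with open('spinner.gif', 'rb') as f:
#         data = base64.b64encode(f.read())
#     label = AnimatedImgLabel(root, data)
#     label.pack()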
|
MoroGasper/client
|
client/plugins/ui/tk/animate.py
|
Python
|
gpl-3.0
| 1,581 | 0.003163 |
import os.path
from fabric.api import env
from fabsetup.fabutils import checkup_git_repo_legacy, needs_packages
from fabsetup.fabutils import needs_repo_fabsetup_custom, suggest_localhost
from fabsetup.fabutils import install_file_legacy, run, subtask, subsubtask, task
from fabsetup.utils import flo, update_or_append_line, comment_out_line
from fabsetup.utils import uncomment_or_update_or_append_line, query_yes_no
@task
@needs_repo_fabsetup_custom
@suggest_localhost
@needs_packages('python-pip')
def powerline():
'''Install and set up powerline for vim, bash, tmux, and i3.
It uses pip (python2) and the most up to date powerline version (trunk) from
the github repository.
More infos:
https://github.com/powerline/powerline
https://powerline.readthedocs.io/en/latest/installation.html
https://github.com/powerline/fonts
https://youtu.be/_D6RkmgShvU
http://www.tecmint.com/powerline-adds-powerful-statuslines-and-prompts-to-vim-and-bash/
'''
bindings_dir, scripts_dir = install_upgrade_powerline()
set_up_powerline_fonts()
set_up_powerline_daemon(scripts_dir)
powerline_for_vim(bindings_dir)
powerline_for_bash_or_powerline_shell(bindings_dir)
powerline_for_tmux(bindings_dir)
powerline_for_i3(bindings_dir)
    print('\nYou may have to reboot for the changes to take effect')
@subsubtask
def install_special_glyphs():
'''
More infos:
https://powerline.readthedocs.io/en/latest/installation/linux.html#fonts-installation
https://wiki.archlinux.org/index.php/Font_configuration
$XDG_CONFIG_HOME: http://superuser.com/a/365918
'''
from_dir = '~/repos/powerline/font'
run('mkdir -p ~/.local/share/fonts')
run(flo('cp {from_dir}/PowerlineSymbols.otf ~/.local/share/fonts'))
to_dir = '~/.config/fontconfig/conf.d/'
run(flo('mkdir -p {to_dir}'))
run(flo('cp {from_dir}/10-powerline-symbols.conf {to_dir}'))
@subtask
def install_upgrade_powerline():
'''
More infos:
https://powerline.readthedocs.io/en/latest/installation.html#pip-installation
'''
checkup_git_repo_legacy('https://github.com/powerline/powerline.git')
path_to_powerline = os.path.expanduser('~/repos/powerline')
run(flo('pip install --user --editable={path_to_powerline}'))
run('pip show powerline-status') # only for information
install_special_glyphs()
bindings_dir = '~/repos/powerline/powerline/bindings'
scripts_dir = '~/repos/powerline/scripts'
return bindings_dir, scripts_dir
@subtask
def set_up_powerline_fonts():
checkup_git_repo_legacy('https://github.com/powerline/fonts.git',
name='powerline-fonts')
# install fonts into ~/.local/share/fonts
run('cd ~/repos/powerline-fonts && ./install.sh')
prefix = 'URxvt*font: '
from config import fontlist
line = prefix + fontlist
update_or_append_line(filename='~/.Xresources', prefix=prefix,
new_line=line)
if env.host_string == 'localhost':
run('xrdb ~/.Xresources')
@subtask
def set_up_powerline_daemon(scripts_dir):
bash_snippet = '~/.bashrc_powerline_daemon'
install_file_legacy(path=bash_snippet, scripts_dir=scripts_dir)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
update_or_append_line(filename='~/.bashrc', prefix=prefix, new_line=enabler)
@subtask
def powerline_for_vim(bindings_dir):
pass # TODO
def powerline_for_bash_or_powerline_shell(bindings_dir):
'''Set up the bash extension of powerline or powerline_shell (another task).
'''
question = '\nSet up powerline-shell instead of powerline bash extension?'
if query_yes_no(question, default='yes'):
from setup import powerline_shell
powerline_shell()
# disable powerline bash extension if it has been set up
powerline_bash_enabler = 'if [ -f ~/.bashrc_powerline_bash ]; then ' \
'source ~/.bashrc_powerline_bash; fi'
comment_out_line(filename='~/.bashrc', line=powerline_bash_enabler)
else:
powerline_for_bash(bindings_dir)
# disable powerline_shell if it has been set up
powerline_shell_enabler = 'if [ -f ~/.bashrc_powerline_shell ]; then ' \
'source ~/.bashrc_powerline_shell; fi'
comment_out_line(filename='~/.bashrc', line=powerline_shell_enabler)
@subtask
def powerline_for_bash(bindings_dir):
bash_snippet = '~/.bashrc_powerline_bash'
install_file_legacy(path=bash_snippet, bindings_dir=bindings_dir)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
uncomment_or_update_or_append_line(filename='~/.bashrc', prefix=prefix,
new_line=enabler, comment='#')
@subtask
def powerline_for_tmux(bindings_dir):
pass # TODO
@subtask
def powerline_for_i3(bindings_dir):
pass # TODO
|
theno/fabsetup
|
fabsetup/fabfile/setup/powerline.py
|
Python
|
mit
| 5,053 | 0.00099 |
import unittest
import random, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_hosts, h2o_import as h2i
# RF train parameters
paramsTrainRF = {
'ntree' : 100,
'depth' : 300,
'bin_limit' : 20000,
'ignore' : None,
'stat_type' : 'ENTROPY',
'out_of_bag_error_estimate': 1,
'exclusive_split_limit': 0,
'timeoutSecs': 14800,
}
# RF test parameters
paramsTestRF = {
# scoring requires the response_variable. it defaults to last, so normally
# we don't need to specify. But put this here and (above if used)
# in case a dataset doesn't use last col
'response_variable': None,
'out_of_bag_error_estimate': 0,
'timeoutSecs': 14800,
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_iris(self):
# Train RF
trainParseResult = h2i.import_parse(bucket='smalldata', path='iris/iris2.csv', hex_key='train_iris2.hex', schema='put')
kwargs = paramsTrainRF.copy()
trainResult = h2o_rf.trainRF(trainParseResult, **kwargs)
scoreParseResult = h2i.import_parse(bucket='smalldata', path='iris/iris2.csv', hex_key='score_iris2.hex', schema='put')
kwargs = paramsTestRF.copy()
scoreResult = h2o_rf.scoreRF(scoreParseResult, trainResult, **kwargs)
print "\nTrain\n=========={0}".format(h2o_rf.pp_rf_result(trainResult))
print "\nScoring\n========={0}".format(h2o_rf.pp_rf_result(scoreResult))
if __name__ == '__main__':
h2o.unit_main()
|
janezhango/BigDataMachineLearning
|
py/testdir_ec2/test_rf_iris.py
|
Python
|
apache-2.0
| 2,017 | 0.011403 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from datetime import date
from inspirehep.utils.latex import Latex
from inspirehep.utils.record_getter import get_db_record
import pytest
@pytest.mark.xfail(reason='wrong output')
def test_format_latex_eu(app):
article = get_db_record('lit', 4328)
today = date.today().strftime('%d %b %Y')
expected = u'''%\cite{Glashow:1961tr}
\\bibitem{Glashow:1961tr}
S.~L.~Glashow,
%``Partial Symmetries of Weak Interactions,''
Nucl.\ Phys.\ {\\bf 22} (1961) 579.
doi:10.1016/0029-5582(61)90469-2
%%CITATION = doi:10.1016/0029-5582(61)90469-2;%%
%11 citations counted in INSPIRE as of ''' + today
result = Latex(article, 'latex_eu').format()
assert expected == result
@pytest.mark.xfail(reason='wrong output')
def test_format_latex_us(app):
article = get_db_record('lit', 4328)
today = date.today().strftime('%d %b %Y')
expected = u'''%\cite{Glashow:1961tr}
\\bibitem{Glashow:1961tr}
S.~L.~Glashow,
%``Partial Symmetries of Weak Interactions,''
Nucl.\ Phys.\ {\\bf 22}, 579 (1961).
doi:10.1016/0029-5582(61)90469-2
%%CITATION = doi:10.1016/0029-5582(61)90469-2;%%
%11 citations counted in INSPIRE as of ''' + today
result = Latex(article, 'latex_us').format()
assert expected == result
|
kaplun/inspire-next
|
tests/integration/test_latex_exporting.py
|
Python
|
gpl-3.0
| 2,222 | 0.0027 |
#!/usr/bin/python
"""Test to verify presentation of selectable list items."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control><Shift>n"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to list item",
["KNOWN ISSUE: We are presenting nothing here",
""]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Right to next list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list Presentation Backgrounds list item'",
" VISIBLE: 'Presentation Backgrounds list it', cursor=1",
"SPEECH OUTPUT: 'Presentation Backgrounds'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"3. Left to previous list item",
["BRAILLE LINE: 'soffice application Template Manager frame Template Manager dialog Drawings page tab list My Templates list item'",
" VISIBLE: 'My Templates list item', cursor=1",
"SPEECH OUTPUT: 'My Templates'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/oowriter/ui_role_list_item.py
|
Python
|
lgpl-2.1
| 1,384 | 0.001445 |
"""
@name: GDWCalc_Lite.py
@vers: 1.3
@author: Douglas Thor
@created: 2013-04-19
@modified: 2013-10-08
@descr:     Calculates Gross Die per Wafer (GDW), accounting for
wafer flat, edge exclusion, and front-side-scribe (FSS)
exclusion (also called flat exclusion).
Returns nothing.
This Lite version does not include the option to plot
a wafer map or generate an OWT mask file.
"""
import math
# Defined by SEMI M1-0302
FLAT_LENGTHS = {50: 15.88, 75: 22.22, 100: 32.5, 125: 42.5, 150: 57.5}
PROG_VERS = "1.3"
REL_DATE = "2013-10-08"
def round_to_multiple(x, multiple):
return int(multiple * round(float(x)/multiple))
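# Illustrative behaviour (values chosen for this note only):
# round_to_multiple(1234, 100) returns 1200 and round_to_multiple(77, 5) returns 75.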
def max_dist(center, size):
"""
    Calculates the largest distance from the origin for a rectangle of
    size (x, y) whose center coordinates are known.
If the rectangle's center is in the Q1, then the upper-right corner is
the farthest away from the origin. If in Q2, then the upper-left corner
is farthest away. Etc.
Returns the magnitude of the largest distance.
"""
halfX = size[0]/2.
halfY = size[1]/2.
if center[0] < 0: halfX = -halfX
if center[1] < 0: halfY = -halfY
dist = math.sqrt((center[0] + halfX)**2 + (center[1] + halfY)**2)
return dist
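# Illustrative sanity check (assumed values): a 2 x 2 rectangle centered at (1, 1)
# has its far corner at (2, 2), so max_dist((1, 1), (2, 2)) returns sqrt(8) ~= 2.83;
# by symmetry, max_dist((-1, -1), (2, 2)) gives the same value.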
def progress_bar(n, size, barSize=10):
"""
A simple terminal progress bar.
Usage:
    Insert into the loop whose progress you want to monitor, and call it once
    more after the loop is completed (with n = size).
    n = iteration that you want to display.
    size = maximum number of iterations that the loop will go through.
    barSize = integer length of the progress bar.
Example:
size = 1000
n = 0
barSize = 17
for item in range(size):
time.sleep(.02)
progress_bar(n, size, barSize)
n += 1
progress_bar(n, size, barSize)
"""
barFill = int(n * barSize // float(size))
if barFill > barSize: barFill = barSize
if barFill < 0: barFill = 0
barText = "[" + "#" * barFill + " " * (barSize - barFill) + "] %d/%d\r"
print barText % (n, size),
def dieSizePrompt():
while True:
try:
dieX = float(raw_input("Die X size (mm): "))
if dieX > 1000 or dieX <= 0: raise(ValueError)
break
except ValueError:
print "Invalid entry. Please enter a number between 0 and 1000."
while True:
try:
dieY = float(raw_input("Die Y size (mm): "))
if dieY > 1000 or dieY <= 0: raise(ValueError)
break
except ValueError:
print "Invalid entry. Please enter a number between 0 and 1000."
return (dieX, dieY)
def waferSizePrompt():
while True:
default = 150.0
dia = raw_input("Wafer diameter (mm) [%dmm]: " % default)
if dia == "":
dia = float(default)
print "Using default value of %dmm." % default
break
else:
try:
dia = float(dia)
if dia <= 0 or dia > 500: raise(ValueError)
break
except ValueError:
print "Invalid entry. Please enter a number between 0 and 500."
return dia
def exclSizePrompt():
while True:
default = 5.0
exclSize = raw_input("Exclusion ring width (mm) [%dmm]: " % default)
if exclSize == "":
exclSize = float(default)
print "Using default value of %dmm." % default
break
else:
try:
exclSize = float(exclSize)
if exclSize < 0: raise(ValueError)
break
except ValueError:
print "Invalid entry. Please enter a number greater than 0."
return exclSize
def FSSExclPrompt():
""" Prompts user for Front-Side Scribe Exclusion width. Also called Flat
Exclusion """
while True:
default = 5.0
FSSExcl = raw_input("Front Side Scribe (Flat) Exclusion (mm) [%dmm]: " % default)
if FSSExcl == "":
FSSExcl = float(default)
print "Using default value of %dmm." % default
break
else:
try:
FSSExcl = float(FSSExcl)
if FSSExcl < 0: raise(ValueError)
break
except ValueError:
print "Invalid entry. Please enter a number greater than 0."
return FSSExcl
def gdw(dieSize, dia, centerType, excl, FSS_EXCLUSION):
"""
Calculates Gross Die per Wafer (GDW) for a given dieSize (X, Y),
wafer diameter dia, centerType (xType, yType), and exclusion width (mm).
Returns a list of tuples (X, Y, XCoord, YCoord, dieStatus)
"""
origin = (0, 0)
dieX = dieSize[0]
dieY = dieSize[1]
rad = 0.5 * dia
# assume that the reticle center is the wafer center
dieCenter = list(origin)
if centerType[0] == "even":
# offset the dieCenter by 1/2 the die size, X direction
dieCenter[0] = 0.5 * dieX
if centerType[1] == "even":
# offset the dieCenter by 1/2 the die size, Y direction
dieCenter[1] = 0.5 * dieY
# find out how many die we can fit on the wafer
nX = int(math.ceil(dia/dieX))
nY = int(math.ceil(dia/dieY))
# If we're centered on the wafer, we need to add one to the axis count
if centerType[0] == "odd": nX += 1
if centerType[1] == "odd": nY += 1
# make a list of (x, y) center coordinate pairs
centers = []
for i in range(nX):
for j in range(nY):
centers.append(((i-nX/2) * dieX + dieCenter[0],
(j-nY/2) * dieY + dieCenter[1]))
if dia in FLAT_LENGTHS:
# A flat is defined, so we draw it.
flatSize = FLAT_LENGTHS[dia]
x = flatSize/2
y = -math.sqrt(rad**2 - x**2)
else:
# A flat is not defined so...
y = -rad
yExcl = y + FSS_EXCLUSION
# Take only those that are within the wafer radius
dieList = []
n = 0
    # guard against 0 so the modulo check below cannot divide by zero
    updateValue = max(1, round_to_multiple(len(centers) // 100, 100))
listLen = len(centers)
print "Calculating GDW:"
for coord in centers:
if n % updateValue == 0:
progress_bar(n, listLen)
newCoords = (coord[0] - dieX/2, coord[1] - dieY/2)
if max_dist(coord, dieSize) > rad:
# it's off the wafer
status = "wafer"
elif coord[1] - dieY/2 < y:
# it's off the flat
status = "flat"
elif max_dist(coord, dieSize) > (rad - excl):
# it's outside of the exclusion
status = "excl"
elif coord[1] - dieY/2 < yExcl:
            # it's outside the flat exclusion
status = "flatExcl"
else:
# it's a good die, add it to the list
status = "probe"
# need to figure out how to get true RC numbers
dieList.append(("X column", "Y row", newCoords[0], newCoords[1], status))
n += 1
progress_bar(n, listLen)
print ""
return dieList
def maxGDW(dieSize, dia, excl, fssExcl):
# list of available die shifts
ds = [("odd", "odd"),
("odd", "even"),
("even", "odd"),
("even", "even")]
j = (0, "")
probeList = []
for shift in ds:
probeCount = 0
edgeCount = 0
flatCount = 0
flatExclCount = 0
dieList = gdw(dieSize, dia, shift, excl, fssExcl)
for die in dieList:
if die[-1] == "probe":
probeCount += 1
elif die[-1] == "excl":
edgeCount += 1
elif die[-1] == "flat":
flatCount += 1
elif die[-1] == "flatExcl":
flatExclCount += 1
if probeCount > j[0]:
j = (probeCount, shift, edgeCount, flatCount, flatExclCount)
probeList = dieList
print ""
print "----------------------------------"
print "Maximum GDW: %d %s" % (j[0], j[1])
print "Die lost to Edge Exclusion: %d" % j[2]
print "Die Lost to Wafer Flat: %d" % j[3]
print "Die Lost to Front-Side Scribe Exclusion: %d" % j[4]
print "----------------------------------"
return probeList
def printHeader():
print "++++++++++++++++++++++++++++++"
print "GDWCalc_Lite v%s" % PROG_VERS
print "Released %s" % REL_DATE
print "++++++++++++++++++++++++++++++"
print ""
def main():
printHeader()
dieXY = dieSizePrompt()
dia = waferSizePrompt()
excl = exclSizePrompt()
FSS_Width = FSSExclPrompt()
print ""
probeList = maxGDW(dieXY, dia, excl, FSS_Width)
raw_input("Press Enter to close this window.")
if __name__ == "__main__":
main()
|
dougthor42/GDWCalc
|
archive/GDWCalc_Lite v1.3.py
|
Python
|
gpl-2.0
| 8,852 | 0.001695 |
__source__ = 'https://leetcode.com/problems/walls-and-gates/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/walls-and-gates.py
# Time: O(m * n)
# Space: O(g)
#
# Description: Leetcode # 286. Walls and Gates
#
# You are given a m x n 2D grid initialized with these three possible values.
#
# -1 - A wall or an obstacle.
# 0 - A gate.
# INF - Infinity means an empty room.
#
# We use the value 231 - 1 = 2147483647 to represent INF
# as you may assume that the distance to a gate is less than 2147483647.
# Fill each empty room with the distance to its nearest gate.
# If it is impossible to reach a gate, it should be filled with INF.
#
# For example, given the 2D grid:
# INF -1 0 INF
# INF INF INF -1
# INF -1 INF -1
# 0 -1 INF INF
# After running your function, the 2D grid should be:
# 3 -1 0 1
# 2 2 1 -1
# 1 -1 2 -1
# 0 -1 3 4
#
# Companies
# Google Facebook
# Related Topics
# Breadth-first Search
# Similar Questions
# Surrounded Regions Number of Islands Shortest Distance from All Buildings
import unittest
#BFS
class Solution(object):
def wallsAndGates(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: void Do not return anything, modify rooms in-place instead.
"""
for i in xrange(len(rooms)):
for j in xrange(len(rooms[0])):
if rooms[i][j] == 0:
stack = [
(i+1, j, 1),
(i-1, j, 1),
(i, j+1, 1),
(i, j-1, 1)
]
while stack:
ii, jj, dist = stack.pop()
if ii < 0 or jj < 0 or ii >= len(rooms) or jj >= len(rooms[0]) or rooms[ii][jj] < dist:
continue
rooms[ii][jj] = dist
stack.append((ii+1, jj, dist + 1))
stack.append((ii-1, jj, dist + 1))
stack.append((ii, jj+1, dist + 1))
stack.append((ii, jj-1, dist + 1))
#BFS -2
class Solution2(object):
def wallsAndGates(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: void Do not return anything, modify rooms in-place instead.
"""
if not rooms:
return
m = len(rooms)
n = len(rooms[0])
stack = []
for i in xrange(m):
for j in xrange(n):
if rooms[i][j] == 0:
stack.append([i*n +j, 0])
cube = [0, 1, 0, -1, 0]
while stack:
digit, dis = stack.pop()
x = digit / n
y = digit % n
for k in xrange(4):
p = x + cube[k]
q = y + cube[k+1]
if p >= 0 and p < m and q >= 0 and q < n and rooms[p][q] > dis + 1:
rooms[p][q] = dis + 1
stack.append([p*n+q, dis+1])
#DFS
class Solution3(object):
def wallsAndGates(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: void Do not return anything, modify rooms in-place instead.
"""
if not rooms:
return
m = len(rooms)
n = len(rooms[0])
padding = [ 0, 1, 0, -1, 0]
for i in xrange(m):
for j in xrange(n):
if rooms[i][j] == 0:
self.dfs(rooms, m, n, i, j, padding)
def dfs(self, rooms, m, n, i, j, padding):
for k in xrange(4):
p = i + padding[k]
q = j + padding[k+1]
if p >= 0 and q >= 0 and p < m and q < n and rooms[p][q] > rooms[i][j] + 1:
rooms[p][q] = rooms[i][j] + 1
self.dfs(rooms, m, n, p, q, padding)
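# Illustrative run using the grid from the problem statement above (explanatory
# note only; not part of the original solutions):
#
#     INF = 2147483647
#     rooms = [[INF,  -1,   0, INF],
#              [INF, INF, INF,  -1],
#              [INF,  -1, INF,  -1],
#              [  0,  -1, INF, INF]]
#     Solution().wallsAndGates(rooms)   # fills rooms in place
#     # rooms == [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]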
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/walls-and-gates/solution/
# DFS
# 99.31% 4ms
class Solution {
public static final int[][] DIRECTIONS = new int[][] {{-1, 0}, {1, 0}, {0, -1}, {0, 1}};
public void wallsAndGates(int[][] rooms) {
int m = rooms.length;
int n = m == 0 ? 0 : rooms[0].length;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (rooms[i][j] == 0) {
dfs(rooms, m, n, i, j, 1);
}
}
}
}
private void dfs(int[][] rooms, int m, int n, int i, int j, int steps) {
for (int[] direction : DIRECTIONS) {
int newI = i + direction[0];
int newJ = j + direction[1];
if (newI >= 0 && newI < m && newJ >= 0 && newJ < n && rooms[newI][newJ] > steps) {
rooms[newI][newJ] = steps;
dfs(rooms, m, n, newI, newJ, steps + 1);
}
}
}
}
# DFS
# 99.31% 4ms
public class Solution {
private static int[] dir = {0, 1, 0, -1, 0};
public void wallsAndGates(int[][] rooms) {
for (int i = 0; i < rooms.length; i++) {
for (int j = 0; j < rooms[0].length; j++) {
if (rooms[i][j] == 0) dfs(rooms, i, j);
}
}
}
public void dfs(int[][] rooms, int i, int j) {
for (int k = 0; k < 4; k++) {
int p = i + dir[k], q = j + dir[k+1];
if ( 0 <= p && p < rooms.length && 0 <= q && q < rooms[0].length && rooms[p][q] > rooms[i][j] + 1) {
rooms[p][q] = rooms[i][j] + 1;
dfs(rooms, p, q);
}
}
}
}
# BFS
# 59.16% 9ms
public class Solution {
//The Multi End BFS solution used is this
public static final int[] d = {0, 1, 0, -1, 0};
public void wallsAndGates(int[][] rooms) {
if (rooms.length == 0) return;
int m = rooms.length, n = rooms[0].length;
Deque<Integer> queue = new ArrayDeque<>();
for (int i = 0; i < m ; i++) {
for (int j = 0; j < n; j++) {
if (rooms[i][j] == 0){
queue.offer(i *n + j); }
}
}
while(!queue.isEmpty()){
int x = queue.poll();
int i = x / n, j = x % n;
for (int k = 0; k < 4; k++) {
int p = i + d[k], q = j + d[k+1];
if (0 <= p && p < m && 0 <= q && q < n && rooms[p][q] == Integer.MAX_VALUE) {
rooms[p][q] = rooms[i][j] + 1;
queue.offer(p * n + q);
}
}
}
}
private void bfs(int[][] rooms, int i, int j) {
int m = rooms.length, n = rooms[0].length;
Deque<Integer> queue = new ArrayDeque<>();
queue.offer(i * n + j); // Put gate in the queue
while (!queue.isEmpty()) {
int x = queue.poll();
i = x / n; j = x % n;
for (int k = 0; k < 4; ++k) {
int p = i + d[k], q = j + d[k+1];
if (0 <= p && p < m && 0 <= q && q < n && rooms[p][q] > rooms[i][j] + 1) {
rooms[p][q] = rooms[i][j] + 1;
queue.offer(p * n + q);
}
}
}
}
}
# BFS2
# 32.38% 13ms
class Solution {
//The Multi End BFS solution used is this
public static final int[] d = {0, 1, 0, -1, 0};
public void wallsAndGates(int[][] rooms) {
if (rooms.length == 0) return;
int m = rooms.length, n = rooms[0].length;
Deque<Integer> queue = new ArrayDeque<>();
for (int i = 0; i < m ; i++) {
for (int j = 0; j < n; j++) {
if (rooms[i][j] == 0){
queue.offer(i * n + j);
bfs(rooms, i, j); //naive BFS solution
}
}
}
}
private void bfs(int[][] rooms, int i, int j) {
int m = rooms.length, n = rooms[0].length;
Deque<Integer> queue = new ArrayDeque<>();
queue.offer(i * n + j); // Put gate in the queue
while (!queue.isEmpty()) {
int x = queue.poll();
i = x / n; j = x % n;
for (int k = 0; k < 4; ++k) {
int p = i + d[k], q = j + d[k+1];
if (0 <= p && p < m && 0 <= q && q < n && rooms[p][q] > rooms[i][j] + 1) {
rooms[p][q] = rooms[i][j] + 1;
queue.offer(p * n + q);
}
}
}
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/WallsandGates.py
|
Python
|
apache-2.0
| 8,470 | 0.002597 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from api.account.views import RegistrationAPI, LoginAPI, LogoutAPI, UpdatePasswordAPI, UpdateProfileAPI
urlpatterns = [
url(r'^registration/$', RegistrationAPI.as_view(), name='registration'),
url(r'^login/$', LoginAPI.as_view(), name='login'),
url(r'^logout/$', LogoutAPI.as_view(), name='logout'),
url(r'^update_password/$', UpdatePasswordAPI.as_view(), name='update_password'),
url(r'^update_profile/$', UpdateProfileAPI.as_view(), name='update_profile'),
]
|
doraemonext/DEOnlineJudge
|
api/account/urls.py
|
Python
|
mit
| 541 | 0.005545 |
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
"reddit-validations": {
"task": "reddit.tasks.process_validations",
"schedule": timedelta(minutes=10),
},
"eveapi-update": {
"task": "eve_api.tasks.account.queue_apikey_updates",
"schedule": timedelta(minutes=10),
},
"alliance-update": {
"task": "eve_api.tasks.alliance.import_alliance_details",
"schedule": timedelta(hours=6),
},
"api-log-clear": {
"task": "eve_proxy.tasks.clear_old_logs",
"schedule": timedelta(days=1),
},
"blacklist-check": {
"task": "hr.tasks.blacklist_check",
"schedule": timedelta(days=7),
},
"reddit-update": {
"task": "reddit.tasks.queue_account_updates",
"schedule": timedelta(minutes=15),
}
}
CELERY_ROUTES = {
"sso.tasks.update_service_groups": {'queue': 'bulk'},
"hr.tasks.blacklist_check": {'queue': 'bulk'},
"eve_api.tasks.import_apikey_result": {'queue': 'fastresponse'},
"sso.tasks.update_user_access": {'queue': 'fastresponse'},
}
|
nikdoof/test-auth
|
app/conf/celeryschedule.py
|
Python
|
bsd-3-clause
| 1,084 | 0 |
#!/usr/bin/env python
"""
"""
# Python 2.6 and newer support
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import (
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
try:
unicode()
except NameError:
unicode = str
import sys
__python_version__ = dict()
try:
__python_version__['major'] = sys.version_info.major
except AttributeError:
__python_version__['major'] = sys.version_info[0]
try:
__python_version__['minor'] = sys.version_info.minor
except AttributeError:
__python_version__['minor'] = sys.version_info[1]
from ..defaultencoding import DefaultEncoding
from ..import channel
class Query(object):
"""More than just a simple aggregate of a Command and a Response.
Executes a command, attempts to retrieve an IMMEDIATE response, all without
releasing the resource locks.
"""
def __init__(self, message, device=None, io=None,
send_encoding=DefaultEncoding(),
receive_encoding=DefaultEncoding(),
receive_count=-1):
object.__init__(self)
self.message = message
self.device = device
self.io = io
self.send_encoding = send_encoding
self.receive_encoding = receive_encoding
self.receive_count = receive_count
self.__response = None
@property
def value(self):
"""The most recently retrieved response.
"""
return self.__response
def __call__(self, *args, **kwargs):
"""Sends the command, fetches a response, stores and returns that response.
Any arguments and/or keyword arguments will be passed to ``format``,
which is called on the command message before sending.
"""
if isinstance(self.send_encoding, DefaultEncoding):
with channel(self.device, self.io) as dev:
dev.send(self.message.format(*args, **kwargs))
self.__response = dev.receive(count=self.receive_count)
else:
with channel(self.device, self.io) as dev:
dev.send(self.message.format(*args, **kwargs), encoding=self.send_encoding)
self.__response = dev.receive(count=self.receive_count, encoding=self.receive_encoding)
return self.value
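# Rough usage sketch (illustrative only; 'my_instrument' and the exact import
# path are assumptions, not part of this module):
#
#     from deviceutils.action.query import Query
#     idn = Query('*IDN?', device=my_instrument)
#     print(idn())       # sends '*IDN?', reads the reply, and returns it
#     print(idn.value)   # the most recently retrieved response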
|
sfinucane/deviceutils
|
deviceutils/action/query.py
|
Python
|
apache-2.0
| 2,510 | 0.004382 |
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Cinder volume driver for Hitachi storage.
"""
from contextlib import nested
import os
import threading
from oslo.config import cfg
import six
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
LOG = logging.getLogger(__name__)
CHAP_METHOD = ('None', 'CHAP None', 'CHAP')
volume_opts = [
cfg.BoolOpt('hitachi_add_chap_user',
default=False,
help='Add CHAP user'),
cfg.StrOpt('hitachi_auth_method',
default=None,
help='iSCSI authentication method'),
cfg.StrOpt('hitachi_auth_user',
default='%sCHAP-user' % basic_lib.NAME_PREFIX,
help='iSCSI authentication username'),
cfg.StrOpt('hitachi_auth_password',
default='%sCHAP-password' % basic_lib.NAME_PREFIX,
help='iSCSI authentication password'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
VERSION = common.VERSION
def __init__(self, *args, **kwargs):
os.environ['LANG'] = 'C'
super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
self.db = kwargs.get('db')
self.common = None
self.configuration.append_config_values(common.volume_opts)
self._stats = {}
self.context = None
self.do_setup_status = threading.Event()
def _check_param(self):
self.configuration.append_config_values(volume_opts)
if (self.configuration.hitachi_auth_method and
self.configuration.hitachi_auth_method not in CHAP_METHOD):
msg = basic_lib.output_err(601, param='hitachi_auth_method')
raise exception.HBSDError(message=msg)
if self.configuration.hitachi_auth_method == 'None':
self.configuration.hitachi_auth_method = None
for opt in volume_opts:
getattr(self.configuration, opt.name)
def check_param(self):
try:
self.common.check_param()
self._check_param()
except exception.HBSDError:
raise
except Exception as ex:
msg = basic_lib.output_err(601, param=six.text_type(ex))
raise exception.HBSDError(message=msg)
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
with lock:
self.common.output_param_to_log('iSCSI')
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info('\t%-35s%s' % (opt.name + ': ',
six.text_type(value)))
def _delete_lun_iscsi(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(301, ldev=ldev)
LOG.warning(msg)
def _add_target(self, hostgroups, ldev):
self.common.add_lun('autargetmap', hostgroups, ldev)
def _add_initiator(self, hgs, port, gid, host_iqn):
self.common.command.comm_add_initiator(port, gid, host_iqn)
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
LOG.debug("Create iSCSI target for %s" % hgs)
def _get_unused_gid_iscsi(self, port):
group_range = self.configuration.hitachi_group_range
if not group_range:
group_range = basic_lib.DEFAULT_GROUP_RANGE
return self.common.command.get_unused_gid_iscsi(group_range, port)
def _delete_iscsi_target(self, port, target_no, target_alias):
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
port, target_no, target_alias)
if ret:
msg = basic_lib.set_msg(
307, port=port, tno=target_no, alias=target_alias)
LOG.warning(msg)
def _delete_chap_user(self, port):
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
if ret:
msg = basic_lib.set_msg(
303, user=self.configuration.hitachi_auth_user)
LOG.warning(msg)
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
return self.common.command.comm_get_hostgroup_info_iscsi(
hgs, host_iqn, self.configuration.hitachi_target_ports)
def _discovery_iscsi_target(self, hostgroups):
for hostgroup in hostgroups:
ip_addr, ip_port = self.common.command.comm_get_iscsi_ip(
hostgroup['port'])
target_iqn = self.common.command.comm_get_target_iqn(
hostgroup['port'], hostgroup['gid'])
hostgroup['ip_addr'] = ip_addr
hostgroup['ip_port'] = ip_port
hostgroup['target_iqn'] = target_iqn
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
% {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
for port in ports:
added_hostgroup = False
added_user = False
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
'target_iqn: %(tiqn)s target_alias: %(alias)s '
'add_iqn: %(aiqn)s)' %
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
'alias': target_alias, 'aiqn': add_iqn})
gid = self.common.command.get_gid_from_targetiqn(
target_iqn, target_alias, port)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
gid = None
try:
gid = self._get_unused_gid_iscsi(port)
self.common.command.comm_add_hostgrp_iscsi(
port, gid, target_alias, target_iqn)
added_hostgroup = True
except exception.HBSDNotFound:
msg = basic_lib.set_msg(312, resource='GID')
LOG.warning(msg)
continue
except Exception as ex:
msg = basic_lib.set_msg(
309, port=port, alias=target_alias,
reason=six.text_type(ex))
LOG.warning(msg)
break
else:
LOG.debug('Completed to add target'
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
break
if gid is None:
LOG.error(_('Failed to add target(port: %s)') % port)
continue
try:
if added_hostgroup:
if self.configuration.hitachi_auth_method:
added_user = self.common.command.set_chap_authention(
port, gid)
self.common.command.comm_set_hostgrp_reportportal(
port, target_alias)
self._add_initiator(hgs, port, gid, add_iqn)
except Exception as ex:
msg = basic_lib.set_msg(
316, port=port, reason=six.text_type(ex))
LOG.warning(msg)
if added_hostgroup:
if added_user:
self._delete_chap_user(port)
self._delete_iscsi_target(port, gid, target_alias)
def add_hostgroup_core(self, hgs, ports, target_iqn,
target_alias, add_iqn):
if ports:
self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn)
def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports):
target_ports = self.configuration.hitachi_target_ports
group_request = self.configuration.hitachi_group_request
target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
if target_ports and group_request:
target_iqn = '%s.target' % master_iqn
diff_ports = []
for port in security_ports:
for hostgroup in hgs:
if hostgroup['port'] == port:
break
else:
diff_ports.append(port)
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
target_alias, master_iqn)
if not hgs:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'initiator' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
LOG.debug("initiator: %s" % properties['initiator'])
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, properties['initiator'])
self.add_hostgroup_master(hostgroups, properties['initiator'],
properties['ip'], security_ports)
def _get_properties(self, volume, hostgroups):
conf = self.configuration
properties = {}
self._discovery_iscsi_target(hostgroups)
hostgroup = hostgroups[0]
properties['target_discovered'] = True
properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'],
hostgroup['ip_port'])
properties['target_iqn'] = hostgroup['target_iqn']
properties['target_lun'] = hostgroup['lun']
if conf.hitachi_auth_method:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = conf.hitachi_auth_user
properties['auth_password'] = conf.hitachi_auth_password
return properties
def do_setup(self, context):
self.context = context
self.common = common.HBSDCommon(self.configuration, self,
context, self.db)
self.check_param()
self.common.create_lock_file()
self.common.command.connect_storage()
lock = basic_lib.get_process_lock(self.common.service_lock_file)
with lock:
self.add_hostgroup()
self.output_param_to_log()
self.do_setup_status.set()
def check_for_setup_error(self):
pass
def extend_volume(self, volume, new_size):
self.do_setup_status.wait()
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
if refresh:
if self.do_setup_status.isSet():
self.common.output_backend_available_once()
_stats = self.common.update_volume_stats("iSCSI")
if _stats:
self._stats = _stats
return self._stats
def create_volume(self, volume):
self.do_setup_status.wait()
metadata = self.common.create_volume(volume)
return metadata
def delete_volume(self, volume):
self.do_setup_status.wait()
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_snapshot(snapshot)
return metadata
def delete_snapshot(self, snapshot):
self.do_setup_status.wait()
self.common.delete_snapshot(snapshot)
def create_cloned_volume(self, volume, src_vref):
self.do_setup_status.wait()
metadata = self.common.create_cloned_volume(volume, src_vref)
return metadata
def create_volume_from_snapshot(self, volume, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
return metadata
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)"
% {'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs:
hostgroups = src_hgs[:]
else:
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, connector['initiator'])
self.add_hostgroup_master(hostgroups, connector['initiator'],
connector['ip'], security_ports)
self._add_target(hostgroups, ldev)
return hostgroups
def initialize_connection(self, volume, connector):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
% self.common.volume_info)
LOG.debug('HFCDrv: properties=%s' % properties)
return {
'driver_volume_type': protocol,
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)"
% self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun_iscsi(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
def terminate_connection(self, volume, connector, **kwargs):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(302, volume_id=volume['id'])
LOG.warning(msg)
return
if 'initiator' not in connector:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
hostgroups = []
self._get_hostgroup_info_iscsi(hostgroups,
connector['initiator'])
if not hostgroups:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
self._terminate_connection(ldev, connector, hostgroups)
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def pair_initialize_connection(self, unused_ldev):
pass
def pair_terminate_connection(self, unused_ldev):
pass
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self.do_setup_status.wait()
if (volume['instance_uuid'] or volume['attached_host']):
desc = 'volume %s' % volume['id']
msg = basic_lib.output_err(660, desc=desc)
raise exception.HBSDError(message=msg)
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)
|
hybrid-storage-dev/cinder-fs-111t-hybrid-cherry
|
volume/drivers/hitachi/hbsd_iscsi.py
|
Python
|
apache-2.0
| 16,385 | 0 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.internet import task
from buildbot.test.fake import fakemaster
class ChangeSourceMixin:
"""
This class is used for testing change sources, and handles a few things:
- starting and stopping a ChangeSource service
- a fake master with a data API implementation
"""
changesource = None
started = False
DUMMY_CHANGESOURCE_ID = 20
OTHER_MASTER_ID = 93
DEFAULT_NAME = "ChangeSource"
def setUpChangeSource(self):
"Set up the mixin - returns a deferred."
self.master = fakemaster.make_master(self, wantDb=True, wantData=True)
assert not hasattr(self.master, 'addChange') # just checking..
return defer.succeed(None)
@defer.inlineCallbacks
def tearDownChangeSource(self):
"Tear down the mixin - returns a deferred."
if not self.started:
return
if self.changesource.running:
yield self.changesource.stopService()
yield self.changesource.disownServiceParent()
return
@defer.inlineCallbacks
def attachChangeSource(self, cs):
"Set up a change source for testing; sets its .master attribute"
self.changesource = cs
        # FIXME: some change sources do not have a 'master' property yet, but
        # the mail change source does :-/
try:
self.changesource.master = self.master
except AttributeError:
yield self.changesource.setServiceParent(self.master)
# configure the service to let secret manager render the secrets
try:
yield self.changesource.configureService()
except NotImplementedError: # non-reconfigurable change sources can't reconfig
pass
# also, now that changesources are ClusteredServices, setting up
# the clock here helps in the unit tests that check that behavior
self.changesource.clock = task.Clock()
return cs
def startChangeSource(self):
"start the change source as a service"
self.started = True
return self.changesource.startService()
@defer.inlineCallbacks
def stopChangeSource(self):
"stop the change source again; returns a deferred"
yield self.changesource.stopService()
self.started = False
def setChangeSourceToMaster(self, otherMaster):
# some tests build the CS late, so for those tests we will require that
# they use the default name in order to run tests that require master
# assignments
if self.changesource is not None:
name = self.changesource.name
else:
name = self.DEFAULT_NAME
self.master.data.updates.changesourceIds[
name] = self.DUMMY_CHANGESOURCE_ID
if otherMaster:
self.master.data.updates.changesourceMasters[
self.DUMMY_CHANGESOURCE_ID] = otherMaster
else:
del self.master.data.updates.changesourceMasters[
self.DUMMY_CHANGESOURCE_ID]
|
pmisik/buildbot
|
master/buildbot/test/util/changesource.py
|
Python
|
gpl-2.0
| 3,747 | 0.000267 |
# Copyright (C) 2016 Fan Long, Martin Rianrd and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from os import system, chdir, getcwd
from sys import argv
import subprocess
build_cmd = argv[1];
dep_dir = argv[2];
src_dir = argv[3];
test_dir = argv[4];
rev = argv[5];
if (len(argv) < 7):
out_dir = test_dir + "-" + rev;
else:
out_dir = argv[6];
work_dir = "__tmp" + rev;
system("cp -rf " + src_dir + " " + work_dir);
ori_dir = getcwd();
chdir(work_dir);
system("git checkout -f " + rev);
system("git clean -f -d");
chdir(ori_dir);
system(build_cmd + " -p " + dep_dir + " " + work_dir);
system("mv " + work_dir + "/test " + work_dir+"/ori_test");
system("cp -rf " + test_dir + " " + work_dir + "/test");
chdir(work_dir + "/test");
system("GENEXPOUT=1 CMPEXPOUT=0 make check");
chdir(ori_dir);
print "Goint to generate testdir for revision " + rev + " case: " + out_dir;
system("cp -rf " + test_dir + " " + out_dir);
system("cp -rf " + work_dir + "/test/*.exp " + work_dir + "/test/*.tol " + out_dir+"/");
system("rm -rf " + work_dir);
|
jyi/ITSP
|
prophet-gpl/tools/libtiff-prepare-test.py
|
Python
|
mit
| 1,700 | 0.018235 |
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import usrp_options
import transmit_path
from pick_bitrate import pick_tx_bitrate
from gnuradio import eng_notation
import sys
def add_freq_option(parser):
"""
Hackery that has the -f / --freq option set both tx_freq and rx_freq
"""
def freq_callback(option, opt_str, value, parser):
parser.values.rx_freq = value
parser.values.tx_freq = value
if not parser.has_option('--freq'):
parser.add_option('-f', '--freq', type="eng_float",
action="callback", callback=freq_callback,
help="set Tx and/or Rx frequency to FREQ [default=%default]",
metavar="FREQ")
def add_options(parser, expert):
add_freq_option(parser)
usrp_options.add_tx_options(parser)
transmit_path.transmit_path.add_options(parser, expert)
expert.add_option("", "--tx-freq", type="eng_float", default=None,
help="set transmit frequency to FREQ [default=%default]", metavar="FREQ")
parser.add_option("-v", "--verbose", action="store_true", default=False)
class usrp_transmit_path(gr.hier_block2):
def __init__(self, modulator_class, options):
'''
See below for what options should hold
'''
gr.hier_block2.__init__(self, "usrp_transmit_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
if options.tx_freq is None:
sys.stderr.write("-f FREQ or --freq FREQ or --tx-freq FREQ must be specified\n")
raise SystemExit
tx_path = transmit_path.transmit_path(modulator_class, options)
for attr in dir(tx_path): #forward the methods
if not attr.startswith('_') and not hasattr(self, attr):
setattr(self, attr, getattr(tx_path, attr))
#setup usrp
self._modulator_class = modulator_class
self._setup_usrp_sink(options)
#connect
self.connect(tx_path, self.u)
def _setup_usrp_sink(self, options):
"""
Creates a USRP sink, determines the settings for best bitrate,
and attaches to the transmitter's subdevice.
"""
self.u = usrp_options.create_usrp_sink(options)
dac_rate = self.u.dac_rate()
if options.verbose:
print 'USRP Sink:', self.u
(self._bitrate, self._samples_per_symbol, self._interp) = \
pick_tx_bitrate(options.bitrate, self._modulator_class.bits_per_symbol(), \
options.samples_per_symbol, options.interp, dac_rate, \
self.u.get_interp_rates())
self.u.set_interp(self._interp)
self.u.set_auto_tr(True)
if not self.u.set_center_freq(options.tx_freq):
print "Failed to set Rx frequency to %s" % (eng_notation.num_to_str(options.tx_freq))
raise ValueError, eng_notation.num_to_str(options.tx_freq)
|
UpYou/relay
|
usrp_transmit_path.py
|
Python
|
gpl-3.0
| 3,809 | 0.006563 |
from bottle import redirect, request, abort, response
from db import db
from functools import wraps
from inspect import Signature
from user import User
class SessionPlugin(object):
name = 'SessionPlugin'
keyword = 'user'
api = 2
def __init__(self, loginpage):
self.loginpage = loginpage
def apply(self, callback, route):
if self.keyword in Signature.from_callable(route.callback).parameters:
@wraps(callback)
def wrapper(*args, **kwargs):
uid = request.get_cookie('uid', secret=db.get_secret())
if uid is None:
return redirect(self.loginpage)
kwargs[self.keyword] = User(uid)
if request.method == 'POST':
if request.forms['csrf'] != request.get_cookie('csrf',
secret=db.get_secret()):
abort(400)
return callback(*args, **kwargs)
return wrapper
else:
return callback
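# Rough usage sketch (illustrative; the app and route below are hypothetical):
#
#     from bottle import Bottle
#     app = Bottle()
#     app.install(SessionPlugin(loginpage='/login'))
#
#     @app.route('/profile')
#     def profile(user):   # a 'user' parameter makes the plugin kick in
#         ...              # 'user' is a User built from the signed uid cookie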
|
b3yond/ticketfrei
|
session.py
|
Python
|
isc
| 1,063 | 0.001881 |
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import dnf.logging
import logging
import libdnf
DNF_LIBREPO_LOG = "/tmp/dnf.librepo.log"
DNF_LOGGER = "dnf"
def configure_dnf_logging():
"""Configure the DNF logging."""
# Set up librepo.
    # This is still required even though librepo has a separate logger, because
    # DNF needs the callbacks that write the librepo log in order to be able to
    # process that log.
libdnf.repo.LibrepoLog.removeAllHandlers()
libdnf.repo.LibrepoLog.addHandler(DNF_LIBREPO_LOG)
# Set up DNF. Increase the log level to the custom DDEBUG level.
dnf_logger = logging.getLogger(DNF_LOGGER)
dnf_logger.setLevel(dnf.logging.DDEBUG)
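# Minimal call sketch (assumption: run standalone on a host with dnf and libdnf
# installed); after configuration librepo messages land in DNF_LIBREPO_LOG and
# the "dnf" logger accepts the custom DDEBUG level.
if __name__ == "__main__":
    configure_dnf_logging()
    logging.getLogger(DNF_LOGGER).log(dnf.logging.DDEBUG, "DNF logging configured")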
|
jkonecny12/anaconda
|
pyanaconda/modules/payloads/payload/dnf/initialization.py
|
Python
|
gpl-2.0
| 1,604 | 0.000623 |
from django import template
register = template.Library()
@register.filter
def multiplyby(value, arg):
return int(value * arg)
@register.filter
def subtractfrom(value, arg):
return arg - value
@register.filter
def plus(value, arg):
return value + arg
@register.filter
def appears_in(value, arg):
for name in arg:
if name == value: return True
return False
@register.filter
def length(value):
return len(value)
@register.filter
def user_can_downvote(votes, id):
if id not in votes: return True
if votes[id].is_downvote(): return False
return True
@register.filter
def user_can_upvote(votes, id):
if id not in votes: return True
if votes[id].is_upvote(): return False
return True
@register.filter
def stripnewlines(value):
    return value.replace('\n', ' ').replace('\r', ' ')
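# Illustrative template usage (template and variable names are assumptions):
#   {% load customfilters %}
#   {{ answer.score|multiplyby:2 }}
#   {{ answer.score|subtractfrom:100 }}
#   {{ votes|user_can_upvote:answer.id }}
#   {{ answer.body|stripnewlines }}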
|
rtts/qqq
|
qqq/templatetags/customfilters.py
|
Python
|
gpl-3.0
| 807 | 0.032218 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import mock
from neutron.services.loadbalancer.agent import agent_api as api
from neutron.tests import base
class TestApiCache(base.BaseTestCase):
def setUp(self):
super(TestApiCache, self).setUp()
self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
self.make_msg = mock.patch.object(self.api, 'make_msg').start()
self.mock_call = mock.patch.object(self.api, 'call').start()
def test_init(self):
self.assertEqual(self.api.host, 'host')
self.assertEqual(self.api.context, mock.sentinel.context)
def test_get_ready_devices(self):
self.assertEqual(
self.api.get_ready_devices(),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with('get_ready_devices', host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_get_logical_device(self):
self.assertEqual(
self.api.get_logical_device('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'get_logical_device',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_pool_destroyed(self):
self.assertEqual(
self.api.pool_destroyed('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'pool_destroyed',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_pool_deployed(self):
self.assertEqual(
self.api.pool_deployed('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'pool_deployed',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_update_status(self):
self.assertEqual(
self.api.update_status('pool', 'pool_id', 'ACTIVE'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'update_status',
obj_type='pool',
obj_id='pool_id',
status='ACTIVE')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_plug_vip_port(self):
self.assertEqual(
self.api.plug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'plug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_unplug_vip_port(self):
self.assertEqual(
self.api.unplug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'unplug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
def test_update_pool_stats(self):
self.assertEqual(
self.api.update_pool_stats('pool_id', {'stat': 'stat'}),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'update_pool_stats',
pool_id='pool_id',
stats={'stat': 'stat'},
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
topic='topic'
)
|
sajuptpm/neutron-ipam
|
neutron/tests/unit/services/loadbalancer/agent/test_api.py
|
Python
|
apache-2.0
| 4,901 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb, sys
# for ide
if False:
from gluon import *
def clumusuario(email):
"""consulto usuario tabla clave unificada"""
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select username from auth_user where email='%s';"""%(email))
registro=cursor.fetchall()
log("usuario: "+str(registro))
dbmysql.close()
if not registro:
salida='no configurado'
elif registro[0][0]=='':
salida='no configurado'
else:
salida=str(registro[0][0])
return salida
def consulta_id(usuario):
"""consulto email tabla clave unificada"""
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select id from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
log("id: "+str(registro))
dbmysql.close()
if not registro:
salida='no creado'
elif registro[0][0]=='':
salida='no configurado'
else:
salida=int(registro[0][0])
return salida
def consulta_emailalt(usuario):
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select email from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
if not registro:
salida=['error','no creado']
elif registro[0][0]=='':
salida=['error','no configurado']
else:
salida=['ok',str(registro[0][0])]
return salida
def consulta_autogestion(usuario):
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select * from auth_user where username='%s';"""%(usuario))
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
if not registro:
salida=['error','no creado']
elif registro[0][0]=='':
salida=['error','no configurado']
else:
#return registro
aux={
"id":registro[0][0],
"first_name":registro[0][1],
"last_name":registro[0][2],
"email":registro[0][3],
"username":registro[0][4],
"password":registro[0][5],
"registration_key":registro[0][6],
"reset_password_key":registro[0][7],
"registration_id":registro[0][8],
"is_active":registro[0][9],
"created_on":registro[0][10],
"created_by":registro[0][11],
"modified_on":registro[0][12],
"modified_by":registro[0][13]
}
salida=['ok', aux]
return salida
def todos_autogestion():
"""consulto email tabla clave unificada"""
try:
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user'),
passwd=myconf.take('datos.clum_pass'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
cursor.execute("""select first_name,last_name,email,username,created_on,modified_on from auth_user;""")
registro=cursor.fetchall()
#log("mailalt: "+str(registro))
dbmysql.close()
except Exception as e:
return ['error',e.args]
#return registro
resultado={}
for i in registro:
resultado[i[3]]={
'nombre':i[0],
'apellido':i[1],
'mailalt':i[2],
'usuario':i[3],
'fcreado':i[4],
'fmodificado':i[5]
}
return ['ok',resultado]
def agrego_autogestion(username,nombre,apellido,correo,creadopor):
"""agrego usuario a autogestion"""
#consulto que el usuario exista en seguusua
log("intento agregar a clum: "+str(username)+" "+str(nombre)+" "+str(apellido)+" "+str(correo)+" "+str(creadopor))
consulta=seguusua(username)
if consulta[0]=='error':
log('no existe en seguusua')
return ['error',consulta[1]]
email=str(correo)
usuarioclum=clumusuario(email)
if usuarioclum!='no configurado':
return ['error',str(correo)+" utilizado por "+str(usuarioclum)+". No se agrega "+str(username)+" en autogestion."]
    # make sure the user does not already exist in the database; if it exists but is not configured, delete it to avoid duplicating the record
usuario_clum=consulta_emailalt(username)[1]
if usuario_clum=='no configurado':
elimino_autogestion(username)
creador=consulta_id(creadopor)
    # only create it if it does not exist
if usuario_clum=='no creado':
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user_insert'),
passwd=myconf.take('datos.clum_pass_insert'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
modeloclum="(first_name,last_name,email,username,registration_id,is_active,created_on,created_by,respuesta,pregunta,tyc)"
valores="""'%s','%s','%s','%s','%s','T','%s','%s','Ninguna','Ninguna','T'"""%(nombre,apellido,correo,username,username,datetime.datetime.now(),creador)
#log("valores"+str(valores))
sqladd="""insert into auth_user %s values (%s);"""%(modeloclum,valores)
cursor.execute(sqladd)
dbmysql.commit()
registro=cursor.fetchall()
log("fetch: "+str(registro))
dbmysql.close()
log("agregado a clum: "+str(valores))
retorno="agregado ("+str(valores)+") fetch: "+str(registro)
return ['ok',retorno]
else:
return ['error','usuario ya existe en autogestion']
def elimino_autogestion(username):
log("intento borrar de clum: "+str(username))
consulta=seguusua(username)
if consulta[0]=='error':
log('no existe en seguusua')
return ['error',consulta[1]]
usuario_clum=consulta_emailalt(username)[1]
    if usuario_clum!='no creado':
dbmysql = MySQLdb.connect(
host=myconf.take('datos.clum_srv'),
port=int(myconf.take('datos.clum_port')),
user=myconf.take('datos.clum_user_insert'),
passwd=myconf.take('datos.clum_pass_insert'),
db=myconf.take('datos.clum_db'))
cursor = dbmysql.cursor()
sqldelete="""delete from auth_user where username='%s';"""%(username)
log(sqldelete)
cursor.execute(sqldelete)
dbmysql.commit()
usuario_clum=consulta_emailalt(username)[1]
        if usuario_clum=='no creado':
return['ok','borrado']
else:
return['error','no borrado']
def clave_unificada(usuario, id_clave, **kwargs):
    # the password is stored temporarily in memory (redis).
    # check that all services are available; the user must exist in kerberos and have the mr branch, sw is optional
    # the caller stores the password with
    # redis_server.setex("new"+session.sesiong,base64.b64encode(session.sesiong+request.vars.newpass),tiemposession+10)
# kerberos
    log('cambio clave ' + str(usuario))
if hasattr(auth.user, 'username'):
datamap = dni_datos_map(seguusua(auth.user.username)[1]['DNI'])
if datamap[0]=='ok':
usuarioadmin=datamap[1]['NOMBRE']+' '+datamap[1]['APELLIDO']+' ('+auth.user.username+')'
else:
usuarioadmin=auth.user.username
else:
usuarioadmin='admin'
fechacambio=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if ('solomr' in kwargs):
solomr = kwargs['solomr']
else:
solomr = False
existe_krb=principal_krb(usuario)
if existe_krb[0]=='error':
if existe_krb[1]=='no existe en kerberos':
            # try to create it
resultado_krb=crear_princ_kerberos(usuario)
if resultado_krb[0]=='error':
                # could not create it; bail out and return the error
return resultado_krb
else:
            # bail out on any other kind of error
return existe_krb
    # at this point the user is guaranteed to exist in kerberos
    # fetch the password
try:
clave = base64.b64decode(redis_server.get(id_clave)).replace(id_clave,'')
except Exception as e:
return ['error','no pude obtener la clave '+str(e.args)]
    # the kerberos change is the only one that cannot be rolled back. Try it, otherwise return an error
rtdo_krb = cambiar_kerberos(usuario,clave)
if rtdo_krb[0]=='error':
return rtdo_krb
# ldap
ramasldap=ramas(usuario)
if ramasldap[0] == 'ok':
log('intento reparar rama mr')
        # the user exists in ldap; try to update its data against map
consultadni=seguusua(usuario)
if consultadni[0] == 'ok':
dni=consultadni[1]['DNI']
consultamap=dni_datos_map(dni)
if consultamap[0]=='ok':
                # we have everything needed to update ldap against map
apellido=consultamap[1]['APELLIDO']
nombre=consultamap[1]['NOMBRE']
rama='mr'
reparticion=consultamap[1]['REPARTICION']
elimino_ldap(usuario,'mr')
mr_result=agrego_ldap(usuario,rama,nombre,apellido,reparticion,clave=clave)
else:
mr_result=reparo_rama_mr(usuario,clave)
if mr_result[0]=='ok':
# seguridadweb
                # move this elsewhere!
existe_sw=sw_datos(usuario)
if existe_sw[0]=='ok':
rtdo_sw=sw_cambio_pass(usuario, clave)
else:
rtdo_sw=['ok','no tiene sw']
if solomr==True:
                    # already changed when the mr branch was repaired
rtdo_ldap = mr_result
else:
                    # change the password in the remaining branches
rtdo_ldap=ldap_cambio_pass(usuario,clave)
avisocorreo=envio_aviso_cambio(usuario,usuarioadmin,fechacambio)
                log(str(usuario)+str(' krb: '+rtdo_krb[1])+' sw: '+str(rtdo_sw[1])+' ldap: '+str(rtdo_ldap[1])+' aviso: '+str(avisocorreo))
if rtdo_krb[0]=='ok' and rtdo_sw[0]=='ok' and rtdo_ldap[0]=='ok':
return ['ok', 'clave cambiada con exito']
else:
return ['ok', 'clave cambiada con errores: krb '+str(rtdo_krb[1])+' sw '+str(rtdo_sw[1])+' ldap '+str(rtdo_ldap[1])]
else:
log('algo fallo en reparar mr: '+str(mr_result))
return mr_result
else:
log('algo fallo en la consulta en seguusua: '+str(consultadni))
return consultadni
else:
log('algo fallo en la consulta a ldap :'+str(ramasldap))
return ramasldap
def test_clave_unificada():
    # required variables
session.last_login='prueba1'
session.clave_id='aaa'
request.vars.fnewpass='Qq9'+str(id_generator(size=12))
redis_server.setex(session.clave_id,base64.b64encode(session.clave_id+request.vars.fnewpass),tiemposession+100)
return ['clave: '+str(request.vars.fnewpass),clave_unificada(session.last_login,session.clave_id,solomr=True)]
def envio_aviso_cambio(usuario,usuarioadmin,fechacambio):
anio_correo = str(datetime.datetime.now().year)
    # look up the self-service (autogestion) email
mailalternativo=consulta_emailalt(usuario)
    # check that the user has the webmail branch
casilla=consulta_casilla(usuario)
correo=[]
if mailalternativo[0]=='ok':
correo.append(mailalternativo[1])
if casilla[0]=='ok':
correo.append(str(usuario)+'@rosario.gov.ar')
if correo==[]:
mensaje='no hay direcciones disponibles donde enviar la notificacion '+ str([usuario,usuarioadmin,fechacambio])
log(mensaje)
return ['error',mensaje]
    # build the email
mensaje_text = """Estimado Usuario %s:
Su clave de acceso fue cambiada por %s el %s
SI UD NO HA SOLICITADO DICHO CAMBIO, LE ROGAMOS INFORME ESTE INCIDENTE A MESA DE AYUDA
Para poder utilizar los servicios informáticos, ahora deberá cambiarla Ud. personalmente,
de la siguiente manera:
* Ingrese a http://www.rosario.gov.ar/unificada
* Ingresar Usuario y Contraseña y Aceptar.
* Ingresar Nueva Contraseña, repetirla para confirmación y Aceptar.
Recuerde respetar las siguientes directivas:
* Mantenga su contraseña en secreto
* Cambie su contraseña cuando sospeche que alguien más la conoce
* Seleccione una contraseña 'de calidad':
o Que sea fácil de recordar.
o Que no esté basada en algún dato que otra persona pueda adivinar u obtener fácilmente mediante información relacionada con usted, por ejemplo
nombres, números de teléfono, patente de su vehículo, fecha de nacimiento, etc.
o Que no tenga caracteres idénticos consecutivos o grupos
totalmente numéricos o totalmente alfabéticos
o Que contengan diferentes clases de caracteres. Son diferentes clases de caracteres: letras mayúsculas, letras minúsculas, números y
signos de puntuación
o Evite reutilizar o reciclar viejas contraseñas
* Notifique cualquier incidente de seguridad relacionado con sus contraseñas: pérdida, robo o indicio de pérdida de confidencialidad.
"""%(usuario, usuarioadmin, fechacambio)
anio_correo = str(datetime.datetime.now().year)
open_mensaje_html = open("applications/useradm/static/correos/correo3.html", "r")
mensaje_html = open_mensaje_html.read()
mensaje_html = mensaje_html.replace("anio_correo", anio_correo)
mensaje_html = mensaje_html.replace("mensaje_correo", mensaje_text.replace("\n", '<BR>').replace('https://www.rosario.gov.ar/unificada',"<a href='https://www.rosario.gov.ar/unificada'>Clave unificada</a>"))
#session.exito = "Correo enviado a "+str(correo)[:5]+"...@..."+str(correo)[-8:]
#mail.send(to=[correo],subject='Usuario para Recibo Digital MR',reply_to='seguridad@rosario.gov.ar',message=(mensaje_text, mensaje_html))
mensajemail="Aviso a "+str(usuario)+" al correo "+str(correo)+" en "+str(URL('index',host=True))
asunto='Aviso de Cambio de clave desde soporte'
mail.send(to=correo,subject=asunto,reply_to='auditoria-sec@rosario.gov.ar', message=(mensaje_text, mensaje_html))
mail.send(to=['mredond1@rosario.gov.ar'],subject='Cambio clave useradm',reply_to='auditoria-sec@rosario.gov.ar', message=mensajemail)
log("Correo enviado a " + str(correo) + " usuario: " + str(usuario))
return ['ok',"Correo enviado a "+str(correo)+" usuario: "+str(usuario)+ " cambiado por: "+str(usuarioadmin)]
    # sent to the @rosario.gov.ar address (when webmail exists) and to the alternate address
def test_envio_aviso_cambio():
resultado=envio_aviso_cambio('mredond1','test','1/1/2018')
esperado=['ok', "Correo enviado a ['redondomarco@gmail.com', 'mredond1@rosario.gov.ar'] usuario: mredond1 cambiado por: test"]
if resultado==esperado:
return True
def desbloquear(usuario):
"""
    Try to unlock the user's password
"""
try:
        # unlock in seguridadweb
unlocksw = sw_desbloquear(usuario)
        # unlock in ldap
unlockldap = ldap_desbloquear(usuario)
return['ok', unlocksw[1], unlockldap[1]]
except Exception as e:
return ['error', str(e)]
|
redondomarco/useradm
|
src/models/unificada.py
|
Python
|
gpl-3.0
| 16,421 | 0.015982 |
# Test which PDB entries error on PDB/mmCIF parsers
# Writes output to a file labelled with the date it was run
import os
from datetime import datetime
from math import ceil
from Bio.PDB import PDBList
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.MMCIFParser import MMCIFParser
start = datetime.now()
basedir = "."
pdbl = PDBList()
pdblist = pdbl.get_all_entries()
outstrs = ["Checking all PDB entries at {}".format(start.isoformat()),
"Checking {} entries".format(len(pdblist))]
pdb_parser = PDBParser()
mmcif_parser = MMCIFParser()
for pu in sorted(pdblist):
p = pu.lower()
try:
pdbl.retrieve_pdb_file(p, pdir=basedir, file_format="pdb")
except:
# Not having a PDB file is acceptable, though a failure to download an
# available file may hide an error in parsing
try:
os.remove("{}/pdb{}.ent".format(basedir, p))
except:
pass
if os.path.isfile("{}/pdb{}.ent".format(basedir, p)):
try:
s = pdb_parser.get_structure("", "{}/pdb{}.ent".format(basedir, p))
except:
outstrs.append("{} - PDB parsing error".format(pu))
os.remove("{}/pdb{}.ent".format(basedir, p))
try:
pdbl.retrieve_pdb_file(p, pdir=basedir, file_format="mmCif")
except:
try:
os.remove("{}/{}.cif".format(basedir, p))
except:
pass
outstrs.append("{} - no mmCIF download".format(pu))
if os.path.isfile("{}/{}.cif".format(basedir, p)):
try:
s = mmcif_parser.get_structure("", "{}/{}.cif".format(basedir, p))
except:
outstrs.append("{} - mmCIF parsing error".format(pu))
os.remove("{}/{}.cif".format(basedir, p))
if len(outstrs) == 2:
outstrs.append("All entries read fine")
end = datetime.now()
outstrs.append("Time taken - {} minute(s)".format(int(ceil((end - start).seconds / 60))))
datestr = str(end.date()).replace("-", "")
# This overwrites any existing file
with open("{}/wholepdb_py_{}.txt".format(basedir, datestr), "w") as f:
for l in outstrs:
f.write(l + "\n")
|
jgreener64/pdb-benchmarks
|
checkwholepdb/checkwholepdb.py
|
Python
|
mit
| 2,107 | 0.004271 |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', include('dashboard.urls', namespace='dashboard')),
url(r'^admin/', include(admin.site.urls)),
url(r'^dashboard/', include('dashboard.urls', namespace='dashboard')),
# url(r'^uploads/', include('uploads.urls', namespace='uploads')),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
DigitalMockingbird/EULAThingy
|
eulathingy/urls.py
|
Python
|
mit
| 637 | 0.006279 |
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
An object that makes some of the attributes of your class persistent, pickling
them and lazily writing them to a file.
"""
# from the Python Standard Library
import os
import cPickle as pickle
import warnings
# from the pyutil library
import fileutil
import nummedobj
import twistedutil
# from the Twisted library
from twisted.python import log
class PickleSaver(nummedobj.NummedObj):
"""
This makes some of the attributes of your class persistent, saving
them in a pickle and saving them lazily.
The general idea: You are going to tell PickleSaver which of your
attributes ought to be persistently saved, and the name of a file to
save them in. Those attributes will get saved to disk, and when
your object is instantiated those attributes will get set to the
values loaded from the file.
Usage: inherit from PickleSaver and call PickleSaver.__init__() in your
constructor. You will pass arguments to PickleSaver.__init__()
telling it which attributes to save, which file to save them in, and
what values they should have if there is no value stored for them in
the file.
Note: do *not* assign values to your persistent attributes in your
constructor, because you might thus overwrite their persistent
values.
Then whenever you change one of the persistent attributes, call
self.lazy_save() (it won't *really* save -- it'll just schedule a
save for DELAY minutes later.) If you update an attribute and
forget to call self.lazy_save() then the change will not be saved,
unless you later call self.lazy_save() before you shut down.
Data could be lost if the Python interpreter were to die
unexpectedly (for example, due to a segfault in a compiled machine
code module or due to the Python process being killed without
warning via SIGKILL) before the delay passes. However if the Python
interpreter shuts down cleanly (i.e., if it garbage collects and
invokes the __del__ methods of the collected objects), then the data
will be saved at that time (unless your class has the "not-collectable"
problem: http://python.org/doc/current/lib/module-gc.html -- search
in text for "uncollectable").
Note: you can pass DELAY=0 to make PickleSaver a not-so-lazy saver.
The advantage of laziness is that you don't touch the disk as
often -- touching disk is a performance cost.
To cleanly shutdown, invoke shutdown(). Further operations after that
will result in exceptions.
"""
class ExtRes:
"""
This is for holding things (external resources) that PickleSaver needs
to finalize after PickleSaver is killed. (post-mortem finalization)
In particular, this holds the names and values of all attributes
that have been changed, so that after the PickleSaver is
garbage-collected those values will be saved to the persistent file.
"""
def __init__(self, fname, objname):
self.fname = fname
self.objname = objname
self.dirty = False # True iff the attrs have been changed and need to be saved to disk; When you change this flag from False to True, you schedule a save task for 10 minutes later. When the save task goes off it changes the flag from True to False.
self.savertask = None
self.valstr = None # the pickled (serialized, string) contents of the attributes that should be saved
def _save_to_disk(self):
if self.valstr is not None:
log.msg("%s._save_to_disk(): fname: %s" % (self.objname, self.fname,))
of = open(self.fname + ".tmp", "wb")
of.write(self.valstr)
of.flush()
of.close()
of = None
fileutil.remove_if_possible(self.fname)
fileutil.rename(self.fname + ".tmp", self.fname)
log.msg("%s._save_to_disk(): now, having finished write(), os.path.isfile(%s): %s" % (self, self.fname, os.path.isfile(self.fname),))
self.valstr = None
self.dirty = False
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def shutdown(self):
if self.dirty:
self._save_to_disk()
if self.savertask:
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def __del__(self):
self.shutdown()
def __init__(self, fname, attrs, DELAY=60*60, savecb=None):
"""
@param attrs: a dict whose keys are the names of all the attributes to be persistently stored and whose values are the initial default value that the attribute gets set to the first time it is ever used; After this first initialization, the value will be persistent so the initial default value will never be used again.
@param savecb: if not None, then it is a callable that will be called after each save completes (useful for unit tests) (savecb doesn't get called after a shutdown-save, only after a scheduled save)
"""
warnings.warn("deprecated", DeprecationWarning)
nummedobj.NummedObj.__init__(self)
self._DELAY = DELAY
self._attrnames = attrs.keys()
self._extres = PickleSaver.ExtRes(fname=fname, objname=self.__repr__())
self._savecb = savecb
for attrname, defaultval in attrs.items():
setattr(self, attrname, defaultval)
try:
attrdict = pickle.loads(open(self._extres.fname, "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le:
try:
attrdict = pickle.loads(open(self._extres.fname + ".tmp", "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le2:
log.msg("Got exception attempting to load attrs. (This is normal if this is the first time you've used this persistent %s object.) fname: %s, le: %s, le2: %s" % (self.__class__, self._extres.fname, le, le2,))
self.lazy_save()
def _store_attrs_in_extres(self):
d = {}
for attrname in self._attrnames:
d[attrname] = getattr(self, attrname)
# log.msg("%s._store_attrs_in_extres: attrname: %s, val: %s" % (self, attrname, getattr(self, attrname),))
# pickle the attrs now, to ensure that there are no reference cycles
self._extres.valstr = pickle.dumps(d, True)
# log.msg("%s._store_attrs_in_extres: valstr: %s" % (self, self._extres.valstr,))
self._extres.dirty = True
def _save_to_disk(self):
log.msg("%s._save_to_disk()" % (self,))
self._extres._save_to_disk()
if self._savecb:
self._savecb()
def _lazy_save(self, delay=None):
""" @deprecated: use lazy_save() instead """
return self.lazy_save(delay)
def lazy_save(self, delay=None):
"""
@param delay: how long from now before the data gets saved to disk, or `None' in order to use the default value provided in the constructor
"""
if delay is None:
delay=self._DELAY
# copy the values into extres so that if `self' gets garbage-collected the values will be written to disk during post-mortem finalization. (This also marks it as dirty.)
self._store_attrs_in_extres()
newsavetask = twistedutil.callLater_weakly(delay, self._save_to_disk)
if self._extres.savertask:
if self._extres.savertask.callId.getTime() < newsavetask.callId.getTime():
try:
newsavetask.callId.cancel()
except:
pass
else:
try:
self._extres.savertask.callId.cancel()
except:
pass
self._extres.savertask = newsavetask
else:
self._extres.savertask = newsavetask
def shutdown(self):
        self._extres.shutdown()
        self._extres = None
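# Minimal usage sketch (illustrative only; the file name and attribute names are
# assumptions, and a Twisted reactor is normally running when lazy_save is used):
if __name__ == '__main__':
    class _Counter(PickleSaver):
        def __init__(self):
            # persistent attrs get their defaults from `attrs`; do not assign them here
            PickleSaver.__init__(self, fname='counter.pickle', attrs={'hits': 0}, DELAY=0)
        def bump(self):
            self.hits += 1
            self.lazy_save()  # schedules the save (DELAY=0 keeps it near-immediate)
    c = _Counter()
    c.bump()
    c.shutdown()  # flushes any pending state to counter.pickle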
|
heathseals/CouchPotatoServer
|
libs/pyutil/PickleSaver.py
|
Python
|
gpl-3.0
| 8,932 | 0.002799 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_instance_template
version_added: "2.3"
short_description: create or destroy instance templates of Compute Engine of GCP.
description:
    - Creates or destroys Google instance templates
of Compute Engine of Google Cloud Platform.
options:
state:
description:
- The desired state for the instance template.
default: "present"
choices: ["present", "absent"]
name:
description:
- The name of the GCE instance template.
required: true
default: null
size:
description:
- The desired machine type for the instance template.
default: "f1-micro"
source:
description:
- A source disk to attach to the instance.
Cannot specify both I(image) and I(source).
default: null
image:
description:
- The image to use to create the instance.
        Cannot specify both I(image) and I(source).
default: null
image_family:
description:
- The image family to use to create the instance.
If I(image) has been used I(image_family) is ignored.
Cannot specify both I(image) and I(source).
default: null
disk_type:
description:
- Specify a C(pd-standard) disk or C(pd-ssd)
for an SSD disk.
default: pd-standard
disk_auto_delete:
description:
- Indicate that the boot disk should be
deleted when the Node is deleted.
default: true
network:
description:
- The network to associate with the instance.
default: "default"
subnetwork:
description:
- The Subnetwork resource name for this instance.
default: null
can_ip_forward:
description:
- Set to True to allow instance to
send/receive non-matching src/dst packets.
default: false
external_ip:
description:
- The external IP address to use.
If C(ephemeral), a new non-static address will be
used. If C(None), then no external address will
be used. To use an existing static IP address
specify address name.
default: "ephemeral"
service_account_email:
description:
- service account email
default: null
service_account_permissions:
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
automatic_restart:
description:
- Defines whether the instance should be
automatically restarted when it is
terminated by Compute Engine.
default: null
preemptible:
description:
- Defines whether the instance is preemptible.
default: null
tags:
description:
- a comma-separated list of tags to associate with the instance
default: null
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
default: null
description:
description:
- description of instance template
default: null
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
default: null
nic_gce_struct:
description:
- Support passing in the GCE-specific
formatted networkInterfaces[] structure.
default: null
disks_gce_struct:
description:
- Support passing in the GCE-specific
        formatted disks[] structure. Case sensitive.
see U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
default: null
version_added: "2.4"
project_id:
description:
- your GCE project ID
default: null
pem_file:
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
default: null
credentials_file:
description:
- path to the JSON file associated with the service account email
default: null
subnetwork_region:
version_added: "2.4"
description:
- Region that subnetwork resides in. (Required for subnetwork to successfully complete)
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- JSON credentials strongly preferred.
author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
'''
EXAMPLES = '''
# Usage
- name: create instance template named foo
gce_instance_template:
name: foo
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "your-project-name"
credentials_file: "/path/to/your-key.json"
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
# Example Playbook
- name: Compute Engine Instance Template Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create instance template
gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
- name: delete instance template
gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: absent
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
# Example playbook using disks_gce_struct
- name: Compute Engine Instance Template Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create instance template
gce_instance_template:
name: foo
size: n1-standard-1
state: present
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
disks_gce_struct:
- device_name: /dev/sda
boot: true
autoDelete: true
initializeParams:
diskSizeGb: 30
diskType: pd-ssd
sourceImage: projects/debian-cloud/global/images/family/debian-8
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def get_info(inst):
"""Retrieves instance template information
"""
return({
'name': inst.name,
'extra': inst.extra,
})
def create_instance_template(module, gce):
"""Create an instance template
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
instance template information
"""
# get info from module
name = module.params.get('name')
size = module.params.get('size')
source = module.params.get('source')
image = module.params.get('image')
image_family = module.params.get('image_family')
disk_type = module.params.get('disk_type')
disk_auto_delete = module.params.get('disk_auto_delete')
network = module.params.get('network')
subnetwork = module.params.get('subnetwork')
subnetwork_region = module.params.get('subnetwork_region')
can_ip_forward = module.params.get('can_ip_forward')
external_ip = module.params.get('external_ip')
service_account_email = module.params.get('service_account_email')
service_account_permissions = module.params.get(
'service_account_permissions')
on_host_maintenance = module.params.get('on_host_maintenance')
automatic_restart = module.params.get('automatic_restart')
preemptible = module.params.get('preemptible')
tags = module.params.get('tags')
metadata = module.params.get('metadata')
description = module.params.get('description')
disks = module.params.get('disks')
disks_gce_struct = module.params.get('disks_gce_struct')
changed = False
# args of ex_create_instancetemplate
gce_args = dict(
name="instance",
size="f1-micro",
source=None,
image=None,
disk_type='pd-standard',
disk_auto_delete=True,
network='default',
subnetwork=None,
can_ip_forward=None,
external_ip='ephemeral',
service_accounts=None,
on_host_maintenance=None,
automatic_restart=None,
preemptible=None,
tags=None,
metadata=None,
description=None,
disks_gce_struct=None,
nic_gce_struct=None
)
gce_args['name'] = name
gce_args['size'] = size
if source is not None:
gce_args['source'] = source
if image:
gce_args['image'] = image
else:
if image_family:
image = gce.ex_get_image_from_family(image_family)
gce_args['image'] = image
else:
gce_args['image'] = "debian-8"
gce_args['disk_type'] = disk_type
gce_args['disk_auto_delete'] = disk_auto_delete
gce_network = gce.ex_get_network(network)
gce_args['network'] = gce_network
if subnetwork is not None:
gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
if can_ip_forward is not None:
gce_args['can_ip_forward'] = can_ip_forward
if external_ip == "ephemeral":
instance_external_ip = external_ip
elif external_ip == "none":
instance_external_ip = None
else:
try:
instance_external_ip = gce.ex_get_address(external_ip)
except GoogleBaseError as err:
# external_ip is name ?
instance_external_ip = external_ip
gce_args['external_ip'] = instance_external_ip
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP:
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
gce_args['service_accounts'] = ex_sa_perms
if on_host_maintenance is not None:
gce_args['on_host_maintenance'] = on_host_maintenance
if automatic_restart is not None:
gce_args['automatic_restart'] = automatic_restart
if preemptible is not None:
gce_args['preemptible'] = preemptible
if tags is not None:
gce_args['tags'] = tags
if disks_gce_struct is not None:
gce_args['disks_gce_struct'] = disks_gce_struct
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
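    # For example, metadata = {'startup-script': 'echo hi'} becomes
    # {'items': [{'key': 'startup-script', 'value': 'echo hi'}]} on libcloud < 0.15
    # and is passed through as the plain dict otherwise.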
if metadata:
if isinstance(metadata, dict):
md = metadata
else:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError as e:
module.fail_json(msg='bad metadata syntax')
if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
items = []
for k, v in md.items():
items.append({"key": k, "value": v})
metadata = {'items': items}
else:
metadata = md
gce_args['metadata'] = metadata
if description is not None:
gce_args['description'] = description
instance = None
try:
instance = gce.ex_get_instancetemplate(name)
except ResourceNotFoundError:
try:
instance = gce.ex_create_instancetemplate(**gce_args)
changed = True
except GoogleBaseError as err:
module.fail_json(
msg='Unexpected error attempting to create instance {}, error: {}'
.format(
instance,
err.value
)
)
if instance:
json_data = get_info(instance)
else:
module.fail_json(msg="no instance template!")
return (changed, json_data, name)
def delete_instance_template(module, gce):
""" Delete instance template.
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
instance template information
"""
name = module.params.get('name')
current_state = "absent"
changed = False
# get instance template
instance = None
try:
instance = gce.ex_get_instancetemplate(name)
current_state = "present"
except GoogleBaseError as err:
        json_data = dict(msg='instance template does not exist')
if current_state == "present":
rc = instance.destroy()
if rc:
changed = True
else:
module.fail_json(
msg='instance template destroy failed'
)
json_data = {}
return (changed, json_data, name)
def module_controller(module, gce):
''' Control module state parameter.
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
nothing
Exit:
AnsibleModule object exit with json data.
'''
json_output = dict()
state = module.params.get("state")
if state == "present":
(changed, output, name) = create_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
elif state == "absent":
(changed, output, name) = delete_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
module.exit_json(**json_output)
def check_if_system_state_would_be_changed(module, gce):
''' check_if_system_state_would_be_changed !
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
system_state changed
'''
changed = False
current_state = "absent"
state = module.params.get("state")
name = module.params.get("name")
instance = None
try:
instance = gce.ex_get_instancetemplate(name)
current_state = "present"
except GoogleBaseError as err:
module.fail_json(msg='GCE get instancetemplate problem')
if current_state != state:
changed = True
if current_state == "absent":
if changed:
output = 'instance template {} will be created'.format(name)
else:
output = 'nothing to do for instance template {} '.format(name)
if current_state == "present":
if changed:
output = 'instance template {} will be destroyed'.format(name)
else:
output = 'nothing to do for instance template {} '.format(name)
return (changed, output)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
            name=dict(required=True, aliases=['base_name']),
size=dict(default='f1-micro'),
source=dict(),
image=dict(),
image_family=dict(default='debian-8'),
disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
disk_auto_delete=dict(type='bool', default=True),
network=dict(default='default'),
subnetwork=dict(),
can_ip_forward=dict(type='bool', default=False),
external_ip=dict(default='ephemeral'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
automatic_restart=dict(type='bool', default=None),
preemptible=dict(type='bool', default=None),
tags=dict(type='list'),
metadata=dict(),
description=dict(),
disks=dict(type='list'),
nic_gce_struct=dict(type='list'),
project_id=dict(),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
subnetwork_region=dict(),
disks_gce_struct=dict(type='list')
),
mutually_exclusive=[['source', 'image']],
required_one_of=[['image', 'image_family']],
supports_check_mode=True
)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(
msg='libcloud with GCE support (0.17.0+) required for this module')
try:
gce = gce_connect(module)
except GoogleBaseError as err:
        module.fail_json(msg='GCE connection failed')
if module.check_mode:
(changed, output) = check_if_system_state_would_be_changed(module, gce)
module.exit_json(
changed=changed,
msg=output
)
else:
module_controller(module, gce)
if __name__ == '__main__':
main()
|
Tatsh-ansible/ansible
|
lib/ansible/modules/cloud/google/gce_instance_template.py
|
Python
|
gpl-3.0
| 19,433 | 0.000412 |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: romanText/rtObjects.py
# Purpose: music21 objects for processing roman numeral analysis text files
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2011-2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Objects for processing roman numeral analysis text files, as defined and
demonstrated by Dmitri Tymoczko.
'''
#from __future__ import unicode_literals
from fractions import Fraction
import io
import re
import unittest
from music21 import common
from music21 import exceptions21
from music21 import environment
from music21 import key
_MOD = 'romanText.rtObjects.py'
environLocal = environment.Environment(_MOD)
# alternate endings might end with a, b, or c;
# zero or more of everything after the first number is allowed
reMeasureTag = re.compile(r'm[0-9]+[a-b]*-*[0-9]*[a-b]*')
reVariant = re.compile(r'var[0-9]+')
reVariantLetter = re.compile(r'var([A-Z]+)')
reNoteTag = re.compile(r'[Nn]ote:')
reOptKeyOpenAtom = re.compile(r'\?\([A-Ga-g]+[b#]*:')
reOptKeyCloseAtom = re.compile(r'\?\)[A-Ga-g]+[b#]*:?')
# ?g:( ?
reKeyAtom = re.compile('[A-Ga-g]+[b#]*;:')
reAnalyticKeyAtom = re.compile('[A-Ga-g]+[b#]*:')
reKeySignatureAtom = re.compile(r'KS\-?[0-7]')
# must distinguish b3 from bVII; there may be b1.66.5
reBeatAtom = re.compile(r'b[1-9.]+')
reRepeatStartAtom = re.compile(r'\|\|\:')
reRepeatStopAtom = re.compile(r'\:\|\|')
reNoChordAtom = re.compile('NC')
#-------------------------------------------------------------------------------
class RomanTextException(exceptions21.Music21Exception):
pass
class RTTokenException(exceptions21.Music21Exception):
pass
class RTHandlerException(exceptions21.Music21Exception):
pass
class RTFileException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class RTToken(object):
'''Stores each linear, logical entity of a RomanText.
A multi-pass parsing procedure is likely necessary, as RomanText permits
    a variety of groupings and markings.
>>> rtt = romanText.rtObjects.RTToken('||:')
>>> rtt
<RTToken '||:'>
A standard RTToken returns `False` for all of the following.
>>> rtt.isComposer() or rtt.isTitle() or rtt.isPiece()
False
>>> rtt.isAnalyst() or rtt.isProofreader()
False
>>> rtt.isTimeSignature() or rtt.isKeySignature() or rtt.isNote()
False
>>> rtt.isForm() or rtt.isPedal() or rtt.isMeasure() or rtt.isWork()
False
>>> rtt.isMovement() or rtt.isAtom()
False
'''
def __init__(self, src=u''):
self.src = src # store source character sequence
self.lineNumber = 0
def __repr__(self):
return '<RTToken %r>' % self.src
def isComposer(self):
return False
def isTitle(self):
return False
def isPiece(self):
return False
def isAnalyst(self):
return False
def isProofreader(self):
return False
def isTimeSignature(self):
return False
def isKeySignature(self):
return False
def isNote(self):
return False
def isForm(self):
'''Occasionally found in header.
'''
return False
def isMeasure(self):
return False
def isPedal(self):
return False
def isWork(self):
return False
def isMovement(self):
return False
def isAtom(self):
'''Atoms are any untagged data; generally only found inside of a
measure definition.
'''
return False
class RTTagged(RTToken):
    '''In romanText, some data elements are tags: that is, a tag name, a colon,
optional whitespace, and data. In non-RTTagged elements, there is just
data.
All tagged tokens are subclasses of this class. Examples are:
Title: Die Jahrzeiten
Composer: Fanny Mendelssohn
>>> rttag = romanText.rtObjects.RTTagged('Title: Die Jahrzeiten')
>>> rttag.tag
'Title'
>>> rttag.data
'Die Jahrzeiten'
>>> rttag.isTitle()
True
>>> rttag.isComposer()
False
'''
def __init__(self, src =u''):
RTToken.__init__(self, src)
# try to split off tag from data
self.tag = ''
self.data = ''
if ':' in src:
iFirst = src.find(':') # first index found at
self.tag = src[:iFirst].strip()
# add one to skip colon
self.data = src[iFirst+1:].strip()
else: # we do not have a clear tag; perhaps store all as data
self.data = src
def __repr__(self):
return '<RTTagged %r>' % self.src
def isComposer(self):
        '''True if the tag represents a composer.
>>> rth = romanText.rtObjects.RTTagged('Composer: Claudio Monteverdi')
>>> rth.isComposer()
True
>>> rth.isTitle()
False
>>> rth.isWork()
False
>>> rth.data
'Claudio Monteverdi'
'''
if self.tag.lower() in ['composer']:
return True
return False
def isTitle(self):
'''True if tag represents a title, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Title: This is a title.')
>>> tag.isTitle()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isTitle()
False
'''
if self.tag.lower() in ['title']:
return True
return False
def isPiece(self):
'''
True if tag represents a piece, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Piece: This is a piece.')
>>> tag.isPiece()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isPiece()
False
'''
if self.tag.lower() in ['piece']:
return True
return False
def isAnalyst(self):
'''True if tag represents a analyst, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Analyst: This is an analyst.')
>>> tag.isAnalyst()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isAnalyst()
False
'''
if self.tag.lower() in ['analyst']:
return True
return False
def isProofreader(self):
'''True if tag represents a proofreader, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Proofreader: This is a proofreader.')
>>> tag.isProofreader()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isProofreader()
False
'''
if self.tag.lower() in ['proofreader', 'proof reader']:
return True
return False
def isTimeSignature(self):
'''True if tag represents a time signature, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('TimeSignature: This is a time signature.')
>>> tag.isTimeSignature()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isTimeSignature()
False
TimeSignature header data can be found intermingled with measures.
'''
if self.tag.lower() in ['timesignature', 'time signature']:
return True
return False
def isKeySignature(self):
'''True if tag represents a key signature, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('KeySignature: This is a key signature.')
>>> tag.isKeySignature()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isKeySignature()
False
KeySignatures are a type of tagged data found outside of measures,
such as "Key Signature: Bb," meaning one flat.
Note: this is not the same as a key definition found inside of a
Measure. These are represented by RTKey rtObjects, defined below, and are
not RTTagged rtObjects, but RTAtom subclasses.
'''
if self.tag.lower() in ['keysignature', 'key signature']:
return True
else:
return False
def isNote(self):
'''True if tag represents a note, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Note: This is a note.')
>>> tag.isNote()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isNote()
False
'''
if self.tag.lower() in ['note']:
return True
return False
def isForm(self):
'''True if tag represents a form, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Form: This is a form.')
>>> tag.isForm()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isForm()
False
'''
if self.tag.lower() in ['form']:
return True
return False
def isPedal(self):
'''True if tag represents a pedal, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Pedal: This is a pedal.')
>>> tag.isPedal()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isPedal()
False
'''
if self.tag.lower() in ['pedal']:
return True
return False
def isWork(self):
'''True if tag represents a work, otherwise False.
The "work" is not defined as a header tag, but is used to represent
        all tags, often placed after Composer, for the work or piece's designation.
>>> rth = romanText.rtObjects.RTTagged('Madrigal: 4.12')
>>> rth.isTitle()
False
>>> rth.isWork()
True
>>> rth.tag
'Madrigal'
>>> rth.data
'4.12'
'''
if self.tag == 'Work' or self.tag == 'Madrigal':
return True
else:
return False
def isMovement(self):
'''True if tag represents a movement, otherwise False.
>>> tag = romanText.rtObjects.RTTagged('Movement: This is a movement.')
>>> tag.isMovement()
True
>>> tag = romanText.rtObjects.RTTagged('Nothing: Nothing at all.')
>>> tag.isMovement()
False
'''
if self.tag.lower() in ['movement']:
return True
return False
class RTMeasure(RTToken):
'''In RomanText, measures are given one per line and always start with 'm'.
For instance:
m4 i b3 v b4 VI
m5 b2 g: IV b4 V
m6 i
m7 D: V
Measure ranges can be used and copied, such as:
m8-m9=m4-m5
RTMeasure objects can also define variant readings for a measure:
m1 ii
m1var1 ii b2 ii6 b3 IV
Variants are not part of the tag, but are read into an attribute.
Endings are indicated by a single letter after the measure number, such as
"a" for first ending.
>>> rtm = romanText.rtObjects.RTMeasure('m15a V6 b1.5 V6/5 b2 I b3 viio6')
>>> rtm.data
'V6 b1.5 V6/5 b2 I b3 viio6'
>>> rtm.number
[15]
>>> rtm.repeatLetter
['a']
>>> rtm.isMeasure()
True
'''
def __init__(self, src =u''):
RTToken.__init__(self, src)
# try to split off tag from data
self.tag = '' # the measure number or range
self.data = '' # only chord, phrase, and similar definitions
self.number = [] # one or more measure numbers
self.repeatLetter = [] # one or more repeat letters
self.variantNumber = None # a one-measure or short variant
self.variantLetter = None # a longer-variant that defines a different way of reading a large section
# store boolean if this measure defines copying another range
self.isCopyDefinition = False
# store processed tokens associated with this measure
self.atoms = []
if len(src) > 0:
self._parseAttributes(src)
def _getMeasureNumberData(self, src):
'''Return the number or numbers as a list, as well as any repeat
indications.
>>> rtm = romanText.rtObjects.RTMeasure()
>>> rtm._getMeasureNumberData('m77')
([77], [''])
>>> rtm._getMeasureNumberData('m123b-432b')
([123, 432], ['b', 'b'])
'''
        # note: this is a separate procedure b/c it is used to get copy
# boundaries
if '-' in src: # its a range
mnStart, mnEnd = src.split('-')
proc = [mnStart, mnEnd]
else:
proc = [src] # treat as one
number = []
repeatLetter = []
for mn in proc:
# append in order, start, end
numStr, alphaStr = common.getNumFromStr(mn)
number.append(int(numStr))
# remove all 'm' in alpha
alphaStr = alphaStr.replace('m', '')
repeatLetter.append(alphaStr)
return number, repeatLetter
def _parseAttributes(self, src):
# assume that we have already checked that this is a measure
g = reMeasureTag.match(src)
        if g is None: # no measure tag found
raise RTHandlerException('found no measure tag: %s' % src)
iEnd = g.end() # get end index
rawTag = src[:iEnd].strip()
self.tag = rawTag
rawData = src[iEnd:].strip() # may have variant
# get the number list from the tag
self.number, self.repeatLetter = self._getMeasureNumberData(rawTag)
# strip a variant indication off of rawData if found
g = reVariant.match(rawData)
if g is not None: # there is a variant tag
varStr = g.group(0)
self.variantNumber = int(common.getNumFromStr(varStr)[0])
self.data = rawData[g.end():].strip()
else:
self.data = rawData
g = reVariantLetter.match(rawData)
if g is not None: # there is a variant letter tag
varStr = g.group(1)
self.variantLetter = varStr
self.data = rawData[g.end():].strip()
if self.data.startswith('='):
self.isCopyDefinition = True
def __repr__(self):
if len(self.number) == 1:
numberStr = '%s' % self.number[0]
else:
numberStr = '%s-%s' % (self.number[0], self.number[1])
return '<RTMeasure %s>' % numberStr
def isMeasure(self):
return True
def getCopyTarget(self):
'''If this measure defines a copy operation, return two lists defining
the measures to copy; the second list has the repeat data.
>>> rtm = romanText.rtObjects.RTMeasure('m35-36 = m29-30')
>>> rtm.number
[35, 36]
>>> rtm.getCopyTarget()
([29, 30], ['', ''])
>>> rtm = romanText.rtObjects.RTMeasure('m4 = m1')
>>> rtm.number
[4]
>>> rtm.getCopyTarget()
([1], [''])
'''
# remove equal sign
rawData = self.data.replace('=', '').strip()
return self._getMeasureNumberData(rawData)
class RTAtom(RTToken):
    '''In RomanText, definitions of chords, phrase boundaries, open/close
    parentheses, beat indicators, etc. appear within measures (RTMeasure
    objects). These individual elements will be called Atoms, as they are data
    that is not tagged.
    Each atom stores a reference to its container (normally an RTMeasure).
>>> chordIV = romanText.rtObjects.RTAtom('IV')
>>> beat4 = romanText.rtObjects.RTAtom('b4')
>>> beat4
<RTAtom 'b4'>
>>> beat4.isAtom()
True
However, see RTChord, RTBeat, etc. which are subclasses of RTAtom
specifically for storing chords, beats, etc.
'''
def __init__(self, src =u'', container=None):
# this stores the source
RTToken.__init__(self, src)
self.container = container
def __repr__(self):
return '<RTAtom %r>' % self.src
def isAtom(self):
return True
# for lower level distinctions, use isinstance(), as each type has its own subclass.
class RTChord(RTAtom):
r'''An RTAtom subclass that defines a chord. Also contains a reference to
the container.
>>> chordIV = romanText.rtObjects.RTChord('IV')
>>> chordIV
<RTChord 'IV'>
'''
def __init__(self, src =u'', container=None):
RTAtom.__init__(self, src, container)
# store offset within measure
self.offset = None
# store a quarterlength duration
self.quarterLength = None
def __repr__(self):
return '<RTChord %r>' % self.src
class RTNoChord(RTAtom):
r'''An RTAtom subclass that defines absence of a chord. Also contains a
reference to the container.
>>> chordNC = romanText.rtObjects.RTNoChord('NC')
>>> chordNC
<RTNoChord 'NC'>
'''
def __init__(self, src =u'', container=None):
RTAtom.__init__(self, src, container)
# store offset within measure
self.offset = None
# store a quarterlength duration
self.quarterLength = None
def __repr__(self):
return '<RTNoChord %r>' % self.src
class RTBeat(RTAtom):
r'''An RTAtom subclass that defines a beat definition. Also contains a
reference to the container.
>>> beatFour = romanText.rtObjects.RTBeat('b4')
>>> beatFour
<RTBeat 'b4'>
'''
def __init__(self, src =u'', container=None):
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTBeat %r>' % self.src
def getBeatFloatOrFrac(self):
'''
Gets the beat number as a float or fraction. Time signature independent
>>> RTB = romanText.rtObjects.RTBeat
Simple ones:
>>> RTB('b1').getBeatFloatOrFrac()
1.0
>>> RTB('b2').getBeatFloatOrFrac()
2.0
etc.
with easy float:
>>> RTB('b1.5').getBeatFloatOrFrac()
1.5
>>> RTB('b1.25').getBeatFloatOrFrac()
1.25
with harder:
>>> RTB('b1.33').getBeatFloatOrFrac()
Fraction(4, 3)
>>> RTB('b2.66').getBeatFloatOrFrac()
Fraction(8, 3)
>>> RTB('b1.2').getBeatFloatOrFrac()
Fraction(6, 5)
        A third digit of .5 adds 1/2 of 1/DENOM to the preceding value. Here DENOM is 3 (in 5/3),
        so we add 1/6 to 5/3 to get 11/6:
>>> RTB('b1.66').getBeatFloatOrFrac()
Fraction(5, 3)
>>> RTB('b1.66.5').getBeatFloatOrFrac()
Fraction(11, 6)
Similarly .25 adds 1/4 of 1/DENOM... to get 21/12 or 7/4 or 1.75
>>> RTB('b1.66.25').getBeatFloatOrFrac()
1.75
And .75 adds 3/4 of 1/DENOM to get 23/12
>>> RTB('b1.66.75').getBeatFloatOrFrac()
Fraction(23, 12)
A weird way of writing 'b1.5'
>>> RTB('b1.33.5').getBeatFloatOrFrac()
1.5
'''
beatStr = self.src.replace('b', '')
# there may be more than one decimal in the number, such as
# 1.66.5, to show halfway through 2/3rd of a beat
parts = beatStr.split('.')
mainBeat = int(parts[0])
if len(parts) > 1: # 1.66
fracPart = common.addFloatPrecision('.' + parts[1])
else:
fracPart = 0.0
if len(parts) > 2: # 1.66.5
fracPartDivisor = float('.' + parts[2]) # 0.5
if isinstance(fracPart, float):
fracPart = Fraction.from_float(fracPart)
denom = fracPart.denominator
fracBeatFrac = common.opFrac(1./(denom/fracPartDivisor))
else:
fracBeatFrac = 0.0
if len(parts) > 3:
environLocal.printDebug(['got unexpected beat: %s' % self.src])
raise RTTokenException('cannot handle specification: %s' % self.src)
beat = common.opFrac(mainBeat + fracPart + fracBeatFrac)
return beat
def getOffset(self, timeSignature):
'''Given a time signature, return the offset position specified by this
beat.
>>> rtb = romanText.rtObjects.RTBeat('b1.5')
>>> rtb.getOffset(meter.TimeSignature('3/4'))
0.5
>>> rtb.getOffset(meter.TimeSignature('6/8'))
0.75
>>> rtb.getOffset(meter.TimeSignature('2/2'))
1.0
>>> rtb = romanText.rtObjects.RTBeat('b2')
>>> rtb.getOffset(meter.TimeSignature('3/4'))
1.0
>>> rtb.getOffset(meter.TimeSignature('6/8'))
1.5
>>> rtb = romanText.rtObjects.RTBeat('b1.66')
>>> rtb.getOffset(meter.TimeSignature('6/8'))
1.0
>>> rtc = romanText.rtObjects.RTBeat('b1.66.5')
>>> rtc.getOffset(meter.TimeSignature('6/8'))
1.25
'''
from music21 import meter
beat = self.getBeatFloatOrFrac()
#environLocal.printDebug(['using beat value:', beat])
# TODO: check for exceptions/errors if this beat is bad
try:
post = timeSignature.getOffsetFromBeat(beat)
except meter.TimeSignatureException:
environLocal.printDebug(['bad beat specification: %s in a meter of %s' % (self.src, timeSignature)])
post = 0.0
return post
class RTKeyTypeAtom(RTAtom):
'''RTKeyTypeAtoms contain utility functions for all Key-type tokens, i.e.
RTKey, RTAnalyticKey, but not KeySignature.
>>> gminor = romanText.rtObjects.RTKeyTypeAtom('g;:')
>>> gminor
<RTKeyTypeAtom 'g;:'>
'''
def __repr__(self):
return '<RTKeyTypeAtom %r>' % self.src
def getKey(self):
'''
This returns a Key, not a KeySignature object
'''
myKey = self.src.rstrip(self.footerStrip)
myKey = key.convertKeyStringToMusic21KeyString(myKey)
return key.Key(myKey)
def getKeySignature(self):
'''Get a KeySignature object.
'''
myKey = self.getKey()
return key.KeySignature(myKey.sharps)
class RTKey(RTKeyTypeAtom):
footerStrip = ';:'
    def __init__(self, src=u'', container=None):
'''An RTKey(RTAtom) defines both a change in KeySignature and a change
in the analyzed Key.
They are defined by ";:" after the Key.
>>> gminor = romanText.rtObjects.RTKey('g;:')
>>> gminor
<RTKey 'g;:'>
>>> gminor.getKey()
<music21.key.Key of g minor>
>>> bminor = romanText.rtObjects.RTKey('bb;:')
>>> bminor
<RTKey 'bb;:'>
>>> bminor.getKey()
<music21.key.Key of b- minor>
>>> bminor.getKeySignature()
<music21.key.KeySignature of 5 flats>
>>> eflatmajor = romanText.rtObjects.RTKey('Eb;:')
>>> eflatmajor
<RTKey 'Eb;:'>
>>> eflatmajor.getKey()
<music21.key.Key of E- major>
'''
super(RTKey, self).__init__(src, container)
def __repr__(self):
return '<RTKey %r>' % self.src
class RTAnalyticKey(RTKeyTypeAtom):
footerStrip = ':'
def __init__(self, src =u'', container=None):
'''An RTAnalyticKey(RTKeyTypeAtom) only defines a change in the key
        being analyzed. It does not in itself create a :class:`~music21.key.Key`
object.
>>> gminor = romanText.rtObjects.RTAnalyticKey('g:')
>>> gminor
<RTAnalyticKey 'g:'>
>>> gminor.getKey()
<music21.key.Key of g minor>
>>> bminor = romanText.rtObjects.RTAnalyticKey('bb:')
>>> bminor
<RTAnalyticKey 'bb:'>
>>> bminor.getKey()
<music21.key.Key of b- minor>
'''
super(RTAnalyticKey, self).__init__(src, container)
def __repr__(self):
return '<RTAnalyticKey %r>' % self.src
class RTKeySignature(RTAtom):
def __init__(self, src =u'', container=None):
'''An RTKeySignature(RTAtom) only defines a change in the KeySignature.
        It does not in itself create a :class:`~music21.key.Key` object, nor
does it change the analysis taking place.
The number after KS defines the number of sharps (negative for flats).
>>> gminor = romanText.rtObjects.RTKeySignature('KS-2')
>>> gminor
<RTKeySignature 'KS-2'>
>>> gminor.getKeySignature()
<music21.key.KeySignature of 2 flats>
>>> Amajor = romanText.rtObjects.RTKeySignature('KS3')
>>> Amajor.getKeySignature()
<music21.key.KeySignature of 3 sharps>
'''
super(RTKeySignature, self).__init__(src, container)
def __repr__(self):
return '<RTKeySignature %r>' % self.src
def getKeySignature(self):
numSharps = int(self.src[2:])
return key.KeySignature(numSharps)
class RTOpenParens(RTAtom):
'''
A simple open parenthesis Atom with a sensible default
>>> romanText.rtObjects.RTOpenParens('(')
<RTOpenParens '('>
'''
def __init__(self, src =u'(', container=None):
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTOpenParens %r>' % self.src
class RTCloseParens(RTAtom):
'''
A simple close parenthesis Atom with a sensible default
>>> romanText.rtObjects.RTCloseParens(')')
<RTCloseParens ')'>
'''
def __init__(self, src =u')', container=None):
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTCloseParens %r>' % self.src
class RTOptionalKeyOpen(RTAtom):
def __init__(self, src=u'', container=None):
'''
Marks the beginning of an optional Key area which does not
affect the roman numeral analysis. (For instance, it is
possible to analyze in Bb major, while remaining in g minor)
>>> possibleKey = romanText.rtObjects.RTOptionalKeyOpen('?(Bb:')
>>> possibleKey
<RTOptionalKeyOpen '?(Bb:'>
>>> possibleKey.getKey()
<music21.key.Key of B- major>
'''
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTOptionalKeyOpen %r>' % self.src
def getKey(self):
# alter flat symbol
if self.src == '?(b:':
return key.Key('b')
else:
keyStr = self.src.replace('b', '-')
keyStr = keyStr.replace(':', '')
keyStr = keyStr.replace('?', '')
keyStr = keyStr.replace('(', '')
#environLocal.printDebug(['create a key from:', keyStr])
return key.Key(keyStr)
class RTOptionalKeyClose(RTAtom):
def __init__(self, src=u'', container=None):
'''Marks the end of an optional Key area which does not affect the roman
numeral analysis.
        For example, it is possible to analyze in Bb major, while remaining in g
minor.
>>> possibleKey = romanText.rtObjects.RTOptionalKeyClose('?)Bb:')
>>> possibleKey
<RTOptionalKeyClose '?)Bb:'>
>>> possibleKey.getKey()
<music21.key.Key of B- major>
'''
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTOptionalKeyClose %r>' % self.src
def getKey(self):
# alter flat symbol
if self.src == '?)b:' or self.src == '?)b':
return key.Key('b')
else:
keyStr = self.src.replace('b', '-')
keyStr = keyStr.replace(':', '')
keyStr = keyStr.replace('?', '')
keyStr = keyStr.replace(')', '')
#environLocal.printDebug(['create a key from:', keyStr])
return key.Key(keyStr)
class RTPhraseMarker(RTAtom):
'''
A Phrase Marker:
>>> rtpm = romanText.rtObjects.RTPhraseMarker('')
>>> rtpm
<RTPhraseMarker ''>
'''
def __init__(self, src=u'', container=None):
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTPhraseMarker %r>' % self.src
class RTPhraseBoundary(RTPhraseMarker):
def __init__(self, src =u'||', container=None):
'''
>>> phrase = romanText.rtObjects.RTPhraseBoundary('||')
>>> phrase
<RTPhraseBoundary '||'>
'''
RTPhraseMarker.__init__(self, src, container)
def __repr__(self):
return '<RTPhraseBoundary %r>' % self.src
class RTEllisonStart(RTPhraseMarker):
def __init__(self, src =u'|*', container=None):
'''
>>> phrase = romanText.rtObjects.RTEllisonStart('|*')
>>> phrase
<RTEllisonStart '|*'>
'''
RTPhraseMarker.__init__(self, src, container)
def __repr__(self):
return '<RTEllisonStart %r>' % self.src
class RTEllisonStop(RTPhraseMarker):
    def __init__(self, src =u'*|', container=None):
'''
>>> phrase = romanText.rtObjects.RTEllisonStop('*|')
>>> phrase
<RTEllisonStop '*|'>
'''
RTPhraseMarker.__init__(self, src, container)
def __repr__(self):
return '<RTEllisonStop %r>' % self.src
class RTRepeat(RTAtom):
def __init__(self, src =u'', container=None):
'''
>>> repeat = romanText.rtObjects.RTRepeat('||:')
>>> repeat
<RTRepeat '||:'>
'''
RTAtom.__init__(self, src, container)
def __repr__(self):
return '<RTRepeat %r>' % self.src
class RTRepeatStart(RTRepeat):
def __init__(self, src =u'||:', container=None):
'''
>>> repeat = romanText.rtObjects.RTRepeatStart()
>>> repeat
<RTRepeatStart ...'||:'>
'''
RTRepeat.__init__(self, src, container)
def __repr__(self):
return '<RTRepeatStart %r>' % self.src
class RTRepeatStop(RTRepeat):
def __init__(self, src =u':||', container=None):
'''
>>> repeat = romanText.rtObjects.RTRepeatStop()
>>> repeat
<RTRepeatStop ...':||'>
'''
RTRepeat.__init__(self, src, container)
def __repr__(self):
return '<RTRepeatStop %r>' % self.src
#-------------------------------------------------------------------------------
class RTHandler(object):
# divide elements of a character stream into rtObjects and handle
# store in a list, and pass global information to components
def __init__(self):
        # tokens are RomanText rtObjects in a linear stream
# tokens are strongly divided between header and body, so can
# divide here
self._tokens = []
self.currentLineNumber = 0
def splitAtHeader(self, lines):
'''Divide string into header and non-header; this is done before
tokenization.
>>> rth = romanText.rtObjects.RTHandler()
>>> rth.splitAtHeader(['Title: s', 'Time Signature:', '', 'm1 g: i'])
(['Title: s', 'Time Signature:', ''], ['m1 g: i'])
'''
# iterate over lines and find the first measure definition
iStartBody = None
for i, l in enumerate(lines):
if reMeasureTag.match(l.strip()) is not None:
# found a measure definition
iStartBody = i
break
if iStartBody is None:
raise RomanTextException("Cannot find the first measure definition in this file. Dumping contextss: %s", lines)
return lines[:iStartBody], lines[iStartBody:]
def tokenizeHeader(self, lines):
'''In the header, we only have :class:`~music21.romanText.base.RTTagged`
        tokens. We can thus process these all as the same class.
'''
post = []
for i,l in enumerate(lines):
l = l.strip()
if l == '':
continue
# wrap each line in a header token
rtt = RTTagged(l)
rtt.lineNumber = i + 1
post.append(rtt)
self.currentLineNumber = len(lines) + 1
return post
def tokenizeBody(self, lines):
'''In the body, we may have measure, time signature, or note
        declarations, as well as possibly other tagged definitions.
'''
post = []
startLineNumber = self.currentLineNumber
for i,l in enumerate(lines):
currentLineNumber = startLineNumber + i
try:
l = l.strip()
if l == '':
continue
                # first, see if it is a measure definition; if not, then assume it is tagged data
if reMeasureTag.match(l) is not None:
rtm = RTMeasure(l)
rtm.lineNumber = currentLineNumber
                    # note: could place these in-line, after post
rtm.atoms = self.tokenizeAtoms(rtm.data, container=rtm)
for a in rtm.atoms:
a.lineNumber = currentLineNumber
post.append(rtm)
else:
# store items in a measure tag outside of the measure
rtt = RTTagged(l)
rtt.lineNumber = currentLineNumber
post.append(rtt)
except Exception:
import traceback
tracebackMessage = traceback.format_exc()
raise RTHandlerException("At line %d (%s) an exception was raised: \n%s" % (currentLineNumber, l, tracebackMessage))
return post
def tokenizeAtoms(self, line, container=None):
'''Given a line of data stored in measure consisting only of Atoms,
tokenize and return a list.
>>> rth = romanText.rtObjects.RTHandler()
>>> str(rth.tokenizeAtoms('IV b3 ii7 b4 ii'))
"[<RTChord 'IV'>, <RTBeat 'b3'>, <RTChord 'ii7'>, <RTBeat 'b4'>, <RTChord 'ii'>]"
>>> str(rth.tokenizeAtoms('V7 b2 V13 b3 V7 iio6/5[no5]'))
"[<RTChord 'V7'>, <RTBeat 'b2'>, <RTChord 'V13'>, <RTBeat 'b3'>, <RTChord 'V7'>, <RTChord 'iio6/5[no5]'>]"
>>> tokenList = rth.tokenizeAtoms('I b2 I b2.25 V/ii b2.5 bVII b2.75 V g: IV')
>>> str(tokenList)
"[<RTChord 'I'>, <RTBeat 'b2'>, <RTChord 'I'>, <RTBeat 'b2.25'>, <RTChord 'V/ii'>, <RTBeat 'b2.5'>, <RTChord 'bVII'>, <RTBeat 'b2.75'>, <RTChord 'V'>, <RTAnalyticKey 'g:'>, <RTChord 'IV'>]"
>>> tokenList[9].getKey()
<music21.key.Key of g minor>
>>> str(rth.tokenizeAtoms('= m3'))
'[]'
>>> tokenList = rth.tokenizeAtoms('g;: ||: V b2 ?(Bb: VII7 b3 III b4 ?)Bb: i :||')
>>> str(tokenList)
"[<RTKey 'g;:'>, <RTRepeatStart '||:'>, <RTChord 'V'>, <RTBeat 'b2'>, <RTOptionalKeyOpen '?(Bb:'>, <RTChord 'VII7'>, <RTBeat 'b3'>, <RTChord 'III'>, <RTBeat 'b4'>, <RTOptionalKeyClose '?)Bb:'>, <RTChord 'i'>, <RTRepeatStop ':||'>]"
'''
post = []
# break by spaces
for word in line.split(' '):
word = word.strip()
if word == '':
continue
elif word == '=':
# if an = is found, this is a copy definition, and no atoms here
break
elif word == '||':
post.append(RTPhraseBoundary(word, container))
elif word == '(':
post.append(RTOpenParens(word, container))
elif word == ')':
post.append(RTCloseParens(word, container))
elif reBeatAtom.match(word) is not None:
post.append(RTBeat(word, container))
# from here, all that is left is keys or chords
elif reOptKeyOpenAtom.match(word) is not None:
post.append(RTOptionalKeyOpen(word, container))
elif reOptKeyCloseAtom.match(word) is not None:
post.append(RTOptionalKeyClose(word, container))
elif reKeyAtom.match(word) is not None:
post.append(RTKey(word, container))
elif reAnalyticKeyAtom.match(word) is not None:
post.append(RTAnalyticKey(word, container))
elif reKeySignatureAtom.match(word) is not None:
post.append(RTKeySignature(word, container))
elif reRepeatStartAtom.match(word) is not None:
post.append(RTRepeatStart(word, container))
elif reRepeatStopAtom.match(word) is not None:
post.append(RTRepeatStop(word, container))
elif reNoChordAtom.match(word) is not None:
post.append(RTNoChord(word, container))
else: # only option is that it is a chord
post.append(RTChord(word, container))
return post
def tokenize(self, src):
'''
Walk the RT string, creating RT rtObjects along the way.
'''
# break into lines
lines = src.split('\n')
linesHeader, linesBody = self.splitAtHeader(lines)
#environLocal.printDebug([linesHeader])
self._tokens += self.tokenizeHeader(linesHeader)
self._tokens += self.tokenizeBody(linesBody)
def process(self, src):
'''
        Given an entire specification as a single source string, src, tokenize it.
This is usually provided in a file.
'''
self._tokens = []
self.tokenize(src)
def definesMovements(self, countRequired=2):
        '''Return True if more than one movement is defined in an RT file.
>>> rth = romanText.rtObjects.RTHandler()
>>> rth.process('Movement: 1 \\n Movement: 2 \\n \\n m1')
>>> rth.definesMovements()
True
>>> rth.process('Movement: 1 \\n m1')
>>> rth.definesMovements()
False
'''
if len(self._tokens) == 0:
raise RTHandlerException('must create tokens first')
count = 0
for t in self._tokens:
if t.isMovement():
count += 1
if count >= countRequired:
return True
return False
def definesMovement(self):
        '''Return True if this handler has one or more movements.
>>> rth = romanText.rtObjects.RTHandler()
>>> rth.process('Movement: 1 \\n \\n m1')
>>> rth.definesMovements()
False
>>> rth.definesMovement()
True
'''
return self.definesMovements(countRequired=1)
def splitByMovement(self, duplicateHeader=True):
'''If we have movements defined, return a list of RTHandler rtObjects,
representing header information and each movement, in order.
>>> rth = romanText.rtObjects.RTHandler()
>>> rth.process('Title: Test \\n Movement: 1 \\n m1 \\n Movement: 2 \\n m1')
>>> post = rth.splitByMovement(False)
>>> len(post)
3
>>> len(post[0])
1
>>> post[0].__class__
<class 'music21.romanText.rtObjects.RTHandler'>
>>> len(post[1]), len(post[2])
(2, 2)
>>> post = rth.splitByMovement(duplicateHeader=True)
>>> len(post)
2
>>> len(post[0]), len(post[1])
(3, 3)
'''
post = []
sub = []
for t in self._tokens:
if t.isMovement():
# when finding a movement, we are ending a previous
# and starting a new; this may just have metadata
rth = RTHandler()
rth.tokens = sub
post.append(rth)
sub = []
sub.append(t)
if len(sub) > 0:
rth = RTHandler()
rth.tokens = sub
post.append(rth)
if duplicateHeader:
alt = []
# if no movement in this first handler, assume it is header info
if not post[0].definesMovement():
handlerHead = post[0]
iStart = 1
else:
handlerHead = None
iStart = 0
for h in post[iStart:]:
if handlerHead is not None:
h = handlerHead + h # add metadata
alt.append(h)
# reassign
post = alt
return post
#---------------------------------------------------------------------------
# access tokens
def _getTokens(self):
if self._tokens == []:
raise RTHandlerException('must process tokens before calling split')
return self._tokens
def _setTokens(self, tokens):
'''Assign tokens to this Handler.
'''
self._tokens = tokens
tokens = property(_getTokens, _setTokens,
doc = '''Get or set tokens for this Handler.
''')
def __len__(self):
return len(self._tokens)
def __add__(self, other):
'''Return a new handler adding the tokens in both
'''
rth = self.__class__() # will get the same class type
rth.tokens = self._tokens + other._tokens
return rth
#-------------------------------------------------------------------------------
class RTFile(object):
'''
Roman Text File access.
'''
def __init__(self):
self.file = None
self.filename = None
def open(self, filename):
        '''Open a file for reading, trying a variety of encodings and then,
        if none succeeds, trying them again with decoding errors ignored.
'''
for encoding in ('utf-8', 'macintosh', 'latin-1', 'utf-16'):
try:
self.file = io.open(filename, encoding=encoding)
if self.file is not None:
break
except UnicodeDecodeError:
pass
if self.file is None:
for encoding in ('utf-8', 'macintosh', 'latin-1', 'utf-16', None):
try:
self.file = io.open(filename, encoding=encoding, errors='ignore')
if self.file is not None:
break
except UnicodeDecodeError:
pass
if self.file is None:
raise RomanTextException("Cannot parse file %s, possibly a broken codec?" % filename)
self.filename = filename
def openFileLike(self, fileLike):
'''Assign a file-like object, such as those provided by StringIO, as an
open file object.
'''
self.file = fileLike # already 'open'
def __repr__(self):
r = "<RTFile>"
return r
def close(self):
self.file.close()
def read(self):
        '''Read a file. Note that this calls readstr, which processes all tokens.
        '''
return self.readstr(self.file.read())
def readstr(self, strSrc):
        '''Read a string and process all Tokens. Returns an RTHandler instance.
'''
handler = RTHandler()
# return the handler instance
handler.process(strSrc)
return handler
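    # Illustrative sketch (not part of the original module): typical use of
    # RTFile with an in-memory RomanText string; the excerpt below is made up.
    #
    #   rtf = RTFile()
    #   handler = rtf.readstr('Composer: X\nTime Signature: 4/4\nm1 g: i b3 V')
    #   for token in handler.tokens:
    #       print(token)  # header RTTagged tokens first, then RTMeasure tokens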
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
def testBasicA(self):
from music21.romanText import testFiles
for fileStr in testFiles.ALL:
f = RTFile()
unused_rth = f.readstr(fileStr) # get a handler from a string
def testReA(self):
# gets the index of the end of the measure indication
g = reMeasureTag.match('m1 g: V b2 i')
self.assertEqual(g.end(), 2)
self.assertEqual(g.group(0), 'm1')
self.assertEqual(reMeasureTag.match('Time Signature: 2/2'), None)
g = reMeasureTag.match('m3-4=m1-2')
self.assertEqual(g.end(), 4)
self.assertEqual(g.start(), 0)
self.assertEqual(g.group(0), 'm3-4')
g = reMeasureTag.match('m123-432=m1120-24234')
self.assertEqual(g.group(0), 'm123-432')
g = reMeasureTag.match('m231a IV6 b4 C: V')
self.assertEqual(g.group(0), 'm231a')
g = reMeasureTag.match('m123b-432b=m1120a-24234a')
self.assertEqual(g.group(0), 'm123b-432b')
g = reNoteTag.match('Note: this is a note')
self.assertEqual(g.group(0), 'Note:')
g = reNoteTag.match('note: this is a note')
self.assertEqual(g.group(0), 'note:')
g = reMeasureTag.match('m231var1 IV6 b4 C: V')
self.assertEqual(g.group(0), 'm231')
# this only works if it starts the string
g = reVariant.match('var1 IV6 b4 C: V')
self.assertEqual(g.group(0), 'var1')
g = reAnalyticKeyAtom.match('Bb:')
self.assertEqual(g.group(0), 'Bb:')
g = reAnalyticKeyAtom.match('F#:')
self.assertEqual(g.group(0), 'F#:')
g = reAnalyticKeyAtom.match('f#:')
self.assertEqual(g.group(0), 'f#:')
g = reAnalyticKeyAtom.match('b:')
self.assertEqual(g.group(0), 'b:')
g = reAnalyticKeyAtom.match('bb:')
self.assertEqual(g.group(0), 'bb:')
g = reAnalyticKeyAtom.match('g:')
self.assertEqual(g.group(0), 'g:')
# beats do not have a colon
self.assertEqual(reKeyAtom.match('b2'), None)
self.assertEqual(reKeyAtom.match('b2.5'), None)
g = reBeatAtom.match('b2.5')
self.assertEqual(g.group(0), 'b2.5')
g = reBeatAtom.match('bVII')
self.assertEqual(g, None)
g = reBeatAtom.match('b1.66.5')
self.assertEqual(g.group(0), 'b1.66.5')
def testMeasureAttributeProcessing(self):
rtm = RTMeasure('m17var1 vi b2 IV b2.5 viio6/4 b3.5 I')
self.assertEqual(rtm.data, 'vi b2 IV b2.5 viio6/4 b3.5 I')
self.assertEqual(rtm.number, [17])
self.assertEqual(rtm.tag, 'm17')
self.assertEqual(rtm.variantNumber, 1)
rtm = RTMeasure('m17varC vi b2 IV b2.5 viio6/4 b3.5 I')
self.assertEqual(rtm.data, 'vi b2 IV b2.5 viio6/4 b3.5 I')
self.assertEqual(rtm.variantLetter, "C")
rtm = RTMeasure('m20 vi b2 ii6/5 b3 V b3.5 V7')
self.assertEqual(rtm.data, 'vi b2 ii6/5 b3 V b3.5 V7')
self.assertEqual(rtm.number, [20])
self.assertEqual(rtm.tag, 'm20')
self.assertEqual(rtm.variantNumber, None)
self.assertEqual(rtm.isCopyDefinition, False)
rtm = RTMeasure('m0 b3 G: I')
self.assertEqual(rtm.data, 'b3 G: I')
self.assertEqual(rtm.number, [0])
self.assertEqual(rtm.tag, 'm0')
self.assertEqual(rtm.variantNumber, None)
self.assertEqual(rtm.isCopyDefinition, False)
rtm = RTMeasure('m59 = m57')
self.assertEqual(rtm.data, '= m57')
self.assertEqual(rtm.number, [59])
self.assertEqual(rtm.tag, 'm59')
self.assertEqual(rtm.variantNumber, None)
self.assertEqual(rtm.isCopyDefinition, True)
rtm = RTMeasure('m3-4 = m1-2')
self.assertEqual(rtm.data, '= m1-2')
self.assertEqual(rtm.number, [3,4])
self.assertEqual(rtm.tag, 'm3-4')
self.assertEqual(rtm.variantNumber, None)
self.assertEqual(rtm.isCopyDefinition, True)
def testTokenDefinition(self):
# test that we are always getting the right number of tokens
from music21.romanText import testFiles
rth = RTHandler()
rth.process(testFiles.mozartK279)
count = 0
for t in rth._tokens:
if t.isMovement():
count += 1
self.assertEqual(count, 3)
rth.process(testFiles.riemenschneider001)
count = 0
for t in rth._tokens:
if t.isMeasure():
#print t.src
count += 1
# 21, 2 variants, and one pickup
self.assertEqual(count, 21+3)
count = 0
for t in rth._tokens:
if t.isMeasure():
for a in t.atoms:
if isinstance(a, RTAnalyticKey):
count += 1
self.assertEqual(count, 1)
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = []
if __name__ == "__main__":
# sys.arg test options will be used in mainTest()
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/music21/romanText/rtObjects.py
|
Python
|
mit
| 48,702 | 0.004312 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WebhookReceiver(Model):
"""A webhook receiver.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the webhook receiver. Names must be
unique across all receivers within an action group.
:type name: str
:param service_uri: Required. The URI where webhooks should be sent.
:type service_uri: str
"""
_validation = {
'name': {'required': True},
'service_uri': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'service_uri': {'key': 'serviceUri', 'type': 'str'},
}
def __init__(self, *, name: str, service_uri: str, **kwargs) -> None:
super(WebhookReceiver, self).__init__(**kwargs)
self.name = name
self.service_uri = service_uri
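# Illustrative sketch (not from the SDK source): constructing a receiver for an
# action group; the name and service URI below are placeholders, not real values.
#
#   receiver = WebhookReceiver(name='onAlert', service_uri='https://example.com/hook')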
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-monitor/azure/mgmt/monitor/models/webhook_receiver_py3.py
|
Python
|
mit
| 1,347 | 0 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_ExportData.ui'
#
# Created: Sat May 28 00:16:57 2011
# by: PyQt4 UI code generator 4.8.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ExportData(object):
def setupUi(self, ExportData):
ExportData.setObjectName(_fromUtf8("ExportData"))
ExportData.resize(354, 527)
self.verticalLayout_5 = QtGui.QVBoxLayout(ExportData)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.groupBox_2 = QtGui.QGroupBox(ExportData)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.fileName = QtGui.QLineEdit(self.groupBox_2)
self.fileName.setObjectName(_fromUtf8("fileName"))
self.gridLayout.addWidget(self.fileName, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.outputType = QtGui.QComboBox(self.groupBox_2)
self.outputType.setObjectName(_fromUtf8("outputType"))
self.outputType.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.outputType, 1, 1, 1, 2)
self.stackedWidget = QtGui.QStackedWidget(self.groupBox_2)
self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
self.delimitedStackedWidget = QtGui.QWidget()
self.delimitedStackedWidget.setObjectName(_fromUtf8("delimitedStackedWidget"))
self.gridLayout_2 = QtGui.QGridLayout(self.delimitedStackedWidget)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_3 = QtGui.QLabel(self.delimitedStackedWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.delimitedDelimiterGroupBox = QtGui.QGroupBox(self.delimitedStackedWidget)
self.delimitedDelimiterGroupBox.setTitle(_fromUtf8(""))
self.delimitedDelimiterGroupBox.setObjectName(_fromUtf8("delimitedDelimiterGroupBox"))
self.horizontalLayout = QtGui.QHBoxLayout(self.delimitedDelimiterGroupBox)
self.horizontalLayout.setContentsMargins(2, 0, 0, 0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.delimitedCommaRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
self.delimitedCommaRadio.setChecked(True)
self.delimitedCommaRadio.setObjectName(_fromUtf8("delimitedCommaRadio"))
self.delimiterButtonGroup = QtGui.QButtonGroup(ExportData)
self.delimiterButtonGroup.setObjectName(_fromUtf8("delimiterButtonGroup"))
self.delimiterButtonGroup.addButton(self.delimitedCommaRadio)
self.horizontalLayout.addWidget(self.delimitedCommaRadio)
self.delimitedTabRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
self.delimitedTabRadio.setObjectName(_fromUtf8("delimitedTabRadio"))
self.delimiterButtonGroup.addButton(self.delimitedTabRadio)
self.horizontalLayout.addWidget(self.delimitedTabRadio)
self.delimitedOtherRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
self.delimitedOtherRadio.setObjectName(_fromUtf8("delimitedOtherRadio"))
self.delimiterButtonGroup.addButton(self.delimitedOtherRadio)
self.horizontalLayout.addWidget(self.delimitedOtherRadio)
self.delimitedOtherDelimiter = QtGui.QLineEdit(self.delimitedDelimiterGroupBox)
self.delimitedOtherDelimiter.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.delimitedOtherDelimiter.sizePolicy().hasHeightForWidth())
self.delimitedOtherDelimiter.setSizePolicy(sizePolicy)
self.delimitedOtherDelimiter.setMaximumSize(QtCore.QSize(20, 16777215))
self.delimitedOtherDelimiter.setBaseSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(12)
self.delimitedOtherDelimiter.setFont(font)
self.delimitedOtherDelimiter.setMaxLength(1)
self.delimitedOtherDelimiter.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.delimitedOtherDelimiter.setObjectName(_fromUtf8("delimitedOtherDelimiter"))
self.horizontalLayout.addWidget(self.delimitedOtherDelimiter)
self.horizontalLayout.setStretch(0, 5)
self.horizontalLayout.setStretch(1, 5)
self.gridLayout_2.addWidget(self.delimitedDelimiterGroupBox, 0, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.delimitedStackedWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
self.delimitedDataDirectionGroupBox = QtGui.QGroupBox(self.delimitedStackedWidget)
self.delimitedDataDirectionGroupBox.setTitle(_fromUtf8(""))
self.delimitedDataDirectionGroupBox.setObjectName(_fromUtf8("delimitedDataDirectionGroupBox"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.delimitedDataDirectionGroupBox)
self.horizontalLayout_3.setContentsMargins(2, 0, 0, 0)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.dataDirectionColumns = QtGui.QRadioButton(self.delimitedDataDirectionGroupBox)
self.dataDirectionColumns.setChecked(True)
self.dataDirectionColumns.setObjectName(_fromUtf8("dataDirectionColumns"))
self.dataDirectionButtonGroup = QtGui.QButtonGroup(ExportData)
self.dataDirectionButtonGroup.setObjectName(_fromUtf8("dataDirectionButtonGroup"))
self.dataDirectionButtonGroup.addButton(self.dataDirectionColumns)
self.horizontalLayout_3.addWidget(self.dataDirectionColumns)
self.dataDirectionRows = QtGui.QRadioButton(self.delimitedDataDirectionGroupBox)
self.dataDirectionRows.setChecked(False)
self.dataDirectionRows.setObjectName(_fromUtf8("dataDirectionRows"))
self.dataDirectionButtonGroup.addButton(self.dataDirectionRows)
self.horizontalLayout_3.addWidget(self.dataDirectionRows)
self.gridLayout_2.addWidget(self.delimitedDataDirectionGroupBox, 1, 1, 1, 1)
self.stackedWidget.addWidget(self.delimitedStackedWidget)
self.page_2 = QtGui.QWidget()
self.page_2.setObjectName(_fromUtf8("page_2"))
self.stackedWidget.addWidget(self.page_2)
self.gridLayout.addWidget(self.stackedWidget, 2, 0, 1, 3)
self.fileNameButton = QtGui.QPushButton(self.groupBox_2)
self.fileNameButton.setObjectName(_fromUtf8("fileNameButton"))
self.gridLayout.addWidget(self.fileNameButton, 0, 2, 1, 1)
self.verticalLayout_5.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(ExportData)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_6 = QtGui.QLabel(self.groupBox_3)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.verticalLayout.addWidget(self.label_6)
self.allWavesListView = QtGui.QListView(self.groupBox_3)
self.allWavesListView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.allWavesListView.setObjectName(_fromUtf8("allWavesListView"))
self.verticalLayout.addWidget(self.allWavesListView)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.addWaveButton = QtGui.QPushButton(self.groupBox_3)
self.addWaveButton.setObjectName(_fromUtf8("addWaveButton"))
self.verticalLayout_2.addWidget(self.addWaveButton)
self.removeWaveButton = QtGui.QPushButton(self.groupBox_3)
self.removeWaveButton.setObjectName(_fromUtf8("removeWaveButton"))
self.verticalLayout_2.addWidget(self.removeWaveButton)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_5 = QtGui.QLabel(self.groupBox_3)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout_3.addWidget(self.label_5)
self.fileWavesListView = QtGui.QListView(self.groupBox_3)
self.fileWavesListView.setDragEnabled(True)
self.fileWavesListView.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.fileWavesListView.setDefaultDropAction(QtCore.Qt.MoveAction)
self.fileWavesListView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.fileWavesListView.setObjectName(_fromUtf8("fileWavesListView"))
self.verticalLayout_3.addWidget(self.fileWavesListView)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.verticalLayout_5.addWidget(self.groupBox_3)
self.groupBox_5 = QtGui.QGroupBox(ExportData)
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_5)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.exportDataButton = QtGui.QPushButton(self.groupBox_5)
self.exportDataButton.setObjectName(_fromUtf8("exportDataButton"))
self.verticalLayout_4.addWidget(self.exportDataButton)
self.verticalLayout_5.addWidget(self.groupBox_5)
self.retranslateUi(ExportData)
self.stackedWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.delimitedOtherRadio, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.delimitedOtherDelimiter.setEnabled)
QtCore.QObject.connect(self.outputType, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.stackedWidget.setCurrentIndex)
def retranslateUi(self, ExportData):
ExportData.setWindowTitle(QtGui.QApplication.translate("ExportData", "Export Data", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("ExportData", "Step 1 - File Options", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("ExportData", "File", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("ExportData", "Type", None, QtGui.QApplication.UnicodeUTF8))
self.outputType.setItemText(0, QtGui.QApplication.translate("ExportData", "Delimited", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("ExportData", "Delimiter", None, QtGui.QApplication.UnicodeUTF8))
self.delimitedCommaRadio.setText(QtGui.QApplication.translate("ExportData", "Comma", None, QtGui.QApplication.UnicodeUTF8))
self.delimitedTabRadio.setText(QtGui.QApplication.translate("ExportData", "Tab", None, QtGui.QApplication.UnicodeUTF8))
self.delimitedOtherRadio.setText(QtGui.QApplication.translate("ExportData", "Other", None, QtGui.QApplication.UnicodeUTF8))
self.delimitedOtherDelimiter.setText(QtGui.QApplication.translate("ExportData", ",", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("ExportData", "Data as", None, QtGui.QApplication.UnicodeUTF8))
self.dataDirectionColumns.setText(QtGui.QApplication.translate("ExportData", "Columns", None, QtGui.QApplication.UnicodeUTF8))
self.dataDirectionRows.setText(QtGui.QApplication.translate("ExportData", "Rows", None, QtGui.QApplication.UnicodeUTF8))
self.fileNameButton.setText(QtGui.QApplication.translate("ExportData", "Select...", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("ExportData", "Step 2 - Select Data", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("ExportData", "All Waves", None, QtGui.QApplication.UnicodeUTF8))
self.addWaveButton.setText(QtGui.QApplication.translate("ExportData", "Add -->", None, QtGui.QApplication.UnicodeUTF8))
self.removeWaveButton.setText(QtGui.QApplication.translate("ExportData", "<-- Remove", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("ExportData", "Waves to Export", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_5.setTitle(QtGui.QApplication.translate("ExportData", "Step 3 - Export", None, QtGui.QApplication.UnicodeUTF8))
self.exportDataButton.setText(QtGui.QApplication.translate("ExportData", "Export Data", None, QtGui.QApplication.UnicodeUTF8))
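# Illustrative sketch (hand-written, not generated by pyuic4): the class above is
# normally applied to a widget from application code, for example:
#
#   dialog = QtGui.QDialog()
#   ui = Ui_ExportData()
#   ui.setupUi(dialog)
#   dialog.show()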
|
bbreslauer/PySciPlot
|
src/ui/Ui_ExportData.py
|
Python
|
gpl-3.0
| 13,612 | 0.004187 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import osv
from openerp.report import report_sxw
def titlize(journal_name):
words = journal_name.split()
while words.pop() != 'journal':
continue
return ' '.join(words)
class order(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(order, self).__init__(cr, uid, name, context=context)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
partner = user.company_id.partner_id
self.localcontext.update({
'time': time,
'disc': self.discount,
'net': self.netamount,
'get_journal_amt': self._get_journal_amt,
'address': partner or False,
'titlize': titlize
})
def netamount(self, order_line_id):
sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
self.cr.execute(sql, (order_line_id,))
res = self.cr.fetchone()
return res[0]
def discount(self, order_id):
sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s '
self.cr.execute(sql, (order_id,))
res = self.cr.fetchall()
        dsum = 0
        for line in res:
            if line[0] != 0:
                # line is (discount, price_unit, qty): accumulate the discounted
                # amount, i.e. qty * (discount% of the unit price)
                dsum = dsum + (line[2] * (line[0] * line[1] / 100))
        return dsum
def _get_journal_amt(self, order_id):
data={}
sql = """ select aj.name,absl.amount as amt from account_bank_statement as abs
LEFT JOIN account_bank_statement_line as absl ON abs.id = absl.statement_id
LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
WHERE absl.pos_statement_id =%d"""%(order_id)
self.cr.execute(sql)
data = self.cr.dictfetchall()
return data
class report_order_receipt(osv.AbstractModel):
_name = 'report.point_of_sale.report_receipt'
_inherit = 'report.abstract_report'
_template = 'point_of_sale.report_receipt'
_wrapped_report_class = order
|
vileopratama/vitech
|
src/addons/point_of_sale/report/pos_receipt.py
|
Python
|
mit
| 2,154 | 0.003714 |
def Tmin(arg0, *args):
_min = arg0
for arg in args:
if arg < _min:
_min = arg
return _min
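# Example usage: Tmin(3, 1, 2) returns 1; Tmin('b', 'a') returns 'a'.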
|
Oreder/PythonSelfStudy
|
TestModule/Tmin.py
|
Python
|
mit
| 122 | 0 |
__author__ = 'mdavid'
from attrdict import AttrDict
import os
# Addressimo Configuration
config = AttrDict()
# General Setup
config.site_url = 'addressimo.netki.com'
config.cache_loader_process_pool_size = 4
config.cache_loader_blocktx_pool_size = 15
config.bip32_enabled = True
config.bip70_enabled = True
config.bip70_default_amount = 0
config.bip70_default_expiration = 900
config.bip72_compatability = True
config.bip70_audit_log = True
config.bip70_payment_expiration_days = 61
config.ir_expiration_days = 30
config.rpr_expiration_days = 16
config.ir_nonce_allowable = 5
config.ir_nonce_db_maxkeys = 100000000
config.old_nonce_cleanup_size = 1000
config.paymentprotocol_message_expiration_days = 7
# Operational Modes
config.store_and_forward_only = True
# Presigned Payment Request
config.presigned_pr_limit = 100
# Path Configuration
config.home_dir = '/Users/frank/PycharmProjects/addressimo/addressimo'
config.plugin_directories = [
'logger',
'resolvers',
'signer'
]
redis_uri = 'redis://localhost:6379'
if 'ADDRESSIMO_REDIS_URI' in os.environ:
redis_uri = os.environ['ADDRESSIMO_REDIS_URI']
# Redis Setup
config.redis_id_obj_uri = '%s/1' % redis_uri
config.redis_tx_map_uri = '%s/2' % redis_uri
config.redis_tx_uri = '%s/3' % redis_uri
config.redis_pr_store = '%s/3' % redis_uri
config.redis_payment_store = '%s/4' % redis_uri
config.redis_logdb_uri = '%s/6' % redis_uri
config.redis_address_branch_uri = '%s/13' % redis_uri
config.redis_addr_cache_uri = '%s/14' % redis_uri
config.redis_ratelimit_uri = '%s/15' % redis_uri
# Object Configuration
config.resolver_type = 'REDIS'
config.signer_type = 'LOCAL'
# Logging Plugin Setup
config.logger_type = 'LOCAL'
config.logger_api_endpoint = 'https://auditor.mydomain.com/log'
# Bitcoin Setup
config.bitcoin_user = 'bitcoinrpc'
config.bitcoin_pass = '03fd3f1cba637e40e984611b50bed238'
config.cache_blockheight_threshold = 2
config.payment_submit_tx_retries = 5
# Admin public key for authenticating signatures for signed requests to get_branches endpoint (hex encoded).
# That endpoint is used for HD wallets to retrieve which branches Addressimo has served addresses for
config.admin_public_key = 'ac79cd6b0ac5f2a6234996595cb2d91fceaa0b9d9a6495f12f1161c074587bd19ae86928bddea635c930c09ea9c7de1a6a9c468f9afd18fbaeed45d09564ded6'
#config.signer_api_endpoint = 'https://signer.mydomain.com/sign'
|
netkicorp/addressimo
|
addressimo/config.py
|
Python
|
bsd-3-clause
| 2,378 | 0.002103 |
#!/usr/bin/env python
from cogent.app.util import CommandLineApplication,\
CommandLineAppResult, ResultPath
from cogent.app.parameters import Parameter,ValuedParameter,Parameters
__author__ = "Shandy Wikman"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__contributors__ = ["Shandy Wikman"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Shandy Wikman"
__email__ = "ens01svn@cs.umu.se"
__status__ = "Development"
class ILM(CommandLineApplication):
"""Application controller ILM application
Predict a secondary structure given a score matrix
Main options:
-L l: minimum loop length (default=3)
-V v: minimum virtual loop length (default=3)
-H h: minimum helix length (default=3)
-N n: number of helices selected per iteration (default=1)
-I i: number of iterations before termination(default=unlimited)
"""
_parameters = {
'-L':ValuedParameter(Prefix='-',Name='L',Delimiter=' '),
'-V':ValuedParameter(Prefix='-',Name='V',Delimiter=' '),
'-H':ValuedParameter(Prefix='-',Name='H',Delimiter=' '),
'-N':ValuedParameter(Prefix='-',Name='N',Delimiter=' '),
'-I':ValuedParameter(Prefix='-',Name='I',Delimiter=' ')}
_command = 'ilm'
_input_handler = '_input_as_string'
class hlxplot(CommandLineApplication):
"""Application controller hlxplot application
Compute a helix plot score matrix from a sequence alignment
Options:
-b B: Set bad pair penalty to B
(Default = 2)
-g G: Set good pair score to G
(Default = 1)
-h H: Set minimum helix length to H
(Default = 2)
-l L: Set minimum loop length to L
(Default = 3)
-s S: Set helix length score to S
(Default = 2.0)
-t : Write output in text format
(Default = Binary format)
-x X: Set paired gap penalty to X
(Default = 3)
"""
_parameters = {
'-b':ValuedParameter(Prefix='-',Name='b',Delimiter=' '),
'-g':ValuedParameter(Prefix='-',Name='g',Delimiter=' '),
'-h':ValuedParameter(Prefix='-',Name='h',Delimiter=' '),
'-l':ValuedParameter(Prefix='-',Name='l',Delimiter=' '),
'-s':ValuedParameter(Prefix='-',Name='s',Delimiter=' '),
'-t':ValuedParameter(Prefix='-',Name='t',Delimiter=' '),
'-x':ValuedParameter(Prefix='-',Name='x',Delimiter=' ')}
_command = 'hlxplot'
_input_handler = '_input_as_string'
class xhlxplot(CommandLineApplication):
"""Application controller xhlxplot application
Compute an extended helix plot score matrix from a single sequence
Options:
-b B: Set bad pair penalty to B
(Default = 200)
-h H: Set minimum helix length to H
(Default = 2)
-l L: Set minimum loop length to L
(Default = 3)
-x X: Set paired gap penalty to X
(Default = 500)
-t : Write output in text format
(Default = Binary format)
-c : No Closing GU
(Default = allows closing GU)
"""
_parameters = {
'-b':ValuedParameter(Prefix='-',Name='b',Delimiter=' '),
'-h':ValuedParameter(Prefix='-',Name='h',Delimiter=' '),
'-l':ValuedParameter(Prefix='-',Name='l',Delimiter=' '),
'-x':ValuedParameter(Prefix='-',Name='x',Delimiter=' '),
'-t':ValuedParameter(Prefix='-',Name='t',Delimiter=' '),
'-c':ValuedParameter(Prefix='-',Name='c',Delimiter=' ')}
_command = 'xhlxplot'
_input_handler = '_input_as_string'
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/cogent/app/ilm.py
|
Python
|
mit
| 3,567 | 0.017101 |
../common/cgi_runtests.py
|
ankurjimmy/catawampus
|
tr/vendor/tornado/maint/appengine/py27/cgi_runtests.py
|
Python
|
apache-2.0
| 25 | 0.04 |