code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
---|---|---|
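Each row below pairs a source snippet (`code`) with the fully qualified APIs it calls (`apis`) and a list of per-call extraction records (`extract_api`). Judging from the rows, each record holds the call's character span, the resolved API name, the callee as written, its positional and keyword arguments, the argument source text and span, a flag, and the import statement that binds the name. The following is a minimal sketch of how such a record could be decoded; the field interpretation is an assumption drawn from this preview, not a documented schema.

```python
import ast

# One record copied from the first row below; extract_api is a Python
# literal, so ast.literal_eval can decode it without executing anything.
raw = ("[((497, 510), 'subprocess.run', 'run', (['commands'], {}), "
       "'(commands)\\n', (500, 510), False, 'from subprocess import run\\n')]")

for record in ast.literal_eval(raw):
    (start, end), api_name, callee = record[0], record[1], record[2]
    # Assumed meaning: api_name is the fully qualified API, callee is the
    # name used at the call site, and start/end index into the `code` column.
    print(f"{api_name} called as {callee}() at offsets {start}-{end}")
```

Note that the spans refer to the original source string stored in the row, so they may not line up exactly with the whitespace-stripped preview shown here.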
from subprocess import run
# python -u val_resnet.py
cuda = 0 # which gpu to use
dataset = 'cifar10'
logs_path = 'logs_resnet' + '_' + dataset
manualSeed = 99
workers = 0
for model in ['resnet20', 'preact_resnet20']:
    commands = [
        'python', '-u', 'validate_resnet.py',
        '--dataset=' + dataset,
        '--model=' + model,
        '-c=' + str(cuda),
        '--workers=' + str(workers),
        '--manualSeed=' + str(manualSeed),
        '--logs_path=' + logs_path,
    ]
    run(commands)
for model in ['resnet20', 'preact_resnet20']:
    f = True
    for k in [1, 3]:
        for ff in [False, True]:
            commands = [
                'python', '-u', 'validate_resnet.py',
                '--dataset=' + dataset,
                '--model=' + model,
                '-k=' + str(k),
                '-c=' + str(cuda),
                '--workers=' + str(workers),
                '--manualSeed=' + str(manualSeed),
                '--logs_path=' + logs_path,
            ]
            if f: commands.append('-f')
            if ff: commands.append('--ff')
            run(commands)
|
[
"subprocess.run"
] |
[((497, 510), 'subprocess.run', 'run', (['commands'], {}), '(commands)\n', (500, 510), False, 'from subprocess import run\n'), ((1096, 1109), 'subprocess.run', 'run', (['commands'], {}), '(commands)\n', (1099, 1109), False, 'from subprocess import run\n')]
|
from akagi.data_source import DataSource
from akagi.data_file import DataFile
class SpreadsheetDataSource(DataSource):
    '''SpreadsheetDataSource represents data on Google Spreadsheets
    '''
    def __init__(self, sheet_id, sheet_range='A:Z', no_cache=False):
        self._sheet_id = sheet_id
        self._sheet_range = sheet_range
    @property
    def data_files(self):
        return [DataFile.spreadsheet(self._sheet_id, self._sheet_range)]
|
[
"akagi.data_file.DataFile.spreadsheet"
] |
[((396, 451), 'akagi.data_file.DataFile.spreadsheet', 'DataFile.spreadsheet', (['self._sheet_id', 'self._sheet_range'], {}), '(self._sheet_id, self._sheet_range)\n', (416, 451), False, 'from akagi.data_file import DataFile\n')]
|
"""
File: my_drawing.py
Name: 黃科諺
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
def main():
"""
Meet Snorlax (卡比獸) of stanCode! He dreams of Python when he sleeps. Be like Snorlax.
"""
window = GWindow(width=300, height=300)
face_outer = GOval(120, 75, x=(window.width-120)/2, y=50)
face_outer.filled = True
face_outer.fill_color = 'darkcyan'
face_outer.color = 'darkcyan'
window.add(face_outer)
face_inner = GOval(100, 65, x=(window.width-100)/2, y=60)
face_inner.filled = True
face_inner.fill_color = 'lightsalmon'
face_inner.color = 'lightsalmon'
window.add(face_inner)
forehead = GPolygon()
forehead.add_vertex((135, 60))
forehead.add_vertex((165, 60))
forehead.add_vertex((150, 68))
forehead.filled = True
forehead.fill_color = 'darkcyan'
forehead.color = 'darkcyan'
window.add(forehead)
r_ear = GPolygon()
r_ear.add_vertex((113, 35))
r_ear.add_vertex((95, 75))
r_ear.add_vertex((140, 50))
r_ear.filled = True
r_ear.fill_color = 'darkcyan'
r_ear.color = 'darkcyan'
window.add(r_ear)
l_ear = GPolygon()
l_ear.add_vertex((187, 35))
l_ear.add_vertex((205, 75))
l_ear.add_vertex((160, 50))
l_ear.filled = True
l_ear.fill_color = 'darkcyan'
l_ear.color = 'darkcyan'
window.add(l_ear)
r_eye = GLine(120, 75, 140, 75)
window.add(r_eye)
l_eye = GLine(180, 75, 160, 75)
window.add(l_eye)
mouth = GLine(135, 85, 165, 85)
window.add(mouth)
r_tooth = GPolygon()
r_tooth.add_vertex((135, 84))
r_tooth.add_vertex((139, 84))
r_tooth.add_vertex((137, 80))
r_tooth.filled = True
r_tooth.fill_color = 'white'
r_tooth.color = 'white'
window.add(r_tooth)
l_tooth = GPolygon()
l_tooth.add_vertex((165, 84))
l_tooth.add_vertex((161, 84))
l_tooth.add_vertex((163, 80))
l_tooth.filled = True
l_tooth.fill_color = 'white'
l_tooth.color = 'white'
window.add(l_tooth)
r_arm = GOval(100, 45, x=25, y=98)
r_arm.filled = True
r_arm.fill_color = 'darkcyan'
r_arm.color = 'darkcyan'
window.add(r_arm)
l_arm = GOval(100, 45, x=175, y=98)
l_arm.filled = True
l_arm.fill_color = 'darkcyan'
l_arm.color = 'darkcyan'
window.add(l_arm)
body = GOval(200, 160, x=(window.width - 200) / 2, y=95)
body.filled = True
body.fill_color = 'darkcyan'
body.color = 'darkcyan'
window.add(body)
belly = GOval(176, 120, x=(window.width - 176) / 2, y=95)
belly.filled = True
belly.fill_color = 'lightsalmon'
window.add(belly)
r_claw1 = GPolygon()
r_claw1.add_vertex((38, 100))
r_claw1.add_vertex((44, 102))
r_claw1.add_vertex((40, 106))
r_claw1.filled = True
r_claw1.fill_color = 'white'
window.add(r_claw1)
r_claw2 = GPolygon()
r_claw2.add_vertex((32, 102))
r_claw2.add_vertex((38, 104))
r_claw2.add_vertex((35, 108))
r_claw2.filled = True
r_claw2.fill_color = 'white'
window.add(r_claw2)
r_claw3 = GPolygon()
r_claw3.add_vertex((28, 104))
r_claw3.add_vertex((34, 106))
r_claw3.add_vertex((31, 110))
r_claw3.filled = True
r_claw3.fill_color = 'white'
window.add(r_claw3)
r_claw4 = GPolygon()
r_claw4.add_vertex((24, 109))
r_claw4.add_vertex((30, 111))
r_claw4.add_vertex((27, 115))
r_claw4.filled = True
r_claw4.fill_color = 'white'
window.add(r_claw4)
r_claw5 = GPolygon()
r_claw5.add_vertex((19, 122))
r_claw5.add_vertex((25, 121))
r_claw5.add_vertex((28, 127))
r_claw5.filled = True
r_claw5.fill_color = 'white'
window.add(r_claw5)
l_claw1 = GPolygon()
l_claw1.add_vertex((262, 100))
l_claw1.add_vertex((256, 102))
l_claw1.add_vertex((260, 106))
l_claw1.filled = True
l_claw1.fill_color = 'white'
window.add(l_claw1)
l_claw2 = GPolygon()
l_claw2.add_vertex((268, 102))
l_claw2.add_vertex((262, 104))
l_claw2.add_vertex((265, 108))
l_claw2.filled = True
l_claw2.fill_color = 'white'
window.add(l_claw2)
l_claw3 = GPolygon()
l_claw3.add_vertex((272, 104))
l_claw3.add_vertex((266, 106))
l_claw3.add_vertex((269, 110))
l_claw3.filled = True
l_claw3.fill_color = 'white'
window.add(l_claw3)
l_claw4 = GPolygon()
l_claw4.add_vertex((276, 109))
l_claw4.add_vertex((270, 111))
l_claw4.add_vertex((273, 115))
l_claw4.filled = True
l_claw4.fill_color = 'white'
window.add(l_claw4)
l_claw5 = GPolygon()
l_claw5.add_vertex((281, 122))
l_claw5.add_vertex((275, 121))
l_claw5.add_vertex((272, 127))
l_claw5.filled = True
l_claw5.fill_color = 'white'
window.add(l_claw5)
r_foot = GOval(65, 60, x=50, y=220)
r_foot.filled = True
r_foot.fill_color = 'lightsalmon'
r_foot.color = 'lightsalmon'
window.add(r_foot)
r_palm = GOval(45, 40, x=65, y=235)
r_palm.filled = True
r_palm.fill_color = 'Chocolate'
r_palm.color = 'Chocolate'
window.add(r_palm)
r_nail1 = GPolygon()
r_nail1.add_vertex((80, 210))
r_nail1.add_vertex((88, 223))
r_nail1.add_vertex((78, 224))
r_nail1.filled = True
r_nail1.fill_color = 'white'
window.add(r_nail1)
r_nail2 = GPolygon()
r_nail2.add_vertex((52, 220))
r_nail2.add_vertex((65, 228))
r_nail2.add_vertex((57, 235))
r_nail2.filled = True
r_nail2.fill_color = 'white'
window.add(r_nail2)
r_nail3 = GPolygon()
r_nail3.add_vertex((43, 250))
r_nail3.add_vertex((54, 248))
r_nail3.add_vertex((52, 258))
r_nail3.filled = True
r_nail3.fill_color = 'white'
window.add(r_nail3)
l_foot = GOval(65, 60, x=185, y=220)
l_foot.filled = True
l_foot.fill_color = 'lightsalmon'
l_foot.color = 'lightsalmon'
window.add(l_foot)
l_palm = GOval(45, 40, x=190, y=235)
l_palm.filled = True
l_palm.fill_color = 'Chocolate'
l_palm.color = 'Chocolate'
window.add(l_palm)
l_nail1 = GPolygon()
l_nail1.add_vertex((220, 210))
l_nail1.add_vertex((212, 223))
l_nail1.add_vertex((222, 224))
l_nail1.filled = True
l_nail1.fill_color = 'white'
window.add(l_nail1)
l_nail2 = GPolygon()
l_nail2.add_vertex((248, 220))
l_nail2.add_vertex((235, 228))
l_nail2.add_vertex((243, 235))
l_nail2.filled = True
l_nail2.fill_color = 'white'
window.add(l_nail2)
l_nail3 = GPolygon()
l_nail3.add_vertex((257, 250))
l_nail3.add_vertex((246, 248))
l_nail3.add_vertex((248, 258))
l_nail3.filled = True
l_nail3.fill_color = 'white'
window.add(l_nail3)
word = GLabel('stanCode', x=123, y=185)
word.font = '-8-bold'
window.add(word)
bubble1 = GOval(10, 10, x=140, y=35)
window.add(bubble1)
bubble2 = GOval(15, 15, x=155, y=23)
window.add(bubble2)
bubble3 = GOval(20, 20, x=175, y=12)
window.add(bubble3)
bubble4 = GOval(95, 85, x=200, y=5)
window.add(bubble4)
word2 = GLabel('Python', x=207, y=50)
word2.font = 'Courier-18'
window.add(word2)
word3 = GLabel('Python', x=220, y=80)
word3.font = 'Courier-13'
window.add(word3)
word4 = GLabel('Python', x=242, y=60)
word4.font = 'Courier-8'
window.add(word4)
if __name__ == '__main__':
main()
|
[
"campy.graphics.gobjects.GPolygon",
"campy.graphics.gobjects.GLabel",
"campy.graphics.gwindow.GWindow",
"campy.graphics.gobjects.GLine",
"campy.graphics.gobjects.GOval"
] |
[((323, 353), 'campy.graphics.gwindow.GWindow', 'GWindow', ([], {'width': '(300)', 'height': '(300)'}), '(width=300, height=300)\n', (330, 353), False, 'from campy.graphics.gwindow import GWindow\n'), ((371, 419), 'campy.graphics.gobjects.GOval', 'GOval', (['(120)', '(75)'], {'x': '((window.width - 120) / 2)', 'y': '(50)'}), '(120, 75, x=(window.width - 120) / 2, y=50)\n', (376, 419), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((562, 610), 'campy.graphics.gobjects.GOval', 'GOval', (['(100)', '(65)'], {'x': '((window.width - 100) / 2)', 'y': '(60)'}), '(100, 65, x=(window.width - 100) / 2, y=60)\n', (567, 610), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((757, 767), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (765, 767), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1006, 1016), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (1014, 1016), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1233, 1243), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (1241, 1243), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1461, 1484), 'campy.graphics.gobjects.GLine', 'GLine', (['(120)', '(75)', '(140)', '(75)'], {}), '(120, 75, 140, 75)\n', (1466, 1484), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1520, 1543), 'campy.graphics.gobjects.GLine', 'GLine', (['(180)', '(75)', '(160)', '(75)'], {}), '(180, 75, 160, 75)\n', (1525, 1543), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1578, 1601), 'campy.graphics.gobjects.GLine', 'GLine', (['(135)', '(85)', '(165)', '(85)'], {}), '(135, 85, 165, 85)\n', (1583, 1601), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1638, 1648), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (1646, 1648), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((1876, 1886), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (1884, 1886), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2112, 2138), 'campy.graphics.gobjects.GOval', 'GOval', (['(100)', '(45)'], {'x': '(25)', 'y': '(98)'}), '(100, 45, x=25, y=98)\n', (2117, 2138), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2260, 2287), 'campy.graphics.gobjects.GOval', 'GOval', (['(100)', '(45)'], {'x': '(175)', 'y': '(98)'}), '(100, 45, x=175, y=98)\n', (2265, 2287), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2408, 2457), 'campy.graphics.gobjects.GOval', 'GOval', (['(200)', '(160)'], {'x': '((window.width - 200) / 2)', 'y': '(95)'}), '(200, 160, x=(window.width - 200) / 2, y=95)\n', (2413, 2457), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2575, 2624), 'campy.graphics.gobjects.GOval', 'GOval', (['(176)', '(120)'], {'x': '((window.width - 176) / 2)', 'y': '(95)'}), '(176, 120, x=(window.width - 176) / 2, y=95)\n', (2580, 2624), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2722, 2732), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (2730, 2732), False, 'from 
campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((2932, 2942), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (2940, 2942), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((3142, 3152), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (3150, 3152), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((3352, 3362), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (3360, 3362), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((3562, 3572), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (3570, 3572), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((3772, 3782), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (3780, 3782), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((3985, 3995), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (3993, 3995), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((4198, 4208), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (4206, 4208), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((4411, 4421), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (4419, 4421), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((4624, 4634), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (4632, 4634), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((4836, 4862), 'campy.graphics.gobjects.GOval', 'GOval', (['(65)', '(60)'], {'x': '(50)', 'y': '(220)'}), '(65, 60, x=50, y=220)\n', (4841, 4862), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((4995, 5021), 'campy.graphics.gobjects.GOval', 'GOval', (['(45)', '(40)'], {'x': '(65)', 'y': '(235)'}), '(45, 40, x=65, y=235)\n', (5000, 5021), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((5151, 5161), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (5159, 5161), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((5361, 5371), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (5369, 5371), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((5571, 5581), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (5579, 5581), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((5780, 5807), 'campy.graphics.gobjects.GOval', 'GOval', (['(65)', '(60)'], {'x': '(185)', 'y': '(220)'}), '(65, 60, x=185, y=220)\n', (5785, 5807), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((5940, 5967), 'campy.graphics.gobjects.GOval', 'GOval', (['(45)', '(40)'], {'x': '(190)', 'y': '(235)'}), '(45, 40, x=190, y=235)\n', (5945, 5967), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6097, 6107), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (6105, 6107), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6310, 6320), 'campy.graphics.gobjects.GPolygon', 'GPolygon', 
([], {}), '()\n', (6318, 6320), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6523, 6533), 'campy.graphics.gobjects.GPolygon', 'GPolygon', ([], {}), '()\n', (6531, 6533), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6733, 6765), 'campy.graphics.gobjects.GLabel', 'GLabel', (['"""stanCode"""'], {'x': '(123)', 'y': '(185)'}), "('stanCode', x=123, y=185)\n", (6739, 6765), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6827, 6853), 'campy.graphics.gobjects.GOval', 'GOval', (['(10)', '(10)'], {'x': '(140)', 'y': '(35)'}), '(10, 10, x=140, y=35)\n', (6832, 6853), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6892, 6918), 'campy.graphics.gobjects.GOval', 'GOval', (['(15)', '(15)'], {'x': '(155)', 'y': '(23)'}), '(15, 15, x=155, y=23)\n', (6897, 6918), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((6957, 6983), 'campy.graphics.gobjects.GOval', 'GOval', (['(20)', '(20)'], {'x': '(175)', 'y': '(12)'}), '(20, 20, x=175, y=12)\n', (6962, 6983), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((7022, 7047), 'campy.graphics.gobjects.GOval', 'GOval', (['(95)', '(85)'], {'x': '(200)', 'y': '(5)'}), '(95, 85, x=200, y=5)\n', (7027, 7047), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((7084, 7113), 'campy.graphics.gobjects.GLabel', 'GLabel', (['"""Python"""'], {'x': '(207)', 'y': '(50)'}), "('Python', x=207, y=50)\n", (7090, 7113), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((7178, 7207), 'campy.graphics.gobjects.GLabel', 'GLabel', (['"""Python"""'], {'x': '(220)', 'y': '(80)'}), "('Python', x=220, y=80)\n", (7184, 7207), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n'), ((7272, 7301), 'campy.graphics.gobjects.GLabel', 'GLabel', (['"""Python"""'], {'x': '(242)', 'y': '(60)'}), "('Python', x=242, y=60)\n", (7278, 7301), False, 'from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc\n')]
|
from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
    AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
    return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
    if not isinstance(doc['value'], bool):
        raise ValidationException('Auto-compute hash setting must be true or false.')
|
[
"girder.exceptions.ValidationException",
"girder.utility.setting_utilities.default",
"girder.utility.setting_utilities.validator"
] |
[((173, 227), 'girder.utility.setting_utilities.default', 'setting_utilities.default', (['PluginSettings.AUTO_COMPUTE'], {}), '(PluginSettings.AUTO_COMPUTE)\n', (198, 227), False, 'from girder.utility import setting_utilities\n'), ((275, 331), 'girder.utility.setting_utilities.validator', 'setting_utilities.validator', (['PluginSettings.AUTO_COMPUTE'], {}), '(PluginSettings.AUTO_COMPUTE)\n', (302, 331), False, 'from girder.utility import setting_utilities\n'), ((420, 491), 'girder.exceptions.ValidationException', 'ValidationException', (['"""Auto-compute hash setting must be true or false."""'], {}), "('Auto-compute hash setting must be true or false.')\n", (439, 491), False, 'from girder.exceptions import ValidationException\n')]
|
from collections import defaultdict, namedtuple
from dataclasses import dataclass
import distutils.util
import functools
import itertools
import json
import math
import operator
import os
import random
import uuid
import shutil
import logging
import time
from typing import List, Dict, NamedTuple, Optional
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, get_list_or_404
from django.conf import settings
from google.cloud import storage
from rest_framework.decorators import api_view
import requests
from expiringdict import ExpiringDict
from .models import (
Dataset,
DatasetItem,
Category,
Mode,
User,
Annotation,
DNNModel,
CategoryCount,
)
BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"]
logger = logging.getLogger(__name__)
@api_view(["POST"])
@csrf_exempt
def start_cluster(request):
# TODO(mihirg): Remove this setting from Django; it's now managed by Terraform
# (or figure out how to set it from the frontend if we need that)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_cluster",
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"cluster_id": response_data["cluster_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_cluster_status(request, cluster_id):
params = {"cluster_id": cluster_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/cluster_status", params=params
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_cluster(request, cluster_id):
params = {"cluster_id": cluster_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_cluster",
json=params,
)
return JsonResponse(
{
"status": "success",
}
)
@api_view(["POST"])
@csrf_exempt
def create_model(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_name = payload["model_name"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
pos_tags = parse_tag_set_from_query_v2(payload["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(payload["neg_tags"])
val_pos_tags = parse_tag_set_from_query_v2(payload["val_pos_tags"])
val_neg_tags = parse_tag_set_from_query_v2(payload["val_neg_tags"])
augment_negs = bool(payload["augment_negs"])
model_kwargs = payload["model_kwargs"]
resume_model_id = payload.get("resume", None)
dataset = get_object_or_404(Dataset, name=dataset_name)
eligible_images = DatasetItem.objects.filter(dataset=dataset, is_val=False)
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, val_pos_tags, val_neg_tags)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_images,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
val_pos_dataset_item_pks = []
val_neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags for t in tags):
neg_dataset_item_pks.append(pk)
elif any(t in val_pos_tags for t in tags):
val_pos_dataset_item_pks.append(pk)
elif any(t in val_neg_tags for t in tags):
val_neg_dataset_item_pks.append(pk)
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.BGSPLIT_NUM_NEGS_MULTIPLIER * len(
pos_dataset_item_pks
) - len(neg_dataset_item_pks)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from request
all_eligible_pks = filtered_images_v2(
request,
dataset,
exclude_pks=(
pos_dataset_item_pks
+ neg_dataset_item_pks
+ val_pos_dataset_item_pks
+ val_neg_dataset_item_pks
),
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
if resume_model_id:
resume_model = get_object_or_404(DNNModel, model_id=resume_model_id)
resume_model_path = resume_model.checkpoint_path
else:
resume_model = None
resume_model_path = None
params = {
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"val_pos_identifiers": val_pos_dataset_item_internal_identifiers,
"val_neg_identifiers": val_neg_dataset_item_internal_identifiers,
"augment_negs": augment_negs,
"model_kwargs": model_kwargs,
"model_name": model_name,
"bucket": bucket_name,
"cluster_id": cluster_id,
"index_id": index_id,
"resume_from": resume_model_path,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_job",
json=params,
)
response_data = r.json()
if r.status_code != 200:
return JsonResponse(
{"status": "failure", "reason": response_data.get("reason", "")},
status=r.status_code,
)
m = DNNModel(
dataset=dataset,
name=model_name,
model_id=response_data["model_id"],
category_spec={
"augment_negs": augment_negs,
"pos_tags": payload["pos_tags"],
"neg_tags": payload["neg_tags"],
"augment_negs_include": payload.get("include", []) if augment_negs else [],
"augment_negs_exclude": payload.get("exclude", []) if augment_negs else [],
},
)
model_epoch = -1 + model_kwargs.get("epochs_to_run", 1)
if resume_model_id:
m.resume_model_id = resume_model_id
if model_kwargs.get("resume_training", False):
model_epoch += resume_model.epoch + 1
m.epoch = model_epoch
m.save()
return JsonResponse(
{
"status": "success",
"model_id": response_data["model_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_model_status(request, model_id):
params = {"model_id": model_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_job_status", params=params
)
response_data = r.json()
if response_data["has_model"]:
# Index has been successfully created & uploaded -> persist
m = get_object_or_404(DNNModel, model_id=model_id)
m.checkpoint_path = response_data["checkpoint_path"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def update_model_v2(request):
payload = json.loads(request.body)
# user = payload["user"]
old_model_name = payload["old_model_name"]
new_model_name = payload["new_model_name"]
models = get_list_or_404(DNNModel, name=old_model_name)
for m in models:
m.name = new_model_name
m.save()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def delete_model_v2(request):
payload = json.loads(request.body)
model_name = payload["model_name"]
# cluster_id = payload['cluster_id']
models = get_list_or_404(DNNModel, name=model_name)
for m in models:
# TODO(fpoms): delete model data stored on NFS?
# shutil.rmtree(os.path.join(m.checkpoint_path, '..'))
shutil.rmtree(m.output_directory, ignore_errors=True)
m.delete()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def run_model_inference(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_id = payload["model_id"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
dataset = get_object_or_404(Dataset, name=dataset_name)
model_checkpoint_path = get_object_or_404(
DNNModel, model_id=model_id
).checkpoint_path
if model_checkpoint_path is None or len(model_checkpoint_path) == 0:
return JsonResponse(
{
"status": "failure",
"reason": f"Model {model_id} does not have a model checkpoint.",
},
status=400,
)
params = {
"bucket": bucket_name,
"model_id": model_id,
"checkpoint_path": model_checkpoint_path,
"cluster_id": cluster_id,
"index_id": index_id,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_inference_job",
json=params,
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"job_id": response_data["job_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_model_inference_status(request, job_id):
params = {"job_id": job_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_inference_job_status",
params=params,
)
response_data = r.json()
if response_data["has_output"]:
model_id = response_data["model_id"]
# Index has been successfully created & uploaded -> persist
m = get_object_or_404(DNNModel, model_id=model_id)
m.output_directory = response_data["output_dir"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_model_inference(request, job_id):
params = {"job_id": job_id}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_bgsplit_inference_job", json=params
)
response_data = r.json()
return JsonResponse(response_data, status=r.status_code)
#
# V2 ENDPOINTS
# TODO(mihirg): Make these faster
#
Tag = namedtuple("Tag", "category value") # type: NamedTuple[str, str]
Box = namedtuple(
"Box", "category value x1 y1 x2 y2"
) # type: NamedTuple[str, str, float, float, float, float]
PkType = int
@dataclass
class ResultSet:
type: str
ranking: List[PkType]
distances: List[float]
model: Optional[str]
# TODO(fpoms): this needs to be wrapped in a lock so that
# updates are atomic across concurrent requests
current_result_sets = ExpiringDict(
max_age_seconds=30 * 60,
max_len=50,
) # type: Dict[str, ResultSet]
def parse_tag_set_from_query_v2(s):
if isinstance(s, list):
parts = s
elif isinstance(s, str) and s:
parts = s.split(",")
else:
parts = []
ts = set()
for part in parts:
if not part:
continue
category, value_str = part.split(":")
ts.add(Tag(category, value_str))
return ts
def tag_sets_to_query(*tagsets):
merged = set().union(*tagsets)
if not merged:
return Q()
return Q(
annotation__in=Annotation.objects.filter(
functools.reduce(
operator.or_,
[Q(category__name=t.category, mode__name=t.value) for t in merged],
)
)
)
def serialize_tag_set_for_client_v2(ts):
return [{"category": t.category, "value": t.value} for t in sorted(list(ts))]
def serialize_boxes_for_client_v2(bs):
return [
{
"category": b.category,
"value": b.value,
"x1": b.x1,
"y1": b.y1,
"x2": b.x2,
"y2": b.y2,
}
for b in sorted(list(bs))
]
def get_tags_from_annotations_v2(annotations):
tags_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=False)
ann_dicts = annotations.values("dataset_item__pk", "category__name", "mode__name")
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
tags_by_pk[pk].append(Tag(category, mode))
return tags_by_pk
def get_boxes_from_annotations_v2(annotations):
boxes_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=True)
ann_dicts = annotations.values(
"dataset_item__pk",
"category__name",
"mode__name",
"bbox_x1",
"bbox_y1",
"bbox_x2",
"bbox_y2",
)
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
box = (ann["bbox_x1"], ann["bbox_y1"], ann["bbox_x2"], ann["bbox_y2"])
boxes_by_pk[pk].append(Box(category, mode, *box))
return boxes_by_pk
def filtered_images_v2(request, dataset, exclude_pks=None) -> List[PkType]:
filt_start = time.time()
if request.method == "POST":
payload = json.loads(request.body)
include_tags = parse_tag_set_from_query_v2(payload.get("include"))
exclude_tags = parse_tag_set_from_query_v2(payload.get("exclude"))
pks = [i for i in payload.get("subset", []) if i]
split = payload.get("split", "train")
offset_to_return = int(payload.get("offset", 0))
num_to_return = int(payload.get("num", -1))
else:
include_tags = parse_tag_set_from_query_v2(request.GET.get("include"))
exclude_tags = parse_tag_set_from_query_v2(request.GET.get("exclude"))
pks = [i for i in request.GET.get("subset", "").split(",") if i]
split = request.GET.get("split", "train")
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", -1))
end_to_return = None if num_to_return == -1 else offset_to_return + num_to_return
dataset_items = None
is_val = split == "val"
db_start = time.time()
# Get pks for dataset items of interest
if pks and exclude_pks:
# Get specific pks - excluded pks if requested
exclude_pks = set(exclude_pks)
pks = [pk for pk in pks if pk not in exclude_pks]
elif not pks:
# Otherwise get all dataset items - exclude pks
dataset_items = DatasetItem.objects.filter(dataset=dataset, is_val=is_val)
if exclude_pks:
dataset_items = dataset_items.exclude(pk__in=exclude_pks)
pks = dataset_items.values_list("pk", flat=True)
db_end = time.time()
result = None
db_tag_start = time.time()
if not include_tags and not exclude_tags:
# If no tags specified, just return retrieved pks
result = pks
else:
# Otherwise, filter using include and exclude tags
if dataset_items is None:
dataset_items = DatasetItem.objects.filter(pk__in=pks)
if include_tags:
dataset_items = dataset_items.filter(tag_sets_to_query(include_tags))
if exclude_tags:
dataset_items = dataset_items.exclude(tag_sets_to_query(exclude_tags))
result = dataset_items.values_list("pk", flat=True)
db_tag_end = time.time()
result = list(result[offset_to_return:end_to_return])
filt_end = time.time()
print(
f"filtered_images_v2: tot: {filt_end-filt_start}, "
f"db ({len(result)} items): {db_end-db_start}, db tag: {db_tag_end-db_tag_start}"
)
return result
def process_image_query_results_v2(request, dataset, query_response):
filtered_pks = filtered_images_v2(request, dataset)
# TODO(mihirg): Eliminate this database call by directly returning pks from backend
dataset_items = DatasetItem.objects.filter(pk__in=filtered_pks)
dataset_items_by_path = {di.path: di for di in dataset_items}
distances = []
ordered_pks = []
for r in query_response["results"]:
if r["label"] in dataset_items_by_path:
ordered_pks.append(dataset_items_by_path[r["label"]].pk)
distances.append(r["dist"])
return dict(
pks=ordered_pks,
distances=distances,
)
def create_result_set_v2(results, type, model=None):
pks = results["pks"]
distances = results["distances"]
result_set_id = str(uuid.uuid4())
current_result_sets[result_set_id] = ResultSet(
type=type, ranking=pks, distances=distances, model=model
)
return {
"id": result_set_id,
"num_results": len(pks),
"type": type,
}
@api_view(["GET"])
@csrf_exempt
def get_results_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
index_id = request.GET["index_id"]
result_set_id = request.GET["result_set_id"]
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", 500))
clustering_model = request.GET.get("clustering_model", None)
result_set = current_result_sets[result_set_id]
pks = result_set.ranking[offset_to_return : offset_to_return + num_to_return]
distances = result_set.distances[
offset_to_return : offset_to_return + num_to_return
]
dataset_items_by_pk = DatasetItem.objects.in_bulk(pks)
dataset_items = [dataset_items_by_pk[pk] for pk in pks] # preserve order
bucket_name = dataset.train_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
internal_identifiers = [di.identifier for di in dataset_items]
params = {
"index_id": index_id,
"identifiers": internal_identifiers,
}
if clustering_model:
params["model"] = clustering_model
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/perform_clustering",
json=params,
)
clustering_data = r.json()
dataset_item_paths = [
(di.path if di.path.find("http") != -1 else path_template.format(di.path))
for di in dataset_items
]
dataset_item_identifiers = [di.pk for di in dataset_items]
return JsonResponse(
{
"paths": dataset_item_paths,
"identifiers": dataset_item_identifiers,
"distances": distances,
"clustering": clustering_data["clustering"],
}
)
@api_view(["POST"])
@csrf_exempt
def keep_alive_v2(request):
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/keep_alive",
)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def generate_embedding_v2(request):
payload = json.loads(request.body)
image_id = payload.get("image_id")
if image_id:
payload["identifier"] = DatasetItem.objects.get(pk=image_id).identifier
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def generate_text_embedding_v2(request):
payload = json.loads(request.body)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_text_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def query_knn_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
embeddings = payload["embeddings"]
use_full_image = bool(payload.get("use_full_image", True))
use_dot_product = bool(payload.get("use_dot_product", False))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
query_knn_start = time.time()
params = {
"index_id": index_id,
"embeddings": embeddings,
"use_full_image": use_full_image,
"use_dot_product": use_dot_product,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_knn_v2",
json=params,
)
response_data = r.json()
query_knn_end = time.time()
logger.debug("query_knn_v2 time: {:f}".format(query_knn_end - query_knn_start))
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "knn", model=model))
@api_view(["GET"])
@csrf_exempt
def train_svm_v2(request, dataset_name):
index_id = request.GET["index_id"]
model = request.GET.get("model", "imagenet")
pos_tags = parse_tag_set_from_query_v2(request.GET["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(request.GET.get("neg_tags"))
augment_negs = bool(
distutils.util.strtobool(request.GET.get("augment_negs", "false"))
)
dataset = get_object_or_404(Dataset, name=dataset_name)
pos_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(pos_tags),
dataset=dataset,
is_val=False,
)
pos_dataset_item_pks = list(pos_dataset_items.values_list("pk", flat=True))
if neg_tags:
neg_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(neg_tags),
dataset=dataset,
is_val=False,
).difference(pos_dataset_items)
neg_dataset_item_pks = list(neg_dataset_items.values_list("pk", flat=True))
else:
neg_dataset_item_pks = []
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.SVM_NUM_NEGS_MULTIPLIER * len(pos_dataset_item_pks) - len(
neg_dataset_item_pks
)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from GET request
all_eligible_pks = filtered_images_v2(
request, dataset, exclude_pks=pos_dataset_item_pks + neg_dataset_item_pks
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
params = {
"index_id": index_id,
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/train_svm_v2",
json=params,
)
return JsonResponse(r.json()) # {"svm_vector": base64-encoded string}
@api_view(["POST"])
@csrf_exempt
def query_svm_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
svm_vector = payload["svm_vector"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"svm_vector": svm_vector,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_svm_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "svm"))
@api_view(["POST"])
@csrf_exempt
def query_ranking_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload["model"]
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_ranking_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "ranking", model=model))
@api_view(["POST"])
@csrf_exempt
def query_images_v2(request, dataset_name):
query_start = time.time()
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
order = payload.get("order", "id")
filter_start = time.time()
result_pks = filtered_images_v2(request, dataset)
filter_end = time.time()
if order == "random":
random.shuffle(result_pks)
elif order == "id":
result_pks.sort()
results = {"pks": result_pks, "distances": [-1 for _ in result_pks]}
resp = JsonResponse(create_result_set_v2(results, "query"))
query_end = time.time()
print(
f"query_images_v2: tot: {query_end-query_start}, "
f"filter: {filter_end-filter_start}"
)
return resp
#
# ACTIVE VALIDATION
#
VAL_NEGATIVE_TYPE = "model_val_negative"
def get_val_examples_v2(dataset, model_id):
# Get positive and negative categories
model = get_object_or_404(DNNModel, model_id=model_id)
pos_tags = parse_tag_set_from_query_v2(model.category_spec["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(model.category_spec["neg_tags"])
augment_negs = model.category_spec.get("augment_negs", False)
augment_negs_include = (
parse_tag_set_from_query_v2(model.category_spec.get("augment_negs_include", []))
if augment_negs
else set()
)
# Limit to validation set
eligible_dataset_items = DatasetItem.objects.filter(
dataset=dataset,
is_val=True,
)
# Get positives and negatives matching these categories
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, augment_negs_include)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags or t in augment_negs_include for t in tags):
neg_dataset_item_pks.append(pk)
# Get extra negatives
if augment_negs:
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
label_category=model_id,
label_type=VAL_NEGATIVE_TYPE,
)
neg_dataset_item_pks.extend(ann.dataset_item.pk for ann in annotations)
return pos_dataset_item_pks, neg_dataset_item_pks
@api_view(["POST"])
def query_metrics_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
internal_identifiers_to_weights = payload["weights"] # type: Dict[str, int]
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct identifiers, labels, and weights
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
weights = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifier = di.identifier
weight = internal_identifiers_to_weights.get(identifier)
if weight is None:
continue
identifiers.append(identifier)
labels.append(label)
weights.append(weight)
# TODO(mihirg): Parse false positives and false negatives
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"weights": weights,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_metrics",
json=params,
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
def query_active_validation_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
current_f1 = payload.get("current_f1")
if current_f1 is None:
current_f1 = 0.5
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct paths, identifiers, and labels
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifiers.append(di.identifier)
labels.append(label)
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"current_f1": current_f1,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_active_validation",
json=params,
)
response_data = r.json()
if response_data["identifiers"]:
pks_and_paths = list(
DatasetItem.objects.filter(
dataset=dataset,
identifier__in=response_data["identifiers"],
is_val=True,
).values_list("pk", "path")
)
random.shuffle(pks_and_paths)
pks, paths = zip(*pks_and_paths)
else:
pks, paths = [], []
bucket_name = dataset.val_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
paths = [path_template.format(p) for p in paths]
return JsonResponse(
{
"paths": paths,
"identifiers": pks,
"weights": response_data["weights"],
}
)
@api_view(["POST"])
def add_val_annotations_v2(request):
payload = json.loads(request.body)
annotations = payload["annotations"]
user_email = payload["user"]
model = payload["model"]
anns = []
cat_modes = defaultdict(int)
dataset = None
for ann_payload in annotations:
image_pk = ann_payload["identifier"]
is_other_negative = ann_payload.get("is_other_negative", False)
mode_str = "NEGATIVE" if is_other_negative else ann_payload["mode"]
category_name = (
"active:" + model if is_other_negative else ann_payload["category"]
)
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_str)
di = DatasetItem.objects.get(pk=image_pk)
dataset = di.dataset
assert di.is_val
ann = Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
misc_data={"created_by": "active_val"},
)
cat_modes[(category, mode)] += 1
anns.append(ann)
Annotation.objects.bulk_create(anns)
for (cat, mode), c in cat_modes.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset, category=cat, mode=mode
)
category_count.count += c
category_count.save()
return JsonResponse({"created": len(anns)})
# DATASET INFO
@api_view(["GET"])
@csrf_exempt
def get_datasets_v2(request):
datasets = Dataset.objects.filter(hidden=False)
dataset_names = list(datasets.values_list("name", flat=True))
return JsonResponse({"dataset_names": dataset_names})
@api_view(["GET"])
@csrf_exempt
def get_dataset_info_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
num_train = dataset.datasetitem_set.filter(is_val=False).count()
num_val = dataset.datasetitem_set.filter(is_val=True).count()
return JsonResponse(
{
"index_id": dataset.index_id,
"num_train": num_train,
"num_val": num_val,
}
)
@api_view(["GET"])
@csrf_exempt
def get_models_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
model_objs = DNNModel.objects.filter(
dataset=dataset,
checkpoint_path__isnull=False,
).order_by("-last_updated")
model_names = set()
latest = {}
with_output = {}
for model in model_objs:
model_names.add(model.name)
if model.name not in latest:
latest[model.name] = model
if model.output_directory and model.name not in with_output:
with_output[model.name] = model
models = [
{
"name": model_name,
"latest": model_info(latest[model_name]),
"with_output": model_info(with_output.get(model_name)),
}
for model_name in model_names
]
return JsonResponse({"models": models})
def model_info(model):
if model is None:
return None
pos_tags = parse_tag_set_from_query_v2(model.category_spec.get("pos_tags", []))
neg_tags = parse_tag_set_from_query_v2(model.category_spec.get("neg_tags", []))
augment_negs_include = parse_tag_set_from_query_v2(
model.category_spec.get("augment_negs_include", [])
)
return {
"model_id": model.model_id,
"timestamp": model.last_updated,
"has_checkpoint": model.checkpoint_path is not None,
"has_output": model.output_directory is not None,
"pos_tags": serialize_tag_set_for_client_v2(pos_tags),
"neg_tags": serialize_tag_set_for_client_v2(neg_tags | augment_negs_include),
"augment_negs": model.category_spec.get("augment_negs", False),
"epoch": model.epoch,
}
@api_view(["POST"])
@csrf_exempt
def create_dataset_v2(request):
payload = json.loads(request.body)
name = payload["dataset"]
train_directory = payload["train_path"]
val_directory = payload["val_path"]
index_id = payload["index_id"]
assert all(d.startswith("gs://") for d in (train_directory, val_directory))
# Download index on index server
params = {"index_id": index_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/download_index",
json=params,
)
client = storage.Client()
all_blobs = []
for d, is_val in ((train_directory, False), (val_directory, True)):
split_dir = d[len("gs://") :].split("/")
bucket_name = split_dir[0]
bucket_path = "/".join(split_dir[1:])
all_blobs.extend(
(blob, is_val)
for blob in client.list_blobs(bucket_name, prefix=bucket_path)
)
dataset = Dataset(
name=name,
train_directory=train_directory,
val_directory=val_directory,
index_id=index_id,
)
dataset.save()
# Create all the DatasetItems for this dataset
items = [
DatasetItem(
dataset=dataset,
identifier=os.path.splitext(os.path.basename(blob.name))[0],
path=blob.name,
is_val=is_val,
)
for blob, is_val in all_blobs
if (
blob.name.endswith(".jpg")
or blob.name.endswith(".jpeg")
or blob.name.endswith(".png")
)
]
DatasetItem.objects.bulk_create(items, batch_size=10000)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def get_annotations_v2(request):
payload = json.loads(request.body)
image_pks = [i for i in payload["identifiers"] if i]
if not image_pks:
return JsonResponse({})
annotations = Annotation.objects.filter(
dataset_item__in=DatasetItem.objects.filter(pk__in=image_pks),
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
boxes_by_pk = get_boxes_from_annotations_v2(annotations)
annotations_by_pk = defaultdict(lambda: {"tags": [], "boxes": []})
for pk, tags in tags_by_pk.items():
annotations_by_pk[pk]["tags"] = serialize_tag_set_for_client_v2(tags)
for pk, boxes in boxes_by_pk.items():
annotations_by_pk[pk]["boxes"] = serialize_boxes_for_client_v2(boxes)
return JsonResponse(annotations_by_pk)
@api_view(["POST"])
@csrf_exempt
def add_annotations_v2(request):
payload = json.loads(request.body)
image_pks = payload["identifiers"]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_multi_v2(request):
payload = json.loads(request.body)
num_created = bulk_add_multi_annotations_v2(payload)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_by_internal_identifiers_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
image_identifiers = payload["identifiers"]
images = DatasetItem.objects.filter(
dataset=dataset, identifier__in=image_identifiers
)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_to_result_set_v2(request):
payload = json.loads(request.body)
result_set_id = payload["result_set_id"]
lower_bound = float(payload["from"])
upper_bound = float(payload["to"])
result_set = current_result_sets[result_set_id]
result_ranking = result_set.ranking
# e.g., lower_bound=0.0, upper_bound=0.5 -> second half of the result set
start_index = math.ceil(len(result_ranking) * (1.0 - upper_bound))
end_index = math.floor(len(result_ranking) * (1.0 - lower_bound))
image_pks = result_ranking[start_index:end_index]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
def bulk_add_single_tag_annotations_v2(payload, images):
'''Adds annotations for a single tag to many dataset items'''
if not images:
return 0
user_email = payload["user"]
category_name = payload["category"]
mode_name = payload["mode"]
created_by = payload.get("created_by",
"tag" if len(images) == 1 else "tag-bulk")
dataset = None
if len(images) > 0:
dataset = images[0].dataset
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_name)
Annotation.objects.filter(
dataset_item__in=images, category=category, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
annotations = [
Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
is_box=False,
misc_data={"created_by": created_by},
)
for di in images
]
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
def bulk_add_multi_annotations_v2(payload : Dict):
'''Adds multiple annotations for the same dataset and user to the database
at once'''
dataset_name = payload["dataset"]
dataset = get_object_or_404(Dataset, name=dataset_name)
user_email = payload["user"]
user, _ = User.objects.get_or_create(email=user_email)
created_by = payload.get("created_by",
"tag" if len(payload["annotations"]) == 1 else
"tag-bulk")
# Get pks
idents = [ann['identifier'] for ann in payload["annotations"]
if 'identifier' in ann]
di_pks = list(DatasetItem.objects.filter(
dataset=dataset, identifier__in=idents
).values_list("pk", "identifier"))
ident_to_pk = {ident: pk for pk, ident in di_pks}
cats = {}
modes = {}
to_delete = defaultdict(set)
annotations = []
for ann in payload["annotations"]:
db_ann = Annotation()
category_name = ann["category"]
mode_name = ann["mode"]
if category_name not in cats:
cats[category_name] = Category.objects.get_or_create(
name=category_name)[0]
if mode_name not in modes:
modes[mode_name] = Mode.objects.get_or_create(
name=mode_name)[0]
if "identifier" in ann:
pk = ident_to_pk[ann["identifier"]]
else:
pk = ann["pk"]
db_ann.dataset_item_id = pk
db_ann.user = user
db_ann.category = cats[category_name]
db_ann.mode = modes[mode_name]
db_ann.is_box = ann.get("is_box", False)
if db_ann.is_box:
db_ann.bbox_x1 = ann["x1"]
db_ann.bbox_y1 = ann["y1"]
db_ann.bbox_x2 = ann["x2"]
db_ann.bbox_y2 = ann["y2"]
else:
to_delete[db_ann.category].add(pk)
db_ann.misc_data={"created_by": created_by}
annotations.append(db_ann)
for cat, pks in to_delete.items():
# Delete per-frame annotations for the category if they exist since
# we should only have one mode per image
Annotation.objects.filter(
category=cat, dataset_item_id__in=pks, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
def bulk_add_annotations_v2(dataset, annotations):
'''Handles bookkeeping for adding many annotations at once'''
Annotation.objects.bulk_create(annotations)
counts = defaultdict(int)
for ann in annotations:
counts[(ann.category, ann.mode)] += 1
for (cat, mode), count in counts.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset,
category=cat,
mode=mode
)
category_count.count += count
category_count.save()
@api_view(["POST"])
@csrf_exempt
def delete_category_v2(request):
payload = json.loads(request.body)
category = payload["category"]
category = Category.objects.get(name=category)
category.delete()
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def update_category_v2(request):
payload = json.loads(request.body)
old_category_name = payload["oldCategory"]
new_category_name = payload["newCategory"]
category = Category.objects.get(name=old_category_name)
category.name = new_category_name
category.save()
return JsonResponse({"status": "success"})
@api_view(["GET"])
@csrf_exempt
def get_category_counts_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
counts = CategoryCount.objects.filter(dataset=dataset).values(
"category__name", "mode__name", "count"
)
n_labeled = defaultdict(dict)
for c in counts:
category = c["category__name"]
mode = c["mode__name"]
count = c["count"]
n_labeled[category][mode] = count
return JsonResponse(n_labeled)
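# Example response shape for get_category_counts_v2 above (illustrative; the
# category and mode names are hypothetical). The view returns a nested
# category -> mode -> count mapping, e.g.:
#
# {
#     "dog": {"POSITIVE": 120, "NEGATIVE": 45},
#     "cat": {"POSITIVE": 30},
# }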
|
[
"logging.getLogger",
"itertools.chain",
"requests.post",
"expiringdict.ExpiringDict",
"django.shortcuts.get_list_or_404",
"django.shortcuts.get_object_or_404",
"rest_framework.decorators.api_view",
"json.loads",
"collections.namedtuple",
"random.shuffle",
"django.http.JsonResponse",
"requests.get",
"uuid.uuid4",
"time.time",
"google.cloud.storage.Client",
"collections.defaultdict",
"os.path.basename",
"shutil.rmtree",
"django.db.models.Q"
] |
[((879, 906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (896, 906), False, 'import logging\n'), ((909, 927), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (917, 927), False, 'from rest_framework.decorators import api_view\n'), ((1384, 1401), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1392, 1401), False, 'from rest_framework.decorators import api_view\n'), ((1676, 1694), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1684, 1694), False, 'from rest_framework.decorators import api_view\n'), ((1981, 1999), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1989, 1999), False, 'from rest_framework.decorators import api_view\n'), ((7145, 7162), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (7153, 7162), False, 'from rest_framework.decorators import api_view\n'), ((7674, 7692), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (7682, 7692), False, 'from rest_framework.decorators import api_view\n'), ((8076, 8094), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (8084, 8094), False, 'from rest_framework.decorators import api_view\n'), ((8581, 8599), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (8589, 8599), False, 'from rest_framework.decorators import api_view\n'), ((9795, 9812), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (9803, 9812), False, 'from rest_framework.decorators import api_view\n'), ((10389, 10407), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (10397, 10407), False, 'from rest_framework.decorators import api_view\n'), ((10765, 10800), 'collections.namedtuple', 'namedtuple', (['"""Tag"""', '"""category value"""'], {}), "('Tag', 'category value')\n", (10775, 10800), False, 'from collections import defaultdict, namedtuple\n'), ((10837, 10884), 'collections.namedtuple', 'namedtuple', (['"""Box"""', '"""category value x1 y1 x2 y2"""'], {}), "('Box', 'category value x1 y1 x2 y2')\n", (10847, 10884), False, 'from collections import defaultdict, namedtuple\n'), ((11214, 11263), 'expiringdict.ExpiringDict', 'ExpiringDict', ([], {'max_age_seconds': '(30 * 60)', 'max_len': '(50)'}), '(max_age_seconds=30 * 60, max_len=50)\n', (11226, 11263), False, 'from expiringdict import ExpiringDict\n'), ((17121, 17138), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (17129, 17138), False, 'from rest_framework.decorators import api_view\n'), ((18892, 18910), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (18900, 18910), False, 'from rest_framework.decorators import api_view\n'), ((19086, 19104), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (19094, 19104), False, 'from rest_framework.decorators import api_view\n'), ((19485, 19503), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (19493, 19503), False, 'from rest_framework.decorators import api_view\n'), ((19757, 19775), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (19765, 19775), False, 'from rest_framework.decorators import api_view\n'), ((20852, 20869), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (20860, 
20869), False, 'from rest_framework.decorators import api_view\n'), ((23246, 23264), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (23254, 23264), False, 'from rest_framework.decorators import api_view\n'), ((24325, 24343), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (24333, 24343), False, 'from rest_framework.decorators import api_view\n'), ((25340, 25358), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (25348, 25358), False, 'from rest_framework.decorators import api_view\n'), ((27914, 27932), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (27922, 27932), False, 'from rest_framework.decorators import api_view\n'), ((29382, 29400), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (29390, 29400), False, 'from rest_framework.decorators import api_view\n'), ((31364, 31382), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (31372, 31382), False, 'from rest_framework.decorators import api_view\n'), ((32883, 32900), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (32891, 32900), False, 'from rest_framework.decorators import api_view\n'), ((33123, 33140), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (33131, 33140), False, 'from rest_framework.decorators import api_view\n'), ((33562, 33579), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (33570, 33579), False, 'from rest_framework.decorators import api_view\n'), ((35255, 35273), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (35263, 35273), False, 'from rest_framework.decorators import api_view\n'), ((36893, 36911), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (36901, 36911), False, 'from rest_framework.decorators import api_view\n'), ((37706, 37724), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (37714, 37724), False, 'from rest_framework.decorators import api_view\n'), ((38030, 38048), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (38038, 38048), False, 'from rest_framework.decorators import api_view\n'), ((38250, 38268), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (38258, 38268), False, 'from rest_framework.decorators import api_view\n'), ((38728, 38746), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (38736, 38746), False, 'from rest_framework.decorators import api_view\n'), ((43711, 43729), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (43719, 43729), False, 'from rest_framework.decorators import api_view\n'), ((43975, 43993), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (43983, 43993), False, 'from rest_framework.decorators import api_view\n'), ((44344, 44361), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (44352, 44361), False, 'from rest_framework.decorators import api_view\n'), ((1130, 1197), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/start_cluster')"], {}), "(settings.EMBEDDING_SERVER_ADDRESS + '/start_cluster')\n", (1143, 1197), False, 'import requests\n'), ((1253, 1331), 'django.http.JsonResponse', 
'JsonResponse', (["{'status': 'success', 'cluster_id': response_data['cluster_id']}"], {}), "({'status': 'success', 'cluster_id': response_data['cluster_id']})\n", (1265, 1331), False, 'from django.http import JsonResponse\n'), ((1508, 1595), 'requests.get', 'requests.get', (["(settings.EMBEDDING_SERVER_ADDRESS + '/cluster_status')"], {'params': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/cluster_status', params=\n params)\n", (1520, 1595), False, 'import requests\n'), ((1645, 1672), 'django.http.JsonResponse', 'JsonResponse', (['response_data'], {}), '(response_data)\n', (1657, 1672), False, 'from django.http import JsonResponse\n'), ((1791, 1870), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/stop_cluster')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/stop_cluster', json=params)\n", (1804, 1870), False, 'import requests\n'), ((1905, 1940), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success'}"], {}), "({'status': 'success'})\n", (1917, 1940), False, 'from django.http import JsonResponse\n'), ((2082, 2106), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (2092, 2106), False, 'import json\n'), ((2685, 2730), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (2702, 2730), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((5954, 6042), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/start_bgsplit_job')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/start_bgsplit_job',\n json=params)\n", (5967, 6042), False, 'import requests\n'), ((7018, 7092), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success', 'model_id': response_data['model_id']}"], {}), "({'status': 'success', 'model_id': response_data['model_id']})\n", (7030, 7092), False, 'from django.http import JsonResponse\n'), ((7261, 7351), 'requests.get', 'requests.get', (["(settings.EMBEDDING_SERVER_ADDRESS + '/bgsplit_job_status')"], {'params': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/bgsplit_job_status',\n params=params)\n", (7273, 7351), False, 'import requests\n'), ((7643, 7670), 'django.http.JsonResponse', 'JsonResponse', (['response_data'], {}), '(response_data)\n', (7655, 7670), False, 'from django.http import JsonResponse\n'), ((7750, 7774), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (7760, 7774), False, 'import json\n'), ((7912, 7958), 'django.shortcuts.get_list_or_404', 'get_list_or_404', (['DNNModel'], {'name': 'old_model_name'}), '(DNNModel, name=old_model_name)\n', (7927, 7958), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((8041, 8072), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (8053, 8072), False, 'from django.http import JsonResponse\n'), ((8152, 8176), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (8162, 8176), False, 'import json\n'), ((8270, 8312), 'django.shortcuts.get_list_or_404', 'get_list_or_404', (['DNNModel'], {'name': 'model_name'}), '(DNNModel, name=model_name)\n', (8285, 8312), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((8546, 8577), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (8558, 8577), False, 'from django.http import JsonResponse\n'), ((8689, 8713), 'json.loads', 'json.loads', (['request.body'], {}), 
'(request.body)\n', (8699, 8713), False, 'import json\n'), ((8874, 8919), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (8891, 8919), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((9513, 9611), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/start_bgsplit_inference_job')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS +\n '/start_bgsplit_inference_job', json=params)\n", (9526, 9611), False, 'import requests\n'), ((9672, 9742), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success', 'job_id': response_data['job_id']}"], {}), "({'status': 'success', 'job_id': response_data['job_id']})\n", (9684, 9742), False, 'from django.http import JsonResponse\n'), ((9915, 10015), 'requests.get', 'requests.get', (["(settings.EMBEDDING_SERVER_ADDRESS + '/bgsplit_inference_job_status')"], {'params': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS +\n '/bgsplit_inference_job_status', params=params)\n", (9927, 10015), False, 'import requests\n'), ((10358, 10385), 'django.http.JsonResponse', 'JsonResponse', (['response_data'], {}), '(response_data)\n', (10370, 10385), False, 'from django.http import JsonResponse\n'), ((10504, 10601), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/stop_bgsplit_inference_job')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS +\n '/stop_bgsplit_inference_job', json=params)\n", (10517, 10601), False, 'import requests\n'), ((10652, 10701), 'django.http.JsonResponse', 'JsonResponse', (['response_data'], {'status': 'r.status_code'}), '(response_data, status=r.status_code)\n', (10664, 10701), False, 'from django.http import JsonResponse\n'), ((12477, 12494), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12488, 12494), False, 'from collections import defaultdict, namedtuple\n'), ((12911, 12928), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12922, 12928), False, 'from collections import defaultdict, namedtuple\n'), ((13565, 13576), 'time.time', 'time.time', ([], {}), '()\n', (13574, 13576), False, 'import time\n'), ((14581, 14592), 'time.time', 'time.time', ([], {}), '()\n', (14590, 14592), False, 'import time\n'), ((15138, 15149), 'time.time', 'time.time', ([], {}), '()\n', (15147, 15149), False, 'import time\n'), ((15188, 15199), 'time.time', 'time.time', ([], {}), '()\n', (15197, 15199), False, 'import time\n'), ((15790, 15801), 'time.time', 'time.time', ([], {}), '()\n', (15799, 15801), False, 'import time\n'), ((15875, 15886), 'time.time', 'time.time', ([], {}), '()\n', (15884, 15886), False, 'import time\n'), ((17209, 17254), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (17226, 17254), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((18298, 18387), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/perform_clustering')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/perform_clustering',\n json=params)\n", (18311, 18387), False, 'import requests\n'), ((18662, 18823), 'django.http.JsonResponse', 'JsonResponse', (["{'paths': dataset_item_paths, 'identifiers': dataset_item_identifiers,\n 'distances': distances, 'clustering': clustering_data['clustering']}"], {}), "({'paths': dataset_item_paths, 'identifiers':\n dataset_item_identifiers, 'distances': distances, 
'clustering':\n clustering_data['clustering']})\n", (18674, 18823), False, 'from django.http import JsonResponse\n'), ((18956, 19020), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/keep_alive')"], {}), "(settings.EMBEDDING_SERVER_ADDRESS + '/keep_alive')\n", (18969, 19020), False, 'import requests\n'), ((19047, 19082), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success'}"], {}), "({'status': 'success'})\n", (19059, 19082), False, 'from django.http import JsonResponse\n'), ((19168, 19192), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (19178, 19192), False, 'import json\n'), ((19338, 19428), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/generate_embedding')"], {'json': 'payload'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/generate_embedding',\n json=payload)\n", (19351, 19428), False, 'import requests\n'), ((19572, 19596), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (19582, 19596), False, 'import json\n'), ((19605, 19700), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/generate_text_embedding')"], {'json': 'payload'}), "(settings.EMBEDDING_SERVER_ADDRESS +\n '/generate_text_embedding', json=payload)\n", (19618, 19700), False, 'import requests\n'), ((19844, 19868), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (19854, 19868), False, 'import json\n'), ((20132, 20177), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (20149, 20177), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((20201, 20212), 'time.time', 'time.time', ([], {}), '()\n', (20210, 20212), False, 'import time\n'), ((20416, 20495), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/query_knn_v2')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/query_knn_v2', json=params)\n", (20429, 20495), False, 'import requests\n'), ((20568, 20579), 'time.time', 'time.time', ([], {}), '()\n', (20577, 20579), False, 'import time\n'), ((21273, 21318), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (21290, 21318), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((23065, 23144), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/train_svm_v2')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/train_svm_v2', json=params)\n", (23078, 23144), False, 'import requests\n'), ((23333, 23357), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (23343, 23357), False, 'import json\n'), ((23598, 23643), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (23615, 23643), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((23826, 23905), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/query_svm_v2')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/query_svm_v2', json=params)\n", (23839, 23905), False, 'import requests\n'), ((24416, 24440), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (24426, 24440), False, 'import json\n'), ((24626, 24671), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (24643, 
24671), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((24820, 24908), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/query_ranking_v2')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/query_ranking_v2', json\n =params)\n", (24833, 24908), False, 'import requests\n'), ((25434, 25445), 'time.time', 'time.time', ([], {}), '()\n', (25443, 25445), False, 'import time\n'), ((25461, 25506), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (25478, 25506), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((25521, 25545), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (25531, 25545), False, 'import json\n'), ((25605, 25616), 'time.time', 'time.time', ([], {}), '()\n', (25614, 25616), False, 'import time\n'), ((25688, 25699), 'time.time', 'time.time', ([], {}), '()\n', (25697, 25699), False, 'import time\n'), ((25966, 25977), 'time.time', 'time.time', ([], {}), '()\n', (25975, 25977), False, 'import time\n'), ((26285, 26331), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DNNModel'], {'model_id': 'model_id'}), '(DNNModel, model_id=model_id)\n', (26302, 26331), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((27992, 28037), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (28009, 28037), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((28052, 28076), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (28062, 28076), False, 'import json\n'), ((28552, 28662), 'itertools.chain', 'itertools.chain', (['((pk, True) for pk in pos_dataset_item_pks)', '((pk, False) for pk in neg_dataset_item_pks)'], {}), '(((pk, True) for pk in pos_dataset_item_pks), ((pk, False) for\n pk in neg_dataset_item_pks))\n', (28567, 28662), False, 'import itertools\n'), ((29207, 29292), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/query_metrics')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/query_metrics', json=params\n )\n", (29220, 29292), False, 'import requests\n'), ((29351, 29378), 'django.http.JsonResponse', 'JsonResponse', (['response_data'], {}), '(response_data)\n', (29363, 29378), False, 'from django.http import JsonResponse\n'), ((29470, 29515), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (29487, 29515), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((29530, 29554), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (29540, 29554), False, 'import json\n'), ((30025, 30135), 'itertools.chain', 'itertools.chain', (['((pk, True) for pk in pos_dataset_item_pks)', '((pk, False) for pk in neg_dataset_item_pks)'], {}), '(((pk, True) for pk in pos_dataset_item_pks), ((pk, False) for\n pk in neg_dataset_item_pks))\n', (30040, 30135), False, 'import itertools\n'), ((30447, 30541), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/query_active_validation')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS +\n '/query_active_validation', json=params)\n", (30460, 30541), False, 'import requests\n'), ((31212, 31304), 'django.http.JsonResponse', 'JsonResponse', (["{'paths': paths, 'identifiers': pks, 'weights': 
response_data['weights']}"], {}), "({'paths': paths, 'identifiers': pks, 'weights': response_data[\n 'weights']})\n", (31224, 31304), False, 'from django.http import JsonResponse\n'), ((31434, 31458), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (31444, 31458), False, 'import json\n'), ((31593, 31609), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (31604, 31609), False, 'from collections import defaultdict, namedtuple\n'), ((33073, 33119), 'django.http.JsonResponse', 'JsonResponse', (["{'dataset_names': dataset_names}"], {}), "({'dataset_names': dataset_names})\n", (33085, 33119), False, 'from django.http import JsonResponse\n'), ((33216, 33261), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (33233, 33261), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((33409, 33501), 'django.http.JsonResponse', 'JsonResponse', (["{'index_id': dataset.index_id, 'num_train': num_train, 'num_val': num_val}"], {}), "({'index_id': dataset.index_id, 'num_train': num_train,\n 'num_val': num_val})\n", (33421, 33501), False, 'from django.http import JsonResponse\n'), ((33649, 33694), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (33666, 33694), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((34395, 34427), 'django.http.JsonResponse', 'JsonResponse', (["{'models': models}"], {}), "({'models': models})\n", (34407, 34427), False, 'from django.http import JsonResponse\n'), ((35333, 35357), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (35343, 35357), False, 'import json\n'), ((35666, 35752), 'requests.post', 'requests.post', (["(settings.EMBEDDING_SERVER_ADDRESS + '/download_index')"], {'json': 'params'}), "(settings.EMBEDDING_SERVER_ADDRESS + '/download_index', json=\n params)\n", (35679, 35752), False, 'import requests\n'), ((35785, 35801), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (35799, 35801), False, 'from google.cloud import storage\n'), ((36854, 36889), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success'}"], {}), "({'status': 'success'})\n", (36866, 36889), False, 'from django.http import JsonResponse\n'), ((36972, 36996), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (36982, 36996), False, 'import json\n'), ((37375, 37422), 'collections.defaultdict', 'defaultdict', (["(lambda : {'tags': [], 'boxes': []})"], {}), "(lambda : {'tags': [], 'boxes': []})\n", (37386, 37422), False, 'from collections import defaultdict, namedtuple\n'), ((37671, 37702), 'django.http.JsonResponse', 'JsonResponse', (['annotations_by_pk'], {}), '(annotations_by_pk)\n', (37683, 37702), False, 'from django.http import JsonResponse\n'), ((37785, 37809), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (37795, 37809), False, 'import json\n'), ((37988, 38026), 'django.http.JsonResponse', 'JsonResponse', (["{'created': num_created}"], {}), "({'created': num_created})\n", (38000, 38026), False, 'from django.http import JsonResponse\n'), ((38115, 38139), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (38125, 38139), False, 'import json\n'), ((38208, 38246), 'django.http.JsonResponse', 'JsonResponse', (["{'created': num_created}"], {}), "({'created': num_created})\n", (38220, 38246), False, 'from django.http import 
JsonResponse\n'), ((38367, 38412), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (38384, 38412), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((38428, 38452), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (38438, 38452), False, 'import json\n'), ((38686, 38724), 'django.http.JsonResponse', 'JsonResponse', (["{'created': num_created}"], {}), "({'created': num_created})\n", (38698, 38724), False, 'from django.http import JsonResponse\n'), ((38821, 38845), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (38831, 38845), False, 'import json\n'), ((39477, 39515), 'django.http.JsonResponse', 'JsonResponse', (["{'created': num_created}"], {}), "({'created': num_created})\n", (39489, 39515), False, 'from django.http import JsonResponse\n'), ((40938, 40983), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (40955, 40983), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((41587, 41603), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (41598, 41603), False, 'from collections import defaultdict, namedtuple\n'), ((43350, 43366), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (43361, 43366), False, 'from collections import defaultdict, namedtuple\n'), ((43790, 43814), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (43800, 43814), False, 'import json\n'), ((43936, 43971), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success'}"], {}), "({'status': 'success'})\n", (43948, 43971), False, 'from django.http import JsonResponse\n'), ((44054, 44078), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (44064, 44078), False, 'import json\n'), ((44305, 44340), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'success'}"], {}), "({'status': 'success'})\n", (44317, 44340), False, 'from django.http import JsonResponse\n'), ((44440, 44485), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Dataset'], {'name': 'dataset_name'}), '(Dataset, name=dataset_name)\n', (44457, 44485), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((44623, 44640), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (44634, 44640), False, 'from collections import defaultdict, namedtuple\n'), ((44813, 44836), 'django.http.JsonResponse', 'JsonResponse', (['n_labeled'], {}), '(n_labeled)\n', (44825, 44836), False, 'from django.http import JsonResponse\n'), ((5215, 5268), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DNNModel'], {'model_id': 'resume_model_id'}), '(DNNModel, model_id=resume_model_id)\n', (5232, 5268), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((7506, 7552), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DNNModel'], {'model_id': 'model_id'}), '(DNNModel, model_id=model_id)\n', (7523, 7552), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((8461, 8514), 'shutil.rmtree', 'shutil.rmtree', (['m.output_directory'], {'ignore_errors': '(True)'}), '(m.output_directory, ignore_errors=True)\n', (8474, 8514), False, 'import shutil\n'), ((8948, 8994), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DNNModel'], {'model_id': 'model_id'}), '(DNNModel, model_id=model_id)\n', (8965, 8994), False, 
'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((9113, 9229), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'failure', 'reason':\n f'Model {model_id} does not have a model checkpoint.'}"], {'status': '(400)'}), "({'status': 'failure', 'reason':\n f'Model {model_id} does not have a model checkpoint.'}, status=400)\n", (9125, 9229), False, 'from django.http import JsonResponse\n'), ((10225, 10271), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DNNModel'], {'model_id': 'model_id'}), '(DNNModel, model_id=model_id)\n', (10242, 10271), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((11767, 11770), 'django.db.models.Q', 'Q', ([], {}), '()\n', (11768, 11770), False, 'from django.db.models import Q\n'), ((13628, 13652), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (13638, 13652), False, 'import json\n'), ((16878, 16890), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16888, 16890), False, 'import uuid\n'), ((25735, 25761), 'random.shuffle', 'random.shuffle', (['result_pks'], {}), '(result_pks)\n', (25749, 25761), False, 'import random\n'), ((30879, 30908), 'random.shuffle', 'random.shuffle', (['pks_and_paths'], {}), '(pks_and_paths)\n', (30893, 30908), False, 'import random\n'), ((37091, 37107), 'django.http.JsonResponse', 'JsonResponse', (['{}'], {}), '({})\n', (37103, 37107), False, 'from django.http import JsonResponse\n'), ((11913, 11961), 'django.db.models.Q', 'Q', ([], {'category__name': 't.category', 'mode__name': 't.value'}), '(category__name=t.category, mode__name=t.value)\n', (11914, 11961), False, 'from django.db.models import Q\n'), ((36492, 36519), 'os.path.basename', 'os.path.basename', (['blob.name'], {}), '(blob.name)\n', (36508, 36519), False, 'import os\n')]
|
from __future__ import absolute_import
import os
import yaml
from ccmlib import common, extension, repository
from ccmlib.cluster import Cluster
from ccmlib.dse_cluster import DseCluster
from ccmlib.node import Node
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
class ClusterFactory:
@staticmethod
def load(path, name):
cluster_path = os.path.join(path, name)
filename = os.path.join(cluster_path, 'cluster.conf')
with open(filename, 'r') as f:
data = yaml.safe_load(f)
try:
install_dir = None
if 'install_dir' in data:
install_dir = data['install_dir']
repository.validate(install_dir)
if install_dir is None and 'cassandra_dir' in data:
install_dir = data['cassandra_dir']
repository.validate(install_dir)
cassandra_version = None
if 'cassandra_version' in data:
cassandra_version = LooseVersion(data['cassandra_version'])
if common.isDse(install_dir):
cluster = DseCluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
else:
cluster = Cluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
node_list = data['nodes']
seed_list = data['seeds']
if 'partitioner' in data:
cluster.partitioner = data['partitioner']
if 'config_options' in data:
cluster._config_options = data['config_options']
if 'dse_config_options' in data:
cluster._dse_config_options = data['dse_config_options']
if 'misc_config_options' in data:
cluster._misc_config_options = data['misc_config_options']
if 'log_level' in data:
cluster.__log_level = data['log_level']
if 'use_vnodes' in data:
cluster.use_vnodes = data['use_vnodes']
if 'datadirs' in data:
cluster.data_dir_count = int(data['datadirs'])
extension.load_from_cluster_config(cluster, data)
except KeyError as k:
            raise common.LoadError("Error Loading " + filename + ", missing property: " + str(k))
for node_name in node_list:
cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
for seed in seed_list:
cluster.seeds.append(seed)
return cluster
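# Sketch of a cluster.conf consumed by load() above. The keys are inferred
# from what this method reads, not from ccmlib documentation, and the values
# are hypothetical:
#
# name: test_cluster
# install_dir: /path/to/cassandra      # legacy files may use cassandra_dir
# cassandra_version: '3.11.4'
# nodes: [node1, node2, node3]
# seeds: ['127.0.0.1']
# partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# config_options: {}
# use_vnodes: true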
|
[
"distutils.version.LooseVersion",
"ccmlib.repository.validate",
"os.path.join",
"ccmlib.common.LoadError",
"ccmlib.dse_cluster.DseCluster",
"yaml.safe_load",
"ccmlib.extension.load_from_cluster_config",
"ccmlib.common.isDse",
"ccmlib.node.Node.load",
"ccmlib.cluster.Cluster"
] |
[((407, 431), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (419, 431), False, 'import os\n'), ((451, 493), 'os.path.join', 'os.path.join', (['cluster_path', '"""cluster.conf"""'], {}), "(cluster_path, 'cluster.conf')\n", (463, 493), False, 'import os\n'), ((552, 569), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (566, 569), False, 'import yaml\n'), ((1090, 1115), 'ccmlib.common.isDse', 'common.isDse', (['install_dir'], {}), '(install_dir)\n', (1102, 1115), False, 'from ccmlib import common, extension, repository\n'), ((2246, 2295), 'ccmlib.extension.load_from_cluster_config', 'extension.load_from_cluster_config', (['cluster', 'data'], {}), '(cluster, data)\n', (2280, 2295), False, 'from ccmlib import common, extension, repository\n'), ((2494, 2537), 'ccmlib.node.Node.load', 'Node.load', (['cluster_path', 'node_name', 'cluster'], {}), '(cluster_path, node_name, cluster)\n', (2503, 2537), False, 'from ccmlib.node import Node\n'), ((718, 750), 'ccmlib.repository.validate', 'repository.validate', (['install_dir'], {}), '(install_dir)\n', (737, 750), False, 'from ccmlib import common, extension, repository\n'), ((883, 915), 'ccmlib.repository.validate', 'repository.validate', (['install_dir'], {}), '(install_dir)\n', (902, 915), False, 'from ccmlib import common, extension, repository\n'), ((1034, 1073), 'distutils.version.LooseVersion', 'LooseVersion', (["data['cassandra_version']"], {}), "(data['cassandra_version'])\n", (1046, 1073), False, 'from distutils.version import LooseVersion\n'), ((1143, 1272), 'ccmlib.dse_cluster.DseCluster', 'DseCluster', (['path', "data['name']"], {'install_dir': 'install_dir', 'create_directory': '(False)', 'derived_cassandra_version': 'cassandra_version'}), "(path, data['name'], install_dir=install_dir, create_directory=\n False, derived_cassandra_version=cassandra_version)\n", (1153, 1272), False, 'from ccmlib.dse_cluster import DseCluster\n'), ((1312, 1437), 'ccmlib.cluster.Cluster', 'Cluster', (['path', "data['name']"], {'install_dir': 'install_dir', 'create_directory': '(False)', 'derived_cassandra_version': 'cassandra_version'}), "(path, data['name'], install_dir=install_dir, create_directory=False,\n derived_cassandra_version=cassandra_version)\n", (1319, 1437), False, 'from ccmlib.cluster import Cluster\n'), ((2344, 2417), 'ccmlib.common.LoadError', 'common.LoadError', (["('Error Loading ' + filename + ', missing property:' + k)"], {}), "('Error Loading ' + filename + ', missing property:' + k)\n", (2360, 2417), False, 'from ccmlib import common, extension, repository\n')]
|
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.
"""
import itertools
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
import torch
from causalnex.structure.structuremodel import StructureModel
class DistTypeBase(metaclass=ABCMeta):
"""Base class defining the distribution default behavior and interface"""
def __init__(self, idx: int):
"""
Default constructor for the DistTypeBase class.
Unless overridden, provides default behavior to all subclasses.
Args:
idx: Positional index in data passed to the NOTEARS algorithm
which correspond to this datatype.
"""
self.idx = idx
def get_columns(
self,
X: np.ndarray,
) -> np.ndarray:
"""
Gets the column(s) associated with the instantiated DistType.
Args:
X: Full dataset to be selected from.
Returns:
1d or 2d np.ndarray of columns.
"""
return X[:, self.idx]
# pylint: disable=no-self-use
# pylint: disable=unused-argument
def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray:
"""
Overload this method to perform any required preprocessing of the data
matrix. This can include data conversion, column expansion etc.
Changes to the tabu parameters should also be done here.
**WARN** This preprocessing CANNOT reorder the columns of X.
Args:
X: The original passed-in data.
fit_transform: Whether the class first fits
then transforms the data, or just transforms.
Just transforming is used to preprocess new data after the
initial NOTEARS fit.
Returns:
Preprocessed X
"""
return X
# pylint: disable=no-self-use
def preprocess_tabu_edges(
self, tabu_edges: List[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""
Overload this method to perform any required preprocessing of the tabu_edges.
Args:
tabu_edges: The original tabu_edges.
Returns:
Preprocessed tabu_edges.
"""
return tabu_edges
# pylint: disable=no-self-use
def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]:
"""
Overload this method to perform any required preprocessing of the tabu_nodes.
Args:
tabu_nodes: The original tabu_nodes.
Returns:
Preprocessed tabu_nodes.
"""
return tabu_nodes
# pylint: disable=no-self-use
def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]:
"""
Overload this method to update the idx_col dict with expanded colnames.
Args:
idx_col: The original index to column mapping.
Returns:
Updated index to column mapping.
"""
return idx_col
def add_to_node(self, sm: StructureModel) -> StructureModel:
"""
Adds self to a node of a structure model corresponding to self.idx.
Args:
sm: The input StructureModel
Returns:
Updated StructureModel
"""
sm.nodes[self.idx]["dist_type"] = self
return sm
# pylint: disable=no-self-use
def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor:
"""
Overload this method to apply updates to the W matrix in h(W).
        Typically used to prevent spurious cycles when using expanded columns.
Args:
square_weight_mat: The weight matrix used in h(W).
Returns:
Updated weight matrix used in h(W).
"""
return square_weight_mat
# pylint: disable=no-self-use
def collapse_adj(self, adj: np.ndarray) -> np.ndarray:
"""
Overload this method to apply updates to collapse the W matrix
        of a multi-parameter distribution.
Likely has the same impact as modify_h.
Args:
adj: The adjacency matrix.
Returns:
Updated adjacency matrix.
"""
return adj
@abstractmethod
def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:
"""
Args:
X: The original data passed into NOTEARS (i.e. the reconstruction target).
X_hat: The reconstructed data.
Returns:
Scalar pytorch tensor of the reconstruction loss between X and X_hat.
"""
raise NotImplementedError("Must implement the loss() method")
@abstractmethod
def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:
"""
Convert the transformed data from the latent space to the original dtype
using the inverse link function.
Args:
X_hat: Reconstructed data in the latent space.
Returns:
Modified X_hat.
MUST be same shape as passed in data.
Projects the self.idx column from the latent space to the dist_type space.
"""
raise NotImplementedError("Must implement the inverse_link_function() method")
class ExpandColumnsMixin:
"""
Mixin class providing convenience methods for column expansion.
"""
@staticmethod
def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray:
"""
Expands the data matrix columns without reordering the indices.
Args:
X: Base dataset to expand.
new_columns: The columns to expand the dataset by.
Returns:
Expanded dataset.
"""
return np.hstack([X, new_columns])
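    # Shape sketch (illustrative): if X has shape (n_samples, d) and
    # new_columns has shape (n_samples, k), the result has shape
    # (n_samples, d + k), with X's original column indices unchanged.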
@staticmethod
def update_tabu_edges(
idx_group: List[int],
tabu_edges: List[Tuple[int, int]],
tabu_idx_group: bool,
) -> List[Tuple[int, int]]:
"""
Tabu edges are:
1. all user defined connections to original feature column
2. all inter-feature connections (optional)
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_edges: The list of tabu_edges to be updated.
tabu_idx_group: Whether inter-group edges should also be considered tabu.
                I.e. if the group results from a column expansion, we often want to
                prevent edges from being learned between its parameters.
Returns:
Updated tabu_edges
"""
if tabu_edges is None:
tabu_edges = []
# copy to prevent mutations
tabu_edges = deepcopy(tabu_edges)
# handle 1.
new_tabu_edges = []
# for each original tabu pair
for (i, j) in tabu_edges:
# idx_group[0] is the original column index
if i == idx_group[0]:
new_tabu_edges += [(idx, j) for idx in idx_group[1:]]
elif j == idx_group[0]:
new_tabu_edges += [(i, idx) for idx in idx_group[1:]]
# all new edges added to tabu_edges
tabu_edges += new_tabu_edges
# handle 2.
if tabu_idx_group:
# add on all pairwise permutations of particular feature group
# NOTE: permutations are needed for edge directionality
tabu_edges += list(itertools.permutations(idx_group, 2))
return tabu_edges
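    # Worked example (hypothetical indices): with idx_group=[2, 5, 6] (column 2
    # expanded into columns 5 and 6) and tabu_edges=[(0, 2)], step 1 adds
    # (0, 5) and (0, 6). With tabu_idx_group=True, step 2 also adds every
    # ordered pair within the group: (2, 5), (2, 6), (5, 2), (5, 6), (6, 2), (6, 5).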
@staticmethod
def update_tabu_nodes(
idx_group: List[int], tabu_nodes: List[int]
) -> List[Tuple[int, int]]:
"""
Tabu nodes are:
1. all user defined connections to original feature column
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_nodes: The list of tabu_nodes to be updated.
Returns:
Updated tabu_nodes
"""
if tabu_nodes is None:
return tabu_nodes
# copy to prevent mutations
tabu_nodes = deepcopy(tabu_nodes)
new_tabu_nodes = []
for i in tabu_nodes:
# NOTE: the first element in the idx_group is guaranteed as self.idx
if i == idx_group[0]:
new_tabu_nodes += idx_group[1:]
# add on the new tabu nodes
tabu_nodes += new_tabu_nodes
return tabu_nodes
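    # Worked example (hypothetical indices): with idx_group=[2, 5, 6] and
    # tabu_nodes=[2], the expanded columns become tabu as well, giving [2, 5, 6].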
|
[
"itertools.permutations",
"copy.deepcopy",
"numpy.hstack"
] |
[((7215, 7242), 'numpy.hstack', 'np.hstack', (['[X, new_columns]'], {}), '([X, new_columns])\n', (7224, 7242), True, 'import numpy as np\n'), ((8158, 8178), 'copy.deepcopy', 'deepcopy', (['tabu_edges'], {}), '(tabu_edges)\n', (8166, 8178), False, 'from copy import deepcopy\n'), ((9531, 9551), 'copy.deepcopy', 'deepcopy', (['tabu_nodes'], {}), '(tabu_nodes)\n', (9539, 9551), False, 'from copy import deepcopy\n'), ((8869, 8905), 'itertools.permutations', 'itertools.permutations', (['idx_group', '(2)'], {}), '(idx_group, 2)\n', (8891, 8905), False, 'import itertools\n')]
|
"""Module for BlameInteractionGraph plots."""
import typing as tp
from datetime import datetime
from pathlib import Path
import click
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import plotly.offline as offply
from matplotlib import style
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
CIGEdgeAttrs,
AIGNodeAttrs,
CAIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.mapping.commit_map import get_commit_map
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import (
PlotGenerator,
REQUIRE_CASE_STUDY,
REQUIRE_REVISION,
)
from varats.plots.chord_plot_utils import (
make_chord_plot,
make_arc_plot,
NodeTy,
ChordPlotNodeInfo,
ChordPlotEdgeInfo,
ArcPlotEdgeInfo,
ArcPlotNodeInfo,
)
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.utils.git_util import (
CommitRepoPair,
create_commit_lookup_helper,
UNCOMMITTED_COMMIT_HASH,
FullCommitHash,
ShortCommitHash,
)
class CommitInteractionGraphPlot(Plot, plot_name='cig_plot'):
"""Creates a dot file for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
# Nothing to do here.
pass
def save(self, plot_dir: Path, filetype: str = 'svg') -> None:
project_name = self.plot_kwargs["project"]
revision = self.plot_kwargs["revision"]
cig = create_blame_interaction_graph(project_name, revision
).commit_interaction_graph()
nx.set_node_attributes(
cig, {node: cig.nodes[node]["commit_hash"] for node in cig.nodes},
"label"
)
# pylint: disable=import-outside-toplevel
from networkx.drawing.nx_agraph import write_dot
write_dot(cig, plot_dir / self.plot_file_name("dot"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CommitInteractionGraphPlotGenerator(
PlotGenerator,
generator_name="cig-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Plot a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs)
]
NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo)
EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo)
def _prepare_cig_plotly(
project_name: str, revision: FullCommitHash,
create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph],
NodeInfoTy],
create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int],
EdgeInfoTy]
) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[
NodeTy, NodeTy, EdgeInfoTy]]]:
commit_lookup = create_commit_lookup_helper(project_name)
cig = create_blame_interaction_graph(project_name,
revision).commit_interaction_graph()
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
commit = commit_lookup(node)
if not commit:
return False
# make filter configurable
return datetime.utcfromtimestamp(commit.commit_time
) >= datetime(2015, 1, 1)
nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = []
node_meta: tp.Dict[NodeTy, CommitRepoPair] = {}
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
node_meta[node] = commit
nodes.append((node, create_node_info(node, commit, cig)))
nodes = sorted(
nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time)
)
edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = []
for source, sink in cig.edges:
amount = tp.cast(CIGEdgeAttrs, cig[source][sink])["amount"]
source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])["commit"]
sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])["commit"]
if not filter_nodes(source_commit) or not filter_nodes(sink_commit):
continue
edges.append((
source, sink, create_edge_info(source_commit, sink_commit, amount)
))
return nodes, edges
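# Illustrative output of _prepare_cig_plotly when paired with the chord-plot
# callbacks defined below (a sketch; node ids and commit hashes are made up):
#
# nodes == [(3, {"info": "1a2b3c4", "color": 1}), ...]
# edges == [(3, 7, {"size": 2, "color": 1,
#                   "info": "1a2b3c4 --{2}--> 9f8e7d6"}), ...]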
class CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'):
"""Chord plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ChordPlotNodeInfo:
del node
del cig
return {"info": commit.commit_hash.short_hash, "color": 1}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ChordPlotEdgeInfo:
return {
"size": amount,
"color": 1,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_chord_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGChordPlotGenerator(
PlotGenerator,
generator_name="cig-chord-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates a chord plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphChordPlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'):
"""Arc plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ArcPlotNodeInfo:
return {
"info": commit.commit_hash.short_hash,
"size": cig.degree(node),
"fill_color": cig.out_degree(node),
"line_color": cig.in_degree(node)
}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ArcPlotEdgeInfo:
return {
"size": amount,
"color": amount,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_arc_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGArcPlotGenerator(
PlotGenerator,
generator_name="cig-arc-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates an arc plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs)
]
OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(
"--sort-by",
type=click.Choice(["degree", "time"]),
default="degree",
required=False,
help="Sort method for commit interaction graph nodes."
)
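# The selected value is passed through to the plot as
# self.plot_kwargs["sort"] (see CommitInteractionGraphNodeDegreePlot.plot).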
class CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'):
"""
Plot node degrees of a commit interaction graph.
Additional arguments:
- sort: criteria to sort the revisions [degree, time]
"""
def plot(self, view_mode: bool) -> None:
sort = self.plot_kwargs["sort"]
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
xlabel = ""
if sort == "time":
xlabel = "Time (old to new)"
elif sort == "degree":
xlabel = "Commits"
axes.set_xlabel(xlabel)
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
cig = create_blame_interaction_graph(case_study.project_name, revision
).commit_interaction_graph()
commit_lookup = create_commit_lookup_helper(case_study.project_name)
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
return bool(commit_lookup(node))
def commit_time(node: CommitRepoPair) -> datetime:
return datetime.utcfromtimestamp(commit_lookup(node).commit_time)
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
nodes.append(({
"commit_hash": commit.commit_hash,
"commit_time": commit_time(commit),
"node_degree": cig.degree(node),
"node_out_degree": cig.out_degree(node),
"node_in_degree": cig.in_degree(node),
}))
data = pd.DataFrame(nodes)
if sort == "time":
data.sort_values(by="commit_time", inplace=True)
node_degrees = data.loc[:, ["commit_hash", "node_degree"]]
node_out_degrees = data.loc[:, ["commit_hash", "node_out_degree"]]
node_in_degrees = data.loc[:, ["commit_hash", "node_in_degree"]]
if sort == "degree":
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="cig-node-degrees",
options=[REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD]
):
"""Generates a plot of node degrees of a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'):
"""Plot node degrees of a author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Author Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
axes.set_xlabel("Authors")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
aig = create_blame_interaction_graph(project_name, revision
).author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in aig.nodes:
node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node])
author = node_attrs["author"]
nodes.append(({
"author": author,
"node_degree": aig.degree(node),
"node_out_degree": aig.out_degree(node),
"node_in_degree": aig.in_degree(node),
}))
data = pd.DataFrame(nodes)
node_degrees = data.loc[:, ["author", "node_degree"]]
node_out_degrees = data.loc[:, ["author", "node_out_degree"]]
node_in_degrees = data.loc[:, ["author", "node_in_degree"]]
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class AIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="aig-node-degrees",
options=[REQUIRE_CASE_STUDY]
):
"""Generates a plot of node degrees of a author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
AuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitAuthorInteractionGraphNodeDegreePlot(
Plot, plot_name='caig_node_degrees'
):
"""Plot node degrees of commits in a commit-author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit-Author Interaction Graph - # Interacting Authors")
axes.set_title(case_study.project_name)
axes.set_ylabel("Authors")
axes.set_xlabel("Commits")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
caig = create_blame_interaction_graph(project_name, revision
).commit_author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in caig.nodes:
node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node])
commit = node_attrs["commit"]
if commit:
nodes.append(({
"commit": commit.commit_hash,
"num_authors": caig.degree(node)
}))
data = pd.DataFrame(nodes)
num_authors = data.loc[:, ["commit", "num_authors"]]
num_authors.sort_values(by="num_authors", inplace=True)
axes.plot(num_authors["num_authors"].values)
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
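# Note (sketch): assuming the commit-author interaction graph is bipartite
# between commit and author nodes, a commit node's degree equals the number
# of distinct authors it interacts with, which is why the plot above can use
# caig.degree(node) directly as "num_authors".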
class CAIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="caig-node-degrees",
options=[
REQUIRE_CASE_STUDY,
]
):
"""Generates a plot of node degrees of a commit-author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitAuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
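# Note (sketch): each *PlotGenerator above only instantiates its Plot
# counterpart with the shared plot_config / plot_kwargs. The generator_name
# strings ("cig-node-degrees", "aig-node-degrees", "caig-node-degrees") are
# the identifiers a plot is selected by, and the option lists
# (REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD) declare which options each
# generator accepts -- OPTIONAL_SORT_METHOD being the "degree"/"time" choice
# consumed by the sorting branch in the CIG plot.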
|
[
"varats.data.reports.blame_interaction_graph.create_blame_interaction_graph",
"click.Choice",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime",
"varats.utils.git_util.create_commit_lookup_helper",
"varats.plots.chord_plot_utils.make_arc_plot",
"varats.paper_mgmt.case_study.newest_processed_revision_for_case_study",
"varats.plots.chord_plot_utils.make_chord_plot",
"networkx.set_node_attributes",
"matplotlib.pyplot.subplots",
"varats.mapping.commit_map.get_commit_map",
"pandas.DataFrame",
"varats.plot.plot.PlotDataEmpty",
"varats.utils.git_util.ShortCommitHash",
"typing.cast",
"typing.TypeVar"
] |
[((2528, 2588), 'typing.TypeVar', 'tp.TypeVar', (['"""NodeInfoTy"""', 'ChordPlotNodeInfo', 'ArcPlotNodeInfo'], {}), "('NodeInfoTy', ChordPlotNodeInfo, ArcPlotNodeInfo)\n", (2538, 2588), True, 'import typing as tp\n'), ((2602, 2662), 'typing.TypeVar', 'tp.TypeVar', (['"""EdgeInfoTy"""', 'ChordPlotEdgeInfo', 'ArcPlotEdgeInfo'], {}), "('EdgeInfoTy', ChordPlotEdgeInfo, ArcPlotEdgeInfo)\n", (2612, 2662), True, 'import typing as tp\n'), ((3103, 3144), 'varats.utils.git_util.create_commit_lookup_helper', 'create_commit_lookup_helper', (['project_name'], {}), '(project_name)\n', (3130, 3144), False, 'from varats.utils.git_util import CommitRepoPair, create_commit_lookup_helper, UNCOMMITTED_COMMIT_HASH, FullCommitHash, ShortCommitHash\n'), ((1733, 1835), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['cig', "{node: cig.nodes[node]['commit_hash'] for node in cig.nodes}", '"""label"""'], {}), "(cig, {node: cig.nodes[node]['commit_hash'] for node in\n cig.nodes}, 'label')\n", (1755, 1835), True, 'import networkx as nx\n'), ((3813, 3851), 'typing.cast', 'tp.cast', (['CIGNodeAttrs', 'cig.nodes[node]'], {}), '(CIGNodeAttrs, cig.nodes[node])\n', (3820, 3851), True, 'import typing as tp\n'), ((5913, 5970), 'varats.plots.chord_plot_utils.make_chord_plot', 'make_chord_plot', (['nodes', 'edges', '"""Commit Interaction Graph"""'], {}), "(nodes, edges, 'Commit Interaction Graph')\n", (5928, 5970), False, 'from varats.plots.chord_plot_utils import make_chord_plot, make_arc_plot, NodeTy, ChordPlotNodeInfo, ChordPlotEdgeInfo, ArcPlotEdgeInfo, ArcPlotNodeInfo\n'), ((7961, 8016), 'varats.plots.chord_plot_utils.make_arc_plot', 'make_arc_plot', (['nodes', 'edges', '"""Commit Interaction Graph"""'], {}), "(nodes, edges, 'Commit Interaction Graph')\n", (7974, 8016), False, 'from varats.plots.chord_plot_utils import make_chord_plot, make_arc_plot, NodeTy, ChordPlotNodeInfo, ChordPlotEdgeInfo, ArcPlotEdgeInfo, ArcPlotNodeInfo\n'), ((8720, 8752), 'click.Choice', 'click.Choice', (["['degree', 'time']"], {}), "(['degree', 'time'])\n", (8732, 8752), False, 'import click\n'), ((9303, 9335), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '"""all"""'}), "(1, 1, sharey='all')\n", (9315, 9335), True, 'import matplotlib.pyplot as plt\n'), ((9725, 9790), 'varats.paper_mgmt.case_study.newest_processed_revision_for_case_study', 'newest_processed_revision_for_case_study', (['case_study', 'BlameReport'], {}), '(case_study, BlameReport)\n', (9765, 9790), False, 'from varats.paper_mgmt.case_study import newest_processed_revision_for_case_study\n'), ((10049, 10101), 'varats.utils.git_util.create_commit_lookup_helper', 'create_commit_lookup_helper', (['case_study.project_name'], {}), '(case_study.project_name)\n', (10076, 10101), False, 'from varats.utils.git_util import CommitRepoPair, create_commit_lookup_helper, UNCOMMITTED_COMMIT_HASH, FullCommitHash, ShortCommitHash\n'), ((11009, 11028), 'pandas.DataFrame', 'pd.DataFrame', (['nodes'], {}), '(nodes)\n', (11021, 11028), True, 'import pandas as pd\n'), ((12722, 12754), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '"""all"""'}), "(1, 1, sharey='all')\n", (12734, 12754), True, 'import matplotlib.pyplot as plt\n'), ((13044, 13109), 'varats.paper_mgmt.case_study.newest_processed_revision_for_case_study', 'newest_processed_revision_for_case_study', (['case_study', 'BlameReport'], {}), '(case_study, BlameReport)\n', (13084, 13109), False, 'from varats.paper_mgmt.case_study import newest_processed_revision_for_case_study\n'), 
((13776, 13795), 'pandas.DataFrame', 'pd.DataFrame', (['nodes'], {}), '(nodes)\n', (13788, 13795), True, 'import pandas as pd\n'), ((15352, 15384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '"""all"""'}), "(1, 1, sharey='all')\n", (15364, 15384), True, 'import matplotlib.pyplot as plt\n'), ((15691, 15756), 'varats.paper_mgmt.case_study.newest_processed_revision_for_case_study', 'newest_processed_revision_for_case_study', (['case_study', 'BlameReport'], {}), '(case_study, BlameReport)\n', (15731, 15756), False, 'from varats.paper_mgmt.case_study import newest_processed_revision_for_case_study\n'), ((16375, 16394), 'pandas.DataFrame', 'pd.DataFrame', (['nodes'], {}), '(nodes)\n', (16387, 16394), True, 'import pandas as pd\n'), ((3155, 3209), 'varats.data.reports.blame_interaction_graph.create_blame_interaction_graph', 'create_blame_interaction_graph', (['project_name', 'revision'], {}), '(project_name, revision)\n', (3185, 3209), False, 'from varats.data.reports.blame_interaction_graph import create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs\n'), ((3547, 3592), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['commit.commit_time'], {}), '(commit.commit_time)\n', (3572, 3592), False, 'from datetime import datetime\n'), ((3637, 3657), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (3645, 3657), False, 'from datetime import datetime\n'), ((4266, 4306), 'typing.cast', 'tp.cast', (['CIGEdgeAttrs', 'cig[source][sink]'], {}), '(CIGEdgeAttrs, cig[source][sink])\n', (4273, 4306), True, 'import typing as tp\n'), ((4341, 4381), 'typing.cast', 'tp.cast', (['CIGNodeAttrs', 'cig.nodes[source]'], {}), '(CIGNodeAttrs, cig.nodes[source])\n', (4348, 4381), True, 'import typing as tp\n'), ((4414, 4452), 'typing.cast', 'tp.cast', (['CIGNodeAttrs', 'cig.nodes[sink]'], {}), '(CIGNodeAttrs, cig.nodes[sink])\n', (4421, 4452), True, 'import typing as tp\n'), ((5030, 5075), 'varats.utils.git_util.ShortCommitHash', 'ShortCommitHash', (["self.plot_kwargs['revision']"], {}), "(self.plot_kwargs['revision'])\n", (5045, 5075), False, 'from varats.utils.git_util import CommitRepoPair, create_commit_lookup_helper, UNCOMMITTED_COMMIT_HASH, FullCommitHash, ShortCommitHash\n'), ((6955, 7000), 'varats.utils.git_util.ShortCommitHash', 'ShortCommitHash', (["self.plot_kwargs['revision']"], {}), "(self.plot_kwargs['revision'])\n", (6970, 7000), False, 'from varats.utils.git_util import CommitRepoPair, create_commit_lookup_helper, UNCOMMITTED_COMMIT_HASH, FullCommitHash, ShortCommitHash\n'), ((9856, 9871), 'varats.plot.plot.PlotDataEmpty', 'PlotDataEmpty', ([], {}), '()\n', (9869, 9871), False, 'from varats.plot.plot import Plot, PlotDataEmpty\n'), ((10538, 10576), 'typing.cast', 'tp.cast', (['CIGNodeAttrs', 'cig.nodes[node]'], {}), '(CIGNodeAttrs, cig.nodes[node])\n', (10545, 10576), True, 'import typing as tp\n'), ((13175, 13190), 'varats.plot.plot.PlotDataEmpty', 'PlotDataEmpty', ([], {}), '()\n', (13188, 13190), False, 'from varats.plot.plot import Plot, PlotDataEmpty\n'), ((13440, 13478), 'typing.cast', 'tp.cast', (['AIGNodeAttrs', 'aig.nodes[node]'], {}), '(AIGNodeAttrs, aig.nodes[node])\n', (13447, 13478), True, 'import typing as tp\n'), ((15822, 15837), 'varats.plot.plot.PlotDataEmpty', 'PlotDataEmpty', ([], {}), '()\n', (15835, 15837), False, 'from varats.plot.plot import Plot, PlotDataEmpty\n'), ((16097, 16137), 'typing.cast', 'tp.cast', (['CAIGNodeAttrs', 'caig.nodes[node]'], {}), '(CAIGNodeAttrs, 
caig.nodes[node])\n', (16104, 16137), True, 'import typing as tp\n'), ((1598, 1652), 'varats.data.reports.blame_interaction_graph.create_blame_interaction_graph', 'create_blame_interaction_graph', (['project_name', 'revision'], {}), '(project_name, revision)\n', (1628, 1652), False, 'from varats.data.reports.blame_interaction_graph import create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs\n'), ((4964, 4992), 'varats.mapping.commit_map.get_commit_map', 'get_commit_map', (['project_name'], {}), '(project_name)\n', (4978, 4992), False, 'from varats.mapping.commit_map import get_commit_map\n'), ((6889, 6917), 'varats.mapping.commit_map.get_commit_map', 'get_commit_map', (['project_name'], {}), '(project_name)\n', (6903, 6917), False, 'from varats.mapping.commit_map import get_commit_map\n'), ((9887, 9952), 'varats.data.reports.blame_interaction_graph.create_blame_interaction_graph', 'create_blame_interaction_graph', (['case_study.project_name', 'revision'], {}), '(case_study.project_name, revision)\n', (9917, 9952), False, 'from varats.data.reports.blame_interaction_graph import create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs\n'), ((13206, 13260), 'varats.data.reports.blame_interaction_graph.create_blame_interaction_graph', 'create_blame_interaction_graph', (['project_name', 'revision'], {}), '(project_name, revision)\n', (13236, 13260), False, 'from varats.data.reports.blame_interaction_graph import create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs\n'), ((15854, 15908), 'varats.data.reports.blame_interaction_graph.create_blame_interaction_graph', 'create_blame_interaction_graph', (['project_name', 'revision'], {}), '(project_name, revision)\n', (15884, 15908), False, 'from varats.data.reports.blame_interaction_graph import create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs\n')]
|
import re
class Rule:
def __init__(self, line):
line = line.strip().split(" contain ")
line[1] = line[1].strip(".").split(", ")
self.contents = {}
for item in line[1]:
# Grab that number out in front
regex = re.compile(r"[0-9]+")
            # If we didn't find one, that means there are no bags inside
if match := regex.match(item):
quantity = int(item[match.span()[0]:match.span()[1]])
# The +1 deals with the space
bag_type = item[match.span()[1] + 1:]
if quantity > 1:
# This gets rid of the s if it's plural
bag_type = bag_type[:-1]
self.contents[bag_type] = quantity
# The s makes things irritating so I want it gone
self.bag_type = line[0][:-1]
def contains_directly(self, bag_type: str):
return bag_type in self.contents
# Warning: recursive
def contains(self, bag_type: str, rule_dict: dict):
if self.contains_directly(bag_type):
return True
else:
for bag in self.contents:
if bag in rule_dict:
if rule_dict[bag].contains(bag_type, rule_dict):
return True
else:
print("An unexpected bag was discovered!")
return False
def count_internal_bags(self, rule_dict: dict):
internal_bags = 0
for bag in self.contents:
# count these bags...
internal_bags += self.contents[bag] # recall that this value represents the quantity
# ...and count the bags inside of it
internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
return internal_bags
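# Worked example (sketch) of the parser above:
#   Rule("light red bags contain 1 bright white bag, 2 muted yellow bags.")
# ends up with bag_type == "light red bag" and
# contents == {"bright white bag": 1, "muted yellow bag": 2}, while a line
# ending in "contain no other bags." leaves contents empty because the
# leading-quantity regex never matches.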
def parse_input(filename: str):
with open(filename, "r") as file:
rules = {}
for line in file:
rule = Rule(line)
print(f"{rule.bag_type} contains {rule.contents}")
rules[rule.bag_type] = rule
return rules
def main():
rule_dict = parse_input("input.txt")
shiny_gold = 0
for rule_entry in rule_dict.keys():
rule = rule_dict[rule_entry]
if rule.contains("shiny gold bag", rule_dict):
print(f"Found {rule.contents} in {rule.bag_type}")
shiny_gold += 1
print("\n")
print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
print(f"A shiny gold bag contains {rule_dict['shiny gold bag'].count_internal_bags(rule_dict)} bags.")
if __name__ == "__main__":
main()
|
[
"re.compile"
] |
[((270, 290), 're.compile', 're.compile', (['"""[0-9]+"""'], {}), "('[0-9]+')\n", (280, 290), False, 'import re\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ApiKeyStageKey',
'ApiKeyTag',
'ClientCertificateTag',
'DeploymentAccessLogSetting',
'DeploymentCanarySetting',
'DeploymentCanarySettings',
'DeploymentMethodSetting',
'DeploymentStageDescription',
'DeploymentTag',
'DocumentationPartLocation',
'DomainNameEndpointConfiguration',
'DomainNameMutualTlsAuthentication',
'DomainNameTag',
'MethodIntegration',
'MethodIntegrationResponse',
'MethodResponse',
'RestApiEndpointConfiguration',
'RestApiS3Location',
'RestApiTag',
'StageAccessLogSetting',
'StageCanarySetting',
'StageMethodSetting',
'StageTag',
'UsagePlanApiStage',
'UsagePlanQuotaSettings',
'UsagePlanTag',
'UsagePlanThrottleSettings',
'VpcLinkTag',
]
@pulumi.output_type
class ApiKeyStageKey(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "restApiId":
suggest = "rest_api_id"
elif key == "stageName":
suggest = "stage_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiKeyStageKey. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
rest_api_id: Optional[str] = None,
stage_name: Optional[str] = None):
"""
:param str rest_api_id: The ID of a RestApi resource that includes the stage with which you want to associate the API key.
:param str stage_name: The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
if rest_api_id is not None:
pulumi.set(__self__, "rest_api_id", rest_api_id)
if stage_name is not None:
pulumi.set(__self__, "stage_name", stage_name)
@property
@pulumi.getter(name="restApiId")
def rest_api_id(self) -> Optional[str]:
"""
The ID of a RestApi resource that includes the stage with which you want to associate the API key.
"""
return pulumi.get(self, "rest_api_id")
@property
@pulumi.getter(name="stageName")
def stage_name(self) -> Optional[str]:
"""
The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
return pulumi.get(self, "stage_name")
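# Note (sketch): indexing an output type like ApiKeyStageKey with the original
# camelCase key (e.g. obj["restApiId"]) goes through __getitem__ and logs the
# warning defined in __key_warning, which recommends the snake_case property
# getter (obj.rest_api_id) instead.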
@pulumi.output_type
class ApiKeyTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ClientCertificateTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class DeploymentAccessLogSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
:param str format: A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
return pulumi.get(self, "format")
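# Example (sketch, hypothetical values): a JSON access-log format that
# satisfies the "$context.requestId" requirement above could look like
#   '{"requestId": "$context.requestId", "status": "$context.status"}'
# paired with a destination_arn naming a CloudWatch Logs log group or an
# amazon-apigateway- prefixed Kinesis Data Firehose delivery stream.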
@pulumi.output_type
class DeploymentCanarySetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percent (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percent (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
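# Example (sketch, hypothetical values): shifting 10% of traffic to a canary
# while overriding one stage variable and bypassing the stage cache would
# correspond to percent_traffic=10.0,
# stage_variable_overrides={"backend": "canary"} and use_stage_cache=False.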
@pulumi.output_type
class DeploymentCanarySettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class DeploymentMethodSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class DeploymentStageDescription(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessLogSetting":
suggest = "access_log_setting"
elif key == "cacheClusterEnabled":
suggest = "cache_cluster_enabled"
elif key == "cacheClusterSize":
suggest = "cache_cluster_size"
elif key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "canarySetting":
suggest = "canary_setting"
elif key == "clientCertificateId":
suggest = "client_certificate_id"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "documentationVersion":
suggest = "documentation_version"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "methodSettings":
suggest = "method_settings"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
elif key == "tracingEnabled":
suggest = "tracing_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentStageDescription. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_log_setting: Optional['outputs.DeploymentAccessLogSetting'] = None,
cache_cluster_enabled: Optional[bool] = None,
cache_cluster_size: Optional[str] = None,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
canary_setting: Optional['outputs.DeploymentCanarySetting'] = None,
client_certificate_id: Optional[str] = None,
data_trace_enabled: Optional[bool] = None,
description: Optional[str] = None,
documentation_version: Optional[str] = None,
logging_level: Optional[str] = None,
method_settings: Optional[Sequence['outputs.DeploymentMethodSetting']] = None,
metrics_enabled: Optional[bool] = None,
tags: Optional[Sequence['outputs.DeploymentTag']] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None,
tracing_enabled: Optional[bool] = None,
variables: Optional[Any] = None):
"""
:param 'DeploymentAccessLogSetting' access_log_setting: Specifies settings for logging access in this stage.
:param bool cache_cluster_enabled: Indicates whether cache clustering is enabled for the stage.
:param str cache_cluster_size: The size of the stage's cache cluster.
        :param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param 'DeploymentCanarySetting' canary_setting: Specifies settings for the canary deployment in this stage.
:param str client_certificate_id: The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str description: A description of the purpose of the stage.
:param str documentation_version: The version identifier of the API documentation snapshot.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param Sequence['DeploymentMethodSetting'] method_settings: Configures settings for all of the stage's methods.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param Sequence['DeploymentTag'] tags: An array of arbitrary tags (key-value pairs) to associate with the stage.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param bool tracing_enabled: Specifies whether active tracing with X-ray is enabled for this stage.
:param Any variables: A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
if access_log_setting is not None:
pulumi.set(__self__, "access_log_setting", access_log_setting)
if cache_cluster_enabled is not None:
pulumi.set(__self__, "cache_cluster_enabled", cache_cluster_enabled)
if cache_cluster_size is not None:
pulumi.set(__self__, "cache_cluster_size", cache_cluster_size)
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if canary_setting is not None:
pulumi.set(__self__, "canary_setting", canary_setting)
if client_certificate_id is not None:
pulumi.set(__self__, "client_certificate_id", client_certificate_id)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if description is not None:
pulumi.set(__self__, "description", description)
if documentation_version is not None:
pulumi.set(__self__, "documentation_version", documentation_version)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if method_settings is not None:
pulumi.set(__self__, "method_settings", method_settings)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
if tracing_enabled is not None:
pulumi.set(__self__, "tracing_enabled", tracing_enabled)
if variables is not None:
pulumi.set(__self__, "variables", variables)
@property
@pulumi.getter(name="accessLogSetting")
def access_log_setting(self) -> Optional['outputs.DeploymentAccessLogSetting']:
"""
Specifies settings for logging access in this stage.
"""
return pulumi.get(self, "access_log_setting")
@property
@pulumi.getter(name="cacheClusterEnabled")
def cache_cluster_enabled(self) -> Optional[bool]:
"""
Indicates whether cache clustering is enabled for the stage.
"""
return pulumi.get(self, "cache_cluster_enabled")
@property
@pulumi.getter(name="cacheClusterSize")
def cache_cluster_size(self) -> Optional[str]:
"""
The size of the stage's cache cluster.
"""
return pulumi.get(self, "cache_cluster_size")
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
        Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="canarySetting")
def canary_setting(self) -> Optional['outputs.DeploymentCanarySetting']:
"""
Specifies settings for the canary deployment in this stage.
"""
return pulumi.get(self, "canary_setting")
@property
@pulumi.getter(name="clientCertificateId")
def client_certificate_id(self) -> Optional[str]:
"""
The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
"""
return pulumi.get(self, "client_certificate_id")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the purpose of the stage.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="documentationVersion")
def documentation_version(self) -> Optional[str]:
"""
The version identifier of the API documentation snapshot.
"""
return pulumi.get(self, "documentation_version")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="methodSettings")
def method_settings(self) -> Optional[Sequence['outputs.DeploymentMethodSetting']]:
"""
Configures settings for all of the stage's methods.
"""
return pulumi.get(self, "method_settings")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.DeploymentTag']]:
"""
An array of arbitrary tags (key-value pairs) to associate with the stage.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@property
@pulumi.getter(name="tracingEnabled")
def tracing_enabled(self) -> Optional[bool]:
"""
Specifies whether active tracing with X-ray is enabled for this stage.
"""
return pulumi.get(self, "tracing_enabled")
@property
@pulumi.getter
def variables(self) -> Optional[Any]:
"""
A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
return pulumi.get(self, "variables")
@pulumi.output_type
class DeploymentTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag
:param str value: The value for the tag
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag
"""
return pulumi.get(self, "value")
@pulumi.output_type
class DocumentationPartLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DocumentationPartLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
method: Optional[str] = None,
name: Optional[str] = None,
path: Optional[str] = None,
status_code: Optional[str] = None,
type: Optional[str] = None):
if method is not None:
pulumi.set(__self__, "method", method)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if status_code is not None:
pulumi.set(__self__, "status_code", status_code)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def method(self) -> Optional[str]:
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
return pulumi.get(self, "path")
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> Optional[str]:
return pulumi.get(self, "status_code")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
@pulumi.output_type
class DomainNameEndpointConfiguration(dict):
def __init__(__self__, *,
types: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@pulumi.output_type
class DomainNameMutualTlsAuthentication(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "truststoreUri":
suggest = "truststore_uri"
elif key == "truststoreVersion":
suggest = "truststore_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainNameMutualTlsAuthentication. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
truststore_uri: Optional[str] = None,
truststore_version: Optional[str] = None):
if truststore_uri is not None:
pulumi.set(__self__, "truststore_uri", truststore_uri)
if truststore_version is not None:
pulumi.set(__self__, "truststore_version", truststore_version)
@property
@pulumi.getter(name="truststoreUri")
def truststore_uri(self) -> Optional[str]:
return pulumi.get(self, "truststore_uri")
@property
@pulumi.getter(name="truststoreVersion")
def truststore_version(self) -> Optional[str]:
return pulumi.get(self, "truststore_version")
@pulumi.output_type
class DomainNameTag(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class MethodIntegration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheKeyParameters":
suggest = "cache_key_parameters"
elif key == "cacheNamespace":
suggest = "cache_namespace"
elif key == "connectionId":
suggest = "connection_id"
elif key == "connectionType":
suggest = "connection_type"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "integrationHttpMethod":
suggest = "integration_http_method"
elif key == "integrationResponses":
suggest = "integration_responses"
elif key == "passthroughBehavior":
suggest = "passthrough_behavior"
elif key == "requestParameters":
suggest = "request_parameters"
elif key == "requestTemplates":
suggest = "request_templates"
elif key == "timeoutInMillis":
suggest = "timeout_in_millis"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: 'MethodIntegrationType',
cache_key_parameters: Optional[Sequence[str]] = None,
cache_namespace: Optional[str] = None,
connection_id: Optional[str] = None,
connection_type: Optional['MethodIntegrationConnectionType'] = None,
content_handling: Optional['MethodIntegrationContentHandling'] = None,
credentials: Optional[str] = None,
integration_http_method: Optional[str] = None,
integration_responses: Optional[Sequence['outputs.MethodIntegrationResponse']] = None,
passthrough_behavior: Optional['MethodIntegrationPassthroughBehavior'] = None,
request_parameters: Optional[Any] = None,
request_templates: Optional[Any] = None,
timeout_in_millis: Optional[int] = None,
uri: Optional[str] = None):
"""
:param 'MethodIntegrationType' type: The type of backend that your method is running.
:param Sequence[str] cache_key_parameters: A list of request parameters whose values API Gateway caches.
:param str cache_namespace: An API-specific tag group of related cached parameters.
:param str connection_id: The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
:param 'MethodIntegrationConnectionType' connection_type: The type of the network connection to the integration endpoint.
:param 'MethodIntegrationContentHandling' content_handling: Specifies how to handle request payload content type conversions.
:param str credentials: The credentials that are required for the integration.
:param str integration_http_method: The integration's HTTP method type.
:param Sequence['MethodIntegrationResponse'] integration_responses: The response that API Gateway provides after a method's backend completes processing a request.
:param 'MethodIntegrationPassthroughBehavior' passthrough_behavior: Indicates when API Gateway passes requests to the targeted backend.
:param Any request_parameters: The request parameters that API Gateway sends with the backend request.
:param Any request_templates: A map of Apache Velocity templates that are applied on the request payload.
:param int timeout_in_millis: Custom timeout between 50 and 29,000 milliseconds.
:param str uri: The Uniform Resource Identifier (URI) for the integration.
"""
pulumi.set(__self__, "type", type)
if cache_key_parameters is not None:
pulumi.set(__self__, "cache_key_parameters", cache_key_parameters)
if cache_namespace is not None:
pulumi.set(__self__, "cache_namespace", cache_namespace)
if connection_id is not None:
pulumi.set(__self__, "connection_id", connection_id)
if connection_type is not None:
pulumi.set(__self__, "connection_type", connection_type)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if integration_http_method is not None:
pulumi.set(__self__, "integration_http_method", integration_http_method)
if integration_responses is not None:
pulumi.set(__self__, "integration_responses", integration_responses)
if passthrough_behavior is not None:
pulumi.set(__self__, "passthrough_behavior", passthrough_behavior)
if request_parameters is not None:
pulumi.set(__self__, "request_parameters", request_parameters)
if request_templates is not None:
pulumi.set(__self__, "request_templates", request_templates)
if timeout_in_millis is not None:
pulumi.set(__self__, "timeout_in_millis", timeout_in_millis)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def type(self) -> 'MethodIntegrationType':
"""
The type of backend that your method is running.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="cacheKeyParameters")
def cache_key_parameters(self) -> Optional[Sequence[str]]:
"""
A list of request parameters whose values API Gateway caches.
"""
return pulumi.get(self, "cache_key_parameters")
@property
@pulumi.getter(name="cacheNamespace")
def cache_namespace(self) -> Optional[str]:
"""
An API-specific tag group of related cached parameters.
"""
return pulumi.get(self, "cache_namespace")
@property
@pulumi.getter(name="connectionId")
def connection_id(self) -> Optional[str]:
"""
The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
"""
return pulumi.get(self, "connection_id")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> Optional['MethodIntegrationConnectionType']:
"""
The type of the network connection to the integration endpoint.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationContentHandling']:
"""
Specifies how to handle request payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter
def credentials(self) -> Optional[str]:
"""
The credentials that are required for the integration.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter(name="integrationHttpMethod")
def integration_http_method(self) -> Optional[str]:
"""
The integration's HTTP method type.
"""
return pulumi.get(self, "integration_http_method")
@property
@pulumi.getter(name="integrationResponses")
def integration_responses(self) -> Optional[Sequence['outputs.MethodIntegrationResponse']]:
"""
The response that API Gateway provides after a method's backend completes processing a request.
"""
return pulumi.get(self, "integration_responses")
@property
@pulumi.getter(name="passthroughBehavior")
def passthrough_behavior(self) -> Optional['MethodIntegrationPassthroughBehavior']:
"""
Indicates when API Gateway passes requests to the targeted backend.
"""
return pulumi.get(self, "passthrough_behavior")
@property
@pulumi.getter(name="requestParameters")
def request_parameters(self) -> Optional[Any]:
"""
The request parameters that API Gateway sends with the backend request.
"""
return pulumi.get(self, "request_parameters")
@property
@pulumi.getter(name="requestTemplates")
def request_templates(self) -> Optional[Any]:
"""
A map of Apache Velocity templates that are applied on the request payload.
"""
return pulumi.get(self, "request_templates")
@property
@pulumi.getter(name="timeoutInMillis")
def timeout_in_millis(self) -> Optional[int]:
"""
Custom timeout between 50 and 29,000 milliseconds.
"""
return pulumi.get(self, "timeout_in_millis")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The Uniform Resource Identifier (URI) for the integration.
"""
return pulumi.get(self, "uri")
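# Example (sketch, hypothetical values): a plain HTTP proxy integration
# corresponds to a type of HTTP_PROXY with an integration_http_method of ANY
# and a uri pointing at the backend endpoint, while AWS-service integrations
# use AWS or AWS_PROXY together with credentials for the invocation role.
# These enum literals are assumptions about MethodIntegrationType, not values
# read from this file.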
@pulumi.output_type
class MethodIntegrationResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "responseParameters":
suggest = "response_parameters"
elif key == "responseTemplates":
suggest = "response_templates"
elif key == "selectionPattern":
suggest = "selection_pattern"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegrationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
content_handling: Optional['MethodIntegrationResponseContentHandling'] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
selection_pattern: Optional[str] = None):
"""
:param str status_code: The status code that API Gateway uses to map the integration response to a MethodResponse status code.
:param 'MethodIntegrationResponseContentHandling' content_handling: Specifies how to handle request payload content type conversions.
:param Any response_parameters: The response parameters from the backend response that API Gateway sends to the method response.
:param Any response_templates: The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
:param str selection_pattern: A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
pulumi.set(__self__, "status_code", status_code)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
if response_templates is not None:
pulumi.set(__self__, "response_templates", response_templates)
if selection_pattern is not None:
pulumi.set(__self__, "selection_pattern", selection_pattern)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The status code that API Gateway uses to map the integration response to a MethodResponse status code.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationResponseContentHandling']:
"""
Specifies how to handle request payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
The response parameters from the backend response that API Gateway sends to the method response.
"""
return pulumi.get(self, "response_parameters")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[Any]:
"""
The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
"""
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="selectionPattern")
def selection_pattern(self) -> Optional[str]:
"""
A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
return pulumi.get(self, "selection_pattern")
@pulumi.output_type
class MethodResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "responseModels":
suggest = "response_models"
elif key == "responseParameters":
suggest = "response_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
response_models: Optional[Any] = None,
response_parameters: Optional[Any] = None):
"""
:param str status_code: The method response's status code, which you map to an IntegrationResponse.
:param Any response_models: The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
:param Any response_parameters: Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
pulumi.set(__self__, "status_code", status_code)
if response_models is not None:
pulumi.set(__self__, "response_models", response_models)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The method response's status code, which you map to an IntegrationResponse.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="responseModels")
def response_models(self) -> Optional[Any]:
"""
The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
"""
return pulumi.get(self, "response_models")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
return pulumi.get(self, "response_parameters")
@pulumi.output_type
class RestApiEndpointConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "vpcEndpointIds":
suggest = "vpc_endpoint_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiEndpointConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
types: Optional[Sequence[str]] = None,
vpc_endpoint_ids: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
if vpc_endpoint_ids is not None:
pulumi.set(__self__, "vpc_endpoint_ids", vpc_endpoint_ids)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@property
@pulumi.getter(name="vpcEndpointIds")
def vpc_endpoint_ids(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "vpc_endpoint_ids")
@pulumi.output_type
class RestApiS3Location(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eTag":
suggest = "e_tag"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiS3Location. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiS3Location.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiS3Location.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket: Optional[str] = None,
e_tag: Optional[str] = None,
key: Optional[str] = None,
version: Optional[str] = None):
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if key is not None:
pulumi.set(__self__, "key", key)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> Optional[str]:
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def version(self) -> Optional[str]:
return pulumi.get(self, "version")
@pulumi.output_type
class RestApiTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class StageAccessLogSetting(dict):
"""
Specifies settings for logging access in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
Specifies settings for logging access in this stage.
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
:param str format: A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
return pulumi.get(self, "format")
@pulumi.output_type
class StageCanarySetting(dict):
"""
Specifies settings for the canary deployment in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "deploymentId":
suggest = "deployment_id"
elif key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
deployment_id: Optional[str] = None,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
Specifies settings for the canary deployment in this stage.
:param str deployment_id: The identifier of the deployment that the stage points to.
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if deployment_id is not None:
pulumi.set(__self__, "deployment_id", deployment_id)
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="deploymentId")
def deployment_id(self) -> Optional[str]:
"""
The identifier of the deployment that the stage points to.
"""
return pulumi.get(self, "deployment_id")
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class StageMethodSetting(dict):
"""
Configures settings for all methods in a stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
Configures settings for all methods in a stage.
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class StageTag(dict):
"""
Identify and categorize resources.
"""
def __init__(__self__, *,
key: str,
value: str):
"""
Identify and categorize resources.
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanApiStage(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "apiId":
suggest = "api_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanApiStage. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
api_id: Optional[str] = None,
stage: Optional[str] = None,
throttle: Optional[Any] = None):
"""
:param str api_id: The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
:param str stage: The name of the stage to associate with the usage plan.
:param Any throttle: Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
if api_id is not None:
pulumi.set(__self__, "api_id", api_id)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if throttle is not None:
pulumi.set(__self__, "throttle", throttle)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
"""
The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter
def stage(self) -> Optional[str]:
"""
The name of the stage to associate with the usage plan.
"""
return pulumi.get(self, "stage")
@property
@pulumi.getter
def throttle(self) -> Optional[Any]:
"""
Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
return pulumi.get(self, "throttle")
@pulumi.output_type
class UsagePlanQuotaSettings(dict):
def __init__(__self__, *,
limit: Optional[int] = None,
offset: Optional[int] = None,
period: Optional[str] = None):
"""
:param int limit: The maximum number of requests that users can make within the specified time period.
:param int offset: For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
:param str period: The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
if limit is not None:
pulumi.set(__self__, "limit", limit)
if offset is not None:
pulumi.set(__self__, "offset", offset)
if period is not None:
pulumi.set(__self__, "period", period)
@property
@pulumi.getter
def limit(self) -> Optional[int]:
"""
The maximum number of requests that users can make within the specified time period.
"""
return pulumi.get(self, "limit")
@property
@pulumi.getter
def offset(self) -> Optional[int]:
"""
For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
"""
return pulumi.get(self, "offset")
@property
@pulumi.getter
def period(self) -> Optional[str]:
"""
The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
return pulumi.get(self, "period")
@pulumi.output_type
class UsagePlanTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanThrottleSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "burstLimit":
suggest = "burst_limit"
elif key == "rateLimit":
suggest = "rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanThrottleSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
burst_limit: Optional[int] = None,
rate_limit: Optional[float] = None):
"""
:param int burst_limit: The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
:param float rate_limit: The API request steady-state rate limit (average requests per second over an extended period of time).
"""
if burst_limit is not None:
pulumi.set(__self__, "burst_limit", burst_limit)
if rate_limit is not None:
pulumi.set(__self__, "rate_limit", rate_limit)
@property
@pulumi.getter(name="burstLimit")
def burst_limit(self) -> Optional[int]:
"""
The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
"""
return pulumi.get(self, "burst_limit")
@property
@pulumi.getter(name="rateLimit")
def rate_limit(self) -> Optional[float]:
"""
The API request steady-state rate limit (average requests per second over an extended period of time).
"""
return pulumi.get(self, "rate_limit")
@pulumi.output_type
class VpcLinkTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
Access the value via the \'{suggest}\' property getter instead."\n )\n', (13942, 14068), False, 'import pulumi\n'), ((16460, 16526), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_data_encrypted"""', 'cache_data_encrypted'], {}), "(__self__, 'cache_data_encrypted', cache_data_encrypted)\n", (16470, 16526), False, 'import pulumi\n'), ((16584, 16650), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_ttl_in_seconds"""', 'cache_ttl_in_seconds'], {}), "(__self__, 'cache_ttl_in_seconds', cache_ttl_in_seconds)\n", (16594, 16650), False, 'import pulumi\n'), ((16703, 16759), 'pulumi.set', 'pulumi.set', (['__self__', '"""caching_enabled"""', 'caching_enabled'], {}), "(__self__, 'caching_enabled', caching_enabled)\n", (16713, 16759), False, 'import pulumi\n'), ((16815, 16877), 'pulumi.set', 'pulumi.set', (['__self__', '"""data_trace_enabled"""', 'data_trace_enabled'], {}), "(__self__, 'data_trace_enabled', data_trace_enabled)\n", (16825, 16877), False, 'import pulumi\n'), ((16926, 16974), 'pulumi.set', 'pulumi.set', (['__self__', '"""http_method"""', 'http_method'], {}), "(__self__, 'http_method', http_method)\n", (16936, 16974), False, 'import pulumi\n'), ((17025, 17077), 'pulumi.set', 'pulumi.set', (['__self__', '"""logging_level"""', 'logging_level'], {}), "(__self__, 'logging_level', logging_level)\n", (17035, 17077), False, 'import pulumi\n'), ((17130, 17186), 'pulumi.set', 'pulumi.set', (['__self__', '"""metrics_enabled"""', 'metrics_enabled'], {}), "(__self__, 'metrics_enabled', metrics_enabled)\n", (17140, 17186), False, 'import pulumi\n'), ((17237, 17289), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_path"""', 'resource_path'], {}), "(__self__, 'resource_path', resource_path)\n", (17247, 17289), False, 'import pulumi\n'), ((17349, 17419), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttling_burst_limit"""', 'throttling_burst_limit'], {}), "(__self__, 'throttling_burst_limit', throttling_burst_limit)\n", (17359, 17419), False, 'import pulumi\n'), ((17478, 17546), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttling_rate_limit"""', 'throttling_rate_limit'], {}), "(__self__, 'throttling_rate_limit', throttling_rate_limit)\n", (17488, 17546), False, 'import pulumi\n'), ((22019, 22163), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in DeploymentStageDescription. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in DeploymentStageDescription. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (22034, 22163), False, 'import pulumi\n'), ((26349, 26411), 'pulumi.set', 'pulumi.set', (['__self__', '"""access_log_setting"""', 'access_log_setting'], {}), "(__self__, 'access_log_setting', access_log_setting)\n", (26359, 26411), False, 'import pulumi\n'), ((26470, 26538), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_cluster_enabled"""', 'cache_cluster_enabled'], {}), "(__self__, 'cache_cluster_enabled', cache_cluster_enabled)\n", (26480, 26538), False, 'import pulumi\n'), ((26594, 26656), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_cluster_size"""', 'cache_cluster_size'], {}), "(__self__, 'cache_cluster_size', cache_cluster_size)\n", (26604, 26656), False, 'import pulumi\n'), ((26714, 26780), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_data_encrypted"""', 'cache_data_encrypted'], {}), "(__self__, 'cache_data_encrypted', cache_data_encrypted)\n", (26724, 26780), False, 'import pulumi\n'), ((26838, 26904), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_ttl_in_seconds"""', 'cache_ttl_in_seconds'], {}), "(__self__, 'cache_ttl_in_seconds', cache_ttl_in_seconds)\n", (26848, 26904), False, 'import pulumi\n'), ((26957, 27013), 'pulumi.set', 'pulumi.set', (['__self__', '"""caching_enabled"""', 'caching_enabled'], {}), "(__self__, 'caching_enabled', caching_enabled)\n", (26967, 27013), False, 'import pulumi\n'), ((27065, 27119), 'pulumi.set', 'pulumi.set', (['__self__', '"""canary_setting"""', 'canary_setting'], {}), "(__self__, 'canary_setting', canary_setting)\n", (27075, 27119), False, 'import pulumi\n'), ((27178, 27246), 'pulumi.set', 'pulumi.set', (['__self__', '"""client_certificate_id"""', 'client_certificate_id'], {}), "(__self__, 'client_certificate_id', client_certificate_id)\n", (27188, 27246), False, 'import pulumi\n'), ((27302, 27364), 'pulumi.set', 'pulumi.set', (['__self__', '"""data_trace_enabled"""', 'data_trace_enabled'], {}), "(__self__, 'data_trace_enabled', data_trace_enabled)\n", (27312, 27364), False, 'import pulumi\n'), ((27413, 27461), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (27423, 27461), False, 'import pulumi\n'), ((27520, 27588), 'pulumi.set', 'pulumi.set', (['__self__', '"""documentation_version"""', 'documentation_version'], {}), "(__self__, 'documentation_version', documentation_version)\n", (27530, 27588), False, 'import pulumi\n'), ((27639, 27691), 'pulumi.set', 'pulumi.set', (['__self__', '"""logging_level"""', 'logging_level'], {}), "(__self__, 'logging_level', logging_level)\n", (27649, 27691), False, 'import pulumi\n'), ((27744, 27800), 'pulumi.set', 'pulumi.set', (['__self__', '"""method_settings"""', 'method_settings'], {}), "(__self__, 'method_settings', method_settings)\n", (27754, 27800), False, 'import pulumi\n'), ((27853, 27909), 'pulumi.set', 'pulumi.set', (['__self__', '"""metrics_enabled"""', 'metrics_enabled'], {}), "(__self__, 'metrics_enabled', metrics_enabled)\n", (27863, 27909), False, 'import pulumi\n'), ((27951, 27985), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (27961, 27985), False, 'import pulumi\n'), ((28045, 28115), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttling_burst_limit"""', 'throttling_burst_limit'], {}), "(__self__, 'throttling_burst_limit', throttling_burst_limit)\n", (28055, 28115), False, 'import pulumi\n'), ((28174, 28242), 'pulumi.set', 'pulumi.set', (['__self__', 
'"""throttling_rate_limit"""', 'throttling_rate_limit'], {}), "(__self__, 'throttling_rate_limit', throttling_rate_limit)\n", (28184, 28242), False, 'import pulumi\n'), ((28295, 28351), 'pulumi.set', 'pulumi.set', (['__self__', '"""tracing_enabled"""', 'tracing_enabled'], {}), "(__self__, 'tracing_enabled', tracing_enabled)\n", (28305, 28351), False, 'import pulumi\n'), ((28398, 28442), 'pulumi.set', 'pulumi.set', (['__self__', '"""variables"""', 'variables'], {}), "(__self__, 'variables', variables)\n", (28408, 28442), False, 'import pulumi\n'), ((34813, 34956), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in DocumentationPartLocation. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in DocumentationPartLocation. Access the value via the \'{suggest}\' property getter instead."\n )\n', (34828, 34956), False, 'import pulumi\n'), ((35541, 35579), 'pulumi.set', 'pulumi.set', (['__self__', '"""method"""', 'method'], {}), "(__self__, 'method', method)\n", (35551, 35579), False, 'import pulumi\n'), ((35621, 35655), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (35631, 35655), False, 'import pulumi\n'), ((35697, 35731), 'pulumi.set', 'pulumi.set', (['__self__', '"""path"""', 'path'], {}), "(__self__, 'path', path)\n", (35707, 35731), False, 'import pulumi\n'), ((35780, 35828), 'pulumi.set', 'pulumi.set', (['__self__', '"""status_code"""', 'status_code'], {}), "(__self__, 'status_code', status_code)\n", (35790, 35828), False, 'import pulumi\n'), ((35870, 35904), 'pulumi.set', 'pulumi.set', (['__self__', '"""type"""', 'type'], {}), "(__self__, 'type', type)\n", (35880, 35904), False, 'import pulumi\n'), ((36693, 36729), 'pulumi.set', 'pulumi.set', (['__self__', '"""types"""', 'types'], {}), "(__self__, 'types', types)\n", (36703, 36729), False, 'import pulumi\n'), ((37187, 37338), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in DomainNameMutualTlsAuthentication. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in DomainNameMutualTlsAuthentication. Access the value via the \'{suggest}\' property getter instead."\n )\n', (37202, 37338), False, 'import pulumi\n'), ((37827, 37881), 'pulumi.set', 'pulumi.set', (['__self__', '"""truststore_uri"""', 'truststore_uri'], {}), "(__self__, 'truststore_uri', truststore_uri)\n", (37837, 37881), False, 'import pulumi\n'), ((37937, 37999), 'pulumi.set', 'pulumi.set', (['__self__', '"""truststore_version"""', 'truststore_version'], {}), "(__self__, 'truststore_version', truststore_version)\n", (37947, 37999), False, 'import pulumi\n'), ((38528, 38560), 'pulumi.set', 'pulumi.set', (['__self__', '"""key"""', 'key'], {}), "(__self__, 'key', key)\n", (38538, 38560), False, 'import pulumi\n'), ((38603, 38639), 'pulumi.set', 'pulumi.set', (['__self__', '"""value"""', 'value'], {}), "(__self__, 'value', value)\n", (38613, 38639), False, 'import pulumi\n'), ((39935, 40070), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in MethodIntegration. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in MethodIntegration. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (39950, 40070), False, 'import pulumi\n'), ((42999, 43065), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_key_parameters"""', 'cache_key_parameters'], {}), "(__self__, 'cache_key_parameters', cache_key_parameters)\n", (43009, 43065), False, 'import pulumi\n'), ((43118, 43174), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_namespace"""', 'cache_namespace'], {}), "(__self__, 'cache_namespace', cache_namespace)\n", (43128, 43174), False, 'import pulumi\n'), ((43225, 43277), 'pulumi.set', 'pulumi.set', (['__self__', '"""connection_id"""', 'connection_id'], {}), "(__self__, 'connection_id', connection_id)\n", (43235, 43277), False, 'import pulumi\n'), ((43330, 43386), 'pulumi.set', 'pulumi.set', (['__self__', '"""connection_type"""', 'connection_type'], {}), "(__self__, 'connection_type', connection_type)\n", (43340, 43386), False, 'import pulumi\n'), ((43440, 43498), 'pulumi.set', 'pulumi.set', (['__self__', '"""content_handling"""', 'content_handling'], {}), "(__self__, 'content_handling', content_handling)\n", (43450, 43498), False, 'import pulumi\n'), ((43547, 43595), 'pulumi.set', 'pulumi.set', (['__self__', '"""credentials"""', 'credentials'], {}), "(__self__, 'credentials', credentials)\n", (43557, 43595), False, 'import pulumi\n'), ((43656, 43728), 'pulumi.set', 'pulumi.set', (['__self__', '"""integration_http_method"""', 'integration_http_method'], {}), "(__self__, 'integration_http_method', integration_http_method)\n", (43666, 43728), False, 'import pulumi\n'), ((43787, 43855), 'pulumi.set', 'pulumi.set', (['__self__', '"""integration_responses"""', 'integration_responses'], {}), "(__self__, 'integration_responses', integration_responses)\n", (43797, 43855), False, 'import pulumi\n'), ((43913, 43979), 'pulumi.set', 'pulumi.set', (['__self__', '"""passthrough_behavior"""', 'passthrough_behavior'], {}), "(__self__, 'passthrough_behavior', passthrough_behavior)\n", (43923, 43979), False, 'import pulumi\n'), ((44035, 44097), 'pulumi.set', 'pulumi.set', (['__self__', '"""request_parameters"""', 'request_parameters'], {}), "(__self__, 'request_parameters', request_parameters)\n", (44045, 44097), False, 'import pulumi\n'), ((44152, 44212), 'pulumi.set', 'pulumi.set', (['__self__', '"""request_templates"""', 'request_templates'], {}), "(__self__, 'request_templates', request_templates)\n", (44162, 44212), False, 'import pulumi\n'), ((44267, 44327), 'pulumi.set', 'pulumi.set', (['__self__', '"""timeout_in_millis"""', 'timeout_in_millis'], {}), "(__self__, 'timeout_in_millis', timeout_in_millis)\n", (44277, 44327), False, 'import pulumi\n'), ((44368, 44400), 'pulumi.set', 'pulumi.set', (['__self__', '"""uri"""', 'uri'], {}), "(__self__, 'uri', uri)\n", (44378, 44400), False, 'import pulumi\n'), ((48631, 48774), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in MethodIntegrationResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in MethodIntegrationResponse. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (48646, 48774), False, 'import pulumi\n'), ((50335, 50393), 'pulumi.set', 'pulumi.set', (['__self__', '"""content_handling"""', 'content_handling'], {}), "(__self__, 'content_handling', content_handling)\n", (50345, 50393), False, 'import pulumi\n'), ((50450, 50514), 'pulumi.set', 'pulumi.set', (['__self__', '"""response_parameters"""', 'response_parameters'], {}), "(__self__, 'response_parameters', response_parameters)\n", (50460, 50514), False, 'import pulumi\n'), ((50570, 50632), 'pulumi.set', 'pulumi.set', (['__self__', '"""response_templates"""', 'response_templates'], {}), "(__self__, 'response_templates', response_templates)\n", (50580, 50632), False, 'import pulumi\n'), ((50687, 50747), 'pulumi.set', 'pulumi.set', (['__self__', '"""selection_pattern"""', 'selection_pattern'], {}), "(__self__, 'selection_pattern', selection_pattern)\n", (50697, 50747), False, 'import pulumi\n'), ((52710, 52842), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in MethodResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in MethodResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (52725, 52842), False, 'import pulumi\n'), ((53997, 54053), 'pulumi.set', 'pulumi.set', (['__self__', '"""response_models"""', 'response_models'], {}), "(__self__, 'response_models', response_models)\n", (54007, 54053), False, 'import pulumi\n'), ((54110, 54174), 'pulumi.set', 'pulumi.set', (['__self__', '"""response_parameters"""', 'response_parameters'], {}), "(__self__, 'response_parameters', response_parameters)\n", (54120, 54174), False, 'import pulumi\n'), ((55454, 55600), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in RestApiEndpointConfiguration. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in RestApiEndpointConfiguration. Access the value via the \'{suggest}\' property getter instead."\n )\n', (55469, 55600), False, 'import pulumi\n'), ((56079, 56115), 'pulumi.set', 'pulumi.set', (['__self__', '"""types"""', 'types'], {}), "(__self__, 'types', types)\n", (56089, 56115), False, 'import pulumi\n'), ((56169, 56227), 'pulumi.set', 'pulumi.set', (['__self__', '"""vpc_endpoint_ids"""', 'vpc_endpoint_ids'], {}), "(__self__, 'vpc_endpoint_ids', vpc_endpoint_ids)\n", (56179, 56227), False, 'import pulumi\n'), ((56735, 56870), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in RestApiS3Location. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in RestApiS3Location. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (56750, 56870), False, 'import pulumi\n'), ((57390, 57428), 'pulumi.set', 'pulumi.set', (['__self__', '"""bucket"""', 'bucket'], {}), "(__self__, 'bucket', bucket)\n", (57400, 57428), False, 'import pulumi\n'), ((57471, 57507), 'pulumi.set', 'pulumi.set', (['__self__', '"""e_tag"""', 'e_tag'], {}), "(__self__, 'e_tag', e_tag)\n", (57481, 57507), False, 'import pulumi\n'), ((57548, 57580), 'pulumi.set', 'pulumi.set', (['__self__', '"""key"""', 'key'], {}), "(__self__, 'key', key)\n", (57558, 57580), False, 'import pulumi\n'), ((57625, 57665), 'pulumi.set', 'pulumi.set', (['__self__', '"""version"""', 'version'], {}), "(__self__, 'version', version)\n", (57635, 57665), False, 'import pulumi\n'), ((58867, 59006), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in StageAccessLogSetting. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in StageAccessLogSetting. Access the value via the \'{suggest}\' property getter instead."\n )\n', (58882, 59006), False, 'import pulumi\n'), ((60233, 60289), 'pulumi.set', 'pulumi.set', (['__self__', '"""destination_arn"""', 'destination_arn'], {}), "(__self__, 'destination_arn', destination_arn)\n", (60243, 60289), False, 'import pulumi\n'), ((60333, 60371), 'pulumi.set', 'pulumi.set', (['__self__', '"""format"""', 'format'], {}), "(__self__, 'format', format)\n", (60343, 60371), False, 'import pulumi\n'), ((61894, 62030), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in StageCanarySetting. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in StageCanarySetting. Access the value via the \'{suggest}\' property getter instead."\n )\n', (61909, 62030), False, 'import pulumi\n'), ((63266, 63318), 'pulumi.set', 'pulumi.set', (['__self__', '"""deployment_id"""', 'deployment_id'], {}), "(__self__, 'deployment_id', deployment_id)\n", (63276, 63318), False, 'import pulumi\n'), ((63371, 63427), 'pulumi.set', 'pulumi.set', (['__self__', '"""percent_traffic"""', 'percent_traffic'], {}), "(__self__, 'percent_traffic', percent_traffic)\n", (63381, 63427), False, 'import pulumi\n'), ((63489, 63563), 'pulumi.set', 'pulumi.set', (['__self__', '"""stage_variable_overrides"""', 'stage_variable_overrides'], {}), "(__self__, 'stage_variable_overrides', stage_variable_overrides)\n", (63499, 63563), False, 'import pulumi\n'), ((63616, 63672), 'pulumi.set', 'pulumi.set', (['__self__', '"""use_stage_cache"""', 'use_stage_cache'], {}), "(__self__, 'use_stage_cache', use_stage_cache)\n", (63626, 63672), False, 'import pulumi\n'), ((65896, 66032), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in StageMethodSetting. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in StageMethodSetting. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (65911, 66032), False, 'import pulumi\n'), ((68870, 68936), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_data_encrypted"""', 'cache_data_encrypted'], {}), "(__self__, 'cache_data_encrypted', cache_data_encrypted)\n", (68880, 68936), False, 'import pulumi\n'), ((68994, 69060), 'pulumi.set', 'pulumi.set', (['__self__', '"""cache_ttl_in_seconds"""', 'cache_ttl_in_seconds'], {}), "(__self__, 'cache_ttl_in_seconds', cache_ttl_in_seconds)\n", (69004, 69060), False, 'import pulumi\n'), ((69113, 69169), 'pulumi.set', 'pulumi.set', (['__self__', '"""caching_enabled"""', 'caching_enabled'], {}), "(__self__, 'caching_enabled', caching_enabled)\n", (69123, 69169), False, 'import pulumi\n'), ((69225, 69287), 'pulumi.set', 'pulumi.set', (['__self__', '"""data_trace_enabled"""', 'data_trace_enabled'], {}), "(__self__, 'data_trace_enabled', data_trace_enabled)\n", (69235, 69287), False, 'import pulumi\n'), ((69336, 69384), 'pulumi.set', 'pulumi.set', (['__self__', '"""http_method"""', 'http_method'], {}), "(__self__, 'http_method', http_method)\n", (69346, 69384), False, 'import pulumi\n'), ((69435, 69487), 'pulumi.set', 'pulumi.set', (['__self__', '"""logging_level"""', 'logging_level'], {}), "(__self__, 'logging_level', logging_level)\n", (69445, 69487), False, 'import pulumi\n'), ((69540, 69596), 'pulumi.set', 'pulumi.set', (['__self__', '"""metrics_enabled"""', 'metrics_enabled'], {}), "(__self__, 'metrics_enabled', metrics_enabled)\n", (69550, 69596), False, 'import pulumi\n'), ((69647, 69699), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_path"""', 'resource_path'], {}), "(__self__, 'resource_path', resource_path)\n", (69657, 69699), False, 'import pulumi\n'), ((69759, 69829), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttling_burst_limit"""', 'throttling_burst_limit'], {}), "(__self__, 'throttling_burst_limit', throttling_burst_limit)\n", (69769, 69829), False, 'import pulumi\n'), ((69888, 69956), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttling_rate_limit"""', 'throttling_rate_limit'], {}), "(__self__, 'throttling_rate_limit', throttling_rate_limit)\n", (69898, 69956), False, 'import pulumi\n'), ((74715, 74850), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in UsagePlanApiStage. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in UsagePlanApiStage. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (74730, 74850), False, 'import pulumi\n'), ((75818, 75856), 'pulumi.set', 'pulumi.set', (['__self__', '"""api_id"""', 'api_id'], {}), "(__self__, 'api_id', api_id)\n", (75828, 75856), False, 'import pulumi\n'), ((75899, 75935), 'pulumi.set', 'pulumi.set', (['__self__', '"""stage"""', 'stage'], {}), "(__self__, 'stage', stage)\n", (75909, 75935), False, 'import pulumi\n'), ((75981, 76023), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttle"""', 'throttle'], {}), "(__self__, 'throttle', throttle)\n", (75991, 76023), False, 'import pulumi\n'), ((77790, 77826), 'pulumi.set', 'pulumi.set', (['__self__', '"""limit"""', 'limit'], {}), "(__self__, 'limit', limit)\n", (77800, 77826), False, 'import pulumi\n'), ((77870, 77908), 'pulumi.set', 'pulumi.set', (['__self__', '"""offset"""', 'offset'], {}), "(__self__, 'offset', offset)\n", (77880, 77908), False, 'import pulumi\n'), ((77952, 77990), 'pulumi.set', 'pulumi.set', (['__self__', '"""period"""', 'period'], {}), "(__self__, 'period', period)\n", (77962, 77990), False, 'import pulumi\n'), ((80816, 80959), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in UsagePlanThrottleSettings. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in UsagePlanThrottleSettings. Access the value via the \'{suggest}\' property getter instead."\n )\n', (80831, 80959), False, 'import pulumi\n'), ((81801, 81849), 'pulumi.set', 'pulumi.set', (['__self__', '"""burst_limit"""', 'burst_limit'], {}), "(__self__, 'burst_limit', burst_limit)\n", (81811, 81849), False, 'import pulumi\n'), ((81897, 81943), 'pulumi.set', 'pulumi.set', (['__self__', '"""rate_limit"""', 'rate_limit'], {}), "(__self__, 'rate_limit', rate_limit)\n", (81907, 81943), False, 'import pulumi\n')]
|
# ------------------------------------------------------------------------------
# Project: legohdl
# Script: workspace.py
# Author: <NAME>
# Description:
# The Workspace class. A Workspace object has a path and a list of available
# vendors. This is what the user keeps their work's scope within for a given
# "organization".
# ------------------------------------------------------------------------------
import os, shutil, glob
import logging as log
from datetime import datetime
from .vendor import Vendor
from .apparatus import Apparatus as apt
from .cfg import Cfg, Section, Key
from .map import Map
from .git import Git
from .block import Block
class Workspace:
#store all workspaces in dictionary
Jar = Map()
#active-workspace is a workspace object
_ActiveWorkspace = None
DIR = apt.fs(apt.HIDDEN+"workspaces/")
LOG_FILE = "refresh.log"
MIN_RATE = -1
MAX_RATE = 1440
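    # Hedged note on refresh-rate semantics (see autoRefresh() below):
    #   rate == 0             -> never auto-refresh
    #   rate <= MIN_RATE (-1) -> refresh on every call
    #   otherwise the 24-hour day is divided into 'rate' evenly spaced checkpoints,
    #   so MAX_RATE = 1440 corresponds to (at most) one checkpoint per minute.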
def __init__(self, name, path, vendors=[], ask=True):
'''
Create a workspace instance.
Parameters:
name (str): the identity for the workspace
path (str): the local path where blocks will be looked for
vendors ([str]): the list of vendors that are tied to this workspace
ask (bool): will ask user if wishing to enter workspace path
Returns:
None
'''
self._name = name
#do not create workspace if the name is already taken
if(self.getName().lower() in self.Jar.keys()):
log.error("Skipping workspace "+self.getName()+" due to duplicate naming conflict.")
return
#set the path
self._path = ''
self.setPath(path)
#do not create workspace if the path is empty
if(self.getPath() == ''):
if(ask == False):
log.error("Skipping workspace "+self.getName()+" due to empty local path.")
return
else:
#keep asking to set path until one is decided/input
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
while(self.setPath(path) == False):
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
self._ws_dir = apt.fs(self.DIR+self.getName()+"/")
#ensure all workspace hidden directories exist
if(os.path.isdir(self.getDir()) == False):
log.info("Setting up workspace "+self.getName()+"...")
os.makedirs(self.getDir(), exist_ok=True)
#create workspace's cache where installed blocks will be stored
os.makedirs(self.getDir()+"cache", exist_ok=True)
#create the refresh log if DNE
if(os.path.isfile(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
self._vendors = []
#find all vendor objects by name and store in list
for vndr in vendors:
if(vndr.lower() in Vendor.Jar.keys()):
self._vendors += [Vendor.Jar[vndr]]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
pass
#add to class Jar
self.Jar[self.getName()] = self
pass
def setPath(self, p):
'''
Set the workspace's local path to a new value. Will ask user if okay
to create the path if DNE.
Parameters:
p (str): the path string
Returns:
(bool): true if successfully changed the path attribute
'''
#cannot set an empty path
if(p == '' or p == None):
log.info("Local path for workspace "+self.getName()+" cannot be empty.")
return False
p = apt.fs(p)
#create the workspace's local path if it does not exist
if(os.path.exists(p) == False):
#prompt user
carry_on = apt.confirmation("Workspace "+self.getName()+"'s local path does not exist. Create "+p+"?")
if(carry_on):
os.makedirs(p, exist_ok=True)
self._path = p
return True
else:
log.info("Did not set "+p+" as local path.")
return False
else:
self._path = p
return True
def setName(self, n):
'''
Change the workspace's name if the name is not already taken.
Parameters:
n (str): new name for workspace
Returns:
(bool): true if name successfully altered and updated in Jar
'''
if(n == '' or n == None):
log.error("Workspace name cannot be empty.")
return False
if(n.lower() in self.Jar.keys()):
log.error("Cannot rename workspace to "+n+" due to name conflict.")
return False
else:
#remove old name from Jar
if(self.getName().lower() in self.Jar.keys()):
del self.Jar[self.getName()]
#rename hidden directory if exists
new_dir = apt.fs(self.DIR+n+"/")
if(hasattr(self, "_ws_dir")):
os.rename(self.getDir(), new_dir)
#set the hidden workspace directory
self._ws_dir = new_dir
#change to new name
self._name = n
#update the Jar
self.Jar[self.getName()] = self
return True
def remove(self):
'''
Removes the workspace object from the Jar and its hidden directory.
Parameters:
None
Returns:
None
'''
log.info("Removing workspace "+self.getName()+"...")
#delete the hidden workspace directory
shutil.rmtree(self.getDir(), onerror=apt.rmReadOnly)
#remove from class Jar
del self.Jar[self.getName()]
#remove from cfg file
apt.CFG.remove('workspace.'+self.getName())
apt.CFG.write()
pass
def linkVendor(self, vndr):
'''
Attempts to add a vendor to the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to add
Returns:
(bool): true if the vendor list was modified (successful add)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
return False
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
return True
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
return False
def setVendors(self, vndrs):
'''
Overrides entire _vendors attr by setting it equal to 'vndrs'.
Parameters:
vndrs ([str]): list of vendors
Returns:
(bool): success if all vendors listed were added
'''
#reset vendors list
self._vendors = []
success = True
#iterate through every given vendor
for vndr in vndrs:
#verify the vendor exists
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
#check if the vendor has already been linked
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
#link the vendor to this workspace
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
                success = False
return success
def unlinkVendor(self, vndr):
'''
Attempts to remove a vendor from the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to remove
Returns:
(bool): true if the vendor list was modified (successful remove)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj not in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already unlinked from the workspace.")
return False
else:
log.info("Unlinking vendor "+vndr_obj.getName()+" from the workspace...")
self._vendors.remove(vndr_obj)
return True
else:
log.warning("Could not unlink unknown vendor "+vndr+" from "+self.getName()+".")
return False
def loadBlocks(self, id_dsgns=False):
'''
Loads all blocks found at all levels: dnld (workspace path), instl (workspace
cache), avail (workspace vendors).
When id_dsgns is True, this method uses the 'multi-develop' setting to
determine which level has precedence in loadHDL().
'multi-develop' set to False will only loadHDL() from cache. 'multi-develop'
set to True will first try to loadHDL() from dnld, and if DNE, then try
to loadHDL() from block's cache.
Either way, if inside a current block, that block's HDL will be loaded over
its cache.
Dynamically creates _visible_blocks ([Block]) attribute to be reused.
Parameters:
id_dsgns (bool): identify design units (loadHDL) from blocks
Returns:
_visible_blocks ([Block]): list of all block objects in cache or path
'''
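        # Precedence sketch (illustrative restatement of the docstring above):
        #   multi-develop off : a block's HDL is identified from its installed (cache) copy
        #   multi-develop on  : the downloaded copy is preferred, falling back to the cache
        #   current block     : always identified from its downloaded (local) copy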
if(hasattr(self, "_visible_blocks")):
return self._visible_blocks
self._visible_blocks = []
#read the setting for multi-develop
mult_dev = apt.getMultiDevelop()
#1. Search for downloaded blocks
#glob on the local workspace path
#print("Local Blocks on:",self.getPath())
marker_files = glob.glob(self.getPath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found downloads
for mf in marker_files:
b = Block(mf, self, Block.Level.DNLD)
#if the user is within a current block, load the HDL from its DNLD level (not INSTL)
if(mult_dev == True or Block.getCurrent(bypass=True) == b):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#2. Search for installed blocks
#glob on the workspace cache path
#print("Cache Blocks on:",self.getCachePath())
marker_files = glob.glob(self.getCachePath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found installations
for mf in marker_files:
#the block must also have a valid git repository at its root
root,_ = os.path.split(mf)
#note: only the head installation has the git repository
if(Git.isValidRepo(root, remote=False)):
b = Block(mf, self, Block.Level.INSTL)
#get the spot for this block's download
dnld_b = Block.Inventory[b.M()][b.L()][b.N()][Block.Level.DNLD.value]
#add this block if a download DNE or the dnld does not match current when
#not in multi-develop mode
if(dnld_b == None or (mult_dev == False and Block.getCurrent(bypass=True) != dnld_b)):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#3. Search for available blocks
#glob on each vendor path
marker_files = []
#find all marker files in each of the workspace's vendors
for vndr in self.getVendors():
marker_files += glob.glob(vndr.getVendorDir()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found availables
for mf in marker_files:
b = Block(mf, self, Block.Level.AVAIL)
#do not add this block to list of visible blocks because it has no
#units associated with it, only metadata
pass
#4. ID all specific version blocks if identifying designs (except current block)
spec_vers_blocks = []
for vis_block in self._visible_blocks:
if(vis_block == Block.getCurrent(bypass=True)):
continue
for spec_block in vis_block.getInstalls().values():
spec_vers_blocks += [spec_block]
if(id_dsgns):
spec_block.loadHDL()
pass
pass
self._visible_blocks += spec_vers_blocks
return self._visible_blocks
def shortcut(self, title, req_entity=False, visibility=True, ref_current=True):
'''
Returns the Block from a shortened title. If title is empty and
'ref_current' is set, then tries to refer to the current block.
        Sometimes an entity is required for certain commands; in that case a
        lone term given is assumed to be the entity (instead of the block name).
Parameters:
title (str): partial or full M.L.N with optional E attached
req_entity (bool): determine if only thing given then it is an entity
visibility (bool): determine if to only look for visible blocks
ref_current (bool): determine if to try to assign empty title to current block
Returns:
(Block): the identified block from the shortened title
'''
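        # Illustrative parse (assuming apt.ENTITY_DELIM is the ':' character):
        #   "lib.blockname:myentity" -> sects = ['', 'lib', 'blockname'], entity = 'myentity'
        #   with req_entity=True, a lone "myentity" is taken to be the entity name alone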
if(title == None):
title = ''
#split into pieces
pieces = title.split('.')
sects = ['']*3
diff = 3 - len(pieces)
for i in range(len(pieces)-1, -1, -1):
sects[diff+i] = pieces[i]
#check final piece if it has an entity attached
entity = ''
if(sects[2].count(apt.ENTITY_DELIM)):
i = sects[2].find(apt.ENTITY_DELIM)
entity = sects[2][i+1:]
sects[2] = sects[2][:i]
#assume only name given is actually the entity
elif(req_entity):
entity = sects[2]
sects[2] = ''
# [!] load all necessary blocks before searching
blocks = self.loadBlocks()
#use all blocks when visibility is off :todo: is this design intent?
if(visibility == False):
blocks = Block.getAllBlocks()
#track list of possible blocks as moving up the chain
possible_blocks = []
#search for an entity
if(len(entity)):
#collect list of all entities
reg = Map()
reg[entity] = []
#iterate through every block and create a mapping for their entity names
for bk in blocks:
#get the entity names from this block
es = bk.loadHDL(returnnames=True)
#print(es)
#create mappings of entity names to their block owners
for e in es:
if(e.lower() not in reg.keys()):
reg[e] = []
reg[e] += [bk]
#see how many blocks were fit to entity name's mapping
num_blocks = len(reg[entity])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[entity][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple possible solutions (cannot infer)
elif(num_blocks > 1):
possible_blocks = reg[entity]
#only was given an entity name, algorithm cannot solve requested entity
if(len(sects[2]) == 0):
log.info("Ambiguous unit; conflicts with")
#display the units/titles that conflict with input
for bk in reg[entity]:
print('\t '+bk.getFull()+":"+entity)
print()
exit()
#no blocks matched the entity name being passed
else:
return None
pass
#search through all block names
for start in range(len(sects)-1, -1, -1):
term = sects[start]
#exit loop if next term is empty
if(len(term) == 0):
break
reg = Map()
reg[term] = []
for bk in blocks:
t = bk.getTitle(index=start, dist=0)[0]
#store the block under the given section name
if(t.lower() not in reg.keys()):
reg[t] = []
reg[t] += [bk]
#count how many blocks occupy this same name
num_blocks = len(reg[term])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[term][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple solutions (cannot infer on this step)
elif(num_blocks > 1):
#compare with blocks for a match and dwindle down choices
next_blocks = []
for bk in reg[term]:
if(bk in possible_blocks or (start == len(sects)-1 and entity == '')):
next_blocks += [bk]
#dwindled down to a single block
if(len(next_blocks) == 1):
#print("FOUND:",next_blocks[0].getTitle(index=2, dist=2))
return next_blocks[0]
#carry on to using next title section
if(len(sects[start-1])):
#continue to using next term
possible_blocks = next_blocks
continue
else:
#ran out of guesses...report the conflicting titles/units
if(req_entity):
log.info("Ambiguous unit; conflicts with")
else:
log.info("Ambiguous title; conflicts with")
for bk in reg[term]:
if(req_entity):
print('\t '+bk.getFull()+":"+entity)
else:
print('\t '+bk.getFull())
exit(print())
pass
#using the current block if title is empty string
if(ref_current and (title == '' or title == None)):
return Block.getCurrent()
#return None if all attempts have failed and not returned anything yet
return None
def decodeUnits(self):
'''
Decodes every available unit to get the complete graphing data structure.
Parameters:
None
Returns:
None
'''
blocks = self.loadBlocks()
#print(blocks)
log.info("Collecting all unit data...")
for b in blocks:
us = b.loadHDL()
for u in us.values():
u.getLanguageFile().decode(u, recursive=False)
log.info("done.")
pass
def listBlocks(self, title, alpha=False, instl=False, dnld=False, avail=False):
'''
Print a formatted table of the available blocks.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (L.N.V)
instl (bool): determine if to capture only blocks that are installed
dnld (bool): determine if to capture only blocks that are downloaded
avail (bool): determine if to capture blocks available from vendor
Returns:
None
'''
#[!] load the necessary blocks
self.loadBlocks()
#collect if multi-develop is on
mult_dev = apt.getMultiDevelop()
#split the title into parts
M,L,N,_ = Block.snapTitle(title, inc_ent=False)
#get all blocks from the catalog
#store each block's text line in a map to sort keys for alpha flag
catalog = Map()
#iterate through every vendor
for vndr_k,vndrs in Block.Inventory.items():
if(vndr_k.startswith(M.lower()) == False):
continue
#iterate through every library
for lib_k,libs in vndrs.items():
if(lib_k.startswith(L.lower()) == False):
continue
#iterate through every block
for blk_k,lvls in libs.items():
if(blk_k.startswith(N.lower()) == False):
continue
downloaded = installed = available = ' '
disp_d = disp_i = disp_a = False
#if none were set on command-line default to display everything
if((dnld or instl or avail) == False):
dnld = instl = avail = True
#with each lower level, overwrite the block object to print
if(lvls[Block.Level.AVAIL.value] != None):
bk = lvls[Block.Level.AVAIL.value]
available = 'A'
disp_a = True
if(lvls[Block.Level.INSTL.value] != None):
bk = lvls[Block.Level.INSTL.value]
installed = 'I'
disp_i = True
if(lvls[Block.Level.DNLD.value] != None):
if(dnld):
bk = lvls[Block.Level.DNLD.value]
downloaded = 'D'
# if(mult_dev):
# downloaded = 'D'
# installed = installed.lower()
disp_d = True
#one condition pair must be true to display the block
if((disp_a and avail) or (disp_i and instl) or (disp_d and dnld)):
pass
else:
continue
#character to separate different status bits
spacer = ' '
#format the status column's data
sts = downloaded + spacer + installed + spacer + available
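                    # e.g. (illustrative) sts = "D I A" -> downloaded, installed, and available;
                    # a trailing '^' appended below flags that a newer version exists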
#leave version empty if its been unreleased
v = '' if(bk.getVersion() == '0.0.0') else bk.getVersion()
#check if can be updated
#prioritize installation level for checking updates
instllr = bk.getLvlBlock(Block.Level.INSTL)
cmp_v = instllr.getVersion() if(instllr != None and mult_dev == False) else bk.getVersion()
                    #a '^' is an update symbol indicating the latest referenced version (dnld or instl) is not actually the latest version found
if(Block.cmpVer(bk.getHighestAvailVersion(), cmp_v) != cmp_v):
sts = sts+' ^'
v = cmp_v
#format the data to print to the console and store in catalog (L.N.V str format)
catalog[bk.L()+'.'+bk.N()+'.'+bk.M()] = '{:<16}'.format(bk.L())+' '+'{:<20}'.format(bk.N())+' '+'{:<8}'.format(sts)+' '+'{:<10}'.format(v)+' '+'{:<16}'.format(bk.M())
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print(keys)
print('{:<16}'.format("Library"),'{:<20}'.format("Block"),'{:<8}'.format("Status"+("*"*int(mult_dev))),'{:<10}'.format("Version"),'{:<16}'.format("Vendor"))
print("-"*16+" "+"-"*20+" "+"-"*8+" "+"-"*10+" "+"-"*16)
#iterate through catalog and print each textline
for k in keys:
print(catalog[k])
pass
def listUnits(self, title, alpha=False, usable=False, ignore_tb=False):
'''
Print a formatted table of all the design units.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (E.V.L.N)
usable (bool): determine if to display units that can be used
ignore_tb (bool): determine if to ignore testbench files
Returns:
None
'''
#[!] load blocks into inventory
visible = self.loadBlocks()
#:todo: add flag to print 'variations' of an entity/unit (what specific version names exist)
#todo: print status of the unit and which status is usable (D or I)
M,L,N,V,E = Block.snapTitle(title, inc_ent=True)
#print(M,L,N,V,E)
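        # Hedged sketch: M/L/N/E act as vendor, library, block-name, and entity prefixes
        # for the filters below; V (version) is parsed here but not used for filtering.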
#store each entity's print line in map (key = <unit>:<block-id>) to ensure uniqueness
catalog = Map()
for bk in Block.getAllBlocks():
#for lvl in Block.Inventory[bk.M()][bk.L()][bk.N()]:
block_title = bk.getFull(inc_ver=False)
if(bk.M().lower().startswith(M.lower()) == False):
continue
if(bk.L().lower().startswith(L.lower()) == False):
continue
if(bk.N().lower().startswith(N.lower()) == False):
continue
#collect all units
if(apt.getMultiDevelop() == False):
if(bk.getLvlBlock(Block.Level.INSTL) != None):
bk = bk.getLvlBlock(Block.Level.INSTL)
#skip this block if only displaying usable units and multi-develop off
elif(usable):
continue
units = bk.loadHDL(returnnames=False).values()
for u in units:
if(len(E) and u.E().lower().startswith(E.lower()) == False):
continue
if(ignore_tb and u.isTb()):
continue
#format if unit is visible/usable
vis = '-'
if(bk in visible):
vis = 'yes'
#format design unit name according to its natural language
dsgn = u.getDesign().name.lower()
if(u.getLang() == u.Language.VERILOG and dsgn == 'entity'):
dsgn = 'module'
catalog[u.E()+':'+block_title] = '{:<22}'.format(u.E())+' '+'{:<7}'.format(vis)+' '+'{:<10}'.format(dsgn)+' '+'{:<38}'.format(block_title)
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print to console
print('{:<22}'.format("Unit"),'{:<7}'.format("Usable"),'{:<10}'.format("Type"),'{:<38}'.format("Block"))
print("-"*22+" "+"-"*7+" "+"-"*10+" "+"-"*38)
for k in keys:
print(catalog[k])
pass
pass
@classmethod
def tidy(cls):
'''
Removes any stale hidden workspace directories that aren't mapped to a
workspace found in the class Jar container.
Parameters:
None
Returns:
None
'''
#list all hidden workspace directories
hidden_dirs = os.listdir(cls.DIR)
for hd in hidden_dirs:
if(hd.lower() not in cls.Jar.keys()):
log.info("Removing stale workspace data for "+hd+"...")
if(os.path.isdir(cls.DIR+hd)):
shutil.rmtree(cls.DIR+hd, onerror=apt.rmReadOnly)
#remove all files from workspace directory
else:
os.remove(cls.DIR+hd)
pass
def autoRefresh(self, rate):
'''
Automatically refreshes all vendors for the given workspace. Reads its
log file to determine if past next interval for refresh.
Parameters:
rate (int): how often to ask a refresh within a 24-hour period
Returns:
None
'''
def timeToFloat(prt):
'''
Converts a time object into a float type.
Parameters:
prt (datetime): iso format of current time
Returns:
(float): 0.00 (inclusive) - 24.00 (exclusive)
'''
time_stamp = str(prt).split(' ')[1]
time_sects = time_stamp.split(':')
hrs = int(time_sects[0])
#convert to 'hours'.'minutes'
time_fmt = (float(hrs)+(float(float(time_sects[1])/60)))
return time_fmt
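        # e.g. (illustrative) timeToFloat(datetime(2021, 5, 1, 13, 30)) -> 13.5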
refresh = False
last_punch = None
stage = 1
cur_time = datetime.now()
#do not perform refresh if the rate is 0
if(rate == 0):
return
#always refresh if the rate is set below 0 (-1)
elif(rate <= self.MIN_RATE):
refresh = True
#divide the 24 hour period into even checkpoints
max_hours = float(24)
spacing = float(max_hours / rate)
intervals = []
for i in range(rate):
intervals += [spacing*i]
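        # e.g. rate=4 -> spacing = 6.0 and intervals = [0.0, 6.0, 12.0, 18.0]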
#ensure log file exists
if(os.path.exists(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
#read log file
#read when the last refresh time occurred
with open(self.getDir()+self.LOG_FILE, 'r') as log_file:
#read the latest date
data = log_file.readlines()
#no refreshes have occurred so automatically need a refresh
if(len(data) == 0):
last_punch = cur_time
refresh = True
else:
last_punch = datetime.fromisoformat(data[0])
#determine if its time to refresh
#get latest time that was punched
last_time_fmt = timeToFloat(last_punch)
#determine the next checkpoint available for today
next_checkpoint = max_hours
for i in range(len(intervals)):
if(last_time_fmt < intervals[i]):
next_checkpoint = intervals[i]
stage = i + 1
break
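        # e.g. with intervals [0.0, 6.0, 12.0, 18.0] and a last refresh at 10.5 (10:30am),
        # the next checkpoint is 12.0 and this would be stage 3 of 4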
#print('next checkpoint',next_checkpoint)
cur_time_fmt = timeToFloat(cur_time)
        #check if the last punch occurred on a previous day (automatically refresh because it's a new day)
next_day = cur_time.year > last_punch.year or cur_time.month > last_punch.month or cur_time.day > last_punch.day
#print(next_day)
#print("currently",cur_time_fmt)
#determine if the current time has passed the next checkpoint or if its a new day
if(next_day or cur_time_fmt >= next_checkpoint):
last_punch = cur_time
refresh = True
log_file.close()
#determine if its time to refresh
if(refresh):
#display what interval is being refreshed on the day
infoo = "("+str(stage)+"/"+str(rate)+")" if(rate > 0) else ''
log.info("Automatically refreshing workspace "+self.getName()+" vendors... "+infoo)
#refresh all vendors attached to this workspace
for vndr in self.getVendors():
vndr.refresh()
pass
#write updated time value to log file
with open(self.getDir()+self.LOG_FILE, 'w') as lf:
lf.write(str(cur_time))
pass
@classmethod
def load(cls):
'''Load all workspaces from settings.'''
wspcs = apt.CFG.get('workspace', dtype=Section)
for ws in wspcs.keys():
#skip over immediate keys
if(isinstance(wspcs[ws], Section) == False):
continue
path = ''
vendors = '()'
#verify that a path key and vendors key exists under each workspace
apt.CFG.set('workspace.'+ws+'.path', path, override=False)
apt.CFG.set('workspace.'+ws+'.vendors', vendors, override=False)
#retrieve path and vendors keys
if('path' in wspcs[ws].keys()):
path = wspcs[ws]['path']._val
if('vendors' in wspcs[ws].keys()):
vendors = Cfg.castList(wspcs[ws]['vendors']._val)
#create Workspace objects
Workspace(wspcs[ws]._name, path, vendors)
pass
#save if made any changes
if(apt.CFG._modified):
apt.CFG.write()
pass
@classmethod
def save(cls, inc_active=True):
'''
Serializes the Workspace objects and saves them to the settings dictionary.
Parameters:
inc_active (bool): determine if to save the active workspace to settings
Returns:
None
'''
serialized = {}
#serialize the Workspace objects into dictionary format for settings
for ws in cls.Jar.values():
#do not save any workspace that has no path
if(ws.getPath() == ''):
continue
serialized[ws.getName()] = {}
serialized[ws.getName()]['path'] = ws.getPath()
serialized[ws.getName()]['vendors'] = Cfg.castStr(ws.getVendors(returnnames=True, lowercase=False), tab_cnt=2, drop_list=False)
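            # illustrative shape (hypothetical names; 'vendors' holds whatever Cfg.castStr renders):
            #   serialized['lab'] = {'path': '/users/me/hdl/', 'vendors': '...'}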
#update settings dictionary
apt.CFG.set('workspace', Section(serialized), override=True)
#update active workspace
if(inc_active):
if(cls.getActive() != None):
apt.CFG.set('general.active-workspace', cls.getActive().getName())
else:
apt.CFG.set('general.active-workspace', '')
apt.save()
pass
@classmethod
def inWorkspace(cls):
'''
Determine if an active workspace is selected.
Parameters:
None
Returns:
(bool): true if ActiveWorkspace is not None
'''
return cls._ActiveWorkspace != None
@classmethod
def setActiveWorkspace(cls, ws):
'''
Set the active workspace after initializing all workspaces into Jar. If
the input name is invalid, it will set the first workspace in the Jar as
active if one is not already assigned.
Parameters:
ws (str): workspace name
Returns:
(bool): true if active-workspace was set
'''
#properly set the active workspace from one found in Jar
if(ws != None and ws.lower() in cls.Jar.keys()):
re_assign = (cls._ActiveWorkspace != None)
#set the active workspace obj from found workspace
cls._ActiveWorkspace = cls.Jar[ws]
#only give prompt if reassigning the active-workspace
if(re_assign):
log.info("Assigning workspace "+cls._ActiveWorkspace.getName()+" as active workspace...")
return True
#try to randomly assign active workspace if not already assigned.
elif(len(cls.Jar.keys()) and cls._ActiveWorkspace == None):
random_ws = list(cls.Jar.keys())[0]
cls._ActiveWorkspace = cls.Jar[random_ws]
msgi = "No active workspace set."
            if(ws != '' and ws != None):
msgi = "Workspace "+ws+" does not exist."
log.info(msgi+" Auto-assigning active workspace to "+cls._ActiveWorkspace.getName()+"...")
return True
#still was not able to set the active workspace with the given argument
elif(cls._ActiveWorkspace != None):
log.info("Workspace "+ws+" does not exist. Keeping "+cls._ActiveWorkspace.getName()+" as active.")
else:
log.error("No workspace set as active.")
return False
def isLinked(self):
'''Returns if any vendors are tied to this workspace (bool).'''
return len(self.getVendors())
def getPath(self):
'''Returns the local path where downloaded blocks are located (str).'''
return self._path
def getDir(self):
'''Returns the base hidden directory where the workspace data is kept (str).'''
return self._ws_dir
def getCachePath(self):
'''Returns the hidden directory where workspace installations are kept. (str).'''
return self.getDir()+"cache/"
def getName(self):
'''Returns the workspace's identifier (str).'''
return self._name
def isActive(self):
        '''Returns true if this workspace is the active workspace (bool).'''
return self == self.getActive()
def getVendors(self, returnnames=False, lowercase=True):
'''
Return the vendor objects associated with the given workspace.
Parameters:
returnnames (bool): true will return vendor names
lowercase (bool): true will return lower-case names if returnnames is enabled
Returns:
([Vendor]) or ([str]): list of available vendors
'''
if(returnnames):
vndr_names = []
for vndr in self._vendors:
name = vndr.getName()
if(lowercase):
name = name.lower()
vndr_names += [name]
return vndr_names
else:
return self._vendors
@classmethod
def printList(cls):
'''
Prints formatted list for workspaces with vendor availability and which is active.
Parameters:
None
Returns:
None
'''
print('{:<16}'.format("Workspace"),'{:<6}'.format("Active"),'{:<40}'.format("Path"),'{:<14}'.format("Vendors"))
print("-"*16+" "+"-"*6+" "+"-"*40+" "+"-"*14+" ")
for ws in cls.Jar.values():
vndrs = apt.listToStr(ws.getVendors(returnnames=True))
act = 'yes' if(ws == cls.getActive()) else '-'
print('{:<16}'.format(ws.getName()),'{:<6}'.format(act),'{:<40}'.format(ws.getPath()),'{:<14}'.format(vndrs))
pass
pass
@classmethod
def printAll(cls):
for key,ws in cls.Jar.items():
print('key:',key)
print(ws)
@classmethod
def getActive(cls):
'''Returns the active workspace and will exit on error (Workspace).'''
if(cls._ActiveWorkspace == None):
exit(log.error("Not in a workspace!"))
return cls._ActiveWorkspace
# uncomment to use for debugging
# def __str__(self):
# return f'''
# ID: {hex(id(self))}
# Name: {self.getName()}
# Path: {self.getPath()}
# Active: {self.isActive()}
# Hidden directory: {self.getDir()}
# Linked to: {self.isLinked()}
# Vendors: {self.getVendors(returnnames=True)}
# '''
pass
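# Usage sketch (hypothetical; the class name is defined earlier in this file and is
# only assumed here to be Workspace):
#   Workspace.setActiveWorkspace('lab1')   # activate by name, or auto-assign a workspace
#   ws = Workspace.getActive()             # exits via log.error() if none could be set
#   print(ws.getPath(), ws.getVendors(returnnames=True))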
|
[
"os.path.exists",
"os.listdir",
"os.makedirs",
"os.path.split",
"datetime.datetime.now",
"os.path.isdir",
"datetime.datetime.fromisoformat",
"shutil.rmtree",
"logging.info",
"logging.error",
"os.remove"
] |
[((20508, 20547), 'logging.info', 'log.info', (['"""Collecting all unit data..."""'], {}), "('Collecting all unit data...')\n", (20516, 20547), True, 'import logging as log\n'), ((20707, 20724), 'logging.info', 'log.info', (['"""done."""'], {}), "('done.')\n", (20715, 20724), True, 'import logging as log\n'), ((28845, 28864), 'os.listdir', 'os.listdir', (['cls.DIR'], {}), '(cls.DIR)\n', (28855, 28864), False, 'import os, shutil, glob\n'), ((30250, 30264), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (30262, 30264), False, 'from datetime import datetime\n'), ((4420, 4437), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (4434, 4437), False, 'import os, shutil, glob\n'), ((5216, 5260), 'logging.error', 'log.error', (['"""Workspace name cannot be empty."""'], {}), "('Workspace name cannot be empty.')\n", (5225, 5260), True, 'import logging as log\n'), ((5341, 5412), 'logging.error', 'log.error', (["('Cannot rename workspace to ' + n + ' due to name conflict.')"], {}), "('Cannot rename workspace to ' + n + ' due to name conflict.')\n", (5350, 5412), True, 'import logging as log\n'), ((11614, 11631), 'os.path.split', 'os.path.split', (['mf'], {}), '(mf)\n', (11627, 11631), False, 'import os, shutil, glob\n'), ((4631, 4660), 'os.makedirs', 'os.makedirs', (['p'], {'exist_ok': '(True)'}), '(p, exist_ok=True)\n', (4642, 4660), False, 'import os, shutil, glob\n'), ((4754, 4802), 'logging.info', 'log.info', (["('Did not set ' + p + ' as local path.')"], {}), "('Did not set ' + p + ' as local path.')\n", (4762, 4802), True, 'import logging as log\n'), ((28962, 29021), 'logging.info', 'log.info', (["('Removing stale workspace data for ' + hd + '...')"], {}), "('Removing stale workspace data for ' + hd + '...')\n", (28970, 29021), True, 'import logging as log\n'), ((29038, 29065), 'os.path.isdir', 'os.path.isdir', (['(cls.DIR + hd)'], {}), '(cls.DIR + hd)\n', (29051, 29065), False, 'import os, shutil, glob\n'), ((31296, 31327), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['data[0]'], {}), '(data[0])\n', (31318, 31327), False, 'from datetime import datetime\n'), ((40022, 40054), 'logging.error', 'log.error', (['"""Not in a workspace!"""'], {}), "('Not in a workspace!')\n", (40031, 40054), True, 'import logging as log\n'), ((29086, 29137), 'shutil.rmtree', 'shutil.rmtree', (['(cls.DIR + hd)'], {'onerror': 'apt.rmReadOnly'}), '(cls.DIR + hd, onerror=apt.rmReadOnly)\n', (29099, 29137), False, 'import os, shutil, glob\n'), ((29237, 29260), 'os.remove', 'os.remove', (['(cls.DIR + hd)'], {}), '(cls.DIR + hd)\n', (29246, 29260), False, 'import os, shutil, glob\n'), ((37372, 37412), 'logging.error', 'log.error', (['"""No workspace set as active."""'], {}), "('No workspace set as active.')\n", (37381, 37412), True, 'import logging as log\n'), ((16924, 16966), 'logging.info', 'log.info', (['"""Ambiguous unit; conflicts with"""'], {}), "('Ambiguous unit; conflicts with')\n", (16932, 16966), True, 'import logging as log\n'), ((2345, 2379), 'logging.info', 'log.info', (['"""Workspace not created."""'], {}), "('Workspace not created.')\n", (2353, 2379), True, 'import logging as log\n'), ((19558, 19600), 'logging.info', 'log.info', (['"""Ambiguous unit; conflicts with"""'], {}), "('Ambiguous unit; conflicts with')\n", (19566, 19600), True, 'import logging as log\n'), ((19651, 19694), 'logging.info', 'log.info', (['"""Ambiguous title; conflicts with"""'], {}), "('Ambiguous title; conflicts with')\n", (19659, 19694), True, 'import logging as log\n'), ((2776, 2810), 
'logging.info', 'log.info', (['"""Workspace not created."""'], {}), "('Workspace not created.')\n", (2784, 2810), True, 'import logging as log\n')]
|
import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
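# Illustrative example of broadcat (shapes are hypothetical): given t1 of shape
# (2, 1, 5) and t2 of shape (1, 3, 5), broadcat([t1, t2], dim=-1) expands both to
# (2, 3, 5) on the non-concatenation dims and returns a tensor of shape (2, 3, 10).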
# singleton msa transformer
msa_instances = None
def get_msa_transformer():
global msa_instances
    if not exists(msa_instances):
        msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S")
        batch_converter = alphabet.get_batch_converter()
        # cache the pair so the hub download/initialisation only happens once
        msa_instances = (msa_model, batch_converter)
    return msa_instances
# MSA embedding related functions
VOCAB = ProteinVocabulary()
def ids_to_aa_str(x):
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
is_char = lambda c: isinstance(c, str) and len(c) == 1
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_aa_str(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(is_char, out)):
return ''.join(out)
return out
def aa_str_to_embed_input(x):
assert isinstance(x, list), 'input must be a list'
out = []
for el in x:
if isinstance(el, list):
out.append(aa_str_to_embed_input(el))
elif isinstance(el, str):
out.append((None, el))
else:
raise TypeError('type must be either list or string')
return out
def apc(x):
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12)
normalized = x - avg
return normalized
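# apc() above implements the Average Product Correction commonly used on contact maps:
#   apc(x)[i, j] = x[i, j] - row_sum[i] * col_sum[j] / total_sum
# which removes the background signal from the (symmetrized) attention maps.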
def symmetrize(x):
return x + x.transpose(-1, -2)
def pad_image_to(tensor, size, value = 0.):
remainder = size - tensor.shape[-1]
tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)
return tensor
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
@torch.no_grad()
def get_msa_attention_embedding(
model,
batch_converter,
aa_str,
id,
fetch_msas_fn = lambda t: [],
cache = True
):
device = next(model.parameters()).device
cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')
if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):
try:
loaded = torch.load(cache_full_path).to(device)
except:
loaded = None
if exists(loaded):
return loaded
msas = default(fetch_msas_fn(aa_str), [])
seq_with_msas = [aa_str, *msas]
embed_inputs = aa_str_to_embed_input(seq_with_msas)
_, _, msa_batch_tokens = batch_converter(embed_inputs)
results = model(msa_batch_tokens.to(device), need_head_weights = True)
attentions = results['row_attentions']
attentions = attentions[..., 1:, 1:]
attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')
attentions = apc(symmetrize(attentions))
if cache:
print(f'caching to {cache_full_path}')
torch.save(attentions, cache_full_path)
return attentions
def get_msa_attention_embeddings(
model,
batch_converter,
seqs,
ids,
fetch_msas_fn = lambda t: [],
cache = True
):
n = seqs.shape[1]
seqs = rearrange(seqs, 'b n -> b () n')
aa_strs = ids_to_aa_str(seqs.cpu().tolist())
embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]
embeds_list = [pad_image_to(embed, n) for embed in embeds_list]
embeds = torch.cat(embeds_list, dim = 0)
return embeds
# training utils
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
def save_heatmap(tensor, filepath, dpi = 200, return_image = False):
heatmap = sn.heatmap(tensor.cpu().numpy())
figure = heatmap.get_figure()
figure.savefig(filepath, dpi = dpi)
plt.clf()
if not return_image:
return
return Image.open(filepath)
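# Usage sketch (illustrative; requires the fair-esm hub model to be downloadable, and
# the seqs/ids tensors below are hypothetical):
#   model, batch_converter = get_msa_transformer()
#   embeds = get_msa_attention_embeddings(model, batch_converter, seqs, ids)
#   save_heatmap(embeds[0, 0], 'attention.png')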
|
[
"os.path.expanduser",
"os.path.exists",
"PIL.Image.open",
"torch.hub.load",
"os.makedirs",
"os.getenv",
"sidechainnet.utils.sequence.ProteinVocabulary",
"torch.load",
"matplotlib.pyplot.clf",
"os.path.join",
"einops.rearrange",
"torch.save",
"torch.nn.functional.pad",
"torch.no_grad",
"torch.cat"
] |
[((1710, 1729), 'sidechainnet.utils.sequence.ProteinVocabulary', 'ProteinVocabulary', ([], {}), '()\n', (1727, 1729), False, 'from sidechainnet.utils.sequence import ProteinVocabulary\n'), ((3236, 3274), 'os.makedirs', 'os.makedirs', (['CACHE_PATH'], {'exist_ok': '(True)'}), '(CACHE_PATH, exist_ok=True)\n', (3247, 3274), False, 'import os\n'), ((3279, 3294), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3292, 3294), False, 'import torch\n'), ((1278, 1305), 'torch.cat', 'torch.cat', (['tensors'], {'dim': 'dim'}), '(tensors, dim=dim)\n', (1287, 1305), False, 'import torch\n'), ((2951, 3007), 'torch.nn.functional.pad', 'F.pad', (['tensor', '(0, remainder, 0, remainder)'], {'value': 'value'}), '(tensor, (0, remainder, 0, remainder), value=value)\n', (2956, 3007), True, 'import torch.nn.functional as F\n'), ((3108, 3131), 'os.getenv', 'os.getenv', (['"""CACHE_PATH"""'], {}), "('CACHE_PATH')\n", (3117, 3131), False, 'import os\n'), ((3133, 3177), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.cache.ddpm-proteins"""'], {}), "('~/.cache.ddpm-proteins')\n", (3151, 3177), False, 'import os\n'), ((3502, 3538), 'os.path.join', 'os.path.join', (['CACHE_PATH', 'f"""{id}.pt"""'], {}), "(CACHE_PATH, f'{id}.pt')\n", (3514, 3538), False, 'import os\n'), ((4156, 4205), 'einops.rearrange', 'rearrange', (['attentions', '"""b l h m n -> b (l h) m n"""'], {}), "(attentions, 'b l h m n -> b (l h) m n')\n", (4165, 4205), False, 'from einops import rearrange\n'), ((4557, 4589), 'einops.rearrange', 'rearrange', (['seqs', '"""b n -> b () n"""'], {}), "(seqs, 'b n -> b () n')\n", (4566, 4589), False, 'from einops import rearrange\n'), ((4855, 4884), 'torch.cat', 'torch.cat', (['embeds_list'], {'dim': '(0)'}), '(embeds_list, dim=0)\n', (4864, 4884), False, 'import torch\n'), ((5270, 5279), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5277, 5279), True, 'import matplotlib.pyplot as plt\n'), ((5332, 5352), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (5342, 5352), False, 'from PIL import Image\n'), ((1476, 1541), 'torch.hub.load', 'torch.hub.load', (['"""facebookresearch/esm"""', '"""esm_msa1_t12_100M_UR50S"""'], {}), "('facebookresearch/esm', 'esm_msa1_t12_100M_UR50S')\n", (1490, 1541), False, 'import torch\n'), ((3209, 3233), 'os.getenv', 'os.getenv', (['"""CLEAR_CACHE"""'], {}), "('CLEAR_CACHE')\n", (3218, 3233), False, 'import os\n'), ((3577, 3608), 'os.path.exists', 'os.path.exists', (['cache_full_path'], {}), '(cache_full_path)\n', (3591, 3608), False, 'import os\n'), ((4321, 4360), 'torch.save', 'torch.save', (['attentions', 'cache_full_path'], {}), '(attentions, cache_full_path)\n', (4331, 4360), False, 'import torch\n'), ((3644, 3671), 'torch.load', 'torch.load', (['cache_full_path'], {}), '(cache_full_path)\n', (3654, 3671), False, 'import torch\n')]
|
from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
""" A class for estimating gaze ray from facial landmarks """
def __init__(self, write_video=False):
# 3D model points.
self.model_points = np.array(
[
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
                (225.0, 170.0, -135.0),  # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0), # Right mouth corner
]
)
self.dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
"""
Parameters
----------
write_video : bool, optional
Write the resulting OpenCV video
"""
self.write_video = write_video
self.landmark_detector = LandmarkDetector(write_video=False)
def get_gaze_rays(self, filename, bbox_history=None, show=True):
"""
Get the gaze rays for the given video file
"""
# Get the landmarks for the entire video
landmark_map = self.landmark_detector.detect(filename, show=False)
# Capture the video
cap = cv2.VideoCapture(filename)
frame_no = 0
gaze_angles = {}
# Loop over the frames from the video stream
while True:
success, frame = cap.read()
if not success:
if frame_no == 0:
print("Failed to read video")
sys.exit(1)
else:
break
if frame_no == 0:
# Camera internals
size = frame.shape
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype="double",
)
if self.write_video:
# Initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
par_path = os.path.abspath(os.path.join(filename, os.pardir))
dir_path = par_path + "_pnp"
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
video_path = os.path.join(dir_path, os.path.basename(filename))
writer = cv2.VideoWriter(
video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
)
if frame_no in landmark_map:
# 2D image points.
image_points = np.array(
[
landmark_map[frame_no][33], # Nose tip
landmark_map[frame_no][8], # Chin
landmark_map[frame_no][36], # Left eye left corner
                        landmark_map[frame_no][45],  # Right eye right corner
landmark_map[frame_no][48], # Left Mouth corner
landmark_map[frame_no][54], # Right mouth corner
],
dtype="double",
)
# We use this to draw a line sticking out of the nose
success, rotation_vector, translation_vector = cv2.solvePnP(
self.model_points,
image_points,
camera_matrix,
self.dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE,
)
nose_end_point2D, jacobian = cv2.projectPoints(
np.array([(0.0, 0.0, 1000.0)]),
rotation_vector,
translation_vector,
camera_matrix,
self.dist_coeffs,
)
for p in image_points:
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
for p in landmark_map[frame_no]:
if p in image_points:
continue
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
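                # p1 is the detected nose tip and p2 is the image projection of a 3D
                # point 1000 units along the head's forward (z) axis; the lines below
                # extend the p1->p2 segment well past p2 (by 3x its length) so the
                # drawn gaze ray is clearly visible.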
lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
length = lenAB * 3
C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)
if bbox_history is not None and (self.write_video or show):
bboxes = bbox_history[frame_no]
for i, bbox in enumerate(bboxes):
x, y = int(bbox[0]), int(bbox[1])
w, h = int(bbox[2]), int(bbox[3])
cv2.circle(
frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
)
# Store in the return dictionary
gaze_angles[frame_no] = (p1, p2)
# Show the frame if the flag is on
if show:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# Write the video if the flag is on
if self.write_video:
writer.write(frame)
frame_no += 1
# Cleanup
cv2.destroyAllWindows()
if self.write_video:
writer.release()
return gaze_angles
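# Usage sketch (illustrative; the video path is hypothetical):
#   estimator = HeadGazeEstimator(write_video=False)
#   gaze = estimator.get_gaze_rays('session.mp4', show=False)
#   # gaze maps frame_no -> (nose_tip_point, projected_forward_point)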
|
[
"os.makedirs",
"cv2.line",
"math.sqrt",
"os.path.join",
"cv2.imshow",
"cv2.VideoWriter",
"numpy.array",
"numpy.zeros",
"cv2.solvePnP",
"cv2.destroyAllWindows",
"adam_visual_perception.LandmarkDetector",
"cv2.VideoCapture",
"sys.exit",
"cv2.VideoWriter_fourcc",
"os.path.isdir",
"os.path.basename",
"cv2.waitKey"
] |
[((352, 506), 'numpy.array', 'np.array', (['[(0.0, 0.0, 0.0), (0.0, -330.0, -65.0), (-225.0, 170.0, -135.0), (225.0, \n 170.0, -135.0), (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0)]'], {}), '([(0.0, 0.0, 0.0), (0.0, -330.0, -65.0), (-225.0, 170.0, -135.0), (\n 225.0, 170.0, -135.0), (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0)])\n', (360, 506), True, 'import numpy as np\n'), ((774, 790), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (782, 790), True, 'import numpy as np\n'), ((1039, 1074), 'adam_visual_perception.LandmarkDetector', 'LandmarkDetector', ([], {'write_video': '(False)'}), '(write_video=False)\n', (1055, 1074), False, 'from adam_visual_perception import LandmarkDetector\n'), ((1387, 1413), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (1403, 1413), False, 'import cv2\n'), ((5829, 5852), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5850, 5852), False, 'import cv2\n'), ((1992, 2093), 'numpy.array', 'np.array', (['[[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0,\n 1]], dtype='double')\n", (2000, 2093), True, 'import numpy as np\n'), ((2969, 3171), 'numpy.array', 'np.array', (['[landmark_map[frame_no][33], landmark_map[frame_no][8], landmark_map[\n frame_no][36], landmark_map[frame_no][45], landmark_map[frame_no][48],\n landmark_map[frame_no][54]]'], {'dtype': '"""double"""'}), "([landmark_map[frame_no][33], landmark_map[frame_no][8],\n landmark_map[frame_no][36], landmark_map[frame_no][45], landmark_map[\n frame_no][48], landmark_map[frame_no][54]], dtype='double')\n", (2977, 3171), True, 'import numpy as np\n'), ((3635, 3748), 'cv2.solvePnP', 'cv2.solvePnP', (['self.model_points', 'image_points', 'camera_matrix', 'self.dist_coeffs'], {'flags': 'cv2.SOLVEPNP_ITERATIVE'}), '(self.model_points, image_points, camera_matrix, self.\n dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n', (3647, 3748), False, 'import cv2\n'), ((4661, 4715), 'math.sqrt', 'math.sqrt', (['((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)'], {}), '((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n', (4670, 4715), False, 'import math\n'), ((4904, 4951), 'cv2.line', 'cv2.line', (['frame', 'p1', '(C_x, C_y)', '(0, 255, 0)', '(2)'], {}), '(frame, p1, (C_x, C_y), (0, 255, 0), 2)\n', (4912, 4951), False, 'import cv2\n'), ((5586, 5612), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (5596, 5612), False, 'import cv2\n'), ((1708, 1719), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1716, 1719), False, 'import sys\n'), ((2361, 2392), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (2383, 2392), False, 'import cv2\n'), ((2735, 2814), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_path', 'fourcc', '(30)', '(frame.shape[1], frame.shape[0])', '(True)'], {}), '(video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True)\n', (2750, 2814), False, 'import cv2\n'), ((3948, 3978), 'numpy.array', 'np.array', (['[(0.0, 0.0, 1000.0)]'], {}), '([(0.0, 0.0, 1000.0)])\n', (3956, 3978), True, 'import numpy as np\n'), ((5635, 5649), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5646, 5649), False, 'import cv2\n'), ((2440, 2473), 'os.path.join', 'os.path.join', (['filename', 'os.pardir'], {}), '(filename, os.pardir)\n', (2452, 2473), False, 'import os\n'), ((2551, 2574), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (2564, 2574), False, 
'import os\n'), ((2600, 2621), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (2611, 2621), False, 'import os\n'), ((2678, 2704), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2694, 2704), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import disnake
from disnake.ext import commands
from pypoca.config import COLOR, URLS
from pypoca.database import Server
from pypoca.ext import ALL, DEFAULT, Choice, Option
class General(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.slash_command(name="ping", description=DEFAULT["COMMAND_PING_DESC"])
async def slash_ping(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
latency = int(self.bot.latency * 1000)
description = locale["COMMAND_PING_REPLY"] + f": {latency}ms"
embed = disnake.Embed(description=description, color=COLOR)
await inter.send(embed=embed, ephemeral=hide)
@commands.slash_command(name="help", description=DEFAULT["COMMAND_HELP_DESC"])
async def slash_help(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
BLANK = "<:blank:914183315056111627>"
description = f"""
**/movie**
{BLANK} **discover** {locale["COMMAND_MOVIE_DISCOVER_DESC"]}
{BLANK} **find** {locale["COMMAND_MOVIE_FIND_DESC"]}
{BLANK} **popular** {locale["COMMAND_MOVIE_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_MOVIE_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_MOVIE_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_MOVIE_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_MOVIE_UPCOMING_DESC"]}
**/tv**
{BLANK} **discover** {locale["COMMAND_TV_DISCOVER_DESC"]}
{BLANK} **popular** {locale["COMMAND_TV_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_TV_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_TV_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_TV_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_TV_UPCOMING_DESC"]}
**/people**
{BLANK} **popular** {locale["COMMAND_PERSON_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_PERSON_SEARCH_DESC"]}
{BLANK} **trending** {locale["COMMAND_PERSON_TRENDING_DESC"]}
**/game**
{BLANK} **frame** {locale["COMMAND_GAME_FRAME_DESC"]}
{BLANK} **higher** {locale["COMMAND_GAME_HIGHER_DESC"]}
**/setting**
{BLANK} **language** {locale["COMMAND_LANGUAGE_DESC"]}
"""
buttons = [
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_INVITE"], "url": URLS["invite"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_VOTE"], "url": URLS["vote"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SERVER"], "url": URLS["server"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SITE"], "url": URLS["site"]},
]
embed = disnake.Embed(description=description, color=COLOR)
view = disnake.ui.View()
[view.add_item(disnake.ui.Button(**button)) for button in buttons]
await inter.send(embed=embed, view=view, ephemeral=hide)
def setup(bot: commands.Bot) -> None:
bot.add_cog(General(bot))
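# Note: setup() is the standard disnake extension hook; the bot is expected to load this
# cog via bot.load_extension("<module path>"), where the exact module path depends on the
# package layout and is not shown here.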
|
[
"disnake.Embed",
"disnake.ui.View",
"pypoca.database.Server.get_by_id",
"disnake.ext.commands.slash_command",
"disnake.ui.Button"
] |
[((301, 378), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {'name': '"""ping"""', 'description': "DEFAULT['COMMAND_PING_DESC']"}), "(name='ping', description=DEFAULT['COMMAND_PING_DESC'])\n", (323, 378), False, 'from disnake.ext import commands\n'), ((859, 936), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {'name': '"""help"""', 'description': "DEFAULT['COMMAND_HELP_DESC']"}), "(name='help', description=DEFAULT['COMMAND_HELP_DESC'])\n", (881, 936), False, 'from disnake.ext import commands\n'), ((510, 542), 'pypoca.database.Server.get_by_id', 'Server.get_by_id', (['inter.guild.id'], {}), '(inter.guild.id)\n', (526, 542), False, 'from pypoca.database import Server\n'), ((747, 798), 'disnake.Embed', 'disnake.Embed', ([], {'description': 'description', 'color': 'COLOR'}), '(description=description, color=COLOR)\n', (760, 798), False, 'import disnake\n'), ((1068, 1100), 'pypoca.database.Server.get_by_id', 'Server.get_by_id', (['inter.guild.id'], {}), '(inter.guild.id)\n', (1084, 1100), False, 'from pypoca.database import Server\n'), ((3093, 3144), 'disnake.Embed', 'disnake.Embed', ([], {'description': 'description', 'color': 'COLOR'}), '(description=description, color=COLOR)\n', (3106, 3144), False, 'import disnake\n'), ((3160, 3177), 'disnake.ui.View', 'disnake.ui.View', ([], {}), '()\n', (3175, 3177), False, 'import disnake\n'), ((3201, 3228), 'disnake.ui.Button', 'disnake.ui.Button', ([], {}), '(**button)\n', (3218, 3228), False, 'import disnake\n')]
|
import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
class PPO2WithVAE(PPO2):
"""
Custom PPO2 version.
Notable changes:
- optimization is done after each episode and not after n steps
"""
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"):
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
self._setup_learn()
runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
self.episode_reward = np.zeros((self.n_envs,))
ep_info_buf = deque(maxlen=100)
t_first_start = time.time()
n_timesteps = 0
# nupdates = total_timesteps // self.n_batch
for timestep in range(1, total_timesteps + 1):
assert self.n_batch % self.nminibatches == 0
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - timestep / total_timesteps
lr_now = self.learning_rate(frac)
cliprangenow = self.cliprange(frac)
# true_reward is the reward without discount
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
n_timesteps += len(obs)
ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
# timestep = ((update * self.noptepochs * self.n_batch + epoch_num * self.n_batch + start) //
# batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
update=n_timesteps))
else: # recurrent version
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
                        for start in range(0, self.n_envs, envs_per_batch):
# timestep = ((update * self.noptepochs * self.n_envs + epoch_num * self.n_envs + start) //
# envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps,
writer=writer, states=mb_states))
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, n_timesteps)
if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1):
explained_var = explained_variance(values, returns)
logger.logkv("total_timesteps", n_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if n_timesteps > total_timesteps:
break
return self
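    # Usage sketch (illustrative; construction follows the regular stable-baselines 2.x
    # PPO2 API, with the env assumed to be wrapped so observations come from a VAE):
    #   model = PPO2WithVAE(policy, env, n_steps=..., verbose=1)
    #   model.learn(total_timesteps=100000)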
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
def run(self):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
while True:
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
mb_rewards.append(rewards)
if self.dones:
print("Episode finished. Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards)))
if len(mb_rewards) >= self.n_steps:
break
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
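        # Generalized Advantage Estimation (GAE), computed backwards over the rollout:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done) - V(s_t)
        #   A_t     = delta_t + gamma * lam * (1 - done) * A_{t+1}
        # The value targets below are then returns_t = A_t + V(s_t).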
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
|
[
"numpy.clip",
"numpy.copy",
"numpy.mean",
"stable_baselines.logger.logkv",
"collections.deque",
"stable_baselines.common.explained_variance",
"time.time",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"stable_baselines.common.TensorboardWriter",
"stable_baselines.ppo2.ppo2.safe_mean",
"stable_baselines.ppo2.ppo2.get_schedule_fn",
"stable_baselines.logger.dumpkvs",
"numpy.zeros_like",
"numpy.arange",
"numpy.random.shuffle"
] |
[((724, 759), 'stable_baselines.ppo2.ppo2.get_schedule_fn', 'get_schedule_fn', (['self.learning_rate'], {}), '(self.learning_rate)\n', (739, 759), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((785, 816), 'stable_baselines.ppo2.ppo2.get_schedule_fn', 'get_schedule_fn', (['self.cliprange'], {}), '(self.cliprange)\n', (800, 816), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((8585, 8625), 'numpy.asarray', 'np.asarray', (['mb_obs'], {'dtype': 'self.obs.dtype'}), '(mb_obs, dtype=self.obs.dtype)\n', (8595, 8625), True, 'import numpy as np\n'), ((8647, 8687), 'numpy.asarray', 'np.asarray', (['mb_rewards'], {'dtype': 'np.float32'}), '(mb_rewards, dtype=np.float32)\n', (8657, 8687), True, 'import numpy as np\n'), ((8709, 8731), 'numpy.asarray', 'np.asarray', (['mb_actions'], {}), '(mb_actions)\n', (8719, 8731), True, 'import numpy as np\n'), ((8752, 8791), 'numpy.asarray', 'np.asarray', (['mb_values'], {'dtype': 'np.float32'}), '(mb_values, dtype=np.float32)\n', (8762, 8791), True, 'import numpy as np\n'), ((8816, 8859), 'numpy.asarray', 'np.asarray', (['mb_neglogpacs'], {'dtype': 'np.float32'}), '(mb_neglogpacs, dtype=np.float32)\n', (8826, 8859), True, 'import numpy as np\n'), ((8879, 8914), 'numpy.asarray', 'np.asarray', (['mb_dones'], {'dtype': 'np.bool'}), '(mb_dones, dtype=np.bool)\n', (8889, 8914), True, 'import numpy as np\n'), ((9049, 9074), 'numpy.zeros_like', 'np.zeros_like', (['mb_rewards'], {}), '(mb_rewards)\n', (9062, 9074), True, 'import numpy as np\n'), ((9097, 9116), 'numpy.copy', 'np.copy', (['mb_rewards'], {}), '(mb_rewards)\n', (9104, 9116), True, 'import numpy as np\n'), ((831, 895), 'stable_baselines.common.TensorboardWriter', 'TensorboardWriter', (['self.graph', 'self.tensorboard_log', 'tb_log_name'], {}), '(self.graph, self.tensorboard_log, tb_log_name)\n', (848, 895), False, 'from stable_baselines.common import explained_variance, TensorboardWriter\n'), ((1082, 1106), 'numpy.zeros', 'np.zeros', (['(self.n_envs,)'], {}), '((self.n_envs,))\n', (1090, 1106), True, 'import numpy as np\n'), ((1134, 1151), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1139, 1151), False, 'from collections import deque\n'), ((1180, 1191), 'time.time', 'time.time', ([], {}), '()\n', (1189, 1191), False, 'import time\n'), ((1486, 1497), 'time.time', 'time.time', ([], {}), '()\n', (1495, 1497), False, 'import time\n'), ((4170, 4199), 'numpy.mean', 'np.mean', (['mb_loss_vals'], {'axis': '(0)'}), '(mb_loss_vals, axis=0)\n', (4177, 4199), True, 'import numpy as np\n'), ((4224, 4235), 'time.time', 'time.time', ([], {}), '()\n', (4233, 4235), False, 'import time\n'), ((7929, 8000), 'numpy.clip', 'np.clip', (['actions', 'self.env.action_space.low', 'self.env.action_space.high'], {}), '(actions, self.env.action_space.low, self.env.action_space.high)\n', (7936, 8000), True, 'import numpy as np\n'), ((2033, 2056), 'numpy.arange', 'np.arange', (['self.n_batch'], {}), '(self.n_batch)\n', (2042, 2056), True, 'import numpy as np\n'), ((2972, 2994), 'numpy.arange', 'np.arange', (['self.n_envs'], {}), '(self.n_envs)\n', (2981, 2994), True, 'import numpy as np\n'), ((4879, 4914), 'stable_baselines.common.explained_variance', 'explained_variance', (['values', 'returns'], {}), '(values, returns)\n', (4897, 4914), False, 'from stable_baselines.common import explained_variance, TensorboardWriter\n'), ((4935, 4979), 'stable_baselines.logger.logkv', 'logger.logkv', 
(['"""total_timesteps"""', 'n_timesteps'], {}), "('total_timesteps', n_timesteps)\n", (4947, 4979), False, 'from stable_baselines import logger, PPO2\n'), ((5000, 5024), 'stable_baselines.logger.logkv', 'logger.logkv', (['"""fps"""', 'fps'], {}), "('fps', fps)\n", (5012, 5024), False, 'from stable_baselines import logger, PPO2\n'), ((5323, 5376), 'stable_baselines.logger.logkv', 'logger.logkv', (['"""time_elapsed"""', '(t_start - t_first_start)'], {}), "('time_elapsed', t_start - t_first_start)\n", (5335, 5376), False, 'from stable_baselines import logger, PPO2\n'), ((5537, 5553), 'stable_baselines.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (5551, 5553), False, 'from stable_baselines import logger, PPO2\n'), ((2142, 2165), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (2159, 2165), True, 'import numpy as np\n'), ((3252, 3282), 'numpy.random.shuffle', 'np.random.shuffle', (['env_indices'], {}), '(env_indices)\n', (3269, 3282), True, 'import numpy as np\n'), ((5149, 5201), 'stable_baselines.ppo2.ppo2.safe_mean', 'safe_mean', (["[ep_info['r'] for ep_info in ep_info_buf]"], {}), "([ep_info['r'] for ep_info in ep_info_buf])\n", (5158, 5201), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((5249, 5301), 'stable_baselines.ppo2.ppo2.safe_mean', 'safe_mean', (["[ep_info['l'] for ep_info in ep_info_buf]"], {}), "([ep_info['l'] for ep_info in ep_info_buf])\n", (5258, 5301), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((5483, 5516), 'stable_baselines.logger.logkv', 'logger.logkv', (['loss_name', 'loss_val'], {}), '(loss_name, loss_val)\n', (5495, 5516), False, 'from stable_baselines import logger, PPO2\n'), ((8405, 8423), 'numpy.sum', 'np.sum', (['mb_rewards'], {}), '(mb_rewards)\n', (8411, 8423), True, 'import numpy as np\n'), ((3030, 3067), 'numpy.arange', 'np.arange', (['(self.n_envs * self.n_steps)'], {}), '(self.n_envs * self.n_steps)\n', (3039, 3067), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
fillRasterwithPatches.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2020-09-01'
__copyright__ = '(C) 2020, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from math import floor, ceil
import numpy as np
from lftools.geocapt.dip import Interpolar
from lftools.geocapt.imgs import Imgs
import os
from qgis.PyQt.QtGui import QIcon
class FillRasterwithPatches(QgsProcessingAlgorithm):
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Traduzir para o portugês: arg[0] - english (translate), arg[1] - português
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return FillRasterwithPatches()
def name(self):
return 'fillrasterwithpatches'
def displayName(self):
return self.tr('Fill with patches', 'Remendar vazios de raster')
def group(self):
return self.tr('Raster')
def groupId(self):
return 'raster'
def tags(self):
return self.tr('fill,hole,raster,cloud,remove,drone,patch').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))
txt_en = 'Fills Raster null pixels (no data) with data obtained from other smaller raster layers (Patches).'
txt_pt = 'Preenche vazios de Raster (pixels nulos) com dados obtidos de outras camadas raster menores (Remendos).'
figure = 'images/tutorial/raster_fill_holes.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<p align="right">
<b>'''+self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
RasterIN ='RasterIN'
PATCHES = 'PATCHES'
RESAMPLING = 'RESAMPLING'
RasterOUT = 'RasterOUT'
OPEN = 'OPEN'
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterRasterLayer(
self.RasterIN,
self.tr('Input Raster', 'Raster de Entrada'),
[QgsProcessing.TypeRaster]
)
)
self.addParameter(
QgsProcessingParameterMultipleLayers(
self.PATCHES,
self.tr('Patch Layers', 'Rasters de Remendo'),
layerType = QgsProcessing.TypeRaster
)
)
interp = [self.tr('Nearest neighbor', 'Vizinho mais próximo'),
self.tr('Bilinear'),
self.tr('Bicubic', 'Bicúbica')]
self.addParameter(
QgsProcessingParameterEnum(
self.RESAMPLING,
self.tr('Interpolation', 'Interpolação'),
options = interp,
defaultValue= 0
)
)
# OUTPUT
self.addParameter(
QgsProcessingParameterFileDestination(
self.RasterOUT,
self.tr('Patched Image', 'Imagem Remendada'),
fileFilter = 'GeoTIFF (*.tif)'
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.OPEN,
self.tr('Load patched Image', 'Carregar Imagem Remendada'),
defaultValue= True
)
)
def processAlgorithm(self, parameters, context, feedback):
RasterIN = self.parameterAsRasterLayer(
parameters,
self.RasterIN,
context
)
if RasterIN is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.RasterIN))
RasterIN = RasterIN.dataProvider().dataSourceUri()
PatchesLayers = self.parameterAsLayerList(
parameters,
self.PATCHES,
context
)
reamostragem = self.parameterAsEnum(
parameters,
self.RESAMPLING,
context
)
reamostragem = ['nearest','bilinear','bicubic'][reamostragem]
RGB_Output = self.parameterAsFileOutput(
parameters,
self.RasterOUT,
context
)
Carregar = self.parameterAsBool(
parameters,
self.OPEN,
context
)
limiar = 240
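        # 'limiar' (threshold): pixels whose band-1 value exceeds this are treated as
        # gaps to be patched, in addition to explicit no-data / fully transparent pixels.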
        # Open the raster layer as an array
image = gdal.Open(RasterIN)
        prj = image.GetProjection()
        CRS = osr.SpatialReference(wkt=prj)
geotransform = image.GetGeoTransform()
        n_bands = image.RasterCount # Number of bands
cols = image.RasterXSize # Number of columns
rows = image.RasterYSize # Number of rows
        # Image origin and resolution
ulx, xres, xskew, uly, yskew, yres = geotransform
origem = (ulx, uly)
resol_X = abs(xres)
resol_Y = abs(yres)
if n_bands ==1:
feedback.pushInfo(self.tr('Opening raster band...', 'Abrindo banda do raster...'))
band1 = image.GetRasterBand(1).ReadAsArray()
if n_bands >=3:
feedback.pushInfo(self.tr('Opening Band R...', 'Abrindo Banda R...'))
band1 = image.GetRasterBand(1).ReadAsArray()
feedback.pushInfo(self.tr('Opening Band G...', 'Abrindo Banda G...'))
band2 = image.GetRasterBand(2).ReadAsArray()
feedback.pushInfo(self.tr('Opening Band B...', 'Abrindo Banda B...'))
band3 = image.GetRasterBand(3).ReadAsArray()
            # Transparency (alpha band)
if n_bands == 4:
feedback.pushInfo(self.tr('Opening Band Alpha...', 'Abrindo Banda Alfa...'))
band4 = image.GetRasterBand(4).ReadAsArray()
Pixel_Nulo = image.GetRasterBand(1).GetNoDataValue()
if Pixel_Nulo == None:
Pixel_Nulo = 0
        image = None # close the input image
        # Number of pixels to process
TAM = 0
for Remendo in PatchesLayers:
Rem_Path = Remendo.dataProvider().dataSourceUri()
Rem = gdal.Open(Rem_Path)
# Rem_cols = Rem.RasterXSize # Number of columns
Rem_rows = Rem.RasterYSize # Number of rows
TAM += Rem_rows
        # Patches
total = 100.0 / TAM
cont = 0
for Remendo in PatchesLayers:
feedback.pushInfo((self.tr('Processing Layer: {}', 'Processando Camada: {}')).format(Remendo))
Rem_Path = Remendo.dataProvider().dataSourceUri()
Rem = gdal.Open(Rem_Path)
ulx, xres, xskew, uly, yskew, yres = Rem.GetGeoTransform()
Rem_origem = (ulx, uly)
Rem_resol_X = abs(xres)
Rem_resol_Y = abs(yres)
Rem_cols = Rem.RasterXSize # Number of columns
Rem_rows = Rem.RasterYSize # Number of rows
lrx = ulx + (Rem_cols * xres)
lry = uly + (Rem_rows * yres)
bbox = [ulx, lrx, lry, uly]
Rem_nulo = Rem.GetRasterBand(1).GetNoDataValue()
if Rem_nulo == None:
Rem_nulo = 0
Rem_band1 = Rem.GetRasterBand(1).ReadAsArray()
if n_bands >1:
Rem_band2 = Rem.GetRasterBand(2).ReadAsArray()
Rem_band3 = Rem.GetRasterBand(3).ReadAsArray()
            # Scan limits
row_ini = int(round((origem[1]-uly)/resol_Y - 0.5))
row_fim = int(round((origem[1]-lry)/resol_Y - 0.5))
col_ini = int(round((ulx - origem[0])/resol_X - 0.5))
col_fim = int(round((lrx - origem[0])/resol_X - 0.5))
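            # The four limits above convert the patch bounding box into row/column
            # indices of the parent raster:
            #   row = (origin_Y - Y) / resol_Y,   col = (X - origin_X) / resol_X
            # (round(... - 0.5) snaps the coordinate to the enclosing pixel index).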
            # Scan the raster
if n_bands == 4:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band4[lin][col]
                        if px_value == 0 or band1[lin][col] > limiar: # check threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
elif n_bands == 3:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band1[lin][col]
                        if px_value == Pixel_Nulo or band1[lin][col] > limiar: # check threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
elif n_bands == 1:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band1[lin][col]
                        if px_value == Pixel_Nulo or band1[lin][col] > limiar: # check threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
            Rem = None # close the patch image
        # Create the RGB output image
feedback.pushInfo(self.tr('Saving Raster...', 'Salvando Raster...'))
GDT = gdal_array.NumericTypeCodeToGDALTypeCode(band1.dtype)
if n_bands ==1:
RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 1, GDT)
else:
RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 3, GDT)
RASTER.SetGeoTransform(geotransform) # specify coords
RASTER.SetProjection(CRS.ExportToWkt()) # export coords to file
if n_bands ==1:
            feedback.pushInfo(self.tr('Writing raster band...', 'Escrevendo banda do raster...'))
banda = RASTER.GetRasterBand(1)
banda.WriteArray(band1)
banda.SetNoDataValue(Pixel_Nulo)
else:
feedback.pushInfo(self.tr('Writing Band R...', 'Escrevendo Banda R...'))
bandaR = RASTER.GetRasterBand(1)
bandaR.WriteArray(band1)
feedback.pushInfo(self.tr('Writing Band G...', 'Escrevendo Banda G...'))
bandaG = RASTER.GetRasterBand(2)
bandaG.WriteArray(band2)
feedback.pushInfo(self.tr('Writing Band B...', 'Escrevendo Banda B...'))
bandaB = RASTER.GetRasterBand(3)
bandaB.WriteArray(band3)
feedback.pushInfo(self.tr('Saving raster...', 'Salvando raster...'))
        RASTER.FlushCache() # write to disk
        RASTER = None # save and close
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
self.CAMINHO = RGB_Output
self.CARREGAR = Carregar
return {self.RasterOUT: RGB_Output}
    # Output file loading
CAMINHO = ''
CARREGAR = True
def postProcessAlgorithm(self, context, feedback):
if self.CARREGAR:
rlayer = QgsRasterLayer(self.CAMINHO, self.tr('Patched Image', 'Imagem Remendada'))
QgsProject.instance().addMapLayer(rlayer)
return {}
|
[
"osgeo.gdal.Open",
"PyQt5.QtCore.QCoreApplication.translate",
"osgeo.osr.SpatialReference",
"qgis.core.QgsApplication.locale",
"lftools.geocapt.dip.Interpolar",
"os.path.dirname",
"qgis.core.QgsProject.instance",
"lftools.geocapt.imgs.Imgs",
"osgeo.gdal_array.NumericTypeCodeToGDALTypeCode",
"osgeo.gdal.GetDriverByName"
] |
[((2401, 2424), 'qgis.core.QgsApplication.locale', 'QgsApplication.locale', ([], {}), '()\n', (2422, 2424), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((2478, 2526), 'PyQt5.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['"""Processing"""', 'string'], {}), "('Processing', string)\n", (2504, 2526), False, 'from PyQt5.QtCore import QCoreApplication, QVariant\n'), ((6852, 6871), 'osgeo.gdal.Open', 'gdal.Open', (['RasterIN'], {}), '(RasterIN)\n', (6861, 6871), False, 'from osgeo import osr, gdal_array, gdal\n'), ((6918, 6947), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {'wkt': 'prj'}), '(wkt=prj)\n', (6938, 6947), False, 'from osgeo import osr, gdal_array, gdal\n'), ((12880, 12933), 'osgeo.gdal_array.NumericTypeCodeToGDALTypeCode', 'gdal_array.NumericTypeCodeToGDALTypeCode', (['band1.dtype'], {}), '(band1.dtype)\n', (12920, 12933), False, 'from osgeo import osr, gdal_array, gdal\n'), ((3754, 3760), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (3758, 3760), False, 'from lftools.geocapt.imgs import Imgs\n'), ((8483, 8502), 'osgeo.gdal.Open', 'gdal.Open', (['Rem_Path'], {}), '(Rem_Path)\n', (8492, 8502), False, 'from osgeo import osr, gdal_array, gdal\n'), ((8937, 8956), 'osgeo.gdal.Open', 'gdal.Open', (['Rem_Path'], {}), '(Rem_Path)\n', (8946, 8956), False, 'from osgeo import osr, gdal_array, gdal\n'), ((3366, 3391), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3381, 3391), False, 'import os\n'), ((12979, 13008), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (12999, 13008), False, 'from osgeo import osr, gdal_array, gdal\n'), ((13083, 13112), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (13103, 13112), False, 'from osgeo import osr, gdal_array, gdal\n'), ((14796, 14817), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (14815, 14817), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((10486, 10579), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band1', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (10496, 10579), 
False, 'from lftools.geocapt.dip import Interpolar\n'), ((10622, 10715), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band2', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (10632, 10715), False, 'from lftools.geocapt.dip import Interpolar\n'), ((10758, 10851), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band3', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (10768, 10851), False, 'from lftools.geocapt.dip import Interpolar\n'), ((11476, 11569), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band1', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (11486, 11569), False, 'from lftools.geocapt.dip import Interpolar\n'), ((11612, 11705), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band2', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (11622, 11705), False, 'from lftools.geocapt.dip import Interpolar\n'), ((11748, 11841), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band3', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (11758, 11841), False, 'from lftools.geocapt.dip import Interpolar\n'), ((12466, 12559), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', 'Rem_band1', 'Rem_origem', 'Rem_resol_X', 'Rem_resol_Y', 'reamostragem', 'Rem_nulo'], {}), '(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y,\n reamostragem, Rem_nulo)\n', (12476, 12559), False, 'from lftools.geocapt.dip import Interpolar\n'), ((3878, 3903), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3893, 3903), False, 'import os\n')]
|
from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from dockwina import Ui_Form as docka
class Dialog(QDialog, docka):
def __init__(self):
super(Dialog, self).__init__()
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlag(Qt.FramelessWindowHint)
self.font1 = QFont("Tajawal", 9)
self.label2.setFont(self.font1)
self.label7.setFont(self.font1)
self.label3.setFont(self.font1)
self.label5.setFont(self.font1)
self.label6.setFont(self.font1)
self.label.setFont(self.font1)
|
[
"PyQt5.QtWidgets.QDialog.__init__",
"PyQt5.QtGui.QFont"
] |
[((242, 264), 'PyQt5.QtWidgets.QDialog.__init__', 'QDialog.__init__', (['self'], {}), '(self)\n', (258, 264), False, 'from PyQt5.QtWidgets import QDialog\n'), ((367, 386), 'PyQt5.QtGui.QFont', 'QFont', (['"""Tajawal"""', '(9)'], {}), "('Tajawal', 9)\n", (372, 386), False, 'from PyQt5.QtGui import QFont\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, warnings
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
import django
from django.core.management import execute_from_command_line
if django.VERSION < (1, 6):
default_test_apps = [
'sortedm2m_tests',
'test_south_support',
]
else:
default_test_apps = [
'sortedm2m_tests',
]
# Only test south support for Django 1.6 and lower.
if django.VERSION < (1, 7):
default_test_apps += [
'test_south_support',
]
def runtests(*args):
if django.VERSION > (1, 8):
warnings.simplefilter("error", Warning)
warnings.filterwarnings("ignore", module="distutils")
try:
warnings.filterwarnings("ignore", category=ResourceWarning)
except NameError:
pass
warnings.filterwarnings("ignore", "invalid escape sequence", DeprecationWarning)
# Ignore a python 3.6 DeprecationWarning in ModelBase.__new__ that isn't
# fixed in Django 1.x
if sys.version_info > (3, 6) and django.VERSION < (2,):
warnings.filterwarnings(
"ignore", "__class__ not set defining", DeprecationWarning)
test_apps = list(args or default_test_apps)
execute_from_command_line([sys.argv[0], 'test', '--verbosity=1'] + test_apps)
if __name__ == '__main__':
runtests(*sys.argv[1:])
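# Example invocations (the app labels are the defaults listed above):
#   python runtests.py                  # run the default test apps
#   python runtests.py sortedm2m_tests  # run a single app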
|
[
"os.environ.setdefault",
"sys.path.insert",
"django.core.management.execute_from_command_line",
"warnings.simplefilter",
"os.path.abspath",
"warnings.filterwarnings"
] |
[((166, 192), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent'], {}), '(0, parent)\n', (181, 192), False, 'import os, sys, warnings\n'), ((193, 265), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""test_project.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'test_project.settings')\n", (214, 265), False, 'import os, sys, warnings\n'), ((138, 163), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import os, sys, warnings\n'), ((1414, 1491), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (["([sys.argv[0], 'test', '--verbosity=1'] + test_apps)"], {}), "([sys.argv[0], 'test', '--verbosity=1'] + test_apps)\n", (1439, 1491), False, 'from django.core.management import execute_from_command_line\n'), ((754, 793), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'Warning'], {}), "('error', Warning)\n", (775, 793), False, 'import os, sys, warnings\n'), ((802, 855), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'module': '"""distutils"""'}), "('ignore', module='distutils')\n", (825, 855), False, 'import os, sys, warnings\n'), ((992, 1077), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""invalid escape sequence"""', 'DeprecationWarning'], {}), "('ignore', 'invalid escape sequence', DeprecationWarning\n )\n", (1015, 1077), False, 'import os, sys, warnings\n'), ((881, 940), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'ResourceWarning'}), "('ignore', category=ResourceWarning)\n", (904, 940), False, 'import os, sys, warnings\n'), ((1260, 1347), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""__class__ not set defining"""', 'DeprecationWarning'], {}), "('ignore', '__class__ not set defining',\n DeprecationWarning)\n", (1283, 1347), False, 'import os, sys, warnings\n')]
|
#!/usr/bin/env python3
import concurrent.futures
import logging
import requests
from sys import argv, exit
from urllib.parse import urlparse
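# Reads a list of URLs (one per line) from the file given as the first argument and keeps only "stable"
# endpoints (HTTP 200 twice in a row with an identical response size). Each stable URL is then re-requested
# with a same-origin and with a cross-origin Origin header, and any change in status code or response size
# is recorded in results.csv.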
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
HEADERS = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.30 Safari/537.36'
}
MIN_RESPONSE_LENGTH = 100
NUM_WORKERS = 50
urls = []
if len(argv) < 2:
exit("Please specify a URLs file.")
with open(argv[1]) as f:
urls = [line.rstrip() for line in f]
def check(url):
# Issue a GET request
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
response_size = len(r.text)
if r.status_code != 200 or response_size < MIN_RESPONSE_LENGTH:
logging.debug("Ignoring %s: response %d, response size %d.",
url, r.status_code, response_size)
return None
# Issue a second request to check for stability (200 + same response size)
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
if r.status_code != 200 or response_size != len(r.text):
logging.debug("URL %s is unstable.", url)
return None
logging.info("URL %s is stable.", url)
# If the URL is stable, try adding a same-origin Origin header
parsed_url = urlparse(r.url)
origin = parsed_url.scheme + '://' + parsed_url.netloc
logging.debug('Sending same-origin Origin %s for %s...', origin, url)
result = {
'url': url,
'SAMEORIGIN_OK': False,
'CROSSORIGIN_OK': False,
'SAMEORIGIN_KO_STATUS': False,
'SAMEORIGIN_KO_RESPONSE': False,
'CROSSORIGIN_KO_STATUS': False,
'CROSSORIGIN_KO_RESPONSE': False
}
r = requests.get(url, timeout=5, allow_redirects=False,
headers={**HEADERS, **{'Origin': origin}})
if r.status_code != 200:
logging.info(
"[SAME ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['SAMEORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[SAME ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['SAMEORIGIN_KO_RESPONSE'] = True
return result
result['SAMEORIGIN_OK'] = True
# If same-origin Origin header is OK, try a cross-origin one.
logging.debug('Sending cross-origin Origin for URL %s.', url)
r = requests.get(url, timeout=5, allow_redirects=False, headers={
**HEADERS, **{'Origin': 'https://example.org'}})
if r.status_code != 200:
logging.info(
"[CROSS ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['CROSSORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[CROSS ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['CROSSORIGIN_KO_RESPONSE'] = True
return result
result['CROSSORIGIN_OK'] = True
return result
with open('results.csv', 'w') as w:
print('url,SAMEORIGIN_OK,CROSSORIGIN_OK,SAMEORIGIN_KO_STATUS,SAMEORIGIN_KO_RESPONSE,CROSSORIGIN_KO_STATUS,CROSSORIGIN_KO_RESPONSE', file=w)
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_result = {executor.submit(check, url): url for url in urls}
for future in concurrent.futures.as_completed(future_to_result):
try:
result = future.result()
except:
continue
else:
if result:
print('{},{},{},{},{},{},{}'.format(result['url'],
int(result['SAMEORIGIN_OK']),
int(result['CROSSORIGIN_OK']),
int(result['SAMEORIGIN_KO_STATUS']),
int(result['SAMEORIGIN_KO_RESPONSE']),
int(result['CROSSORIGIN_KO_STATUS']),
int(result['CROSSORIGIN_KO_RESPONSE'])
), file=w)
|
[
"logging.basicConfig",
"logging.getLogger",
"urllib.parse.urlparse",
"logging.debug",
"requests.get",
"sys.exit",
"logging.info"
] |
[((142, 182), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (161, 182), False, 'import logging\n'), ((192, 219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (209, 219), False, 'import logging\n'), ((457, 492), 'sys.exit', 'exit', (['"""Please specify a URLs file."""'], {}), "('Please specify a URLs file.')\n", (461, 492), False, 'from sys import argv, exit\n'), ((612, 680), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)', 'allow_redirects': '(False)', 'headers': 'HEADERS'}), '(url, timeout=5, allow_redirects=False, headers=HEADERS)\n', (624, 680), False, 'import requests\n'), ((1015, 1083), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)', 'allow_redirects': '(False)', 'headers': 'HEADERS'}), '(url, timeout=5, allow_redirects=False, headers=HEADERS)\n', (1027, 1083), False, 'import requests\n'), ((1219, 1257), 'logging.info', 'logging.info', (['"""URL %s is stable."""', 'url'], {}), "('URL %s is stable.', url)\n", (1231, 1257), False, 'import logging\n'), ((1343, 1358), 'urllib.parse.urlparse', 'urlparse', (['r.url'], {}), '(r.url)\n', (1351, 1358), False, 'from urllib.parse import urlparse\n'), ((1422, 1491), 'logging.debug', 'logging.debug', (['"""Sending same-origin Origin %s for %s..."""', 'origin', 'url'], {}), "('Sending same-origin Origin %s for %s...', origin, url)\n", (1435, 1491), False, 'import logging\n'), ((1769, 1868), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)', 'allow_redirects': '(False)', 'headers': "{**HEADERS, **{'Origin': origin}}"}), "(url, timeout=5, allow_redirects=False, headers={**HEADERS, **{\n 'Origin': origin}})\n", (1781, 1868), False, 'import requests\n'), ((2414, 2475), 'logging.debug', 'logging.debug', (['"""Sending cross-origin Origin for URL %s."""', 'url'], {}), "('Sending cross-origin Origin for URL %s.', url)\n", (2427, 2475), False, 'import logging\n'), ((2484, 2598), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)', 'allow_redirects': '(False)', 'headers': "{**HEADERS, **{'Origin': 'https://example.org'}}"}), "(url, timeout=5, allow_redirects=False, headers={**HEADERS, **{\n 'Origin': 'https://example.org'}})\n", (2496, 2598), False, 'import requests\n'), ((789, 889), 'logging.debug', 'logging.debug', (['"""Ignoring %s: response %d, response size %d."""', 'url', 'r.status_code', 'response_size'], {}), "('Ignoring %s: response %d, response size %d.', url, r.\n status_code, response_size)\n", (802, 889), False, 'import logging\n'), ((1153, 1194), 'logging.debug', 'logging.debug', (['"""URL %s is unstable."""', 'url'], {}), "('URL %s is unstable.', url)\n", (1166, 1194), False, 'import logging\n'), ((1922, 2010), 'logging.info', 'logging.info', (['"""[SAME ORIGIN] URL %s changed status code to %d."""', 'url', 'r.status_code'], {}), "('[SAME ORIGIN] URL %s changed status code to %d.', url, r.\n status_code)\n", (1934, 2010), False, 'import logging\n'), ((2653, 2742), 'logging.info', 'logging.info', (['"""[CROSS ORIGIN] URL %s changed status code to %d."""', 'url', 'r.status_code'], {}), "('[CROSS ORIGIN] URL %s changed status code to %d.', url, r.\n status_code)\n", (2665, 2742), False, 'import logging\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform and how a defined moving
platform can be exploited as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to setup and configure a simulation environment in order to provide a
# multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
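# %%
# As a quick sanity check (an addition, not part of the original text): with :math:`\triangle k = 1` s and the
# initial state given above, a single prediction step gives
#
# .. math::
#           F_{k}\mathbf{x}_{k-1} = \begin{bmatrix}
#                     0\\ 0\\ 50\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# i.e. the platform simply advances 50 m per second along :math:`y` while holding its altitude of 8000 m.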
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
#                \arctan(\mathcal{y},\mathcal{x}) \\
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
# \mathbf{z}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
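# %%
# As a rough numerical illustration (an addition, ignoring the mounting/rotation offsets and the noise term): for a
# target displaced by :math:`[\mathcal{x}, \mathcal{y}, \mathcal{z}] = [0, 1800, 0]` m from the sensor, roughly the
# initial sensor-target geometry used below, the model gives an elevation of :math:`\arcsin(0/1800) = 0` and a
# bearing of :math:`\arctan(1800, 0) = \pi/2`.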
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platform's local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example and which creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependent).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32 # specified in radians per seconds...
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 24 one-second time steps.
times = np.arange(0, 24, 1)  # 24 time steps
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have set up our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater; this is because each sensor adds its measurement model to
# each detection it generates. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
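# %%
# A minimal sketch of such a race-track target (an assumption, not part of the original example; the names below are
# illustrative), re-using the transition models defined above -- two straight legs joined by two 180 degree
# coordinated turns:
#
# .. code-block:: python
#
#     half_turn = CombinedLinearGaussianTransitionModel(
#         [ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=np.pi/16),
#          ConstantVelocity(0.)])
#     racetrack_models = [straight_level, half_turn, straight_level, half_turn]
#     racetrack_times = [timedelta(seconds=8), timedelta(seconds=16),
#                        timedelta(seconds=8), timedelta(seconds=16)]
#     racetrack_target = MultiTransitionMovingPlatform(transition_models=racetrack_models,
#                                                      transition_times=racetrack_times,
#                                                      states=initial_target_state,
#                                                      position_mapping=(0, 2, 4),
#                                                      velocity_mapping=(1, 3, 5),
#                                                      sensors=None)
#
# Each 16-second turn at :math:`\pi/16` rad/s sweeps through :math:`\pi` radians, so the target loops around an oval
# as the transition sequence repeats.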
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
|
[
"stonesoup.updater.particle.ParticleUpdater",
"stonesoup.platform.base.MovingPlatform",
"stonesoup.simulator.simple.DummyGroundTruthSimulator",
"stonesoup.measures.Mahalanobis",
"stonesoup.functions.sphere2cart",
"numpy.array",
"datetime.timedelta",
"stonesoup.simulator.platform.PlatformDetectionSimulator",
"numpy.arange",
"stonesoup.sensor.radar.radar.RadarElevationBearingRangeRate",
"stonesoup.models.transition.linear.ConstantVelocity",
"stonesoup.types.array.StateVector",
"stonesoup.models.transition.linear.ConstantTurn",
"stonesoup.resampler.particle.SystematicResampler",
"stonesoup.dataassociator.neighbour.GNNWith2DAssignment",
"stonesoup.types.state.State",
"numpy.deg2rad",
"stonesoup.sensor.passive.PassiveElevationBearing",
"stonesoup.deleter.time.UpdateTimeStepsDeleter",
"numpy.diag",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.animation.ArtistAnimation",
"stonesoup.tracker.simple.SingleTargetTracker",
"stonesoup.predictor.particle.ParticlePredictor",
"stonesoup.initiator.simple.GaussianParticleInitiator",
"stonesoup.platform.base.MultiTransitionMovingPlatform"
] |
[((2101, 2115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2113, 2115), False, 'from datetime import datetime\n'), ((3612, 3659), 'stonesoup.types.array.StateVector', 'StateVector', (['[[0], [0], [0], [50], [8000], [0]]'], {}), '([[0], [0], [0], [50], [8000], [0]])\n', (3623, 3659), False, 'from stonesoup.types.array import StateVector\n'), ((3676, 3706), 'stonesoup.types.state.State', 'State', (['initial_loc', 'start_time'], {}), '(initial_loc, start_time)\n', (3681, 3706), False, 'from stonesoup.types.state import State, GaussianState\n'), ((3940, 4071), 'stonesoup.platform.base.MovingPlatform', 'MovingPlatform', ([], {'states': 'initial_state', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'transition_model': 'transition_model'}), '(states=initial_state, position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5), transition_model=transition_model)\n', (3954, 4071), False, 'from stonesoup.platform.base import MovingPlatform\n'), ((5065, 5088), 'stonesoup.types.array.StateVector', 'StateVector', (['[10, 0, 0]'], {}), '([10, 0, 0])\n', (5076, 5088), False, 'from stonesoup.types.array import StateVector\n'), ((5132, 5154), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5143, 5154), False, 'from stonesoup.types.array import StateVector\n'), ((5201, 5425), 'stonesoup.sensor.radar.radar.RadarElevationBearingRangeRate', 'RadarElevationBearingRangeRate', ([], {'ndim_state': '(6)', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'noise_covar': 'radar_noise_covar', 'mounting_offset': 'radar_mounting_offsets', 'rotation_offset': 'radar_rotation_offsets'}), '(ndim_state=6, position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5), noise_covar=radar_noise_covar,\n mounting_offset=radar_mounting_offsets, rotation_offset=\n radar_rotation_offsets)\n', (5231, 5425), False, 'from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate\n'), ((7195, 7218), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 8, -1]'], {}), '([0, 8, -1])\n', (7206, 7218), False, 'from stonesoup.types.array import StateVector\n'), ((7278, 7300), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (7289, 7300), False, 'from stonesoup.types.array import StateVector\n'), ((7348, 7527), 'stonesoup.sensor.passive.PassiveElevationBearing', 'PassiveElevationBearing', ([], {'ndim_state': '(6)', 'mapping': '(0, 2, 4)', 'noise_covar': 'imager_noise_covar', 'mounting_offset': 'imager_mounting_offsets', 'rotation_offset': 'imager_rotation_offsets'}), '(ndim_state=6, mapping=(0, 2, 4), noise_covar=\n imager_noise_covar, mounting_offset=imager_mounting_offsets,\n rotation_offset=imager_rotation_offsets)\n', (7371, 7527), False, 'from stonesoup.sensor.passive import PassiveElevationBearing\n'), ((10056, 10076), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (10064, 10076), True, 'import numpy as np\n'), ((10150, 10235), 'stonesoup.models.transition.linear.ConstantTurn', 'ConstantTurn', ([], {'turn_noise_diff_coeffs': 'turn_noise_diff_coeffs', 'turn_rate': 'turn_rate'}), '(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate\n )\n', (10162, 10235), False, 'from stonesoup.models.transition.linear import ConstantTurn\n'), ((10863, 10914), 'stonesoup.types.array.StateVector', 'StateVector', (['[[0], [-40], [1800], [0], [8000], [0]]'], {}), '([[0], [-40], [1800], [0], [8000], [0]])\n', (10874, 10914), False, 'from stonesoup.types.array import StateVector\n'), 
((10938, 10980), 'stonesoup.types.state.State', 'State', (['initial_target_location', 'start_time'], {}), '(initial_target_location, start_time)\n', (10943, 10980), False, 'from stonesoup.types.state import State, GaussianState\n'), ((10990, 11194), 'stonesoup.platform.base.MultiTransitionMovingPlatform', 'MultiTransitionMovingPlatform', ([], {'transition_models': 'manoeuvre_list', 'transition_times': 'manoeuvre_times', 'states': 'initial_target_state', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'sensors': 'None'}), '(transition_models=manoeuvre_list,\n transition_times=manoeuvre_times, states=initial_target_state,\n position_mapping=(0, 2, 4), velocity_mapping=(1, 3, 5), sensors=None)\n', (11019, 11194), False, 'from stonesoup.platform.base import MultiTransitionMovingPlatform\n'), ((12201, 12220), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1)'], {}), '(0, 24, 1)\n', (12210, 12220), True, 'import numpy as np\n'), ((12339, 12382), 'stonesoup.simulator.simple.DummyGroundTruthSimulator', 'DummyGroundTruthSimulator', ([], {'times': 'timestamps'}), '(times=timestamps)\n', (12364, 12382), False, 'from stonesoup.simulator.simple import DummyGroundTruthSimulator\n'), ((12389, 12476), 'stonesoup.simulator.platform.PlatformDetectionSimulator', 'PlatformDetectionSimulator', ([], {'groundtruth': 'truths', 'platforms': '[sensor_platform, target]'}), '(groundtruth=truths, platforms=[sensor_platform,\n target])\n', (12415, 12476), False, 'from stonesoup.simulator.platform import PlatformDetectionSimulator\n'), ((13354, 13396), 'stonesoup.predictor.particle.ParticlePredictor', 'ParticlePredictor', (['target_transition_model'], {}), '(target_transition_model)\n', (13371, 13396), False, 'from stonesoup.predictor.particle import ParticlePredictor\n'), ((13456, 13477), 'stonesoup.resampler.particle.SystematicResampler', 'SystematicResampler', ([], {}), '()\n', (13475, 13477), False, 'from stonesoup.resampler.particle import SystematicResampler\n'), ((13488, 13548), 'stonesoup.updater.particle.ParticleUpdater', 'ParticleUpdater', ([], {'measurement_model': 'None', 'resampler': 'resampler'}), '(measurement_model=None, resampler=resampler)\n', (13503, 13548), False, 'from stonesoup.updater.particle import ParticleUpdater\n'), ((13872, 13958), 'stonesoup.initiator.simple.GaussianParticleInitiator', 'GaussianParticleInitiator', ([], {'number_particles': '(500)', 'initiator': 'single_point_initiator'}), '(number_particles=500, initiator=\n single_point_initiator)\n', (13897, 13958), False, 'from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator\n'), ((14114, 14147), 'stonesoup.dataassociator.neighbour.GNNWith2DAssignment', 'GNNWith2DAssignment', (['hypothesiser'], {}), '(hypothesiser)\n', (14133, 14147), False, 'from stonesoup.dataassociator.neighbour import GNNWith2DAssignment\n'), ((14217, 14267), 'stonesoup.deleter.time.UpdateTimeStepsDeleter', 'UpdateTimeStepsDeleter', ([], {'time_steps_since_update': '(10)'}), '(time_steps_since_update=10)\n', (14239, 14267), False, 'from stonesoup.deleter.time import UpdateTimeStepsDeleter\n'), ((14319, 14444), 'stonesoup.tracker.simple.SingleTargetTracker', 'SingleTargetTracker', ([], {'initiator': 'initiator', 'deleter': 'deleter', 'detector': 'sim', 'data_associator': 'data_associator', 'updater': 'updater'}), '(initiator=initiator, deleter=deleter, detector=sim,\n data_associator=data_associator, updater=updater)\n', (14338, 14444), False, 'from stonesoup.tracker.simple import SingleTargetTracker\n'), ((15175, 
15202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (15185, 15202), True, 'from matplotlib import pyplot as plt\n'), ((16673, 16711), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'frames'], {}), '(fig, frames)\n', (16698, 16711), False, 'from matplotlib import animation\n'), ((10436, 10456), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(8)'}), '(seconds=8)\n', (10445, 10456), False, 'from datetime import timedelta\n'), ((10477, 10497), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(8)'}), '(seconds=8)\n', (10486, 10497), False, 'from datetime import timedelta\n'), ((3826, 3847), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3842, 3847), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((3848, 3869), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3864, 3869), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((3870, 3891), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3886, 3891), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9923, 9944), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9939, 9944), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9945, 9966), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9961, 9966), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9967, 9988), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9983, 9988), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((10350, 10371), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (10366, 10371), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13244, 13263), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(5)'], {}), '(5)\n', (13260, 13263), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13265, 13284), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(5)'], {}), '(5)\n', (13281, 13284), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13286, 13305), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(1)'], {}), '(1)\n', (13302, 13305), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13798, 13846), 'numpy.diag', 'np.diag', (['[10000, 1000, 10000, 1000, 10000, 1000]'], {}), '([10000, 1000, 10000, 1000, 10000, 1000])\n', (13805, 13846), True, 'import numpy as np\n'), ((14057, 14070), 'stonesoup.measures.Mahalanobis', 'Mahalanobis', ([], {}), '()\n', (14068, 14070), False, 'from stonesoup.measures import Mahalanobis\n'), ((16090, 16134), 'stonesoup.functions.sphere2cart', 
'sphere2cart', (['r', '(a + az_offset)', '(e + el_offset)'], {}), '(r, a + az_offset, e + el_offset)\n', (16101, 16134), False, 'from stonesoup.functions import sphere2cart\n'), ((4888, 4901), 'numpy.deg2rad', 'np.deg2rad', (['(3)'], {}), '(3)\n', (4898, 4901), True, 'import numpy as np\n'), ((4930, 4943), 'numpy.deg2rad', 'np.deg2rad', (['(3)'], {}), '(3)\n', (4940, 4943), True, 'import numpy as np\n'), ((7024, 7040), 'numpy.deg2rad', 'np.deg2rad', (['(0.05)'], {}), '(0.05)\n', (7034, 7040), True, 'import numpy as np\n'), ((7111, 7127), 'numpy.deg2rad', 'np.deg2rad', (['(0.05)'], {}), '(0.05)\n', (7121, 7127), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# This program shows how to use MPI_Alltoall. Each processor
# sends/receives a different random number to/from the other processors.
#
# numpy is required
import numpy
from numpy import *
# mpi4py module
from mpi4py import MPI
import sys
def myquit(mes):
MPI.Finalize()
print(mes)
sys.exit()
# Initialize MPI and print out hello
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
# We are going to send/recv a single value to/from
# each processor. Here we allocate arrays
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")
# Fill the send arrays with random numbers
random.seed(myid)
for i in range(0, numprocs):
s_vals[i]=random.randint(1,10)
print("myid=",myid,"s_vals=",s_vals)
# Send/recv to/from all
comm.Alltoall(s_vals, r_vals)
print("myid=",myid,"r_vals=",r_vals)
MPI.Finalize()
# Note, the sent values and the recv values are
# like a transpose of each other
#
# mpiexec -n 4 ./P_ex07.py | grep s_v | sort
# myid= 0 s_vals= [6 1 4 4]
# myid= 1 s_vals= [6 9 6 1]
# myid= 2 s_vals= [9 9 7 3]
# myid= 3 s_vals= [9 4 9 9]
# mpiexec -n 4 ./P_ex07.py | grep r_v | sort
# myid= 0 r_vals= [6 6 9 9]
# myid= 1 r_vals= [1 9 9 4]
# myid= 2 r_vals= [4 6 7 9]
# myid= 3 r_vals= [4 1 3 9]
|
[
"mpi4py.MPI.Finalize",
"sys.exit"
] |
[((895, 909), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (907, 909), False, 'from mpi4py import MPI\n'), ((284, 298), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (296, 298), False, 'from mpi4py import MPI\n'), ((326, 336), 'sys.exit', 'sys.exit', ([], {}), '()\n', (334, 336), False, 'import sys\n')]
|
""" render_fmo.py renders obj file to rgb image with fmo model
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj: wrapper function for render() render
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
modified by rozumden
"""
import sys
import os
import random
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
# import moviepy.editor as mpy
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
## Input: pars is either 2x2 (line) or 2x3 (parabola)
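    ## The two rows are the (row, col) image coordinates: column 0 is the start point, column 1 the linear term
    ## and column 2 (if present) the quadratic term, i.e. p(t) = pars[:,0] + pars[:,1]*t + pars[:,2]*t**2, t in [0,1].
    ## A line is drawn as a single segment; a parabola is approximated by ns-1 straight segments.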
if pars.shape[1] == 2:
pars = np.concatenate( (pars, np.zeros((2,1))),1)
ns = 2
else:
ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
def open_log(temp_folder = g_temp): # redirect output to log file
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old): # disable output redirection
os.close(1)
os.dup(old)
os.close(old)
def clear_mesh():
""" clear all meshes in the secene
"""
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
# bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
# image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #
def render(obj_path, viewpoint, temp_folder):
"""render rbg image
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
obj_path: a string variable indicate the obj file path
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
# rot_base = np.array([math.pi/2,0,0])
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
return True, True ## transparent object
los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
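        # Render the first and last frames first: if the object mask touches the image border (or is empty) at
        # either end of the motion, the sample is rejected before the intermediate frames are rendered.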
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True) #start rendering
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
return False, True ## sample different starting viewpoint
else:
do_repeat = True ## just sample another motion direction
if do_repeat:
break
close_log(old)
if do_repeat == False:
break
if do_repeat: ## sample different starting viewpoint
return False, True
return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
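    # FMO image formation model: I = sum_k H_k*F_k + (1 - sum_k H_k*M_k) * B, where H_k is the blur kernel along
    # the k-th trajectory segment (normalised to integrate to 1/g_fmo_steps), F_k/M_k are the sharp object
    # appearance/mask and B is the background.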
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
meanval = np.mean(Diff[MH > 0.05])
print("Contrast {}".format(meanval))
if meanval < 0.2:
return False
if g_skip_small:
sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])
print("Size percentage {}".format(sizeper))
if sizeper < 0.05:
return False
Im = Im[:,:,[2,1,0]]
Ims = Image.fromarray((Im * 255).astype(np.uint8))
Ims.save(path)
Ball = np.zeros(B.shape+(n_im,))
Ball[:,:,:,0] = B
for ki in range(1,n_im):
bgrki_path = seq_images[bgri-ki]
Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
Ball[Ball > 1] = 1
Ball[Ball < 0] = 0
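    # The pixel-wise median over the current and the n_im-1 preceding background frames suppresses moving scene
    # content and is saved with the ground truth as a clean background estimate.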
Bmed = np.median(Ball,3)
Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))
Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))
# Ims.save(os.path.join(g_temp,"I.png"))
# Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png"))
# Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png"))
# Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,"M.png"))
# Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png"))
# Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png"))
if False:
Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])
Fwr = (Fwr * 255).astype(np.uint8)
# Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255
out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True)
for ki in range(g_fmo_steps):
out.write(Fwr[:,:,:,ki])
out.release()
return True
def render_obj(obj_path, path, objid, obj_name, temp_folder):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
        obj_path: a string variable indicating the obj file path
"""
vps_path = random.sample(g_view_point_file, 1)[0]
vps = list(load_viewpoint(vps_path))
random.shuffle(vps)
save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid))
gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid))
video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid))
if not os.path.exists(gt_path):
os.mkdir(gt_path)
image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = gt_path
for imt in bpy.data.images:
bpy.data.images.remove(imt)
if g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.context.scene.objects.active = bpy.data.objects[oi]
# pdb.set_trace()
# for m in bpy.data.materials:
# bpy.data.materials.remove(m)
# bpy.ops.object.material_slot_remove()
bpy.ops.object.editmode_toggle()
bpy.ops.uv.cube_project()
bpy.ops.object.editmode_toggle()
texture_images = os.listdir(g_texture_path)
texture = random.choice(texture_images)
tex_path = os.path.join(g_texture_path,texture)
# mat = bpy.data.materials.new(texture)
# mat.use_nodes = True
# nt = mat.node_tree
# nodes = nt.nodes
# links = nt.links
# # Image Texture
# textureNode = nodes.new("ShaderNodeTexImage")
# textureNode.image = bpy.data.images.load(tex_path)
# links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# mat.specular_intensity = 0
# bpy.data.objects[oi].active_material = mat
# print(bpy.data.objects[oi].active_material)
for mat in bpy.data.materials:
nodes = mat.node_tree.nodes
links = mat.node_tree.links
textureNode = nodes.new("ShaderNodeTexImage")
textureNode.image = bpy.data.images.load(tex_path)
links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# print(bpy.data.objects[oi].active_material)
tri = 0
while tri <= g_max_trials:
tri += 1
vp = random.sample(vps, 1)[0]
sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder)
if sample_different_vp:
if sample_different_object:
print('Transparent object!')
return False
print('Rendering failed, repeating')
continue
success = make_fmo(save_path, gt_path, video_path)
if success:
return True
print('Making FMO failed, repeating')
return False
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
if g_render_light:
bpy.data.objects['Lamp'].data.energy = 50
bpy.ops.object.lamp_add(type='SUN')
bpy.data.objects['Sun'].data.energy = 5
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
init_all()
argv = sys.argv
argv = argv[argv.index("--") + 1:]
start_index = int(argv[0])
step_index = int(argv[1])
print('Start index {}, step index {}'.format(start_index, step_index))
temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'
for obj_name in g_render_objs[start_index:(start_index+step_index)]:
print("Processing object {}".format(obj_name))
obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
if not os.path.exists(os.path.join(obj_folder,"GT")):
os.mkdir(os.path.join(obj_folder,"GT"))
num = g_shapenet_categlory_pair[obj_name]
search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
pathes = glob.glob(search_path, recursive=True)
random.shuffle(pathes)
objid = 1
tri = 0
while objid <= g_number_per_category:
print(" instance {}".format(objid))
clear_mesh()
path = random.sample(pathes, 1)[0]
old = open_log(temp_folder)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True)
# bpy.ops.import_scene.obj(filepath=path)
close_log(old)
#combine_objects()
#scale_objects(0.5)
result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
if result:
objid += 1
tri = 0
else:
print('Error! Rendering another object from the category!')
tri += 1
if tri > g_max_trials:
print('No object find in the category!!!!!!!!!')
break
|
[
"os.open",
"numpy.array",
"bpy.data.images.load",
"bpy.data.images.remove",
"os.remove",
"bpy.ops.object.delete",
"os.path.exists",
"numpy.mean",
"os.listdir",
"scipy.signal.fftconvolve",
"bpy.data.textures.remove",
"numpy.max",
"os.dup",
"numpy.linspace",
"bpy.ops.object.lamp_add",
"os.mkdir",
"bpy.data.materials.remove",
"cv2.VideoWriter_fourcc",
"sys.stdout.flush",
"glob.glob",
"numpy.round",
"numpy.abs",
"random.sample",
"random.choice",
"bpy.data.meshes.remove",
"random.shuffle",
"random.uniform",
"os.close",
"bpy.ops.object.select_all",
"os.path.dirname",
"cv2.imread",
"bpy.ops.object.editmode_toggle",
"mathutils.Euler",
"numpy.median",
"os.makedirs",
"numpy.logical_and",
"os.path.join",
"skimage.draw.line_aa",
"bpy.context.scene.frame_set",
"numpy.sum",
"numpy.zeros",
"bpy.ops.import_scene.obj",
"bpy.ops.render.render",
"os.path.abspath",
"bpy.ops.uv.cube_project"
] |
[((869, 894), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (884, 894), False, 'import os\n'), ((911, 936), 'os.path.dirname', 'os.path.dirname', (['abs_path'], {}), '(abs_path)\n', (926, 936), False, 'import os\n'), ((1235, 1250), 'numpy.max', 'np.max', (['[2, ns]'], {}), '([2, ns])\n', (1241, 1250), True, 'import numpy as np\n'), ((1266, 1287), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ns'], {}), '(0, 1, ns)\n', (1277, 1287), True, 'import numpy as np\n'), ((2127, 2174), 'os.path.join', 'os.path.join', (['temp_folder', '"""blender_render.log"""'], {}), "(temp_folder, 'blender_render.log')\n", (2139, 2174), False, 'import os\n'), ((2284, 2293), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (2290, 2293), False, 'import os\n'), ((2298, 2316), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2314, 2316), False, 'import sys\n'), ((2321, 2332), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (2329, 2332), False, 'import os\n'), ((2337, 2366), 'os.open', 'os.open', (['logfile', 'os.O_WRONLY'], {}), '(logfile, os.O_WRONLY)\n', (2344, 2366), False, 'import os\n'), ((2436, 2447), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (2444, 2447), False, 'import os\n'), ((2452, 2463), 'os.dup', 'os.dup', (['old'], {}), '(old)\n', (2458, 2463), False, 'import os\n'), ((2468, 2481), 'os.close', 'os.close', (['old'], {}), '(old)\n', (2476, 2481), False, 'import os\n'), ((2553, 2597), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (2578, 2597), False, 'import bpy\n'), ((2696, 2719), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (2717, 2719), False, 'import bpy\n'), ((9367, 9402), 'os.listdir', 'os.listdir', (['g_background_image_path'], {}), '(g_background_image_path)\n', (9377, 9402), False, 'import os\n'), ((9418, 9450), 'random.choice', 'random.choice', (['background_images'], {}), '(background_images)\n', (9431, 9450), False, 'import random\n'), ((9995, 10012), 'numpy.zeros', 'np.zeros', (['B.shape'], {}), '(B.shape)\n', (10003, 10012), True, 'import numpy as np\n'), ((10022, 10043), 'numpy.zeros', 'np.zeros', (['B.shape[:2]'], {}), '(B.shape[:2])\n', (10030, 10043), True, 'import numpy as np\n'), ((10131, 10171), 'numpy.zeros', 'np.zeros', (['(B.shape[:2] + (4, g_fmo_steps))'], {}), '(B.shape[:2] + (4, g_fmo_steps))\n', (10139, 10171), True, 'import numpy as np\n'), ((10186, 10212), 'numpy.zeros', 'np.zeros', (['(2, g_fmo_steps)'], {}), '((2, g_fmo_steps))\n', (10194, 10212), True, 'import numpy as np\n'), ((11548, 11575), 'numpy.zeros', 'np.zeros', (['(B.shape + (n_im,))'], {}), '(B.shape + (n_im,))\n', (11556, 11575), True, 'import numpy as np\n'), ((11924, 11942), 'numpy.median', 'np.median', (['Ball', '(3)'], {}), '(Ball, 3)\n', (11933, 11942), True, 'import numpy as np\n'), ((13405, 13424), 'random.shuffle', 'random.shuffle', (['vps'], {}), '(vps)\n', (13419, 13424), False, 'import random\n'), ((17094, 17134), 'os.path.join', 'os.path.join', (['g_syn_rgb_folder', 'obj_name'], {}), '(g_syn_rgb_folder, obj_name)\n', (17106, 17134), False, 'import os\n'), ((17377, 17426), 'os.path.join', 'os.path.join', (['g_shapenet_path', 'num', '"""**"""', '"""*.obj"""'], {}), "(g_shapenet_path, num, '**', '*.obj')\n", (17389, 17426), False, 'import os\n'), ((17439, 17477), 'glob.glob', 'glob.glob', (['search_path'], {'recursive': '(True)'}), '(search_path, recursive=True)\n', (17448, 17477), False, 'import glob\n'), ((17482, 17504), 'random.shuffle', 'random.shuffle', 
(['pathes'], {}), '(pathes)\n', (17496, 17504), False, 'import random\n'), ((1644, 1687), 'skimage.draw.line_aa', 'line_aa', (['start[0]', 'start[1]', 'end[0]', 'end[1]'], {}), '(start[0], start[1], end[0], end[1])\n', (1651, 1687), False, 'from skimage.draw import line_aa\n'), ((2191, 2209), 'os.remove', 'os.remove', (['logfile'], {}), '(logfile)\n', (2200, 2209), False, 'import os\n'), ((6634, 6666), 'os.path.exists', 'os.path.exists', (['g_syn_rgb_folder'], {}), '(g_syn_rgb_folder)\n', (6648, 6666), False, 'import os\n'), ((6676, 6702), 'os.mkdir', 'os.mkdir', (['g_syn_rgb_folder'], {}), '(g_syn_rgb_folder)\n', (6684, 6702), False, 'import os\n'), ((9478, 9534), 'os.path.join', 'os.path.join', (['g_background_image_path', 'seq_name', '"""*.jpg"""'], {}), "(g_background_image_path, seq_name, '*.jpg')\n", (9490, 9534), False, 'import os\n'), ((9767, 9787), 'cv2.imread', 'cv2.imread', (['bgr_path'], {}), '(bgr_path)\n', (9777, 9787), False, 'import cv2\n'), ((10055, 10131), 'numpy.array', 'np.array', (['[[(B.shape[0] - 1) / 2 - 1, (B.shape[1] - 1) / 2 - 1], [1.0, 1.0]]'], {}), '([[(B.shape[0] - 1) / 2 - 1, (B.shape[1] - 1) / 2 - 1], [1.0, 1.0]])\n', (10063, 10131), True, 'import numpy as np\n'), ((10921, 10958), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H', 'M'], {'mode': '"""same"""'}), "(H, M, mode='same')\n", (10939, 10958), False, 'from scipy import signal\n'), ((11124, 11148), 'numpy.mean', 'np.mean', (['Diff[MH > 0.05]'], {}), '(Diff[MH > 0.05])\n', (11131, 11148), True, 'import numpy as np\n'), ((12008, 12040), 'os.path.join', 'os.path.join', (['gt_path', '"""bgr.png"""'], {}), "(gt_path, 'bgr.png')\n", (12020, 12040), False, 'import os\n'), ((12110, 12146), 'os.path.join', 'os.path.join', (['gt_path', '"""bgr_med.png"""'], {}), "(gt_path, 'bgr_med.png')\n", (12122, 12146), False, 'import os\n'), ((13321, 13356), 'random.sample', 'random.sample', (['g_view_point_file', '(1)'], {}), '(g_view_point_file, 1)\n', (13334, 13356), False, 'import random\n'), ((13658, 13681), 'os.path.exists', 'os.path.exists', (['gt_path'], {}), '(gt_path)\n', (13672, 13681), False, 'import os\n'), ((13691, 13708), 'os.mkdir', 'os.mkdir', (['gt_path'], {}), '(gt_path)\n', (13699, 13708), False, 'import os\n'), ((13853, 13880), 'bpy.data.images.remove', 'bpy.data.images.remove', (['imt'], {}), '(imt)\n', (13875, 13880), False, 'import bpy\n'), ((16555, 16590), 'bpy.ops.object.lamp_add', 'bpy.ops.object.lamp_add', ([], {'type': '"""SUN"""'}), "(type='SUN')\n", (16578, 16590), False, 'import bpy\n'), ((17146, 17172), 'os.path.exists', 'os.path.exists', (['obj_folder'], {}), '(obj_folder)\n', (17160, 17172), False, 'import os\n'), ((17182, 17205), 'os.makedirs', 'os.makedirs', (['obj_folder'], {}), '(obj_folder)\n', (17193, 17205), False, 'import os\n'), ((17735, 17885), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'path', 'axis_forward': '"""-Z"""', 'axis_up': '"""Y"""', 'filter_glob': '"""*.obj;*.mtl"""', 'use_split_groups': '(False)', 'use_split_objects': '(True)'}), "(filepath=path, axis_forward='-Z', axis_up='Y',\n filter_glob='*.obj;*.mtl', use_split_groups=False, use_split_objects=True)\n", (17759, 17885), False, 'import bpy\n'), ((1719, 1767), 'numpy.logical_and', 'np.logical_and', (['(rr < H.shape[0])', '(cc < H.shape[1])'], {}), '(rr < H.shape[0], cc < H.shape[1])\n', (1733, 1767), True, 'import numpy as np\n'), ((1769, 1799), 'numpy.logical_and', 'np.logical_and', (['(rr > 0)', '(cc > 0)'], {}), '(rr > 0, cc > 0)\n', (1783, 1799), True, 'import numpy as 
np\n'), ((2795, 2824), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['block'], {}), '(block)\n', (2817, 2824), False, 'import bpy\n'), ((2904, 2936), 'bpy.data.materials.remove', 'bpy.data.materials.remove', (['block'], {}), '(block)\n', (2929, 2936), False, 'import bpy\n'), ((3015, 3046), 'bpy.data.textures.remove', 'bpy.data.textures.remove', (['block'], {}), '(block)\n', (3039, 3046), False, 'import bpy\n'), ((3123, 3152), 'bpy.data.images.remove', 'bpy.data.images.remove', (['block'], {}), '(block)\n', (3145, 3152), False, 'import bpy\n'), ((8349, 8384), 'bpy.context.scene.frame_set', 'bpy.context.scene.frame_set', (['(ki + 1)'], {}), '(ki + 1)\n', (8376, 8384), False, 'import bpy\n'), ((8397, 8436), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (8418, 8436), False, 'import bpy\n'), ((9597, 9653), 'os.path.join', 'os.path.join', (['g_background_image_path', 'seq_name', '"""*.png"""'], {}), "(g_background_image_path, seq_name, '*.png')\n", (9609, 9653), False, 'import os\n'), ((10750, 10771), 'numpy.zeros', 'np.zeros', (['B.shape[:2]'], {}), '(B.shape[:2])\n', (10758, 10771), True, 'import numpy as np\n'), ((10861, 10908), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H', 'F[:, :, kk]'], {'mode': '"""same"""'}), "(H, F[:, :, kk], mode='same')\n", (10879, 10908), False, 'from scipy import signal\n'), ((11088, 11102), 'numpy.abs', 'np.abs', (['(Im - B)'], {}), '(Im - B)\n', (11094, 11102), True, 'import numpy as np\n'), ((11284, 11301), 'numpy.sum', 'np.sum', (['(MH > 0.01)'], {}), '(MH > 0.01)\n', (11290, 11301), True, 'import numpy as np\n'), ((12883, 12914), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (12905, 12914), False, 'import cv2\n'), ((14338, 14370), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (14368, 14370), False, 'import bpy\n'), ((14383, 14408), 'bpy.ops.uv.cube_project', 'bpy.ops.uv.cube_project', ([], {}), '()\n', (14406, 14408), False, 'import bpy\n'), ((14421, 14453), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (14451, 14453), False, 'import bpy\n'), ((14484, 14510), 'os.listdir', 'os.listdir', (['g_texture_path'], {}), '(g_texture_path)\n', (14494, 14510), False, 'import os\n'), ((14533, 14562), 'random.choice', 'random.choice', (['texture_images'], {}), '(texture_images)\n', (14546, 14562), False, 'import random\n'), ((14586, 14623), 'os.path.join', 'os.path.join', (['g_texture_path', 'texture'], {}), '(g_texture_path, texture)\n', (14598, 14623), False, 'import os\n'), ((15731, 15752), 'random.sample', 'random.sample', (['vps', '(1)'], {}), '(vps, 1)\n', (15744, 15752), False, 'import random\n'), ((17232, 17262), 'os.path.join', 'os.path.join', (['obj_folder', '"""GT"""'], {}), "(obj_folder, 'GT')\n", (17244, 17262), False, 'import os\n'), ((17281, 17311), 'os.path.join', 'os.path.join', (['obj_folder', '"""GT"""'], {}), "(obj_folder, 'GT')\n", (17293, 17311), False, 'import os\n'), ((17663, 17687), 'random.sample', 'random.sample', (['pathes', '(1)'], {}), '(pathes, 1)\n', (17676, 17687), False, 'import random\n'), ((1166, 1182), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1174, 1182), True, 'import numpy as np\n'), ((1544, 1559), 'numpy.round', 'np.round', (['start'], {}), '(start)\n', (1552, 1559), True, 'import numpy as np\n'), ((1591, 1604), 'numpy.round', 'np.round', (['end'], {}), '(end)\n', (1599, 1604), True, 'import numpy as np\n'), ((7405, 
7446), 'random.uniform', 'random.uniform', (['(-maxlen / 10)', '(maxlen / 10)'], {}), '(-maxlen / 10, maxlen / 10)\n', (7419, 7446), False, 'import random\n'), ((7444, 7475), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7458, 7475), False, 'import random\n'), ((7477, 7508), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7491, 7508), False, 'import random\n'), ((7685, 7715), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7699, 7715), False, 'import random\n'), ((7715, 7745), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7729, 7745), False, 'import random\n'), ((7745, 7775), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7759, 7775), False, 'import random\n'), ((8305, 8336), 'mathutils.Euler', 'Euler', (['(rot_base + rot_step * ki)'], {}), '(rot_base + rot_step * ki)\n', (8310, 8336), False, 'from mathutils import Euler\n'), ((11702, 11724), 'cv2.imread', 'cv2.imread', (['bgrki_path'], {}), '(bgrki_path)\n', (11712, 11724), False, 'import cv2\n'), ((15459, 15489), 'bpy.data.images.load', 'bpy.data.images.load', (['tex_path'], {}), '(tex_path)\n', (15479, 15489), False, 'import bpy\n'), ((7538, 7579), 'random.uniform', 'random.uniform', (['(-maxlen / 10)', '(maxlen / 10)'], {}), '(-maxlen / 10, maxlen / 10)\n', (7552, 7579), False, 'import random\n'), ((7577, 7608), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7591, 7608), False, 'import random\n'), ((7610, 7641), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7624, 7641), False, 'import random\n'), ((7805, 7836), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7819, 7836), False, 'import random\n'), ((7838, 7869), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7852, 7869), False, 'import random\n'), ((7871, 7902), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7885, 7902), False, 'import random\n')]
|
# coding=utf-8
import sys
import argparse
import os
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data, load_data_one
from collections import defaultdict
from argparse import ArgumentParser
from decode_helper import decode_one
import sys
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tf_helper import train, evaluate, decode_data, decode_data_recover
from model1 import construct_graph
def init_args():
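    # Command-line options covering data paths, model dimensions and
    # training/decoding hyper-parameters.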
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--data_path',
default=os.path.dirname(os.path.abspath(__file__)) + '/data',
type=str,
help='Data path.')
arg_parser.add_argument(
'--load_data', default=False, type=bool, help='Load data.')
arg_parser.add_argument(
'--data',
choices=['wikisql', 'spider', 'overnight', 'overnight_set'],
default='wikisql',
help='data to train & test')
#arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer')
arg_parser.add_argument(
'--subset', choices=['all'], default='all', help='Subset of data.')
arg_parser.add_argument(
'--maxlen', default=60, type=int, help='Data record max length.')
arg_parser.add_argument(
'--annotation_path',
default=os.path.dirname(os.path.abspath(__file__)) +
'/data/DATA/wiki/',
type=str,
help='Data annotation path.')
arg_parser.add_argument(
'--mode',
choices=['train', 'infer', 'transfer','txt'],
default='infer',
help='Run mode')
#### Model configuration ####
arg_parser.add_argument(
'--cell',
choices=['gru'],
default='gru',
help='Type of cell used, currently only standard GRU cell is supported'
)
arg_parser.add_argument(
'--output_vocab_size',
default=20637,
#default=20452,
type=int,
help='Output vocabulary size.')
# Embedding sizes
arg_parser.add_argument(
'--embedding_dim',
default=300,
type=int,
help='Size of word embeddings')
#Hidden sizes
arg_parser.add_argument(
'--dim', default=400, type=int, help='Size of GRU hidden states')
arg_parser.add_argument(
'--hidden_size',
default=256,
type=int,
help='Size of LSTM hidden states')
arg_parser.add_argument(
'--no_copy',
default=False,
action='store_true',
help='Do not use copy mechanism')
#### Training ####
arg_parser.add_argument(
'--vocab', type=str, help='Path of the serialized vocabulary')
arg_parser.add_argument(
'--glove_embed_path',
default=None,
type=str,
        help='Path to pretrained GloVe embedding')
arg_parser.add_argument(
'--batch_size', default=128, type=int, help='Batch size')
arg_parser.add_argument(
'--in_drop', default=0., type=float, help='In dropout rate')
arg_parser.add_argument(
'--out_drop', default=0., type=float, help='Out dropout rate')
# training details
arg_parser.add_argument(
'--valid_epoch_interval',
default=1,
type=int,
help='Perform validation every x epoch')
arg_parser.add_argument(
'--clip_grad', default=5., type=float, help='Clip gradients')
arg_parser.add_argument(
'--total_epochs', default=40, type=int, help='# of training epoches')
arg_parser.add_argument(
'--epochs', default=1, type=int, help='Record per x epoches')
arg_parser.add_argument(
'--lr', default=0.0001, type=float, help='Learning rate')
arg_parser.add_argument(
'--lr_decay',
default=0.5,
type=float,
help='decay learning rate if the validation performance drops')
#### decoding/validation/testing ####
arg_parser.add_argument(
'--load_model', default=False, type=bool, help='Whether to load model')
arg_parser.add_argument(
'--beam_width', default=5, type=int, help='Beam size for beam search')
arg_parser.add_argument(
'--decode_max_time_step',
default=100,
type=int,
help='Maximum number of time steps used '
'in decoding and sampling')
args = arg_parser.parse_args()
return args
def model(args, train_env, infer_env):
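    # Build two separate graphs from the same construct_graph definition: one
    # for training and one for inference, so the checkpoint written by the
    # training saver can be restored into the inference graph.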
tf.reset_default_graph()
train_graph = tf.Graph()
infer_graph = tf.Graph()
with train_graph.as_default():
train_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
train_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
train_env.train_op, train_env.loss, train_env.acc, sample_ids, logits = construct_graph(
"train", train_env, args)
train_env.saver = tf.train.Saver()
#[print(n.name) for n in tf.get_default_graph().as_graph_def().node if 'xxxxx' in n.name]
with infer_graph.as_default():
infer_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
infer_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
_, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(
"infer", infer_env, args)
infer_env.infer_saver = tf.train.Saver()
return train_graph, infer_graph
def inferrence(args):
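    # Restore the trained checkpoint and evaluate exact-match (EM) accuracy on
    # the dev and test splits.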
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========dev set============')
decode_data(sess, infer_env, X_dev, y_dev)
em = decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')
print('==========test set===========')
decode_data(sess, infer_env, X_test, y_test)
test_em = decode_data_recover(sess, infer_env, X_test, y_test,
'test')
return
def infer_one(args):
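    # Restore the trained checkpoint and decode a single input question read
    # from 'qs.txt'.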
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========decode============')
X_one = load_data_one(args.maxlen, 'qs.txt')
decode_one(sess, infer_env, X_one)
return
def train_model(args):
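    # Alternate between a training session and an inference session: after every
    # args.epochs epochs the latest checkpoint is restored into the inference
    # graph, and the model with the best dev EM accuracy is saved to best_model/.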
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
train_graph, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
args.load_model = False
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
max_em, global_test_em, best_base = -1, -1, -1
acc = 0
sess1 = tf.InteractiveSession(graph=train_graph)
sess1.run(tf.global_variables_initializer())
sess1.run(tf.local_variables_initializer())
sess2 = tf.InteractiveSession(graph=infer_graph)
sess2.run(tf.global_variables_initializer())
sess2.run(tf.global_variables_initializer())
for base in range(args.total_epochs / args.epochs):
print('\nIteration: %d (%d epochs)' % (base, args.epochs))
model2load = train(
sess1,
train_env,
X_train,
y_train,
epochs=args.epochs,
load=args.load_model,
name=args.subset,
batch_size=args.batch_size,
base=base,
model2Bload=model2load)
args.load_model = True
infer_env.infer_saver.restore(sess2, model2load)
print('===========dev set============')
dev_em = decode_data(sess2, infer_env, X_dev, y_dev)
dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,
'dev')
print('==========test set===========')
test_em = decode_data(sess2, infer_env, X_test, y_test)
test_em = decode_data_recover(sess2, infer_env, X_test, y_test,
'test')
if dev_em > max_em:
max_em = dev_em
global_test_em = test_em
best_base = base
print('\n Saving model for best testing')
train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))
    print('Max EM acc: %.4f at iteration %d.' % (max_em, best_base))
print('test EM acc: %.4f ' % global_test_em)
return
def transfer(args):
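    # Evaluate the trained model on the OVERNIGHT transfer data, both per
    # subset and on the combined set.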
load_model = args.load_model if args.mode == 'train' else True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'overnight'
args.load_data = True
#X_tran, y_tran = load_data(args)
X_tran, y_tran = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight')
args.data = 'overnight_set'
#tran_sets = load_data(args)
tran_sets = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight_set')
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('========subset transfer set========')
subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']
for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):
print('---------' + subset + '---------')
tran_em = decode_data(
sess,
infer_env,
X_tran_subset,
y_tran_subset,
filename=str(subset + '.txt'))
print('===========transfer set============')
tran_em = decode_data(sess, infer_env, X_tran, y_tran)
return
if __name__ == '__main__':
args = init_args()
print(args)
if args.mode == 'train':
print('\nTrain model.')
train_model(args)
elif args.mode == 'infer':
print('\nInference.')
inferrence(args)
elif args.mode == 'txt':
print('\nInference from txt.')
infer_one(args)
elif args.mode == 'transfer':
print('\nTransfer.')
transfer(args)
|
[
"tensorflow.local_variables_initializer",
"tensorflow.Graph",
"sys.setdefaultencoding",
"tensorflow.reset_default_graph",
"tensorflow.InteractiveSession",
"argparse.ArgumentParser",
"utils.data_manager.load_data",
"tensorflow.placeholder",
"tf_helper.decode_data_recover",
"tensorflow.train.Saver",
"tf_helper.train",
"decode_helper.decode_one",
"tensorflow.global_variables_initializer",
"tensorflow.placeholder_with_default",
"model1.construct_graph",
"utils.data_manager.load_data_one",
"os.path.abspath",
"tf_helper.decode_data"
] |
[((378, 408), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (400, 408), False, 'import sys\n'), ((594, 619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (617, 619), False, 'import argparse\n'), ((4551, 4575), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4573, 4575), True, 'import tensorflow as tf\n'), ((4594, 4604), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4602, 4604), True, 'import tensorflow as tf\n'), ((4623, 4633), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4631, 4633), True, 'import tensorflow as tf\n'), ((6001, 6062), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""train"""'}), "(maxlen=args.maxlen, load=args.load_data, s='train')\n", (6010, 6062), False, 'from utils.data_manager import load_data, load_data_one\n'), ((6083, 6143), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""test"""'}), "(maxlen=args.maxlen, load=args.load_data, s='test')\n", (6092, 6143), False, 'from utils.data_manager import load_data, load_data_one\n'), ((6162, 6221), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""dev"""'}), "(maxlen=args.maxlen, load=args.load_data, s='dev')\n", (6171, 6221), False, 'from utils.data_manager import load_data, load_data_one\n'), ((6352, 6392), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'infer_graph'}), '(graph=infer_graph)\n', (6373, 6392), True, 'import tensorflow as tf\n'), ((6493, 6535), 'tf_helper.decode_data', 'decode_data', (['sess', 'infer_env', 'X_dev', 'y_dev'], {}), '(sess, infer_env, X_dev, y_dev)\n', (6504, 6535), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((6545, 6602), 'tf_helper.decode_data_recover', 'decode_data_recover', (['sess', 'infer_env', 'X_dev', 'y_dev', '"""dev"""'], {}), "(sess, infer_env, X_dev, y_dev, 'dev')\n", (6564, 6602), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((6650, 6694), 'tf_helper.decode_data', 'decode_data', (['sess', 'infer_env', 'X_test', 'y_test'], {}), '(sess, infer_env, X_test, y_test)\n', (6661, 6694), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((6709, 6769), 'tf_helper.decode_data_recover', 'decode_data_recover', (['sess', 'infer_env', 'X_test', 'y_test', '"""test"""'], {}), "(sess, infer_env, X_test, y_test, 'test')\n", (6728, 6769), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((7113, 7153), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'infer_graph'}), '(graph=infer_graph)\n', (7134, 7153), True, 'import tensorflow as tf\n'), ((7261, 7297), 'utils.data_manager.load_data_one', 'load_data_one', (['args.maxlen', '"""qs.txt"""'], {}), "(args.maxlen, 'qs.txt')\n", (7274, 7297), False, 'from utils.data_manager import load_data, load_data_one\n'), ((7302, 7336), 'decode_helper.decode_one', 'decode_one', (['sess', 'infer_env', 'X_one'], {}), '(sess, infer_env, X_one)\n', (7312, 7336), False, 'from decode_helper import decode_one\n'), ((7622, 7683), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""train"""'}), "(maxlen=args.maxlen, load=args.load_data, s='train')\n", (7631, 7683), False, 'from utils.data_manager import load_data, load_data_one\n'), 
((7704, 7764), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""test"""'}), "(maxlen=args.maxlen, load=args.load_data, s='test')\n", (7713, 7764), False, 'from utils.data_manager import load_data, load_data_one\n'), ((7783, 7842), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""dev"""'}), "(maxlen=args.maxlen, load=args.load_data, s='dev')\n", (7792, 7842), False, 'from utils.data_manager import load_data, load_data_one\n'), ((8036, 8076), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'train_graph'}), '(graph=train_graph)\n', (8057, 8076), True, 'import tensorflow as tf\n'), ((8186, 8226), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'infer_graph'}), '(graph=infer_graph)\n', (8207, 8226), True, 'import tensorflow as tf\n'), ((10057, 10122), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""overnight"""'}), "(maxlen=args.maxlen, load=args.load_data, s='overnight')\n", (10066, 10122), False, 'from utils.data_manager import load_data, load_data_one\n'), ((10203, 10272), 'utils.data_manager.load_data', 'load_data', ([], {'maxlen': 'args.maxlen', 'load': 'args.load_data', 's': '"""overnight_set"""'}), "(maxlen=args.maxlen, load=args.load_data, s='overnight_set')\n", (10212, 10272), False, 'from utils.data_manager import load_data, load_data_one\n'), ((10332, 10372), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'infer_graph'}), '(graph=infer_graph)\n', (10353, 10372), True, 'import tensorflow as tf\n'), ((10910, 10954), 'tf_helper.decode_data', 'decode_data', (['sess', 'infer_env', 'X_tran', 'y_tran'], {}), '(sess, infer_env, X_tran, y_tran)\n', (10921, 10954), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((4692, 4753), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, args.maxlen]', 'name': '"""x"""'}), "(tf.int32, shape=[None, args.maxlen], name='x')\n", (4706, 4753), True, 'import tensorflow as tf\n'), ((4789, 4844), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, args.maxlen)'], {'name': '"""y"""'}), "(tf.int32, (None, args.maxlen), name='y')\n", (4803, 4844), True, 'import tensorflow as tf\n'), ((4874, 4931), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)', '()'], {'name': '"""train_mode"""'}), "(False, (), name='train_mode')\n", (4901, 4931), True, 'import tensorflow as tf\n'), ((5025, 5066), 'model1.construct_graph', 'construct_graph', (['"""train"""', 'train_env', 'args'], {}), "('train', train_env, args)\n", (5040, 5066), False, 'from model1 import construct_graph\n'), ((5106, 5122), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5120, 5122), True, 'import tensorflow as tf\n'), ((5279, 5340), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, args.maxlen]', 'name': '"""x"""'}), "(tf.int32, shape=[None, args.maxlen], name='x')\n", (5293, 5340), True, 'import tensorflow as tf\n'), ((5376, 5431), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, args.maxlen)'], {'name': '"""y"""'}), "(tf.int32, (None, args.maxlen), name='y')\n", (5390, 5431), True, 'import tensorflow as tf\n'), ((5461, 5518), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)', '()'], {'name': '"""train_mode"""'}), "(False, (), name='train_mode')\n", 
(5488, 5518), True, 'import tensorflow as tf\n'), ((5598, 5639), 'model1.construct_graph', 'construct_graph', (['"""infer"""', 'infer_env', 'args'], {}), "('infer', infer_env, args)\n", (5613, 5639), False, 'from model1 import construct_graph\n'), ((5685, 5701), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5699, 5701), True, 'import tensorflow as tf\n'), ((8091, 8124), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8122, 8124), True, 'import tensorflow as tf\n'), ((8140, 8172), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (8170, 8172), True, 'import tensorflow as tf\n'), ((8241, 8274), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8272, 8274), True, 'import tensorflow as tf\n'), ((8290, 8323), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8321, 8323), True, 'import tensorflow as tf\n'), ((8469, 8642), 'tf_helper.train', 'train', (['sess1', 'train_env', 'X_train', 'y_train'], {'epochs': 'args.epochs', 'load': 'args.load_model', 'name': 'args.subset', 'batch_size': 'args.batch_size', 'base': 'base', 'model2Bload': 'model2load'}), '(sess1, train_env, X_train, y_train, epochs=args.epochs, load=args.\n load_model, name=args.subset, batch_size=args.batch_size, base=base,\n model2Bload=model2load)\n', (8474, 8642), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((8909, 8952), 'tf_helper.decode_data', 'decode_data', (['sess2', 'infer_env', 'X_dev', 'y_dev'], {}), '(sess2, infer_env, X_dev, y_dev)\n', (8920, 8952), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((8970, 9028), 'tf_helper.decode_data_recover', 'decode_data_recover', (['sess2', 'infer_env', 'X_dev', 'y_dev', '"""dev"""'], {}), "(sess2, infer_env, X_dev, y_dev, 'dev')\n", (8989, 9028), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((9131, 9176), 'tf_helper.decode_data', 'decode_data', (['sess2', 'infer_env', 'X_test', 'y_test'], {}), '(sess2, infer_env, X_test, y_test)\n', (9142, 9176), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((9195, 9256), 'tf_helper.decode_data_recover', 'decode_data_recover', (['sess2', 'infer_env', 'X_test', 'y_test', '"""test"""'], {}), "(sess2, infer_env, X_test, y_test, 'test')\n", (9214, 9256), False, 'from tf_helper import train, evaluate, decode_data, decode_data_recover\n'), ((705, 730), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (720, 730), False, 'import os\n'), ((1498, 1523), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1513, 1523), False, 'import os\n')]
|
"""CD SEM structures."""
from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.text_rectangular import text_rectangular
from gdsfactory.cross_section import strip
from gdsfactory.grid import grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
text_rectangular_mini = partial(text_rectangular, size=1)
LINE_LENGTH = 420.0
@cell
def cdsem_straight(
widths: Tuple[float, ...] = (0.4, 0.45, 0.5, 0.6, 0.8, 1.0),
length: float = LINE_LENGTH,
cross_section: CrossSectionFactory = strip,
text: Optional[ComponentFactory] = text_rectangular_mini,
spacing: float = 3,
) -> Component:
"""Returns straight waveguide lines width sweep.
Args:
widths: for the sweep
length: for the line
cross_section: for the lines
text: optional text for labels
spacing: edge to edge spacing
"""
lines = []
for width in widths:
cross_section = partial(cross_section, width=width)
line = straight_function(length=length, cross_section=cross_section)
if text:
line = line.copy()
t = line << text(str(int(width * 1e3)))
t.xmin = line.xmax + 5
t.y = 0
lines.append(line)
return grid(lines, spacing=(0, spacing))
if __name__ == "__main__":
c = cdsem_straight()
c.show()
|
[
"functools.partial",
"gdsfactory.components.straight.straight",
"gdsfactory.grid.grid"
] |
[((476, 509), 'functools.partial', 'partial', (['text_rectangular'], {'size': '(1)'}), '(text_rectangular, size=1)\n', (483, 509), False, 'from functools import partial\n'), ((1424, 1457), 'gdsfactory.grid.grid', 'grid', (['lines'], {'spacing': '(0, spacing)'}), '(lines, spacing=(0, spacing))\n', (1428, 1457), False, 'from gdsfactory.grid import grid\n'), ((1117, 1152), 'functools.partial', 'partial', (['cross_section'], {'width': 'width'}), '(cross_section, width=width)\n', (1124, 1152), False, 'from functools import partial\n'), ((1168, 1229), 'gdsfactory.components.straight.straight', 'straight_function', ([], {'length': 'length', 'cross_section': 'cross_section'}), '(length=length, cross_section=cross_section)\n', (1185, 1229), True, 'from gdsfactory.components.straight import straight as straight_function\n')]
|
#!/usr/bin/env python3
# Copyright 2019-2022 <NAME>, <NAME>, <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the reduced particle diagnostics.
# The setup is a uniform plasma with electrons, protons and photons.
# Various particle and field quantities are written to file using the reduced diagnostics
# and compared with the corresponding quantities computed from the data in the plotfiles.
import os
import sys
import numpy as np
import openpmd_api as io
from scipy.constants import c
from scipy.constants import epsilon_0 as eps0
from scipy.constants import m_e, m_p
from scipy.constants import mu_0 as mu0
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
def do_analysis(single_precision = False):
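    # Recompute the particle-averaged quantities directly from the particle data
    # in the plotfile and compare them with the reduced diagnostics written to
    # the plotfile and openPMD outputs.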
fn = sys.argv[1]
ds = yt.load(fn)
ad = ds.all_data()
ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)
opmd_i = opmd.iterations[200]
#--------------------------------------------------------------------------------------------------
# Part 1: get results from plotfiles (label '_yt')
#--------------------------------------------------------------------------------------------------
# Quantities computed from plotfiles
values_yt = dict()
domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
dx = domain_size / ds.domain_dimensions
# Electrons
x = ad['electrons', 'particle_position_x'].to_ndarray()
y = ad['electrons', 'particle_position_y'].to_ndarray()
z = ad['electrons', 'particle_position_z'].to_ndarray()
uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['electrons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['electrons: zavg'] = zavg / wavg_adj
values_yt['electrons: uzavg'] = uzavg / wavg_adj
values_yt['electrons: zuzavg'] = zuzavg / wavg_adj
values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# protons
x = ad['protons', 'particle_position_x'].to_ndarray()
y = ad['protons', 'particle_position_y'].to_ndarray()
z = ad['protons', 'particle_position_z'].to_ndarray()
uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
w = ad['protons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['protons: zavg'] = zavg / wavg_adj
values_yt['protons: uzavg'] = uzavg / wavg_adj
values_yt['protons: zuzavg'] = zuzavg / wavg_adj
values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# Photons (momentum in units of m_e c)
x = ad['photons', 'particle_position_x'].to_ndarray()
y = ad['photons', 'particle_position_y'].to_ndarray()
z = ad['photons', 'particle_position_z'].to_ndarray()
uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['photons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['photons: zavg'] = zavg / wavg_adj
values_yt['photons: uzavg'] = uzavg / wavg_adj
values_yt['photons: zuzavg'] = zuzavg / wavg_adj
values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
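    #--------------------------------------------------------------------------------------------------
    # Part 2: get results from the reduced particle diagnostics (plotfile and openPMD outputs)
    #--------------------------------------------------------------------------------------------------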
values_rd = dict()
# Load reduced particle diagnostic data from plotfiles
values_rd['electrons: zavg'] = ad0[('boxlib','z_electrons')]
values_rd['protons: zavg'] = ad0[('boxlib','z_protons')]
values_rd['photons: zavg'] = ad0[('boxlib','z_photons')]
values_rd['electrons: uzavg'] = ad0[('boxlib','uz_electrons')]
values_rd['protons: uzavg'] = ad0[('boxlib','uz_protons')]
values_rd['photons: uzavg'] = ad0[('boxlib','uz_photons')]
values_rd['electrons: zuzavg'] = ad0[('boxlib','zuz_electrons')]
values_rd['protons: zuzavg'] = ad0[('boxlib','zuz_protons')]
values_rd['photons: zuzavg'] = ad0[('boxlib','zuz_photons')]
values_rd['electrons: uzavg_filt'] = ad0[('boxlib','uz_filt_electrons')]
values_rd['protons: uzavg_filt'] = ad0[('boxlib','uz_filt_protons')]
values_rd['photons: uzavg_filt'] = ad0[('boxlib','uz_filt_photons')]
values_opmd = dict()
# Load reduced particle diagnostic data from OPMD output
values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
opmd.flush()
del opmd
#--------------------------------------------------------------------------------------------------
# Part 3: compare values from plotfiles and diagnostics and print output
#--------------------------------------------------------------------------------------------------
error_plt = dict()
error_opmd = dict()
tolerance = 5e-3 if single_precision else 1e-12
# if single precision, increase tolerance from default value
check_tolerance = 5e-3 if single_precision else 1e-9
for k in values_yt.keys():
# check that the zeros line up, since we'll be ignoring them in the error calculation
assert(np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
error_plt[k] = np.max(abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
print(k, 'relative error plotfile = ', error_plt[k])
assert(error_plt[k] < tolerance)
assert(np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))
error_opmd[k] = np.max(abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
assert(error_opmd[k] < tolerance)
print(k, 'relative error openPMD = ', error_opmd[k])
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)
|
[
"sys.path.insert",
"numpy.where",
"openpmd_api.Series",
"os.getcwd",
"checksumAPI.evaluate_checksum",
"numpy.zeros",
"yt.load",
"numpy.all"
] |
[((667, 727), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (682, 727), False, 'import sys\n'), ((823, 834), 'yt.load', 'yt.load', (['fn'], {}), '(fn)\n', (830, 834), False, 'import yt\n'), ((964, 1025), 'openpmd_api.Series', 'io.Series', (['"""diags/openpmd/openpmd_%T.h5"""', 'io.Access.read_only'], {}), "('diags/openpmd/openpmd_%T.h5', io.Access.read_only)\n", (973, 1025), True, 'import openpmd_api as io\n'), ((2070, 2100), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2078, 2100), True, 'import numpy as np\n'), ((2113, 2143), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2121, 2143), True, 'import numpy as np\n'), ((2157, 2187), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2165, 2187), True, 'import numpy as np\n'), ((2199, 2229), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2207, 2229), True, 'import numpy as np\n'), ((2247, 2277), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2255, 2277), True, 'import numpy as np\n'), ((2294, 2324), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2302, 2324), True, 'import numpy as np\n'), ((2800, 2828), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (2808, 2828), True, 'import numpy as np\n'), ((2849, 2887), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (2857, 2887), True, 'import numpy as np\n'), ((3666, 3696), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3674, 3696), True, 'import numpy as np\n'), ((3709, 3739), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3717, 3739), True, 'import numpy as np\n'), ((3753, 3783), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3761, 3783), True, 'import numpy as np\n'), ((3795, 3825), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3803, 3825), True, 'import numpy as np\n'), ((3843, 3873), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3851, 3873), True, 'import numpy as np\n'), ((3890, 3920), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3898, 3920), True, 'import numpy as np\n'), ((4396, 4424), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (4404, 4424), True, 'import numpy as np\n'), ((4445, 4483), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (4453, 4483), True, 'import numpy as np\n'), ((5283, 5313), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5291, 5313), True, 'import numpy as np\n'), ((5326, 5356), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5334, 5356), True, 'import numpy as np\n'), ((5370, 5400), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5378, 5400), True, 'import numpy as np\n'), ((5412, 5442), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5420, 5442), True, 'import numpy as np\n'), ((5460, 5490), 'numpy.zeros', 
'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5468, 5490), True, 'import numpy as np\n'), ((5507, 5537), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5515, 5537), True, 'import numpy as np\n'), ((6013, 6041), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (6021, 6041), True, 'import numpy as np\n'), ((6062, 6100), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (6070, 6100), True, 'import numpy as np\n'), ((9973, 10039), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'fn'], {'rtol': 'check_tolerance'}), '(test_name, fn, rtol=check_tolerance)\n', (10002, 10039), False, 'import checksumAPI\n'), ((9344, 9394), 'numpy.all', 'np.all', (['((values_yt[k] == 0) == (values_rd[k] == 0))'], {}), '((values_yt[k] == 0) == (values_rd[k] == 0))\n', (9350, 9394), True, 'import numpy as np\n'), ((9635, 9689), 'numpy.all', 'np.all', (['((values_yt[k] == 0) == (values_opmd[k].T == 0))'], {}), '((values_yt[k] == 0) == (values_opmd[k].T == 0))\n', (9641, 9689), True, 'import numpy as np\n'), ((9953, 9964), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9962, 9964), False, 'import os\n')]
|
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import FileTask
import os
name = "LanguageDetector"
version = "1.1.1"
class LanguageDetector(FileTask):
cpp_extensions = ['.cpp', '.c++', '.cc',
'.cxx', '.c', '.h', '.hpp', '.hxx']
c_extensions = ['.c', '.h']
rust_extensions = ['.rs']
ruby_extensions = ['.rb']
java_extensions = ['.java']
go_extensions = ['.go']
php_extensions = ['.php', '.phtml', '.php3', '.php4', '.php5', '.php7',
'.phps']
js_extensions = ['.js']
objective_c_extensions = ['.h', '.m', '.mm', '.C']
swift_extensions = ['.swift']
c_sharp_extensions = ['.cs']
python_extensions = ['.py']
"""
Tries to detect the programming language of a library based on the file
extension
Example:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
:param task_params: Parameter given explicitly for this task, for all
projects, defined in the task.yaml
:param global_args: Arguments that will be passed to all tasks. They
_might_ contain something that is useful for the
task, but the task has to check if it is _there_ as
these are user provided. If they are needed to work
that check should happen in the argHandler.
"""
def __init__(self, parameter, global_args):
super(LanguageDetector, self).__init__(name, version, parameter,
global_args)
# dictionary containing the common file extensions
# for each of the languages
self.__language_extensions = self.__get_language_extensions()
self.__report = self.__get_files_per_language()
def __get_language_extensions(self):
"""
        :returns: A dictionary of the considered language extensions
"""
return {
'C++':
self.cpp_extensions,
'C':
self.c_extensions,
'Rust':
self.rust_extensions,
'Ruby':
self.ruby_extensions,
'Java':
self.java_extensions,
'Go':
self.go_extensions,
'PHP':
self.php_extensions,
'JavaScript':
self.js_extensions,
'Objective-C':
self.objective_c_extensions,
'Swift':
self.swift_extensions,
'C#':
self.c_sharp_extensions,
'Python':
self.python_extensions
}
def __get_files_per_language(self):
"""
        :returns: A dictionary of the considered languages, their
extensions and the amount of files that have that extension
(default=0)
"""
return {
'C++':
{extension: 0 for extension in self.cpp_extensions},
'C':
{extension: 0 for extension in self.c_extensions},
'Rust':
{extension: 0 for extension in self.rust_extensions},
'Ruby':
{extension: 0 for extension in self.ruby_extensions},
'Java':
{extension: 0 for extension in self.java_extensions},
'Go':
{extension: 0 for extension in self.go_extensions},
'PHP':
{extension: 0 for extension in self.php_extensions},
'JavaScript':
{extension: 0 for extension in self.js_extensions},
'Objective-C':
{extension: 0 for extension in self.objective_c_extensions},
'Swift':
{extension: 0 for extension in self.swift_extensions},
'C#':
{extension: 0 for extension in self.c_sharp_extensions},
'Python':
{extension: 0 for extension in self.python_extensions},
}
def __decide_h_extension(self):
"""
Decides which language 'owns' how many .h files
:returns: The report with divided header files
"""
report = self.__report
h_files = report['C']['.h']
if h_files > 0:
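            # Count how many non-header files each candidate language owns; the
            # ambiguous .h files are then attributed proportionally below.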
c_files = (sum(report['C'].values()) - h_files)
cpp_files = (sum(report['C++'].values())
- h_files
- report['C++']['.c'])
oc_files = (
sum(report['Objective-C'].values()) - h_files)
lang_fiels = c_files + cpp_files + oc_files
# Header only libraries are 'common' in C and C++
# the benefit of doubt goes to C
if lang_fiels == 0:
report['C']['.h'] = 1
report['C++']['.h'] = 0
report['Objective-C']['.h'] = 0
else:
report['C']['.h'] = (h_files *
c_files / lang_fiels)
report['C++']['.h'] = (h_files *
cpp_files / lang_fiels)
report['Objective-C']['.h'] = (h_files *
oc_files / lang_fiels)
return report
def __calculate_main_language(self, report):
"""
        Calculates the main language (the language with the most matching files)
:param report: The report
:returns: The main language.
"""
max_files = 0
max_lang = None
for language in report:
lang_fiels = sum(report[language].values())
if max_files < lang_fiels:
max_lang = language
max_files = lang_fiels
return max_lang
def __calculate_used_languages(self, report):
"""
        Calculates the used languages by collapsing the per-extension counts
        into a single file count per language. Only languages with at least one
        matching file are kept and will appear in the report
:param report: The report
:returns: The used languages.
"""
languages = {}
for language in report:
total_files = sum(report[language].values())
if total_files > 0:
languages[language] = total_files
return sorted(languages, key=languages.get, reverse=True)
def scrab(self, project, filepath, file):
"""
Counts the files that have an extension of one of the languages
:param project: The project that the scrab task shall analyse
:param filepath: The filepath to the file that can be analysed
:param file: The file as string that can be analysed
:returns: Report that contains the scrabbed information of *this* file
- the extensions have either a count of 0 or 1
"""
filename, file_extension = os.path.splitext(filepath)
for language in self.__language_extensions:
if file_extension in self.__language_extensions[language]:
self.__report[language][file_extension] += 1
def report(self):
"""
Decides which headers files are (probable) from which language,
calculates the main language and removes redundant / unnecessary
detailed information from the report
:param report: The complete report this task created
:returns: Report that contains all scrabbed information
eg.:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
"""
pre_report = self.__decide_h_extension()
main_language = self.__calculate_main_language(pre_report)
# write the result to the report
report = {}
report['main_language'] = main_language
report['languages'] = self.__calculate_used_languages(pre_report)
return report
|
[
"os.path.splitext"
] |
[((8112, 8138), 'os.path.splitext', 'os.path.splitext', (['filepath'], {}), '(filepath)\n', (8128, 8138), False, 'import os\n')]
|
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
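    # Wire the three step ops into a single pipeline: a shared PVC mounted at
    # /marshal carries the marshalled inter-step data, and pod labels and
    # annotations record the dependency and volume metadata for Kale.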
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
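# --- Editor's note (added for readability; not emitted by Kale itself) ---
# Each step body above is wrapped by _kale_marshal(<names to load>, <names to save>, ...),
# so the shared /marshal volume carries the data dependencies between pods: step1 saves
# _a and _b, step2 loads them and saves _c, and step3 loads _a and _c. The .after(...)
# calls on the tasks encode the same ordering for the pipeline scheduler.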
|
[
"kale.common.kfputils.run_pipeline",
"kfp.components.func_to_container_op",
"kale.common.runutils.ttl",
"collections.OrderedDict",
"kfp.dsl.VolumeOp",
"kfp.Client",
"kubernetes.client.V1SecurityContext",
"json.dumps",
"kale.common.mlmdutils.init_metadata",
"kfp.dsl.pipeline",
"kale.common.mlmdutils.call",
"kfp.compiler.Compiler",
"kale.marshal.decorator.marshal",
"kale.common.runutils.link_artifacts",
"kale.common.kfputils.upload_pipeline"
] |
[((1939, 1982), 'kfp.components.func_to_container_op', '_kfp_components.func_to_container_op', (['step1'], {}), '(step1)\n', (1975, 1982), True, 'import kfp.components as _kfp_components\n'), ((2002, 2045), 'kfp.components.func_to_container_op', '_kfp_components.func_to_container_op', (['step2'], {}), '(step2)\n', (2038, 2045), True, 'import kfp.components as _kfp_components\n'), ((2065, 2108), 'kfp.components.func_to_container_op', '_kfp_components.func_to_container_op', (['step3'], {}), '(step3)\n', (2101, 2108), True, 'import kfp.components as _kfp_components\n'), ((2112, 2158), 'kfp.dsl.pipeline', '_kfp_dsl.pipeline', ([], {'name': '"""test"""', 'description': '""""""'}), "(name='test', description='')\n", (2129, 2158), True, 'import kfp.dsl as _kfp_dsl\n'), ((238, 269), 'kale.common.mlmdutils.init_metadata', '_kale_mlmdutils.init_metadata', ([], {}), '()\n', (267, 269), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((453, 523), 'kale.marshal.decorator.marshal', '_kale_marshal', (['[]', "['_b', '_a']", '_kale_pipeline_parameters', '"""/marshal"""'], {}), "([], ['_b', '_a'], _kale_pipeline_parameters, '/marshal')\n", (466, 523), True, 'from kale.marshal.decorator import marshal as _kale_marshal\n'), ((633, 670), 'kale.common.runutils.link_artifacts', '_kale_link_artifacts', (['_kale_artifacts'], {}), '(_kale_artifacts)\n', (653, 670), True, 'from kale.common.runutils import link_artifacts as _kale_link_artifacts\n'), ((675, 722), 'kale.common.mlmdutils.call', '_kale_mlmdutils.call', (['"""mark_execution_complete"""'], {}), "('mark_execution_complete')\n", (695, 722), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((799, 830), 'kale.common.mlmdutils.init_metadata', '_kale_mlmdutils.init_metadata', ([], {}), '()\n', (828, 830), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((1068, 1080), 'kale.common.runutils.ttl', '_kale_ttl', (['(5)'], {}), '(5)\n', (1077, 1080), True, 'from kale.common.runutils import ttl as _kale_ttl\n'), ((1086, 1160), 'kale.marshal.decorator.marshal', '_kale_marshal', (["['_b', '_a']", "['_c']", '_kale_pipeline_parameters', '"""/marshal"""'], {}), "(['_b', '_a'], ['_c'], _kale_pipeline_parameters, '/marshal')\n", (1099, 1160), True, 'from kale.marshal.decorator import marshal as _kale_marshal\n'), ((1278, 1315), 'kale.common.runutils.link_artifacts', '_kale_link_artifacts', (['_kale_artifacts'], {}), '(_kale_artifacts)\n', (1298, 1315), True, 'from kale.common.runutils import link_artifacts as _kale_link_artifacts\n'), ((1320, 1367), 'kale.common.mlmdutils.call', '_kale_mlmdutils.call', (['"""mark_execution_complete"""'], {}), "('mark_execution_complete')\n", (1340, 1367), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((1444, 1475), 'kale.common.mlmdutils.init_metadata', '_kale_mlmdutils.init_metadata', ([], {}), '()\n', (1473, 1475), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((1659, 1729), 'kale.marshal.decorator.marshal', '_kale_marshal', (["['_a', '_c']", '[]', '_kale_pipeline_parameters', '"""/marshal"""'], {}), "(['_a', '_c'], [], _kale_pipeline_parameters, '/marshal')\n", (1672, 1729), True, 'from kale.marshal.decorator import marshal as _kale_marshal\n'), ((1830, 1867), 'kale.common.runutils.link_artifacts', '_kale_link_artifacts', (['_kale_artifacts'], {}), '(_kale_artifacts)\n', (1850, 1867), True, 'from kale.common.runutils import link_artifacts as _kale_link_artifacts\n'), ((1872, 1919), 'kale.common.mlmdutils.call', '_kale_mlmdutils.call', 
(['"""mark_execution_complete"""'], {}), "('mark_execution_complete')\n", (1892, 1919), True, 'from kale.common import mlmdutils as _kale_mlmdutils\n'), ((2226, 2239), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2237, 2239), False, 'from collections import OrderedDict\n'), ((2336, 2457), 'kfp.dsl.VolumeOp', '_kfp_dsl.VolumeOp', ([], {'name': '"""kale-marshal-volume"""', 'resource_name': '"""kale-marshal-pvc"""', 'modes': "['ReadWriteMany']", 'size': '"""1Gi"""'}), "(name='kale-marshal-volume', resource_name=\n 'kale-marshal-pvc', modes=['ReadWriteMany'], size='1Gi')\n", (2353, 2457), True, 'import kfp.dsl as _kfp_dsl\n'), ((6823, 6835), 'kfp.Client', 'kfp.Client', ([], {}), '()\n', (6833, 6835), False, 'import kfp\n'), ((6982, 7033), 'kale.common.kfputils.upload_pipeline', 'kfputils.upload_pipeline', (['pipeline_filename', '"""test"""'], {}), "(pipeline_filename, 'test')\n", (7006, 7033), False, 'from kale.common import kfputils\n'), ((7060, 7167), 'kale.common.kfputils.run_pipeline', 'kfputils.run_pipeline', ([], {'experiment_name': 'experiment.name', 'pipeline_id': 'pipeline_id', 'version_id': 'version_id'}), '(experiment_name=experiment.name, pipeline_id=\n pipeline_id, version_id=version_id)\n', (7081, 7167), False, 'from kale.common import kfputils\n'), ((3329, 3372), 'kubernetes.client.V1SecurityContext', 'k8s_client.V1SecurityContext', ([], {'run_as_user': '(0)'}), '(run_as_user=0)\n', (3357, 3372), True, 'from kubernetes import client as k8s_client\n'), ((3771, 3798), 'json.dumps', 'json.dumps', (['_kale_dep_names'], {}), '(_kale_dep_names)\n', (3781, 3798), False, 'import json\n'), ((4573, 4616), 'kubernetes.client.V1SecurityContext', 'k8s_client.V1SecurityContext', ([], {'run_as_user': '(0)'}), '(run_as_user=0)\n', (4601, 4616), True, 'from kubernetes import client as k8s_client\n'), ((5015, 5042), 'json.dumps', 'json.dumps', (['_kale_dep_names'], {}), '(_kale_dep_names)\n', (5025, 5042), False, 'import json\n'), ((5828, 5871), 'kubernetes.client.V1SecurityContext', 'k8s_client.V1SecurityContext', ([], {'run_as_user': '(0)'}), '(run_as_user=0)\n', (5856, 5871), True, 'from kubernetes import client as k8s_client\n'), ((6270, 6297), 'json.dumps', 'json.dumps', (['_kale_dep_names'], {}), '(_kale_dep_names)\n', (6280, 6297), False, 'import json\n'), ((3950, 3990), 'json.dumps', 'json.dumps', (['_kale_volume_name_parameters'], {}), '(_kale_volume_name_parameters)\n', (3960, 3990), False, 'import json\n'), ((5194, 5234), 'json.dumps', 'json.dumps', (['_kale_volume_name_parameters'], {}), '(_kale_volume_name_parameters)\n', (5204, 5234), False, 'import json\n'), ((6449, 6489), 'json.dumps', 'json.dumps', (['_kale_volume_name_parameters'], {}), '(_kale_volume_name_parameters)\n', (6459, 6489), False, 'import json\n'), ((6672, 6691), 'kfp.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (6689, 6691), True, 'import kfp.compiler as compiler\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path('list', views.list_view),
path('add', views.add_view),
]
|
[
"django.urls.path"
] |
[((70, 99), 'django.urls.path', 'path', (['"""list"""', 'views.list_view'], {}), "('list', views.list_view)\n", (74, 99), False, 'from django.urls import path\n'), ((105, 132), 'django.urls.path', 'path', (['"""add"""', 'views.add_view'], {}), "('add', views.add_view)\n", (109, 132), False, 'from django.urls import path\n')]
|
import json
import logging
import socket
from roombapy.roomba_info import RoombaInfo
class RoombaDiscovery:
udp_bind_address = ""
udp_address = "<broadcast>"
udp_port = 5678
roomba_message = "irobotmcs"
amount_of_broadcasted_messages = 5
server_socket = None
log = None
def __init__(self):
"""Init discovery."""
self.server_socket = _get_socket()
self.log = logging.getLogger(__name__)
def find(self, ip=None):
if ip is not None:
return self.get(ip)
return self.get_all()
def get_all(self):
self._start_server()
self._broadcast_message(self.amount_of_broadcasted_messages)
robots = set()
while True:
response = self._get_response()
if response:
robots.add(response)
else:
break
return robots
def get(self, ip):
self._start_server()
self._send_message(ip)
return self._get_response(ip)
def _get_response(self, ip=None):
try:
while True:
raw_response, addr = self.server_socket.recvfrom(1024)
if ip is not None and addr[0] != ip:
continue
self.log.debug(
"Received response: %s, address: %s", raw_response, addr
)
data = raw_response.decode()
if self._is_from_irobot(data):
return _decode_data(data)
except socket.timeout:
self.log.info("Socket timeout")
return None
def _is_from_irobot(self, data):
if data == self.roomba_message:
return False
json_response = json.loads(data)
if (
"Roomba" in json_response["hostname"]
or "iRobot" in json_response["hostname"]
):
return True
return False
def _broadcast_message(self, amount):
for i in range(amount):
self.server_socket.sendto(
self.roomba_message.encode(), (self.udp_address, self.udp_port)
)
self.log.debug("Broadcast message sent: " + str(i))
def _send_message(self, udp_address):
self.server_socket.sendto(
self.roomba_message.encode(), (udp_address, self.udp_port)
)
self.log.debug("Message sent")
def _start_server(self):
self.server_socket.bind((self.udp_bind_address, self.udp_port))
self.log.debug("Socket server started, port %s", self.udp_port)
def _decode_data(data):
json_response = json.loads(data)
return RoombaInfo(
hostname=json_response["hostname"],
robot_name=json_response["robotname"],
ip=json_response["ip"],
mac=json_response["mac"],
firmware=json_response["sw"],
sku=json_response["sku"],
capabilities=json_response["cap"],
)
def _get_socket():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server_socket.settimeout(5)
return server_socket
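# --- Editor's usage sketch (hypothetical; not part of the original module) ---
# Note: get_all() really broadcasts UDP packets on the local network.
if __name__ == "__main__":  # pragma: no cover
    discovery = RoombaDiscovery()
    for robot in discovery.get_all():
        print(robot.hostname, robot.ip)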
|
[
"logging.getLogger",
"json.loads",
"roombapy.roomba_info.RoombaInfo",
"socket.socket"
] |
[((2621, 2637), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2631, 2637), False, 'import json\n'), ((2649, 2889), 'roombapy.roomba_info.RoombaInfo', 'RoombaInfo', ([], {'hostname': "json_response['hostname']", 'robot_name': "json_response['robotname']", 'ip': "json_response['ip']", 'mac': "json_response['mac']", 'firmware': "json_response['sw']", 'sku': "json_response['sku']", 'capabilities': "json_response['cap']"}), "(hostname=json_response['hostname'], robot_name=json_response[\n 'robotname'], ip=json_response['ip'], mac=json_response['mac'],\n firmware=json_response['sw'], sku=json_response['sku'], capabilities=\n json_response['cap'])\n", (2659, 2889), False, 'from roombapy.roomba_info import RoombaInfo\n'), ((2980, 3028), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2993, 3028), False, 'import socket\n'), ((418, 445), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (435, 445), False, 'import logging\n'), ((1741, 1757), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1751, 1757), False, 'import json\n')]
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True removes feature with single occurrence of missing rank.
If False all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in format(currently only `lineage` is supported.)
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation require `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get first lineage _sample for notation testing assuming the rest have the same notations
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
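# --- Editor's usage sketch (hypothetical; "taxonomy.csv" and the column name are placeholders) ---
if __name__ == "__main__":  # pragma: no cover
    tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="lineage")
    print(tax.avail_ranks)                                   # ranks actually present in the data
    print(tax.get_lineage_by_id(missing_rank=True).head())   # prefixed consensus lineages
    tax.merge_features_by_rank(tax.avail_ranks[-1])          # collapse at the lowest available rank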
|
[
"pmaf.internal._shared.generate_lineages_from_taxa",
"pandas.read_csv",
"pmaf.internal._shared.indentify_taxon_notation",
"pmaf.internal._shared.extract_valid_ranks",
"pmaf.internal._shared.get_rank_upto",
"biom.load_table",
"numpy.asarray",
"pandas.DataFrame",
"warnings.simplefilter",
"pandas.notna",
"os.path.splitext",
"pmaf.internal._constants.jRegexGG.findall",
"os.path.isfile",
"pmaf.internal._constants.jRegexQIIME.findall",
"pandas.Series",
"pandas.DataFrame.from_records",
"pmaf.internal._shared.cols2ranks",
"numpy.unique",
"collections.defaultdict",
"os.path.abspath"
] |
[((17, 72), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (38, 72), False, 'import warnings\n'), ((6508, 6533), 'biom.load_table', 'biom.load_table', (['filepath'], {}), '(filepath)\n', (6523, 6533), False, 'import biom\n'), ((7990, 8049), 'numpy.asarray', 'np.asarray', (['ids'], {'dtype': 'self.__internal_taxonomy.index.dtype'}), '(ids, dtype=self.__internal_taxonomy.index.dtype)\n', (8000, 8049), True, 'import numpy as np\n'), ((9250, 9265), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (9260, 9265), True, 'import numpy as np\n'), ((13618, 13635), 'numpy.asarray', 'np.asarray', (['ranks'], {}), '(ranks)\n', (13628, 13635), True, 'import numpy as np\n'), ((20462, 20553), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy', '(True)', 'self.__avail_ranks', '(False)'], {}), '(self.__internal_taxonomy, True, self.\n __avail_ranks, False)\n', (20489, 20553), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((28635, 28695), 'pmaf.internal._shared.extract_valid_ranks', 'extract_valid_ranks', (['taxonomy_dataframe.columns', 'VALID_RANKS'], {}), '(taxonomy_dataframe.columns, VALID_RANKS)\n', (28654, 28695), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((5017, 5048), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5028, 5048), True, 'import pandas as pd\n'), ((9959, 9974), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (9969, 9974), True, 'import numpy as np\n'), ((11249, 11264), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (11259, 11264), True, 'import numpy as np\n'), ((11477, 11595), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy.loc[target_ids]', 'missing_rank', 'tmp_desired_ranks', 'drop_ranks'], {}), '(self.__internal_taxonomy.loc[target_ids],\n missing_rank, tmp_desired_ranks, drop_ranks)\n', (11504, 11595), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((15572, 15616), 'pmaf.internal._shared.get_rank_upto', 'get_rank_upto', (['self.avail_ranks', 'level', '(True)'], {}), '(self.avail_ranks, level, True)\n', (15585, 15616), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((24044, 24082), 'pmaf.internal._shared.indentify_taxon_notation', 'indentify_taxon_notation', (['sample_taxon'], {}), '(sample_taxon)\n', (24068, 24082), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((25125, 25229), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ordered_indices_list', 'data': 'ordered_taxa_list', 'columns': "(['lineage'] + VALID_RANKS)"}), "(index=ordered_indices_list, data=ordered_taxa_list, columns=[\n 'lineage'] + VALID_RANKS)\n", (25137, 25229), True, 'import pandas as pd\n'), ((29610, 29648), 'pmaf.internal._shared.cols2ranks', 'cols2ranks', (['taxonomy_dataframe.columns'], {}), '(taxonomy_dataframe.columns)\n', (29620, 29648), False, 
'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((5399, 5421), 'os.path.abspath', 'path.abspath', (['filepath'], {}), '(filepath)\n', (5411, 5421), False, 'from os import path\n'), ((14905, 14961), 'pandas.Series', 'pd.Series', ([], {'data': 'tmp_feature_lineage', 'index': 'group_indices'}), '(data=tmp_feature_lineage, index=group_indices)\n', (14914, 14961), True, 'import pandas as pd\n'), ((15677, 15763), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy', '(False)', 'target_ranks', '(False)'], {}), '(self.__internal_taxonomy, False, target_ranks, \n False)\n', (15704, 15763), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((19641, 19656), 'pandas.notna', 'pd.notna', (['taxon'], {}), '(taxon)\n', (19649, 19656), True, 'import pandas as pd\n'), ((24628, 24656), 'pmaf.internal._constants.jRegexGG.findall', 'jRegexGG.findall', (['lineage[1]'], {}), '(lineage[1])\n', (24644, 24656), False, 'from pmaf.internal._constants import AVAIL_TAXONOMY_NOTATIONS, jRegexGG, jRegexQIIME, BIOM_TAXONOMY_NAMES, VALID_RANKS\n'), ((25965, 26010), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp_taxa_dict_list'], {}), '(tmp_taxa_dict_list)\n', (25990, 26010), True, 'import pandas as pd\n'), ((2403, 2424), 'os.path.isfile', 'path.isfile', (['taxonomy'], {}), '(taxonomy)\n', (2414, 2424), False, 'from os import path\n'), ((17561, 17577), 'numpy.asarray', 'np.asarray', (['rids'], {}), '(rids)\n', (17571, 17577), True, 'import numpy as np\n'), ((21661, 21728), 'pandas.Series', 'pd.Series', ([], {'data': 'taxonomy_data.iloc[:, 0]', 'index': 'taxonomy_data.index'}), '(data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index)\n', (21670, 21728), True, 'import pandas as pd\n'), ((25552, 25583), 'pmaf.internal._constants.jRegexQIIME.findall', 'jRegexQIIME.findall', (['lineage[1]'], {}), '(lineage[1])\n', (25571, 25583), False, 'from pmaf.internal._constants import AVAIL_TAXONOMY_NOTATIONS, jRegexGG, jRegexQIIME, BIOM_TAXONOMY_NAMES, VALID_RANKS\n'), ((25669, 25686), 'collections.defaultdict', 'defaultdict', (['None'], {}), '(None)\n', (25680, 25686), False, 'from collections import defaultdict\n'), ((27120, 27165), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp_taxa_dict_list'], {}), '(tmp_taxa_dict_list)\n', (27145, 27165), True, 'import pandas as pd\n'), ((5144, 5175), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5155, 5175), True, 'import pandas as pd\n'), ((5251, 5282), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5262, 5282), True, 'import pandas as pd\n'), ((11813, 11834), 'numpy.unique', 'np.unique', (['target_ids'], {}), '(target_ids)\n', (11822, 11834), True, 'import numpy as np\n'), ((16316, 16372), 'pandas.Series', 'pd.Series', ([], {'data': 'tmp_feature_lineage', 'index': 'group_indices'}), '(data=tmp_feature_lineage, index=group_indices)\n', (16325, 16372), True, 'import pandas as pd\n'), ((26757, 26774), 'collections.defaultdict', 'defaultdict', (['None'], {}), '(None)\n', (26768, 26774), False, 'from collections import defaultdict\n'), ((28893, 29017), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '""""""', 'index': 'taxonomy_dataframe.index', 'columns': '[rank for rank in VALID_RANKS if rank not in 
valid_ranks]'}), "(data='', index=taxonomy_dataframe.index, columns=[rank for\n rank in VALID_RANKS if rank not in valid_ranks])\n", (28905, 29017), True, 'import pandas as pd\n'), ((2459, 2482), 'os.path.splitext', 'path.splitext', (['taxonomy'], {}), '(taxonomy)\n', (2472, 2482), False, 'from os import path\n')]
|
from typing import List
import numpy as np
def mask_nan(arrays: List[np.ndarray]) -> List[np.ndarray]:
"""
Drop indices from equal-sized arrays if the element at that index is NaN in
any of the input arrays.
Parameters
----------
arrays : List[np.ndarray]
list of ndarrays containing NaNs, to be masked
Returns
-------
List[np.ndarray]
masked arrays (free of NaNs)
Notes
-----
    This function finds the indices where one or more elements are NaN in one or
more of the input arrays, then drops those indices from all arrays.
For example:
>> a = np.array([0, 1, np.nan, 3])
>> b = np.array([np.nan, 5, np.nan, 7])
>> c = np.array([8, 9, 10, 11])
>> mask_nan([a, b, c])
[array([ 1., 3.]), array([ 5., 7.]), array([ 9, 11])]
"""
n = arrays[0].size
assert all(a.size == n for a in arrays[1:])
mask = np.array([False] * n)
for arr in arrays:
mask = np.logical_or(mask, np.isnan(arr))
return [arr[np.where(~mask)[0]] for arr in arrays]
|
[
"numpy.where",
"numpy.array",
"numpy.isnan"
] |
[((908, 929), 'numpy.array', 'np.array', (['([False] * n)'], {}), '([False] * n)\n', (916, 929), True, 'import numpy as np\n'), ((988, 1001), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (996, 1001), True, 'import numpy as np\n'), ((1019, 1034), 'numpy.where', 'np.where', (['(~mask)'], {}), '(~mask)\n', (1027, 1034), True, 'import numpy as np\n')]
|
# Generated by Django 3.1.1 on 2020-09-08 18:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('grocery', '0003_auto_20200908_1417'),
]
operations = [
migrations.AlterField(
model_name='item',
name='list',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item', to='grocery.list'),
),
migrations.AlterField(
model_name='list',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='list', to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((465, 582), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""item"""', 'to': '"""grocery.list"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='item', to='grocery.list')\n", (482, 582), False, 'from django.db import migrations, models\n'), ((696, 823), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""list"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='list', to=settings.AUTH_USER_MODEL)\n", (713, 823), False, 'from django.db import migrations, models\n')]
|
from os import getenv
from typing import Optional, Dict
from flask import Flask
TestConfig = Optional[Dict[str, bool]]
def create_app(test_config: TestConfig = None) -> Flask:
""" App factory method to initialize the application with given configuration """
app: Flask = Flask(__name__)
if test_config is not None:
app.config.from_mapping(test_config)
@app.route("/")
def index() -> str: # pylint: disable=unused-variable
return "My Hello World App is working..."
@app.route("/version")
def version() -> str: # pylint: disable=unused-variable
"""
DOCKER_IMAGE_TAG is passed in the app from Dockerfile as ARG.
It should be setup in docker build task..
It is used in .gitlab-ci.yaml to pass the hash of the latest commit as docker image tag.
E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .
"""
return getenv("DOCKER_IMAGE_TAG") or "DOCKER_IMAGE_TAG haven't been setup"
return app
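# --- Editor's usage sketch (hypothetical; not part of the original module) ---
if __name__ == "__main__":  # pragma: no cover
    app = create_app({"TESTING": True})
    with app.test_client() as client:
        print(client.get("/").data)         # b'My Hello World App is working...'
        print(client.get("/version").data)  # image tag, or the fallback message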
|
[
"os.getenv",
"flask.Flask"
] |
[((284, 299), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'from flask import Flask\n'), ((951, 977), 'os.getenv', 'getenv', (['"""DOCKER_IMAGE_TAG"""'], {}), "('DOCKER_IMAGE_TAG')\n", (957, 977), False, 'from os import getenv\n')]
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
def get(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __call__(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackContext(InstanceContext):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: The call sid that uniquely identifies the call
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
super(FeedbackContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
self._uri = '/Accounts/{account_sid}/Calls/{call_sid}/Feedback.json'.format(**self._solution)
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackContext {}>'.format(context)
class FeedbackInstance(InstanceResource):
class Issues(object):
AUDIO_LATENCY = "audio-latency"
DIGITS_NOT_CAPTURED = "digits-not-captured"
DROPPED_CALL = "dropped-call"
IMPERFECT_AUDIO = "imperfect-audio"
INCORRECT_CALLER_ID = "incorrect-caller-id"
ONE_WAY_AUDIO = "one-way-audio"
POST_DIAL_DELAY = "post-dial-delay"
UNSOLICITED_CALL = "unsolicited-call"
def __init__(self, version, payload, account_sid, call_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'issues': payload['issues'],
'quality_score': deserialize.integer(payload['quality_score']),
'sid': payload['sid'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FeedbackContext for this FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
if self._context is None:
self._context = FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def issues(self):
"""
:returns: The issues
:rtype: FeedbackInstance.Issues
"""
return self._properties['issues']
@property
def quality_score(self):
"""
:returns: 1 to 5 quality score
:rtype: unicode
"""
return self._properties['quality_score']
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.create(
quality_score,
issue=issue,
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.fetch()
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.update(
quality_score,
issue=issue,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackInstance {}>'.format(context)
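# --- Editor's usage sketch (hypothetical; not part of the Twilio-generated file) ---
# With a `version` object from an authenticated twilio Client, the context above lets
# callers submit or read call-quality feedback, e.g.:
#   ctx = FeedbackContext(version, account_sid="ACXX...", call_sid="CAXX...")
#   ctx.create(quality_score=5, issue=FeedbackInstance.Issues.IMPERFECT_AUDIO)
#   ctx.fetch().quality_score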
|
[
"twilio.base.deserialize.integer",
"twilio.base.deserialize.rfc2822_datetime",
"twilio.base.values.of"
] |
[((4678, 4736), 'twilio.base.values.of', 'values.of', (["{'QualityScore': quality_score, 'Issue': issue}"], {}), "({'QualityScore': quality_score, 'Issue': issue})\n", (4687, 4736), False, 'from twilio.base import values\n'), ((5302, 5315), 'twilio.base.values.of', 'values.of', (['{}'], {}), '({})\n', (5311, 5315), False, 'from twilio.base import values\n'), ((6028, 6086), 'twilio.base.values.of', 'values.of', (["{'QualityScore': quality_score, 'Issue': issue}"], {}), "({'QualityScore': quality_score, 'Issue': issue})\n", (6037, 6086), False, 'from twilio.base import values\n'), ((7662, 7715), 'twilio.base.deserialize.rfc2822_datetime', 'deserialize.rfc2822_datetime', (["payload['date_created']"], {}), "(payload['date_created'])\n", (7690, 7715), False, 'from twilio.base import deserialize\n'), ((7745, 7798), 'twilio.base.deserialize.rfc2822_datetime', 'deserialize.rfc2822_datetime', (["payload['date_updated']"], {}), "(payload['date_updated'])\n", (7773, 7798), False, 'from twilio.base import deserialize\n'), ((7870, 7915), 'twilio.base.deserialize.integer', 'deserialize.integer', (["payload['quality_score']"], {}), "(payload['quality_score'])\n", (7889, 7915), False, 'from twilio.base import deserialize\n')]
|
from calendar import timegm
from datetime import datetime
from typing import Any, Dict
from fastapi import HTTPException
from pydantic import BaseModel, Field
from starlette import status
from .base import UserInfoAuth
from .messages import NOT_VERIFIED
from .verification import JWKS, ExtraVerifier
class FirebaseClaims(BaseModel):
user_id: str = Field(alias="user_id")
email: str = Field(None, alias="email")
class FirebaseCurrentUser(UserInfoAuth):
"""
Verify ID token and get user info of Firebase
"""
user_info = FirebaseClaims
firebase_keys_url = "https://www.googleapis.com/robot/v1/metadata/x509/securetoken@system.gserviceaccount.com"
def __init__(self, project_id: str, *args: Any, **kwargs: Any):
self._key_refresh_locked = False
jwks = JWKS.firebase(self.firebase_keys_url)
super().__init__(
jwks,
*args,
user_info=self.user_info,
audience=project_id,
issuer=f"https://securetoken.google.com/{project_id}",
extra=FirebaseExtraVerifier(project_id=project_id),
**kwargs,
)
async def refresh_keys(self) -> None:
if not self._key_refresh_locked:
# Ensure only one key refresh can happen at once.
# This prevents a dogpile of requests the second the keys expire
# from causing a bunch of refreshes (each one is an http request).
self._key_refresh_locked = True
# Re-query the keys from firebase.
# NOTE: The expires comes from an http header which is supposed to
# be set to a time long before the keys are no longer in use.
# This allows gradual roll-out of the keys and should prevent any
# request from failing.
# The only scenario which will result in failing requests is if
# there are zero requests for the entire duration of the roll-out
# (observed to be around 1 week), followed by a burst of multiple
# requests at once.
jwks = JWKS.firebase(self.firebase_keys_url)
# Reset the keys and the expiry date.
self._verifier._jwks_to_key = jwks.keys
self._keys_expire = jwks.expires
# Remove the lock.
self._key_refresh_locked = False
class FirebaseExtraVerifier(ExtraVerifier):
def __init__(self, project_id: str):
self._pjt_id = project_id
def __call__(self, claims: Dict[str, str], auto_error: bool = True) -> bool:
# auth_time must be past time
if claims.get("auth_time"):
auth_time = int(claims["auth_time"])
now = timegm(datetime.utcnow().utctimetuple())
if now < auth_time:
if auto_error:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=NOT_VERIFIED
)
return False
return True
|
[
"pydantic.Field",
"fastapi.HTTPException",
"datetime.datetime.utcnow"
] |
[((356, 378), 'pydantic.Field', 'Field', ([], {'alias': '"""user_id"""'}), "(alias='user_id')\n", (361, 378), False, 'from pydantic import BaseModel, Field\n'), ((396, 422), 'pydantic.Field', 'Field', (['None'], {'alias': '"""email"""'}), "(None, alias='email')\n", (401, 422), False, 'from pydantic import BaseModel, Field\n'), ((2821, 2897), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': 'NOT_VERIFIED'}), '(status_code=status.HTTP_401_UNAUTHORIZED, detail=NOT_VERIFIED)\n', (2834, 2897), False, 'from fastapi import HTTPException\n'), ((2698, 2715), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2713, 2715), False, 'from datetime import datetime\n')]
|
import sqlite3
import mock
import opbeat.instrumentation.control
from tests.helpers import get_tempstoreclient
from tests.utils.compat import TestCase
class InstrumentSQLiteTest(TestCase):
def setUp(self):
self.client = get_tempstoreclient()
opbeat.instrumentation.control.instrument()
@mock.patch("opbeat.traces.RequestsStore.should_collect")
def test_connect(self, should_collect):
should_collect.return_value = False
self.client.begin_transaction("transaction.test")
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("""CREATE TABLE testdb (id integer, username text)""")
cursor.execute("""INSERT INTO testdb VALUES (1, "Ron")""")
cursor.execute("""DROP TABLE testdb""")
self.client.end_transaction("MyView")
transactions, traces = self.client.instrumentation_store.get_all()
expected_signatures = ['transaction', 'sqlite3.connect :memory:',
'CREATE TABLE', 'INSERT INTO testdb',
'DROP TABLE']
self.assertEqual(set([t['signature'] for t in traces]),
set(expected_signatures))
# Reorder according to the kinds list so we can just test them
sig_dict = dict([(t['signature'], t) for t in traces])
traces = [sig_dict[k] for k in expected_signatures]
self.assertEqual(traces[0]['signature'], 'transaction')
self.assertEqual(traces[0]['kind'], 'transaction')
self.assertEqual(traces[0]['transaction'], 'MyView')
self.assertEqual(traces[1]['signature'], 'sqlite3.connect :memory:')
self.assertEqual(traces[1]['kind'], 'db.sqlite.connect')
self.assertEqual(traces[1]['transaction'], 'MyView')
self.assertEqual(traces[2]['signature'], 'CREATE TABLE')
self.assertEqual(traces[2]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[2]['transaction'], 'MyView')
self.assertEqual(traces[3]['signature'], 'INSERT INTO testdb')
self.assertEqual(traces[3]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[3]['transaction'], 'MyView')
self.assertEqual(traces[4]['signature'], 'DROP TABLE')
self.assertEqual(traces[4]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[4]['transaction'], 'MyView')
self.assertEqual(len(traces), 5)
|
[
"sqlite3.connect",
"mock.patch",
"tests.helpers.get_tempstoreclient"
] |
[((316, 372), 'mock.patch', 'mock.patch', (['"""opbeat.traces.RequestsStore.should_collect"""'], {}), "('opbeat.traces.RequestsStore.should_collect')\n", (326, 372), False, 'import mock\n'), ((236, 257), 'tests.helpers.get_tempstoreclient', 'get_tempstoreclient', ([], {}), '()\n', (255, 257), False, 'from tests.helpers import get_tempstoreclient\n'), ((535, 562), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (550, 562), False, 'import sqlite3\n')]
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='This MLOps project aims to use the Transformers framework from Hugging Face in order to tweak a pre-trained NLP model to accurately gauge the sentiment of an Amazon review (being able to guess the whether the rating of a product is positive or negative given only the text in a review).',
author='group9 DTU MLops',
license='MIT',
)
|
[
"setuptools.find_packages"
] |
[((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')]
|
import sys
import io
input_txt = """
44
"""
sys.stdin = io.StringIO(input_txt)
tmp = input()
# copy the below part and paste to the submission form.
# ---------function------------
def fibonacci(n):
if n <= 1:
return 1
fib_array = [1] * 45
for i in range(2, n+1):
fib_array[i] = fib_array[i-1] + fib_array[i-2]
return fib_array[n]
def main():
n = int(input())
fib = fibonacci(n)
print(fib)
return
main()
# -----------------------------
sys.stdin = sys.__stdin__
|
[
"io.StringIO"
] |
[((65, 87), 'io.StringIO', 'io.StringIO', (['input_txt'], {}), '(input_txt)\n', (76, 87), False, 'import io\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
# continuously differentiable
fn_dict_cdiff = {'2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4,
'3dpoly': 7, 'linear': 8}
# continuous but not differentiable
fn_dict_cont = {'abs': 0, 'abs_sqrt': 5, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# discontinuous
fn_dict_disc = {'step': 6, 'band': 12, 'invband': 13,
'steplinear': 14}
# monotone
fn_dict_monotone = {'sigmoid': 2,
'step': 6, 'linear': 8,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# convex
fn_dict_convex = {'abs': 0, '2dpoly': 1, 'linear': 8,
'abspos': 10, 'sqrpos': 11}
# all functions
fn_dict = {'abs': 0, '2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4, 'abs_sqrt': 5,
'step': 6, '3dpoly': 7, 'linear': 8, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'band': 12, 'invband': 13,
'steplinear': 14, 'pwlinear': 15}
def generate_random_pw_linear(lb=-2, ub=2, n_pieces=5):
splits = np.random.choice(np.arange(lb, ub, 0.1),
n_pieces - 1, replace=False)
splits.sort()
slopes = np.random.uniform(-4, 4, size=n_pieces)
start = []
start.append(np.random.uniform(-1, 1))
for t in range(n_pieces - 1):
start.append(start[t] + slopes[t] * (splits[t] -
(lb if t == 0 else splits[t - 1])))
return lambda x: [start[ind] + slopes[ind] * (x - (lb if ind == 0 else splits[ind - 1])) for ind in [np.searchsorted(splits, x)]][0]
def get_tau_fn(func):
def first(x):
return x[:, [0]] if len(x.shape) == 2 else x
# func describes the relation between response and treatment
if func == fn_dict['abs']:
def tau_fn(x): return np.abs(first(x))
elif func == fn_dict['2dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * (first(x)**2)
elif func == fn_dict['sigmoid']:
def tau_fn(x): return 2 / (1 + np.exp(-2 * first(x)))
elif func == fn_dict['sin']:
def tau_fn(x): return np.sin(first(x))
elif func == fn_dict['frequent_sin']:
def tau_fn(x): return np.sin(3 * first(x))
elif func == fn_dict['abs_sqrt']:
def tau_fn(x): return np.sqrt(np.abs(first(x)))
elif func == fn_dict['step']:
def tau_fn(x): return 1. * (first(x) < 0) + 2.5 * (first(x) >= 0)
elif func == fn_dict['3dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * \
(first(x)**2) + first(x)**3
elif func == fn_dict['linear']:
def tau_fn(x): return first(x)
elif func == fn_dict['rand_pw']:
pw_linear = generate_random_pw_linear()
def tau_fn(x):
return np.array([pw_linear(x_i) for x_i in first(x).flatten()]).reshape(-1, 1)
elif func == fn_dict['abspos']:
def tau_fn(x): return np.abs(first(x)) * (first(x) >= 0)
elif func == fn_dict['sqrpos']:
def tau_fn(x): return (first(x)**2) * (first(x) >= 0)
elif func == fn_dict['band']:
def tau_fn(x): return 1.0 * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['invband']:
def tau_fn(x): return 1. - 1. * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['steplinear']:
def tau_fn(x): return 2. * (first(x) >= 0) - first(x)
elif func == fn_dict['pwlinear']:
def tau_fn(x):
q = first(x)
return (q + 1) * (q <= -1) + (q - 1) * (q >= 1)
else:
raise NotImplementedError()
return tau_fn
def standardize(z, p, y, fn):
ym = y.mean()
ystd = y.std()
y = (y - ym) / ystd
def newfn(x): return (fn(x) - ym) / ystd
return z, p, y, newfn
def get_data(n_samples, n_instruments, iv_strength, tau_fn, dgp_num):
# Construct dataset
# z:- instruments (features included here, can be high-dimensional)
# p :- treatments (features included here as well, can be high-dimensional)
# y :- response (is a scalar always)
confounder = np.random.normal(0, 1, size=(n_samples, 1))
z = np.random.normal(0, 1, size=(n_samples, n_instruments))
fn = tau_fn
if dgp_num == 1:
# DGP 1 in the paper
p = 2 * z[:, [0]] * (z[:, [0]] > 0) * iv_strength \
+ 2 * z[:, [1]] * (z[:, [1]] < 0) * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 2:
# DGP 2 in the paper
p = 2 * z[:, [0]] * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 3:
# DeepIV's DGP - has feature variables as well
# z is 3-dimensional: composed of (1) 1D z, (2) t - time unif~(0,10), and (3) s - customer type {1,...,7}
# y is related to p and z in a complex non-linear, non separable manner
# p is related to z again in a non-separable manner, rho is endogeneity parameter
rho = 0.8
psd = 3.7
pmu = 17.779
ysd = 158.
ymu = -292.1
z_1 = np.random.normal(0, 1, size=(n_samples, 1))
v = np.random.normal(0, 1, size=(n_samples, 1))
t = np.random.uniform(0, 10, size=(n_samples, 1))
s = np.random.randint(1, 8, size=(n_samples, 1))
e = rho * v + \
np.random.normal(0, np.sqrt(1 - rho**2), size=(n_samples, 1))
def psi(t): return 2 * (np.power(t - 5, 4) / 600 +
np.exp(-4 * np.power(t - 5, 2)) + t / 10 - 2)
p = 25 + (z_1 + 3) * psi(t) + v
p = (p - pmu) / psd
g = (10 + p) * s * psi(t) - 2 * p + e
y = (g - ymu) / ysd
z = np.hstack((z_1, s, t))
p = np.hstack((p, s, t))
def fn(p): return ((10 + p[:, 0]) * p[:, 1]
* psi(p[:, 2]) - 2 * p[:, 0] - ymu) / ysd
elif dgp_num == 4:
# Many weak Instruments DGP - n_instruments can be very large
z = np.random.normal(0.5, 1, size=(n_samples, n_instruments))
p = np.amin(z, axis=1).reshape(-1, 1) * iv_strength + confounder * \
(1 - iv_strength) + np.random.normal(0, 0.1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, 0.1, size=(n_samples, 1))
else:
# Here we have equal number of treatments and instruments and each
# instrument affects a separate treatment. Only the first treatment
# matters for the outcome.
z = np.random.normal(0, 2, size=(n_samples, n_instruments))
U = np.random.normal(0, 2, size=(n_samples, 1))
delta = np.random.normal(0, .1, size=(n_samples, 1))
zeta = np.random.normal(0, .1, size=(n_samples, 1))
p = iv_strength * z + (1 - iv_strength) * U + delta
y = fn(p) + U + zeta
return standardize(z, p, y, fn)
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.amin",
"numpy.hstack",
"numpy.searchsorted",
"numpy.power",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.arange"
] |
[((1272, 1311), 'numpy.random.uniform', 'np.random.uniform', (['(-4)', '(4)'], {'size': 'n_pieces'}), '(-4, 4, size=n_pieces)\n', (1289, 1311), True, 'import numpy as np\n'), ((4108, 4151), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (4124, 4151), True, 'import numpy as np\n'), ((4160, 4215), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, n_instruments)'}), '(0, 1, size=(n_samples, n_instruments))\n', (4176, 4215), True, 'import numpy as np\n'), ((1158, 1180), 'numpy.arange', 'np.arange', (['lb', 'ub', '(0.1)'], {}), '(lb, ub, 0.1)\n', (1167, 1180), True, 'import numpy as np\n'), ((1344, 1368), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1361, 1368), True, 'import numpy as np\n'), ((4470, 4515), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4486, 4515), True, 'import numpy as np\n'), ((4566, 4611), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4582, 4611), True, 'import numpy as np\n'), ((4770, 4815), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4786, 4815), True, 'import numpy as np\n'), ((4866, 4911), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4882, 4911), True, 'import numpy as np\n'), ((5384, 5427), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (5400, 5427), True, 'import numpy as np\n'), ((5440, 5483), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (5456, 5483), True, 'import numpy as np\n'), ((5496, 5541), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(n_samples, 1)'}), '(0, 10, size=(n_samples, 1))\n', (5513, 5541), True, 'import numpy as np\n'), ((5554, 5598), 'numpy.random.randint', 'np.random.randint', (['(1)', '(8)'], {'size': '(n_samples, 1)'}), '(1, 8, size=(n_samples, 1))\n', (5571, 5598), True, 'import numpy as np\n'), ((5989, 6011), 'numpy.hstack', 'np.hstack', (['(z_1, s, t)'], {}), '((z_1, s, t))\n', (5998, 6011), True, 'import numpy as np\n'), ((6024, 6044), 'numpy.hstack', 'np.hstack', (['(p, s, t)'], {}), '((p, s, t))\n', (6033, 6044), True, 'import numpy as np\n'), ((6272, 6329), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(1)'], {'size': '(n_samples, n_instruments)'}), '(0.5, 1, size=(n_samples, n_instruments))\n', (6288, 6329), True, 'import numpy as np\n'), ((6790, 6845), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)'], {'size': '(n_samples, n_instruments)'}), '(0, 2, size=(n_samples, n_instruments))\n', (6806, 6845), True, 'import numpy as np\n'), ((6858, 6901), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)'], {'size': '(n_samples, 1)'}), '(0, 2, size=(n_samples, 1))\n', (6874, 6901), True, 'import numpy as np\n'), ((6918, 6963), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6934, 6963), True, 'import numpy as np\n'), ((6978, 7023), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6994, 7023), True, 'import numpy as np\n'), ((1647, 1673), 'numpy.searchsorted', 'np.searchsorted', (['splits', 'x'], {}), '(splits, x)\n', (1662, 1673), True, 'import numpy as np\n'), ((5655, 5676), 'numpy.sqrt', 'np.sqrt', (['(1 - rho ** 2)'], {}), '(1 - rho ** 2)\n', (5662, 5676), True, 'import numpy as np\n'), ((6439, 6484), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6455, 6484), True, 'import numpy as np\n'), ((6536, 6581), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6552, 6581), True, 'import numpy as np\n'), ((5730, 5748), 'numpy.power', 'np.power', (['(t - 5)', '(4)'], {}), '(t - 5, 4)\n', (5738, 5748), True, 'import numpy as np\n'), ((6342, 6360), 'numpy.amin', 'np.amin', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (6349, 6360), True, 'import numpy as np\n'), ((5801, 5819), 'numpy.power', 'np.power', (['(t - 5)', '(2)'], {}), '(t - 5, 2)\n', (5809, 5819), True, 'import numpy as np\n')]
|
import argparse
import os
import pathlib
import cv2
import pickle
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from numpy import genfromtxt
def parse_command_line_options(print_options=False):
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default=-1)
parser.add_argument("-d", type=pathlib.Path)
parser.add_argument("-s", type=int, choices=[0], default=0)
parser.add_argument("-e", type=int, default=0)
parser.add_argument("-a", type=str, default="ars")
parser.add_argument("-i", type=int, default=100)
parser.add_argument("-g", action="store_true")
parser.add_argument("-r", action="store_true")
args = parser.parse_args()
flags = {
"itno": args.n,
"folder": str(args.d),
"spec_num": args.s,
"env_num": args.e,
"alg": args.a,
"num_iter": args.i,
"gpu_flag": args.g,
"render": args.r
}
if print_options:
print('**** Command Line Options ****')
for key in flags:
print('{}: {}'.format(key, flags[key]))
return flags
def open_log_file(itno, folder):
'''
Open a log file to periodically flush data.
Parameters:
itno: int
folder: str
'''
fname = _get_prefix(folder) + 'log' + _get_suffix(itno) + '.txt'
open(fname, 'w').close()
file = open(fname, 'a')
return file
def save_object(name, object, itno, folder):
'''
Save any pickle-able object.
Parameters:
name: str
object: Object
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'wb')
pickle.dump(object, file)
file.close()
def load_object(name, itno, folder):
'''
Load pickled object.
Parameters:
name: str
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'rb')
object = pickle.load(file)
file.close()
return object
def save_log_info(log_info, itno, folder):
np.save(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy', log_info)
def load_log_info(itno, folder, csv=False):
if csv:
return genfromtxt(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.csv', delimiter=',')
else:
return np.load(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy')
def log_to_file(file, iter, num_transitions, reward, prob, additional_data={}):
'''
Log data to file.
Parameters:
file: file_handle
iter: int
num_transitions: int (number of simulation steps in each iter)
reward: float
prob: float (satisfaction probability)
additional_data: dict
'''
file.write('**** Iteration Number {} ****\n'.format(iter))
file.write('Environment Steps Taken: {}\n'.format(num_transitions))
file.write('Reward: {}\n'.format(reward))
file.write('Satisfaction Probability: {}\n'.format(prob))
for key in additional_data:
file.write('{}: {}\n'.format(key, additional_data[key]))
file.write('\n')
file.flush()
def get_image_dir(itno, folder):
image_dir = '{}img{}'.format(_get_prefix(folder), _get_suffix(itno))
if os.path.exists(image_dir) is False:
os.mkdir(image_dir)
return image_dir
def generate_video(env, policy, itno, folder, max_step=10000):
image_dir = get_image_dir(itno, folder)
done = False
state = env.reset()
step = 0
while not done:
img_arr = env.render(mode='rgb_array')
img = Image.fromarray(img_arr)
img.save(image_dir + '/' + str(step) + '.png')
action = policy.get_action(state)
state, _, done, _ = env.step(action)
step += 1
if step > max_step:
done = True
video_name = image_dir + '/' + 'video.avi'
images_temp = [img for img in os.listdir(image_dir)]
images = []
for i in range(len(images_temp)):
for j in images_temp:
directory = str(i) + '.png'
if directory == j:
images.append(j)
frame = cv2.imread(os.path.join(image_dir, images_temp[0]))
height, width, _ = frame.shape
video = cv2.VideoWriter(
video_name, cv2.VideoWriter_fourcc(*'XVID'), 20, (width, height))
for image in images:
video.write(cv2.imread(os.path.join(image_dir, image)))
cv2.destroyAllWindows()
video.release()
def plot_for_threshold(itno, folders, xs, threshold, color):
ys = []
for folder in folders:
val = 0
count = 0
for j in range(itno):
data = load_log_info(j, folder)
for pos in range(len(data)):
if data[pos][-1] >= threshold:
val += data[pos][0]
count += 1
break
ys.append(val / count)
plt.subplots_adjust(bottom=0.145, left=0.13)
plt.rcParams.update({'font.size': 18})
plt.plot(xs, ys, '-ok', label='z = {}'.format(threshold), color=color)
def plot_error_bar(x, data, color, label, points=False):
'''
Plot the error bar from the data.
Parameters:
samples_per_iter: int (number of sample rollouts per iteration of the algorithm)
data: (3+)-tuple of np.array (curve, lower error bar, upper error bar, ...)
color: color of the plot
label: string
'''
plt.subplots_adjust(bottom=0.126)
plt.rcParams.update({'font.size': 18})
if points:
plt.errorbar(x, data[0], data[0] - data[1], fmt='--o', color=color, label=label)
else:
plt.plot(x, data[0], color=color, label=label)
plt.fill_between(x, data[1], data[2], color=color, alpha=0.15)
def extract_plot_data(folder, column_num, low, up, csv=False):
'''
Load and parse log_info to generate error bars
Parameters:
folder: string (name of folder)
column_num: int (column number in log.npy to use)
l: int (lower limit on run number)
u: int (upper limit on run number)
Returns:
4-tuple of numpy arrays (curve, lower error bar, upper error bar, max_over_runs)
'''
log_infos = []
min_length = 1000000
for itno in range(low, up):
log_info = np.transpose(load_log_info(
itno, folder, csv=csv))[column_num]
log_info = np.append([0], log_info)
min_length = min(min_length, len(log_info))
log_infos.append(log_info)
log_infos = [log_info[:min_length] for log_info in log_infos]
data = np.array(log_infos)
curve = np.mean(data, axis=0)
std = np.std(data, axis=0)
max_curve = np.amax(data, axis=0)
return curve, (curve - std), (curve + std), max_curve
# save and render current plot
def save_plot(folder, name, show=True, scientific=True):
plt.rcParams.update({'font.size': 14})
plt.legend()
ax = plt.gca()
ax.xaxis.major.formatter._useMathText = True
if scientific:
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.savefig(_get_prefix(folder) + name + '.pdf', format='pdf')
if show:
plt.show()
# get prefix for file name
def _get_prefix(folder):
if folder == '':
return ''
else:
return folder + '/'
# get suffix from itno
def _get_suffix(itno):
if itno < 0:
return ''
else:
return str(itno)
|
[
"matplotlib.pyplot.fill_between",
"numpy.array",
"cv2.destroyAllWindows",
"matplotlib.pyplot.errorbar",
"numpy.mean",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"matplotlib.pyplot.gca",
"pickle.load",
"numpy.std",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"PIL.Image.fromarray",
"pickle.dump",
"os.path.join",
"numpy.append",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.ticklabel_format",
"numpy.amax"
] |
[((243, 268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (266, 268), False, 'import argparse\n'), ((1698, 1723), 'pickle.dump', 'pickle.dump', (['object', 'file'], {}), '(object, file)\n', (1709, 1723), False, 'import pickle\n'), ((1986, 2003), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1997, 2003), False, 'import pickle\n'), ((4414, 4437), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4435, 4437), False, 'import cv2\n'), ((4888, 4932), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.145)', 'left': '(0.13)'}), '(bottom=0.145, left=0.13)\n', (4907, 4932), True, 'from matplotlib import pyplot as plt\n'), ((4937, 4975), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (4956, 4975), True, 'from matplotlib import pyplot as plt\n'), ((5413, 5446), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.126)'}), '(bottom=0.126)\n', (5432, 5446), True, 'from matplotlib import pyplot as plt\n'), ((5451, 5489), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (5470, 5489), True, 'from matplotlib import pyplot as plt\n'), ((6545, 6564), 'numpy.array', 'np.array', (['log_infos'], {}), '(log_infos)\n', (6553, 6564), True, 'import numpy as np\n'), ((6577, 6598), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6584, 6598), True, 'import numpy as np\n'), ((6609, 6629), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6615, 6629), True, 'import numpy as np\n'), ((6646, 6667), 'numpy.amax', 'np.amax', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6653, 6667), True, 'import numpy as np\n'), ((6820, 6858), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (6839, 6858), True, 'from matplotlib import pyplot as plt\n'), ((6863, 6875), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6873, 6875), True, 'from matplotlib import pyplot as plt\n'), ((6885, 6894), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6892, 6894), True, 'from matplotlib import pyplot as plt\n'), ((3256, 3281), 'os.path.exists', 'os.path.exists', (['image_dir'], {}), '(image_dir)\n', (3270, 3281), False, 'import os\n'), ((3300, 3319), 'os.mkdir', 'os.mkdir', (['image_dir'], {}), '(image_dir)\n', (3308, 3319), False, 'import os\n'), ((3586, 3610), 'PIL.Image.fromarray', 'Image.fromarray', (['img_arr'], {}), '(img_arr)\n', (3601, 3610), False, 'from PIL import Image\n'), ((4140, 4179), 'os.path.join', 'os.path.join', (['image_dir', 'images_temp[0]'], {}), '(image_dir, images_temp[0])\n', (4152, 4179), False, 'import os\n'), ((4265, 4296), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4287, 4296), False, 'import cv2\n'), ((5513, 5598), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'data[0]', '(data[0] - data[1])'], {'fmt': '"""--o"""', 'color': 'color', 'label': 'label'}), "(x, data[0], data[0] - data[1], fmt='--o', color=color, label=label\n )\n", (5525, 5598), True, 'from matplotlib import pyplot as plt\n'), ((5612, 5658), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data[0]'], {'color': 'color', 'label': 'label'}), '(x, data[0], color=color, label=label)\n', (5620, 5658), True, 'from matplotlib import pyplot as plt\n'), ((5667, 5729), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'data[1]', 'data[2]'], {'color': 'color', 'alpha': '(0.15)'}), '(x, data[1], data[2], color=color, alpha=0.15)\n', (5683, 5729), True, 'from matplotlib import pyplot as plt\n'), ((6356, 6380), 'numpy.append', 'np.append', (['[0]', 'log_info'], {}), '([0], log_info)\n', (6365, 6380), True, 'import numpy as np\n'), ((6971, 7032), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (6991, 7032), True, 'from matplotlib import pyplot as plt\n'), ((7121, 7131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7129, 7131), True, 'from matplotlib import pyplot as plt\n'), ((3905, 3926), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3915, 3926), False, 'import os\n'), ((4376, 4406), 'os.path.join', 'os.path.join', (['image_dir', 'image'], {}), '(image_dir, image)\n', (4388, 4406), False, 'import os\n')]
|
from pygments import highlight as _highlight
from pygments.lexers import SqlLexer
from pygments.formatters import HtmlFormatter
def style():
style = HtmlFormatter().get_style_defs()
return style
def highlight(text):
# Generated HTML contains unnecessary newline at the end
# before </pre> closing tag.
# We need to remove that newline because it's screwing up
# QTextEdit formatting and is being displayed
# as a non-editable whitespace.
highlighted_text = _highlight(text, SqlLexer(), HtmlFormatter()).strip()
# Split generated HTML by last newline in it
# argument 1 indicates that we only want to split the string
# by one specified delimiter from the right.
parts = highlighted_text.rsplit("\n", 1)
# Glue back 2 split parts to get the HTML without last
# unnecessary newline
highlighted_text_no_last_newline = "".join(parts)
return highlighted_text_no_last_newline
|
[
"pygments.lexers.SqlLexer",
"pygments.formatters.HtmlFormatter"
] |
[((155, 170), 'pygments.formatters.HtmlFormatter', 'HtmlFormatter', ([], {}), '()\n', (168, 170), False, 'from pygments.formatters import HtmlFormatter\n'), ((510, 520), 'pygments.lexers.SqlLexer', 'SqlLexer', ([], {}), '()\n', (518, 520), False, 'from pygments.lexers import SqlLexer\n'), ((522, 537), 'pygments.formatters.HtmlFormatter', 'HtmlFormatter', ([], {}), '()\n', (535, 537), False, 'from pygments.formatters import HtmlFormatter\n')]
|
import numpy as np
def histogram_r(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_min
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255 - I_min) / (I_max - I_min)))+ I_min
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_g(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255) / (I_max - I_min)) )
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_b(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_max
else:
p_out = int((r_array[i][j] - I_min) * ((I_max) / (I_max - I_min)))
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def stretching(img):
height = len(img)
width = len(img[0])
img[:, :, 2] = histogram_r(img[:, :, 2], height, width)
img[:, :, 1] = histogram_g(img[:, :, 1], height, width)
img[:, :, 0] = histogram_b(img[:, :, 0], height, width)
return img
|
[
"numpy.zeros"
] |
[((349, 374), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (357, 374), True, 'import numpy as np\n'), ((1279, 1304), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (1287, 1304), True, 'import numpy as np\n'), ((2189, 2214), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (2197, 2214), True, 'import numpy as np\n')]
|
# Copyright 2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def test_show_graph():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
nn.clear_parameters()
x = nn.Variable((2, 3, 4, 4))
with nn.parameter_scope('c1'):
h = PF.convolution(x, 8, (3, 3), pad=(1, 1))
h = F.relu(PF.batch_normalization(h))
with nn.parameter_scope('f1'):
y = PF.affine(h, 10)
with TBGraphWriter(log_dir='log_out') as tb:
tb.from_variable(y, output_name="y")
def test_show_curve():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
with TBGraphWriter(log_dir='log_out') as tb:
values = []
for i in range(360):
s = np.sin(i / 180.0 * np.pi)
tb.add_scalar("show_curve/sin", s, i)
values.append(s)
nd_values = np.array(values)
for i in range(10):
tb.add_histogram("histogram", nd_values, i)
nd_values += 0.05
|
[
"nnabla.parametric_functions.affine",
"nnabla.parametric_functions.convolution",
"nnabla.clear_parameters",
"nnabla.parametric_functions.batch_normalization",
"nnabla.parameter_scope",
"numpy.array",
"nnabla.Variable",
"numpy.sin",
"pytest.skip",
"nnabla.experimental.tb_graph_writer.TBGraphWriter"
] |
[((922, 943), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (941, 943), True, 'import nnabla as nn\n'), ((952, 977), 'nnabla.Variable', 'nn.Variable', (['(2, 3, 4, 4)'], {}), '((2, 3, 4, 4))\n', (963, 977), True, 'import nnabla as nn\n'), ((987, 1011), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""c1"""'], {}), "('c1')\n", (1005, 1011), True, 'import nnabla as nn\n'), ((1025, 1065), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', '(8)', '(3, 3)'], {'pad': '(1, 1)'}), '(x, 8, (3, 3), pad=(1, 1))\n', (1039, 1065), True, 'import nnabla.parametric_functions as PF\n'), ((1121, 1145), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""f1"""'], {}), "('f1')\n", (1139, 1145), True, 'import nnabla as nn\n'), ((1159, 1175), 'nnabla.parametric_functions.affine', 'PF.affine', (['h', '(10)'], {}), '(h, 10)\n', (1168, 1175), True, 'import nnabla.parametric_functions as PF\n'), ((1186, 1218), 'nnabla.experimental.tb_graph_writer.TBGraphWriter', 'TBGraphWriter', ([], {'log_dir': '"""log_out"""'}), "(log_dir='log_out')\n", (1199, 1218), False, 'from nnabla.experimental.tb_graph_writer import TBGraphWriter\n'), ((1492, 1524), 'nnabla.experimental.tb_graph_writer.TBGraphWriter', 'TBGraphWriter', ([], {'log_dir': '"""log_out"""'}), "(log_dir='log_out')\n", (1505, 1524), False, 'from nnabla.experimental.tb_graph_writer import TBGraphWriter\n'), ((1723, 1739), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1731, 1739), True, 'import numpy as np\n'), ((830, 903), 'pytest.skip', 'pytest.skip', (['"""Skip because tensorboardX and tensorflow is not installed."""'], {}), "('Skip because tensorboardX and tensorflow is not installed.')\n", (841, 903), False, 'import pytest\n'), ((1085, 1110), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {}), '(h)\n', (1107, 1110), True, 'import nnabla.parametric_functions as PF\n'), ((1395, 1468), 'pytest.skip', 'pytest.skip', (['"""Skip because tensorboardX and tensorflow is not installed."""'], {}), "('Skip because tensorboardX and tensorflow is not installed.')\n", (1406, 1468), False, 'import pytest\n'), ((1597, 1622), 'numpy.sin', 'np.sin', (['(i / 180.0 * np.pi)'], {}), '(i / 180.0 * np.pi)\n', (1603, 1622), True, 'import numpy as np\n')]
|
import json
import logging
LOGGER = logging.getLogger(__name__)
def start(self):
self.start_consuming()
def on_message(self, channel, method, properties, body):
"""
Invoked by pika when a message is delivered from the AMQP broker. The
channel is passed for convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param channel: The channel object.
:type channel: pika.channel.Channel
:param method: basic_deliver method.
:type method: pika.Spec.Basic.Deliver
:param properties: The properties.
:type properties: pika.Spec.BasicProperties
:param body: The message body.
:type body: bytes
"""
try:
print('message received')
print(properties.correlation_id)
if properties.correlation_id == self.correlation_id_reference:
print("SUCCEEDEEDRT")
self.callback_method(json.loads(body), properties)
self.acknowledge_message(method.delivery_tag)
self.channel.stop_consuming()
except Exception:
LOGGER.exception("Synchronous callback method exception:")
|
[
"logging.getLogger",
"json.loads"
] |
[((38, 65), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (55, 65), False, 'import logging\n'), ((1110, 1126), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1120, 1126), False, 'import json\n')]
|
import numpy as np
import tensorflow as tf
from time import perf_counter as timer
def main():
x = np.load('data/cifar_test_x.npy')
y = np.load('data/cifar_test_y.npy').flatten()
interpreter = tf.lite.Interpreter(model_path='data/fbnet.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
pred = []
t0 = timer()
for i in range(len(x)):
interpreter.set_tensor(input_details[0]['index'], x[i:i+1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
pred.append(output_data.argmax())
t = timer() - t0
print('total time: {:.2f}s, average: {:.2f}ms'.format(t, t * 1000 / len(x)))
print('accuracy: {}/{}'.format(sum(y == pred), len(x)))
return output_data
if __name__ == '__main__':
main()
|
[
"tensorflow.lite.Interpreter",
"numpy.load",
"time.perf_counter"
] |
[((104, 136), 'numpy.load', 'np.load', (['"""data/cifar_test_x.npy"""'], {}), "('data/cifar_test_x.npy')\n", (111, 136), True, 'import numpy as np\n'), ((207, 258), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""data/fbnet.tflite"""'}), "(model_path='data/fbnet.tflite')\n", (226, 258), True, 'import tensorflow as tf\n'), ((425, 432), 'time.perf_counter', 'timer', ([], {}), '()\n', (430, 432), True, 'from time import perf_counter as timer\n'), ((683, 690), 'time.perf_counter', 'timer', ([], {}), '()\n', (688, 690), True, 'from time import perf_counter as timer\n'), ((145, 177), 'numpy.load', 'np.load', (['"""data/cifar_test_y.npy"""'], {}), "('data/cifar_test_y.npy')\n", (152, 177), True, 'import numpy as np\n')]
|
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase.ToonBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from toontown.battle import MovieUtil
import math
class TwoDBattleMgr(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDBattleMgr')
def __init__(self, game, toon):
self.game = game
self.toon = toon
self.waterBulletIval = None
self.shootTrack = None
self.showCollSpheres = False
self.WATER_SPRAY_COLOR = Point4(1, 1, 1, 1)
self.WATER_BULLET_SCALE = 0.2
self.SHOOT_DISTANCE = 10
self.WATER_BULLET_START_POINT = Point3(0, 1, 3)
self.WATER_BULLET_END_POINT = Point3(0, self.WATER_BULLET_START_POINT.getY() + self.SHOOT_DISTANCE, self.WATER_BULLET_START_POINT.getZ())
self.WATER_BULLET_HIDE_POINT = Point3(0, 0, 1.5)
self.sprayProp = self.game.assetMgr.sprayProp.copyTo(self.game.assetMgr.world)
self.setupPistol()
if self.toon == base.localAvatar:
self.createShootCollision()
return
def destroy(self):
if self.toon == base.localAvatar:
if self.waterBulletIval:
self.waterBulletIval.finish()
del self.waterBulletIval
self.waterBulletIval = None
self.ignore('enter' + self.collSphereName)
base.localAvatar.controlManager.currentControls.cTrav.removeCollider(self.waterBullet)
self.waterBullet.removeNode()
del self.waterBullet
self.hand_jointpath0.removeNode()
MovieUtil.removeProp(self.pistol)
if self.shootTrack != None:
self.shootTrack.finish()
self.shootTrack = None
self.game = None
self.toon = None
return
def start(self):
pass
def stop(self):
pass
def setupPistol(self):
self.pistol = globalPropPool.getProp('water-gun')
hands = self.toon.getRightHands()
self.hand_jointpath0 = hands[0].attachNewNode('handJoint0-path')
pistolPos = Point3(0.28, 0.1, 0.08)
pistolHpr = VBase3(85.6, -4.44, 94.43)
MovieUtil.showProp(self.pistol, self.hand_jointpath0, pistolPos, pistolHpr)
def shoot(self):
if not self.shootTrack:
self.shootTrack = Parallel(self.getToonShootTrack(), self.getSprayTrack())
if self.toon == base.localAvatar:
self.shootTrack.append(Func(self.game.assetMgr.playWatergunSound))
self.shootTrack.append(self.getWaterBulletIval())
self.shootTrack.start()
return
elif self.shootTrack.isStopped():
self.shootTrack = Parallel(self.getToonShootTrack(), self.getSprayTrack())
if self.toon == base.localAvatar:
self.shootTrack.append(Func(self.game.assetMgr.playWatergunSound))
self.shootTrack.append(self.getWaterBulletIval())
self.shootTrack.start()
def createShootCollision(self):
self.notify.debug('entering createShootCollision')
collSphere = CollisionSphere(0, 0, 0, 1)
collSphere.setTangible(0)
self.collSphereName = self.game.uniqueName('waterBullet')
collNode = CollisionNode(self.collSphereName)
collNode.setFromCollideMask(ToontownGlobals.WallBitmask)
collNode.addSolid(collSphere)
self.waterBullet = base.localAvatar.attachNewNode(collNode)
self.waterBullet.setPos(self.WATER_BULLET_HIDE_POINT)
self.waterBullet.setScale(self.WATER_BULLET_SCALE)
self.waterBullet.hide()
if self.showCollSpheres:
self.waterBullet.show()
bulletEvent = CollisionHandlerEvent()
bulletEvent.addInPattern('enter%fn')
bulletEvent.addOutPattern('exit%fn')
cTrav = base.localAvatar.controlManager.currentControls.cTrav
cTrav.addCollider(self.waterBullet, bulletEvent)
self.accept('enter' + self.collSphereName, self.handleBulletCollision)
self.waterBulletIval = Sequence(Wait(0.15))
self.waterBulletIval.append(LerpPosInterval(self.waterBullet, 0.25, pos=Point3(self.WATER_BULLET_END_POINT), startPos=Point3(self.WATER_BULLET_START_POINT), name='waterBulletMoveFront'))
self.waterBulletIval.append(Func(self.waterBullet.setPos, self.WATER_BULLET_HIDE_POINT))
def getToonShootTrack(self):
def returnToLastAnim(toon):
if hasattr(toon, 'playingAnim') and toon.playingAnim:
toon.loop(toon.playingAnim)
else:
toon.loop('neutral')
torso = self.toon.getPart('torso', '1000')
toonTrack = Sequence(ActorInterval(self.toon, 'water-gun', startFrame=48, endFrame=58, partName='torso'), ActorInterval(self.toon, 'water-gun', startFrame=107, endFrame=126, playRate=2, partName='torso'), Func(returnToLastAnim, self.toon))
return toonTrack
def calcSprayStartPos(self):
if self.toon:
self.toon.update(0)
joint = self.pistol.find('**/joint_nozzle')
p = joint.getPos(render)
self.origin = p
def calcSprayEndPos(self):
if self.toon:
xDirection = -math.sin(self.toon.getH())
else:
xDirection = -math.sin(-90)
endPos = Point3(self.origin.getX() + self.SHOOT_DISTANCE * xDirection, self.origin.getY(), self.origin.getZ())
self.target = endPos
def getSprayTrack(self):
dSprayScale = 0.15
dSprayHold = 0.035
color = self.WATER_SPRAY_COLOR
parent = render
horizScale = 1.0
vertScale = 1.0
def showSpray(sprayScale, sprayRot, sprayProp, parent):
sprayRot.reparentTo(parent)
sprayRot.clearMat()
sprayScale.reparentTo(sprayRot)
sprayScale.clearMat()
sprayProp.reparentTo(sprayScale)
sprayProp.clearMat()
sprayRot.setPos(self.origin)
sprayRot.lookAt(Point3(self.target))
def calcTargetScale(horizScale = horizScale, vertScale = vertScale):
distance = Vec3(self.target - self.origin).length()
yScale = distance / MovieUtil.SPRAY_LEN
targetScale = Point3(yScale * horizScale, yScale, yScale * vertScale)
return targetScale
def prepareToShrinkSpray(spray, sprayProp):
sprayProp.setPos(Point3(0.0, -MovieUtil.SPRAY_LEN, 0.0))
spray.setPos(self.target)
def hideSpray(spray, sprayScale, sprayRot, sprayProp, propPool):
sprayProp.detachNode()
sprayRot.removeNode()
sprayScale.removeNode()
sprayProp = self.sprayProp
sprayScale = hidden.attachNewNode('spray-parent')
sprayRot = hidden.attachNewNode('spray-rotate')
spray = sprayRot
spray.setColor(color)
if color[3] < 1.0:
spray.setTransparency(1)
track = Sequence(Wait(0.1), Func(self.calcSprayStartPos), Func(self.calcSprayEndPos), Func(showSpray, sprayScale, sprayRot, sprayProp, parent), LerpScaleInterval(sprayScale, dSprayScale, calcTargetScale, startScale=MovieUtil.PNT3_NEARZERO), Wait(dSprayHold), Func(prepareToShrinkSpray, spray, sprayProp), LerpScaleInterval(sprayScale, dSprayScale, MovieUtil.PNT3_NEARZERO), Func(hideSpray, spray, sprayScale, sprayRot, sprayProp, globalPropPool))
return track
def handleBulletCollision(self, cevent):
if cevent.getIntoNodePath().getName()[:5] == 'Enemy':
sectionIndex = int(cevent.getIntoNodePath().getName()[6:8])
enemyIndex = int(cevent.getIntoNodePath().getName()[9:11])
messenger.send('enemyShot', [sectionIndex, enemyIndex])
def clearWaterBulletIval(self):
if self.waterBulletIval:
self.waterBulletIval.finish()
del self.waterBulletIval
self.waterBulletIval = None
return
def getWaterBulletIval(self):
if not self.waterBulletIval.isPlaying():
return self.waterBulletIval
|
[
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory",
"math.sin",
"toontown.battle.MovieUtil.showProp",
"toontown.battle.MovieUtil.removeProp"
] |
[((365, 425), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""TwoDBattleMgr"""'], {}), "('TwoDBattleMgr')\n", (408, 425), False, 'from direct.directnotify import DirectNotifyGlobal\n'), ((1723, 1756), 'toontown.battle.MovieUtil.removeProp', 'MovieUtil.removeProp', (['self.pistol'], {}), '(self.pistol)\n', (1743, 1756), False, 'from toontown.battle import MovieUtil\n'), ((2299, 2374), 'toontown.battle.MovieUtil.showProp', 'MovieUtil.showProp', (['self.pistol', 'self.hand_jointpath0', 'pistolPos', 'pistolHpr'], {}), '(self.pistol, self.hand_jointpath0, pistolPos, pistolHpr)\n', (2317, 2374), False, 'from toontown.battle import MovieUtil\n'), ((5409, 5422), 'math.sin', 'math.sin', (['(-90)'], {}), '(-90)\n', (5417, 5422), False, 'import math\n')]
|
from collections import defaultdict
from pathlib import Path
import re
import yaml
import json
from botok import Text
import pyewts
conv = pyewts.pyewts()
def dictify_text(string, is_split=False, selection_yaml='data/dictionaries/dict_cats.yaml', expandable=True, mode='en_bo'):
"""
takes segmented text and finds entries from dictionaries
:param expandable: will segment definitions into senses if True, not if False
:param selection_yaml: add None or "" to prevent selection
:param string: segmented text to be processed
:return: list of tuples containing the word and a dict containing the definitions(selected or not) and an url
"""
words = []
if is_split:
for w in string:
if w:
words.append((w, {}))
else:
string = string.replace('\n', ' ')
for w in string.split(' '):
if w:
words.append((w, {}))
dicts = load_dicts()
for num, word in enumerate(words):
lemma = word[0].rstrip('་')
defs = dicts[lemma]
# filter
if selection_yaml:
defs = select_defs(defs, yaml_path=selection_yaml, mode=mode)
# split in senses
if expandable:
if defs and 'en' in defs:
entry_en = defs['en'][1]
defs['en'][1] = split_in_senses(entry_en, lang='en')
if defs and 'bo' in defs:
entry_bo = defs['bo'][1]
defs['bo'][1] = split_in_senses(entry_bo, lang='bo')
words[num][1]['defs'] = defs
# url
url = gen_link(lemma)
words[num][1]['url'] = url
return words
def load_dicts():
dicts = defaultdict(dict)
dict_path = Path(__file__).parent / 'data/dictionaries/converted'
dict_other = Path(__file__).parent / 'data/dictionaries/other'
dict_files = sorted(list(dict_path.glob('*.txt')) + list(dict_other.glob('*.txt')))
for f in dict_files:
name = f.stem
if name.startswith('monlam'):
name = name[:-2] # remove file number suffix "_1", "_2" and "_3"
lines = f.read_text().split('\n')
for line in lines:
if '|' not in line:
continue
lemma, entry = line.split('|')
dicts[lemma][name] = f'{dicts[lemma][name]} {entry}' if name in dicts[lemma] else entry
return dicts
def split_in_senses(entry, lang):
header_size = 10 # syllables
tsikchen_dagsar = r' ([༡༢༣༤༥༦༧༨༩༠]+\.)'
tsikchen_dagsar_start = r'(?: |^)([༡༢༣༤༥༦༧༨༩༠]+\.)'
tsikchen = r' ([༡༢༣༤༥༦༧༨༩༠]+༽) '
tsikchen_start = r'(?: |^)([༡༢༣༤༥༦༧༨༩༠]+༽) '
monlam = r' ((?:[^་]+་[^་]+ )?[0-9]+\.) '
ry_start = r'^([0-9]+\)) ' # line must start with this pattern
ry = r'(?: |^)([0-9]+\)) '
senses = []
if lang == 'bo':
if re.findall(monlam, entry):
parts = [e for e in re.split(monlam, entry) if e]
try:
parts = [f'{parts[n]} {parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
elif re.findall(tsikchen_dagsar, entry):
parts = [e for e in re.split(tsikchen_dagsar_start, entry) if e]
if not re.findall(r'^[༡༢༣༤༥༦༧༨༩༠]', parts[0]):
parts = [f'{parts[0]} {parts[1]}'] + parts[2:]
try:
parts = [f'{parts[n]}{parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
elif re.findall(tsikchen, entry):
parts = [e for e in re.split(tsikchen_start, entry) if e]
if parts[0].startswith('༼'):
parts = [f'{parts[0]} {parts[1]}'] + parts[2:]
try:
parts = [f'{parts[n]} {parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
else:
return entry
elif lang == 'en' and re.findall(ry_start, entry):
parts = [e for e in re.split(ry, entry) if e]
parts = [f'{parts[n]} {parts[n+1]}' for n in range(0, len(parts), 2)]
for p in parts:
t = p.split(' ')
size = header_size - 4 if header_size - 4 > 0 else 0
if len(t) > size:
header, body = ' '.join(t[:size]).replace('_', ' '), ' '.join(t[size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
else:
return entry
return senses
def select_defs(defs, yaml_path, mode):
cats = yaml.safe_load(Path(yaml_path).read_text())
english, tibetan = cats['english']['dictionary'], cats['tibetan']['dictionary']
selected = {}
# selecting the first English definition from the list in dict_cats.yaml
if 'en' in mode:
for full, name in english:
if full in defs:
selected['en'] = (name, defs[full])
break
# selecting the first Tibetan definition from the list in dict_cats.yaml
if 'bo' in mode:
for full, name in tibetan:
if full in defs:
selected['bo'] = (name, defs[full])
break
# format selected
if 'en' in selected and 'bo' in selected:
return {'en': [selected['en'][0], selected['en'][1]], 'bo': [selected['bo'][0], selected['bo'][1]]}
elif 'en' in selected:
return {'en': [selected['en'][0], selected['en'][1]]}
elif 'bo' in selected:
return {'bo': [selected['bo'][0], selected['bo'][1]]}
else:
return None
def gen_link(word):
link_pattern = 'https://dictionary.christian-steinert.de/#%7B%22activeTerm%22%3A%22{word}%22%2C%22' \
'lang%22%3A%22tib%22%2C%22inputLang%22%3A%22tib%22%2C%22currentListTerm%22%3A%22{word}%22%2C%22' \
'forceLeftSideVisible%22%3Atrue%2C%22offset%22%3A0%7D'
wylie = conv.toWylie(word).replace(' ', '%20')
return link_pattern.format(word=wylie)
if __name__ == '__main__':
for f in Path('input').glob('*.txt'):
dump = f.read_text(encoding='utf-8')
out = dictify_text(dump, expandable=True)
out_f = Path('output') / f.name
out_f.write_text(json.dumps(out, ensure_ascii=False, indent=4))
__all__ = [dictify_text]
|
[
"re.split",
"pathlib.Path",
"json.dumps",
"pyewts.pyewts",
"collections.defaultdict",
"re.findall",
"botok.Text"
] |
[((142, 157), 'pyewts.pyewts', 'pyewts.pyewts', ([], {}), '()\n', (155, 157), False, 'import pyewts\n'), ((1689, 1706), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1700, 1706), False, 'from collections import defaultdict\n'), ((2833, 2858), 're.findall', 're.findall', (['monlam', 'entry'], {}), '(monlam, entry)\n', (2843, 2858), False, 'import re\n'), ((1723, 1737), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1727, 1737), False, 'from pathlib import Path\n'), ((1794, 1808), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1798, 1808), False, 'from pathlib import Path\n'), ((3513, 3547), 're.findall', 're.findall', (['tsikchen_dagsar', 'entry'], {}), '(tsikchen_dagsar, entry)\n', (3523, 3547), False, 'import re\n'), ((5184, 5211), 're.findall', 're.findall', (['ry_start', 'entry'], {}), '(ry_start, entry)\n', (5194, 5211), False, 'import re\n'), ((7260, 7273), 'pathlib.Path', 'Path', (['"""input"""'], {}), "('input')\n", (7264, 7273), False, 'from pathlib import Path\n'), ((7400, 7414), 'pathlib.Path', 'Path', (['"""output"""'], {}), "('output')\n", (7404, 7414), False, 'from pathlib import Path\n'), ((7449, 7494), 'json.dumps', 'json.dumps', (['out'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(out, ensure_ascii=False, indent=4)\n', (7459, 7494), False, 'import json\n'), ((4338, 4365), 're.findall', 're.findall', (['tsikchen', 'entry'], {}), '(tsikchen, entry)\n', (4348, 4365), False, 'import re\n'), ((5814, 5829), 'pathlib.Path', 'Path', (['yaml_path'], {}), '(yaml_path)\n', (5818, 5829), False, 'from pathlib import Path\n'), ((2892, 2915), 're.split', 're.split', (['monlam', 'entry'], {}), '(monlam, entry)\n', (2900, 2915), False, 'import re\n'), ((3645, 3682), 're.findall', 're.findall', (['"""^[༡༢༣༤༥༦༧༨༩༠]"""', 'parts[0]'], {}), "('^[༡༢༣༤༥༦༧༨༩༠]', parts[0])\n", (3655, 3682), False, 'import re\n'), ((5241, 5260), 're.split', 're.split', (['ry', 'entry'], {}), '(ry, entry)\n', (5249, 5260), False, 'import re\n'), ((3581, 3619), 're.split', 're.split', (['tsikchen_dagsar_start', 'entry'], {}), '(tsikchen_dagsar_start, entry)\n', (3589, 3619), False, 'import re\n'), ((3183, 3190), 'botok.Text', 'Text', (['p'], {}), '(p)\n', (3187, 3190), False, 'from botok import Text\n'), ((4399, 4430), 're.split', 're.split', (['tsikchen_start', 'entry'], {}), '(tsikchen_start, entry)\n', (4407, 4430), False, 'import re\n'), ((4008, 4015), 'botok.Text', 'Text', (['p'], {}), '(p)\n', (4012, 4015), False, 'from botok import Text\n'), ((4802, 4809), 'botok.Text', 'Text', (['p'], {}), '(p)\n', (4806, 4809), False, 'from botok import Text\n')]
|
# coding: utf-8
import pytest
from edipy import fields, validators, exceptions
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '1'),
(fields.Integer(1, validators=[validators.MaxValue(3)]), '2'),
(fields.Integer(1, validators=[validators.MinValue(1)]), '5'),
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), '12345'),
(fields.String(12, validators=[validators.Email()]), '<EMAIL>'),
])
def test_using_validators(fixed_type, data):
try:
fixed_type.encode(data)
except exceptions.ValidationError:
pytest.fail(u"ValidationError should not be thrown")
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '0'),
(fields.Integer(1, validators=[validators.Range(1, 5)]), '6'),
])
def test_validate_range(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MaxValue(1)]), '2'),
(fields.Integer(1, validators=[validators.MaxValue(5)]), '6'),
])
def test_validate_max_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MinValue(1)]), '0'),
(fields.Integer(1, validators=[validators.MinValue(5)]), '4'),
])
def test_validate_min_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), 'a123f'),
(fields.String(5, validators=[validators.Regex(r"\d")]), 'abcde'),
(fields.String(5, validators=[validators.Regex(r"[A-Z]{6}")]), 'ABCDE'),
])
def test_validate_regex(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
def test_throws_exception_when_regex_is_invalid():
with pytest.raises(ValueError):
field = fields.String(5, validators=[validators.Regex(")")])
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(11, validators=[validators.Email()]), 'edimail.com'),
(fields.String(11, validators=[validators.Email()]), '<EMAIL>'),
])
def test_validate_email(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
|
[
"edipy.validators.MaxValue",
"edipy.validators.Email",
"pytest.fail",
"pytest.raises",
"edipy.validators.Range",
"edipy.validators.Regex",
"edipy.validators.MinValue"
] |
[((903, 944), 'pytest.raises', 'pytest.raises', (['exceptions.ValidationError'], {}), '(exceptions.ValidationError)\n', (916, 944), False, 'import pytest\n'), ((1220, 1261), 'pytest.raises', 'pytest.raises', (['exceptions.ValidationError'], {}), '(exceptions.ValidationError)\n', (1233, 1261), False, 'import pytest\n'), ((1537, 1578), 'pytest.raises', 'pytest.raises', (['exceptions.ValidationError'], {}), '(exceptions.ValidationError)\n', (1550, 1578), False, 'import pytest\n'), ((1939, 1980), 'pytest.raises', 'pytest.raises', (['exceptions.ValidationError'], {}), '(exceptions.ValidationError)\n', (1952, 1980), False, 'import pytest\n'), ((2076, 2101), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2089, 2101), False, 'import pytest\n'), ((2418, 2459), 'pytest.raises', 'pytest.raises', (['exceptions.ValidationError'], {}), '(exceptions.ValidationError)\n', (2431, 2459), False, 'import pytest\n'), ((611, 663), 'pytest.fail', 'pytest.fail', (['u"""ValidationError should not be thrown"""'], {}), "(u'ValidationError should not be thrown')\n", (622, 663), False, 'import pytest\n'), ((2148, 2169), 'edipy.validators.Regex', 'validators.Regex', (['""")"""'], {}), "(')')\n", (2164, 2169), False, 'from edipy import fields, validators, exceptions\n'), ((165, 187), 'edipy.validators.Range', 'validators.Range', (['(1)', '(5)'], {}), '(1, 5)\n', (181, 187), False, 'from edipy import fields, validators, exceptions\n'), ((232, 254), 'edipy.validators.MaxValue', 'validators.MaxValue', (['(3)'], {}), '(3)\n', (251, 254), False, 'from edipy import fields, validators, exceptions\n'), ((299, 321), 'edipy.validators.MinValue', 'validators.MinValue', (['(1)'], {}), '(1)\n', (318, 321), False, 'from edipy import fields, validators, exceptions\n'), ((365, 391), 'edipy.validators.Regex', 'validators.Regex', (['"""[0-9]+"""'], {}), "('[0-9]+')\n", (381, 391), False, 'from edipy import fields, validators, exceptions\n'), ((441, 459), 'edipy.validators.Email', 'validators.Email', ([], {}), '()\n', (457, 459), False, 'from edipy import fields, validators, exceptions\n'), ((749, 771), 'edipy.validators.Range', 'validators.Range', (['(1)', '(5)'], {}), '(1, 5)\n', (765, 771), False, 'from edipy import fields, validators, exceptions\n'), ((816, 838), 'edipy.validators.Range', 'validators.Range', (['(1)', '(5)'], {}), '(1, 5)\n', (832, 838), False, 'from edipy import fields, validators, exceptions\n'), ((1062, 1084), 'edipy.validators.MaxValue', 'validators.MaxValue', (['(1)'], {}), '(1)\n', (1081, 1084), False, 'from edipy import fields, validators, exceptions\n'), ((1129, 1151), 'edipy.validators.MaxValue', 'validators.MaxValue', (['(5)'], {}), '(5)\n', (1148, 1151), False, 'from edipy import fields, validators, exceptions\n'), ((1379, 1401), 'edipy.validators.MinValue', 'validators.MinValue', (['(1)'], {}), '(1)\n', (1398, 1401), False, 'from edipy import fields, validators, exceptions\n'), ((1446, 1468), 'edipy.validators.MinValue', 'validators.MinValue', (['(5)'], {}), '(5)\n', (1465, 1468), False, 'from edipy import fields, validators, exceptions\n'), ((1695, 1721), 'edipy.validators.Regex', 'validators.Regex', (['"""[0-9]+"""'], {}), "('[0-9]+')\n", (1711, 1721), False, 'from edipy import fields, validators, exceptions\n'), ((1770, 1793), 'edipy.validators.Regex', 'validators.Regex', (['"""\\\\d"""'], {}), "('\\\\d')\n", (1786, 1793), False, 'from edipy import fields, validators, exceptions\n'), ((1841, 1869), 'edipy.validators.Regex', 'validators.Regex', (['"""[A-Z]{6}"""'], {}), 
"('[A-Z]{6}')\n", (1857, 1869), False, 'from edipy import fields, validators, exceptions\n'), ((2256, 2274), 'edipy.validators.Email', 'validators.Email', ([], {}), '()\n', (2272, 2274), False, 'from edipy import fields, validators, exceptions\n'), ((2329, 2347), 'edipy.validators.Email', 'validators.Email', ([], {}), '()\n', (2345, 2347), False, 'from edipy import fields, validators, exceptions\n')]
|
################################################################################
# Module: schedule.py
# Description: Functions for handling conversion of EnergyPlus schedule objects
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import functools
import io
import logging as lg
from datetime import datetime, timedelta
import archetypal
import numpy as np
import pandas as pd
from archetypal import log
class Schedule(object):
"""An object designed to handle any EnergyPlys schedule object"""
def __init__(self, sch_name, idf=None, start_day_of_the_week=0,
strict=False, base_year=2018, schType=None, **kwargs):
"""
Args:
idf (IDF): IDF object
sch_name (str): The schedule name in the idf file
start_day_of_the_week (int): 0-based day of week (Monday=0)
strict (bool): if True, schedules that have the Field-Sets such
as Holidays and CustomDay will raise an error if they are absent
from the IDF file. If False, any missing qualifiers will be
ignored.
base_year (int): The base year of the schedule. Defaults to 2018
since the first day of that year is a Monday.
"""
super(Schedule, self).__init__(**kwargs)
self.strict = strict
self.idf = idf
self.schName = sch_name
self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)
self.year = base_year
self.startDate = self.start_date()
self.count = 0
self.startHOY = 1
self.endHOY = 24
self.unit = "unknown"
self.index_ = None
self.values = None
self.schType = schType
_type = kwargs.get('Type', None)
if _type is None:
self.schTypeLimitsName = self.get_schedule_type_limits_name(
sch_type=self.schType)
else:
self.schTypeLimitsName = _type
@classmethod
def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):
idftxt = "VERSION, 8.9;" # Not an emplty string. has just the
# version number
# we can make a file handle of a string
fhandle = io.StringIO(idftxt)
# initialize the IDF object with the file handle
idf_scratch = archetypal.IDF(fhandle)
idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),
**dict(Name=Name,
Schedule_Type_Limits_Name='',
Hourly_Value=hourly_value),
save=False)
sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)
return sched
@property
def all_values(self):
"""returns the values array"""
if self.values is None:
self.values = self.get_schedule_values(sch_name=self.schName,
sch_type=self.schType)
return self.values
else:
return self.values
@property
def max(self):
return max(self.all_values)
@property
def min(self):
return min(self.all_values)
@property
def mean(self):
return np.mean(self.all_values)
@property
def series(self):
"""Returns the schedule values as a pd.Series object with a
DateTimeIndex"""
index = pd.date_range(start=self.startDate, periods=len(
self.all_values), freq='1H')
return pd.Series(self.all_values, index=index)
def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):
"""Return the Schedule Type Limits name associated to a schedule
name"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name,
sch_type=sch_type)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
return 'unknown'
else:
return schedule_limit_name
def get_schedule_type_limits_data(self, sch_name=None):
"""Returns Schedule Type Limits data from schedule name"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
# this schedule is probably a 'Schedule:Week:Daily' which does
# not have a Schedule_Type_Limits_Name field
return '', '', '', ''
else:
lower_limit, upper_limit, numeric_type, unit_type = \
self.idf.get_schedule_type_limits_data_by_name(
schedule_limit_name)
self.unit = unit_type
if self.unit == "unknown":
self.unit = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
def get_schedule_type(self, sch_name=None):
"""Return the schedule type"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
sch_type = schedule_values.fieldvalues[0]
return sch_type
def start_date(self):
"""The start date of the schedule. Satisfies `startDayOfTheWeek`"""
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
start_date = c.monthdatescalendar(self.year, 1)[0][0]
return datetime(start_date.year, start_date.month, start_date.day)
def plot(self, slice=None, **kwargs):
hourlyvalues = self.all_values
index = pd.date_range(self.startDate, periods=len(
hourlyvalues),
freq='1H')
series = pd.Series(hourlyvalues, index=index, dtype=float)
if slice is None:
slice = pd.IndexSlice[:]
elif len(slice) > 1:
slice = pd.IndexSlice[slice[0]:slice[1]]
ax = series.loc[slice].plot(**kwargs, label=self.schName)
return ax
def get_interval_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Interval"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)
hourly_values = np.arange(24)
start_hour = 0
for i in range(number_of_day_sch):
value = float(values['Value_Until_Time_{}'.format(i + 1)])
until_time = [int(s.strip()) for s in
values['Time_{}'.format(i + 1)].split(":") if
s.strip().isdigit()]
end_hour = int(until_time[0] + until_time[1] / 60)
for hour in range(start_hour, end_hour):
hourly_values[hour] = value
start_hour = end_hour
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
def get_hourly_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Hourly'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)
fieldvalues_ = np.array(values.fieldvalues[3:])
return fieldvalues_
def get_compact_weekly_ep_schedule_values(self, sch_name=None,
start_date=None, index=None):
"""'schedule:week:compact'"""
if start_date is None:
start_date = self.startDate
if index is None:
idx = pd.date_range(start=start_date, periods=168, freq='1H')
slicer_ = pd.Series([False] * (len(idx)), index=idx)
else:
slicer_ = pd.Series([False] * (len(index)), index=index)
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)
weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)
# update last day of schedule
if self.count == 0:
self.schType = values.key
self.endHOY = 168
num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)
for i in range(num_of_daily_schedules):
day_type = values['DayType_List_{}'.format(i + 1)].lower()
how = self.field_set(day_type, slicer_)
if not weekly_schedules.loc[how].empty:
# Loop through days and replace with day:schedule values
days = []
for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(
freq='D')):
if not day.empty:
ref = values.get_referenced_object(
"ScheduleDay_Name_{}".format(i + 1))
day.loc[:] = self.get_schedule_values(
sch_name=ref.Name, sch_type=ref.key)
days.append(day)
new = pd.concat(days)
slicer_.update(
pd.Series([True] * len(new.index), index=new.index))
slicer_ = slicer_.apply(lambda x: x == True)
weekly_schedules.update(new)
else:
return weekly_schedules.values
return weekly_schedules.values
def get_daily_weekly_ep_schedule_values(self, sch_name=None):
"""'schedule:week:daily'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)
# 7 list for 7 days of the week
hourly_values = []
for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']:
ref = values.get_referenced_object(
'{}_ScheduleDay_Name'.format(day))
h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)
hourly_values.append(h)
hourly_values = np.array(hourly_values)
# shift days earlier by self.startDayOfTheWeek
hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)
return hourly_values.ravel()
def get_list_day_ep_schedule_values(self, sch_name=None):
"""'schedule:day:list'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:day:list'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
import pandas as pd
freq = int(values['Minutes_per_Item']) # Frequency of the values
num_values = values.fieldvalues[5:] # List of values
method = values['Interpolate_to_Timestep'] # How to resample
        # fill a list of available values and pad with zeros (safer, though
        # padding should not normally be needed)
all_values = np.arange(int(24 * 60 / freq))
for i in all_values:
try:
all_values[i] = num_values[i]
except:
all_values[i] = 0
# create a fake index to help us with the resampling
index = pd.date_range(start=self.startDate,
periods=(24 * 60) / freq,
freq='{}T'.format(freq))
series = pd.Series(all_values, index=index)
# resample series to hourly values and apply resampler function
series = series.resample('1H').apply(_how(method))
return series.values
def get_constant_ep_schedule_values(self, sch_name=None):
"""'schedule:constant'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:constant'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
hourly_values = np.arange(8760)
value = float(values['Hourly_Value'])
for hour in hourly_values:
hourly_values[hour] = value
if numeric_type.strip().lower() == 'discrete':
hourly_values = hourly_values.astype(int)
return hourly_values
def get_file_ep_schedule_values(self, sch_name=None):
"""'schedule:file'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:file'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
filename = values['File_Name']
column = values['Column_Number']
rows = values['Rows_to_Skip_at_Top']
hours = values['Number_of_Hours_of_Data']
sep = values['Column_Separator']
interp = values['Interpolate_to_Timestep']
import pandas as pd
import os
idfdir = os.path.dirname(self.idf.idfname)
file = os.path.join(idfdir, filename)
delimeter = _separator(sep)
skip_rows = int(rows) - 1 # We want to keep the column
col = [int(column) - 1] # zero-based
values = pd.read_csv(file, delimiter=delimeter, skiprows=skip_rows,
usecols=col)
return values.iloc[:, 0].values
def get_compact_ep_schedule_values(self, sch_name=None):
"""'schedule:compact'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:compact'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
field_sets = ['through', 'for', 'interpolate', 'until', 'value']
fields = values.fieldvalues[3:]
index = pd.date_range(start=self.startDate, periods=8760, freq='H')
zeros = np.zeros(len(index))
slicer_ = pd.Series([False] * len(index), index=index)
series = pd.Series(zeros, index=index)
from_day = self.startDate
ep_from_day = datetime(self.year, 1, 1)
from_time = '00:00'
how_interpolate = None
for field in fields:
if any([spe in field.lower() for spe in field_sets]):
f_set, hour, minute, value = self.field_interpreter(field)
if f_set.lower() == 'through':
# main condition. All sub-conditions must obey a
# `Through` condition
# First, initialize the slice (all False for now)
through_conditions = self.invalidate_condition(series)
# reset from_time
from_time = '00:00'
# Prepare ep_to_day variable
ep_to_day = self.date_field_interpretation(value) + \
timedelta(days=1)
# Calculate Timedelta in days
days = (ep_to_day - ep_from_day).days
# Add timedelta to start_date
to_day = from_day + timedelta(days=days) + timedelta(
hours=-1)
# slice the conditions with the range and apply True
through_conditions.loc[from_day:to_day] = True
from_day = to_day + timedelta(hours=1)
ep_from_day = ep_to_day
elif f_set.lower() == 'for':
# slice specific days
# reset from_time
from_time = '00:00'
for_condition = self.invalidate_condition(series)
values = value.split()
if len(values) > 1:
# if multiple `For`. eg.: For: Weekends Holidays,
# Combine both conditions
for value in values:
if value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
                                # Reset through condition
through_conditions = how
for_condition = how
else:
how = self.field_set(value, slicer_)
for_condition.loc[how] = True
elif value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
                        # Reset through condition
through_conditions = how
for_condition = how
else:
# Apply condition to slice
how = self.field_set(value)
for_condition.loc[how] = True
# Combine the for_condition with all_conditions
all_conditions = through_conditions & for_condition
# update in memory slice
# self.sliced_day_.loc[all_conditions] = True
elif 'interpolate' in f_set.lower():
# we need to upsample to series to 8760 * 60 values
new_idx = pd.date_range(start=self.startDate,
periods=525600, closed='left',
freq='T')
series = series.resample('T').pad()
series = series.reindex(new_idx)
series.fillna(method='pad', inplace=True)
through_conditions = through_conditions.resample('T').pad()
through_conditions = through_conditions.reindex(new_idx)
through_conditions.fillna(method='pad', inplace=True)
for_condition = for_condition.resample('T').pad()
for_condition = for_condition.reindex(new_idx)
for_condition.fillna(method='pad', inplace=True)
how_interpolate = value.lower()
elif f_set.lower() == 'until':
until_condition = self.invalidate_condition(series)
if series.index.freq.name == 'T':
# until_time = str(int(hour) - 1) + ':' + minute
until_time = timedelta(hours=int(hour),
minutes=int(minute)) - timedelta(
minutes=1)
else:
until_time = str(int(hour) - 1) + ':' + minute
until_condition.loc[until_condition.between_time(from_time,
str(
until_time)).index] = True
all_conditions = for_condition & through_conditions & \
until_condition
from_time = str(int(hour)) + ':' + minute
elif f_set.lower() == 'value':
                    # If the term `Value: ` field is used, we will catch it
# here.
# update in memory slice
slicer_.loc[all_conditions] = True
series[all_conditions] = value
else:
# Do something here before looping to the next Field
pass
else:
# If the term `Value: ` is not used; the variable is simply
# passed in the Field
value = float(field)
series[all_conditions] = value
# update in memory slice
slicer_.loc[all_conditions] = True
if how_interpolate:
return series.resample('H').mean().values
else:
return series.values
def field_interpreter(self, field):
"""dealing with a Field-Set (Through, For, Interpolate,
# Until, Value) and return the parsed string"""
if 'through' in field.lower():
# deal with through
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
hour = None
minute = None
value = statement.strip()
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'for' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
# parse without a colon
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'interpolate' in field.lower():
msg = 'The schedule "{sch}" contains sub-hourly values (' \
'Field-Set="{field}"). The average over the hour is ' \
'taken'.format(sch=self.schName, field=field)
log(msg, lg.WARNING)
f_set, value = field.split(':')
hour = None
minute = None
elif 'until' in field.lower():
if ':' in field.lower():
# parse colon
try:
f_set, hour, minute = field.split(':')
hour = hour.strip() # remove trailing spaces
minute = minute.strip() # remove trailing spaces
value = None
except:
f_set = 'until'
hour, minute = field.split(':')
hour = hour[-2:].strip()
minute = minute.strip()
value = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'value' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
else:
# deal with the data value
f_set = field
hour = None
minute = None
value = field[len(field) + 1:].strip()
return f_set, hour, minute, value
@staticmethod
def invalidate_condition(series):
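        """Return an all-False boolean Series aligned with the index of `series`."""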
index = series.index
periods = len(series)
return pd.Series([False] * periods, index=index)
def get_yearly_ep_schedule_values(self, sch_name=None):
"""'schedule:year'"""
# first week
start_date = self.startDate
idx = pd.date_range(start=start_date, periods=8760, freq='1H')
hourly_values = pd.Series([0] * 8760, index=idx)
# update last day of schedule
self.endHOY = 8760
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:year'.upper(), sch_name)
# generate weekly schedules
num_of_weekly_schedules = int(len(values.fieldvalues[3:]) / 5)
for i in range(num_of_weekly_schedules):
ref = values.get_referenced_object(
'ScheduleWeek_Name_{}'.format(i + 1))
start_month = values['Start_Month_{}'.format(i + 1)]
end_month = values['End_Month_{}'.format(i + 1)]
start_day = values['Start_Day_{}'.format(i + 1)]
end_day = values['End_Day_{}'.format(i + 1)]
start = datetime.strptime(
'{}/{}/{}'.format(self.year, start_month, start_day),
'%Y/%m/%d')
end = datetime.strptime(
'{}/{}/{}'.format(self.year, end_month, end_day),
'%Y/%m/%d')
days = (end - start).days + 1
end_date = start_date + timedelta(days=days) + timedelta(hours=23)
how = pd.IndexSlice[start_date:end_date]
weeks = []
for name, week in hourly_values.loc[how].groupby(
pd.Grouper(freq='168H')):
if not week.empty:
try:
week.loc[:] = self.get_schedule_values(
sch_name=ref.Name, start_date=week.index[0],
index=week.index, sch_type=ref.key)
except ValueError:
week.loc[:] = self.get_schedule_values(
ref.Name, week.index[0])[0:len(week)]
finally:
weeks.append(week)
new = pd.concat(weeks)
hourly_values.update(new)
start_date += timedelta(days=days)
return hourly_values.values
def get_schedule_values(self, sch_name=None, start_date=None, index=None,
sch_type=None):
"""Main function that returns the schedule values
Args:
sch_type:
index:
start_date:
"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
self.schType = schedule_values.key.upper()
sch_type = self.schType
if self.count == 0:
# This is the first time, get the schedule type and the type limits.
self.schTypeLimitsName = self.get_schedule_type_limits_name()
self.count += 1
if sch_type.upper() == "schedule:year".upper():
hourly_values = self.get_yearly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:interval".upper():
hourly_values = self.get_interval_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:hourly".upper():
hourly_values = self.get_hourly_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:list".upper():
hourly_values = self.get_list_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:week:compact".upper():
hourly_values = self.get_compact_weekly_ep_schedule_values(
sch_name, start_date, index)
elif sch_type.upper() == "schedule:week:daily".upper():
hourly_values = self.get_daily_weekly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:constant".upper():
hourly_values = self.get_constant_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:compact".upper():
hourly_values = self.get_compact_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:file".upper():
hourly_values = self.get_file_ep_schedule_values(
sch_name)
else:
log('Archetypal does not support "{}" currently'.format(
self.schType), lg.WARNING)
hourly_values = []
return hourly_values
def is_schedule(self, sch_name):
"""Returns True if idfobject is one of 'schedule_types'"""
if sch_name.upper() in self.idf.schedules_dict:
return True
else:
return False
def to_year_week_day(self):
"""convert a Schedule Class to the 'Schedule:Year',
'Schedule:Week:Daily' and 'Schedule:Day:Hourly' representation
Returns:
'Schedule:Year', list of ['Schedule:Week:Daily'],
list of ['Schedule:Day:Hourly']
"""
full_year = np.array(self.all_values) # array of shape (8760,)
values = full_year.reshape(-1, 24) # shape (365, 24)
# create unique days
unique_days, nds = np.unique(values, axis=0, return_inverse=True)
ep_days = []
dict_day = {}
count_day = 0
for unique_day in unique_days:
name = 'd_' + self.schName + '_' + '%03d' % count_day
name, count_day = archetypal.check_unique_name('d', count_day,
name,
archetypal.settings.unique_schedules,
suffix=True)
dict_day[name] = unique_day
archetypal.settings.unique_schedules.append(name)
# Create idf_objects for schedule:day:hourly
ep_day = self.idf.add_object(
ep_object='Schedule:Day:Hourly'.upper(),
save=False,
**dict(Name=name,
Schedule_Type_Limits_Name=self.schType,
**{'Hour_{}'.format(i + 1): unique_day[i]
for i in range(24)})
)
ep_days.append(ep_day)
# create unique weeks from unique days
unique_weeks, nwsi, nws, count = np.unique(
full_year[:364 * 24, ...].reshape(-1, 168), return_index=True,
axis=0, return_inverse=True, return_counts=True)
# Appending unique weeks in dictionary with name and values of weeks as
# keys
# {'name_week': {'dayName':[]}}
dict_week = {}
count_week = 0
for unique_week in unique_weeks:
week_id = 'w_' + self.schName + '_' + '%03d' % count_week
week_id, count_week = archetypal.check_unique_name('w',
count_week,
week_id,
archetypal.settings.unique_schedules,
suffix=True)
archetypal.settings.unique_schedules.append(week_id)
dict_week[week_id] = {}
for i in list(range(0, 7)):
day_of_week = unique_week[..., i * 24:(i + 1) * 24]
for key in dict_day:
if (day_of_week == dict_day[key]).all():
dict_week[week_id]['day_{}'.format(i)] = key
# Create idf_objects for schedule:week:daily
list_day_of_week = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday', 'Saturday']
ordered_day_n = np.array([6, 0, 1, 2, 3, 4, 5])
ordered_day_n = np.roll(ordered_day_n, self.startDayOfTheWeek)
ep_weeks = []
for week_id in dict_week:
ep_week = self.idf.add_object(
ep_object='Schedule:Week:Daily'.upper(),
save=False,
**dict(Name=week_id,
**{'{}_ScheduleDay_Name'.format(
weekday): dict_week[week_id][
'day_{}'.format(i)] for
i, weekday in
zip(ordered_day_n, list_day_of_week)
},
Holiday_ScheduleDay_Name=
dict_week[week_id]['day_6'],
SummerDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
WinterDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
CustomDay1_ScheduleDay_Name=
dict_week[week_id]['day_2'],
CustomDay2_ScheduleDay_Name=
dict_week[week_id]['day_5'])
)
ep_weeks.append(ep_week)
import itertools
blocks = {}
from_date = datetime(self.year, 1, 1)
bincount = [sum(1 for _ in group)
for key, group in itertools.groupby(nws + 1) if key]
week_order = {i: v for i, v in enumerate(np.array(
[key for key, group in itertools.groupby(nws + 1) if key]) - 1)}
for i, (week_n, count) in enumerate(
zip(week_order, bincount)):
week_id = list(dict_week)[week_order[i]]
to_date = from_date + timedelta(days=int(count * 7), hours=-1)
blocks[i] = {}
blocks[i]['week_id'] = week_id
blocks[i]['from_day'] = from_date.day
blocks[i]['end_day'] = to_date.day
blocks[i]['from_month'] = from_date.month
blocks[i]['end_month'] = to_date.month
from_date = to_date + timedelta(hours=1)
# If this is the last block, force end of year
if i == len(bincount) - 1:
blocks[i]['end_day'] = 31
blocks[i]['end_month'] = 12
new_dict = dict(Name=self.schName + '_',
Schedule_Type_Limits_Name=self.schTypeLimitsName)
for i in blocks:
new_dict.update({"ScheduleWeek_Name_{}".format(i + 1):
blocks[i]['week_id'],
"Start_Month_{}".format(i + 1):
blocks[i]['from_month'],
"Start_Day_{}".format(i + 1):
blocks[i]['from_day'],
"End_Month_{}".format(i + 1):
blocks[i]['end_month'],
"End_Day_{}".format(i + 1):
blocks[i]['end_day']})
ep_year = self.idf.add_object(ep_object='Schedule:Year'.upper(),
save=False, **new_dict)
return ep_year, ep_weeks, ep_days
def date_field_interpretation(self, field):
"""Date Field Interpretation
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
Info:
See EnergyPlus documentation for more details:
1.6.8.1.2 Field: Start Date (Table 1.4: Date Field Interpretation)
"""
# < number > Weekday in Month
formats = ['%m/%d', '%d %B', '%B %d', '%d %b', '%b %d']
date = None
for format_str in formats:
            # Try to parse using each defined format
try:
date = datetime.strptime(field, format_str)
except:
pass
else:
date = datetime(self.year, date.month, date.day)
if date is None:
# if the defined formats did not work, try the fancy parse
try:
date = self.parse_fancy_string(field)
except:
msg = "the schedule '{sch}' contains a " \
"Field that is not understood: '{field}'".format(
sch=self.schName,
field=field)
raise ValueError(msg)
else:
return date
else:
return date
def parse_fancy_string(self, field):
"""Will try to parse cases such as `3rd Monday in February` or `Last
Weekday In Month`
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
"""
import re
# split the string at the term ' in '
time, month = field.lower().split(' in ')
month = datetime.strptime(month, '%B').month
# split the first part into nth and dayofweek
nth, dayofweek = time.split(' ')
if 'last' in nth:
nth = -1 # Use the last one
else:
nth = re.findall(r'\d+', nth) # use the nth one
nth = int(nth[0]) - 1 # python is zero-based
weekday = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
'friday': 4, 'saturday': 5, 'sunday': 6}
# parse the dayofweek eg. monday
dayofweek = weekday.get(dayofweek, 6)
# create list of possible days using Calendar
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
monthcal = c.monthdatescalendar(self.year, month)
# iterate though the month and get the nth weekday
date = [day for week in monthcal for day in week if \
day.weekday() == dayofweek and \
day.month == month][nth]
return datetime(date.year, date.month, date.day)
def field_set(self, field, slicer_=None):
"""helper function to return the proper slicer depending on the
field_set value.
Available values are:
Weekdays, Weekends, Holidays, Alldays, SummerDesignDay,
WinterDesignDay, Sunday, Monday, Tuesday, Wednesday, Thursday,
Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays
Args:
field (str): The EnergyPlus field set value.
slicer_ (pd.Series): The persistent slicer for this schedule
Returns:
(indexer-like): Returns the appropriate indexer for the series.
"""
if field.lower() == 'weekdays':
# return only days of weeks
return lambda x: x.index.dayofweek < 5
elif field.lower() == 'weekends':
# return only weekends
return lambda x: x.index.dayofweek >= 5
elif field.lower() == 'alldays':
log('For schedule "{}", the field-set "AllDays" may be overridden '
'by the "AllOtherDays" field-set'.format(
self.schName), lg.WARNING)
            # return all days := equivalent to .loc[:]
return pd.IndexSlice[:]
elif field.lower() == 'allotherdays':
# return unused days (including special days). Uses the global
# variable `slicer_`
import operator
if slicer_ is not None:
return _conjunction(*[self.special_day(field, slicer_),
~slicer_], logical=operator.or_)
else:
raise NotImplementedError
elif field.lower() == 'sunday':
# return only sundays
return lambda x: x.index.dayofweek == 6
elif field.lower() == 'monday':
# return only mondays
return lambda x: x.index.dayofweek == 0
elif field.lower() == 'tuesday':
# return only Tuesdays
return lambda x: x.index.dayofweek == 1
elif field.lower() == 'wednesday':
# return only Wednesdays
return lambda x: x.index.dayofweek == 2
elif field.lower() == 'thursday':
# return only Thursdays
return lambda x: x.index.dayofweek == 3
elif field.lower() == 'friday':
# return only Fridays
return lambda x: x.index.dayofweek == 4
elif field.lower() == 'saturday':
# return only Saturdays
return lambda x: x.index.dayofweek == 5
elif field.lower() == 'summerdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'winterdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'holiday' or field.lower() == 'holidays':
field = 'holiday'
return self.special_day(field, slicer_)
elif not self.strict:
# If not strict, ignore missing field-sets such as CustomDay1
return pd.IndexSlice[:]
else:
raise NotImplementedError(
'Archetypal does not yet support The '
'Field_set "{}"'.format(field))
def __len__(self):
"""returns the length of all values of the schedule"""
return len(self.all_values)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Schedule):
return self.all_values == other.all_values
else:
raise NotImplementedError
def __ne__(self, other):
return ~(self.__eq__(other))
def __add__(self, other):
if isinstance(other, Schedule):
return self.all_values + other.all_values
elif isinstance(other, list):
return self.all_values + other
else:
raise NotImplementedError
def __sub__(self, other):
if isinstance(other, Schedule):
return self.all_values - other.all_values
elif isinstance(other, list):
return self.all_values - other
else:
raise NotImplementedError
def __mul__(self, other):
if isinstance(other, Schedule):
return self.all_values * other.all_values
elif isinstance(other, list):
return self.all_values * other
else:
raise NotImplementedError
def get_sdow(self, start_day_of_week):
"""Returns the start day of the week"""
if start_day_of_week is None:
return self.idf.day_of_week_for_start_day
else:
return start_day_of_week
def special_day(self, field, slicer_):
"""try to get the RunPeriodControl:SpecialDays for the corresponding
Day Type"""
sp_slicer_ = slicer_.copy()
sp_slicer_.loc[:] = False
special_day_types = ['holiday', 'customday1', 'customday2']
dds = self.idf.idfobjects['RunPeriodControl:SpecialDays'.upper()]
dd = [dd for dd in dds if dd.Special_Day_Type.lower() == field
or dd.Special_Day_Type.lower() in special_day_types]
if len(dd) > 0:
slice = []
for dd in dd:
# can have more than one special day types
data = dd.Start_Date
ep_start_date = self.date_field_interpretation(data)
ep_orig = datetime(self.year, 1, 1)
days_to_speciald = (ep_start_date - ep_orig).days
duration = int(dd.Duration)
from_date = self.startDate + timedelta(days=days_to_speciald)
to_date = from_date + timedelta(days=duration) + timedelta(
hours=-1)
sp_slicer_.loc[from_date:to_date] = True
return sp_slicer_
elif not self.strict:
return sp_slicer_
else:
            msg = 'Could not find a "RunPeriodControl:SpecialDays" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
self.schName, field.capitalize()
)
raise ValueError(msg)
def design_day(schedule, field):
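    """Return an indexer matching the SizingPeriod:DesignDay date for the given Day Type."""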
# try to get the SizingPeriod:DesignDay for the corresponding Day Type
dds = schedule.idf.idfobjects['SizingPeriod:DesignDay'.upper()]
dd = [dd for dd in dds if dd.Day_Type.lower() == field]
if len(dd) > 0:
# should have found only one design day matching the Day Type
data = [dd[0].Month, dd[0].Day_of_Month]
date = '/'.join([str(item).zfill(2) for item in data])
date = schedule.date_field_interpretation(date)
return lambda x: x.index == date
else:
msg = 'Could not find a "SizingPeriod:DesignDay" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
schedule.schName, field.capitalize()
)
raise ValueError(msg)
def _conjunction(*conditions, logical=np.logical_and):
"""Applies a logical function on n conditions"""
return functools.reduce(logical, conditions)
def _separator(sep):
"""helper function to return the correct delimiter"""
if sep == 'Comma':
return ','
elif sep == 'Tab':
return '\t'
elif sep == 'Fixed':
return None
elif sep == 'Semicolon':
return ';'
else:
return ','
def _how(how):
"""Helper function to return the correct resampler"""
if how.lower() == 'average':
return 'mean'
elif how.lower() == 'linear':
return 'interpolate'
elif how.lower() == 'no':
return 'max'
else:
return 'max'
|
[
"archetypal.check_unique_name",
"pandas.read_csv",
"pandas.Grouper",
"archetypal.settings.unique_schedules.append",
"numpy.array",
"datetime.timedelta",
"archetypal.log",
"pandas.date_range",
"numpy.arange",
"datetime.datetime",
"numpy.mean",
"io.StringIO",
"functools.reduce",
"os.path.dirname",
"re.findall",
"pandas.Series",
"calendar.Calendar",
"numpy.roll",
"numpy.unique",
"itertools.groupby",
"datetime.datetime.strptime",
"os.path.join",
"archetypal.IDF",
"pandas.concat"
] |
[((44582, 44619), 'functools.reduce', 'functools.reduce', (['logical', 'conditions'], {}), '(logical, conditions)\n', (44598, 44619), False, 'import functools\n'), ((2337, 2356), 'io.StringIO', 'io.StringIO', (['idftxt'], {}), '(idftxt)\n', (2348, 2356), False, 'import io\n'), ((2436, 2459), 'archetypal.IDF', 'archetypal.IDF', (['fhandle'], {}), '(fhandle)\n', (2450, 2459), False, 'import archetypal\n'), ((3372, 3396), 'numpy.mean', 'np.mean', (['self.all_values'], {}), '(self.all_values)\n', (3379, 3396), True, 'import numpy as np\n'), ((3648, 3687), 'pandas.Series', 'pd.Series', (['self.all_values'], {'index': 'index'}), '(self.all_values, index=index)\n', (3657, 3687), True, 'import pandas as pd\n'), ((5639, 5693), 'calendar.Calendar', 'calendar.Calendar', ([], {'firstweekday': 'self.startDayOfTheWeek'}), '(firstweekday=self.startDayOfTheWeek)\n', (5656, 5693), False, 'import calendar\n'), ((5771, 5830), 'datetime.datetime', 'datetime', (['start_date.year', 'start_date.month', 'start_date.day'], {}), '(start_date.year, start_date.month, start_date.day)\n', (5779, 5830), False, 'from datetime import datetime, timedelta\n'), ((6057, 6106), 'pandas.Series', 'pd.Series', (['hourlyvalues'], {'index': 'index', 'dtype': 'float'}), '(hourlyvalues, index=index, dtype=float)\n', (6066, 6106), True, 'import pandas as pd\n'), ((6798, 6811), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (6807, 6811), True, 'import numpy as np\n'), ((7721, 7753), 'numpy.array', 'np.array', (['values.fieldvalues[3:]'], {}), '(values.fieldvalues[3:])\n', (7729, 7753), True, 'import numpy as np\n'), ((10508, 10531), 'numpy.array', 'np.array', (['hourly_values'], {}), '(hourly_values)\n', (10516, 10531), True, 'import numpy as np\n'), ((10611, 10666), 'numpy.roll', 'np.roll', (['hourly_values', '(-self.startDayOfTheWeek)'], {'axis': '(0)'}), '(hourly_values, -self.startDayOfTheWeek, axis=0)\n', (10618, 10666), True, 'import numpy as np\n'), ((11845, 11879), 'pandas.Series', 'pd.Series', (['all_values'], {'index': 'index'}), '(all_values, index=index)\n', (11854, 11879), True, 'import pandas as pd\n'), ((12424, 12439), 'numpy.arange', 'np.arange', (['(8760)'], {}), '(8760)\n', (12433, 12439), True, 'import numpy as np\n'), ((13378, 13411), 'os.path.dirname', 'os.path.dirname', (['self.idf.idfname'], {}), '(self.idf.idfname)\n', (13393, 13411), False, 'import os\n'), ((13427, 13457), 'os.path.join', 'os.path.join', (['idfdir', 'filename'], {}), '(idfdir, filename)\n', (13439, 13457), False, 'import os\n'), ((13621, 13692), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': 'delimeter', 'skiprows': 'skip_rows', 'usecols': 'col'}), '(file, delimiter=delimeter, skiprows=skip_rows, usecols=col)\n', (13632, 13692), True, 'import pandas as pd\n'), ((14249, 14308), 'pandas.date_range', 'pd.date_range', ([], {'start': 'self.startDate', 'periods': '(8760)', 'freq': '"""H"""'}), "(start=self.startDate, periods=8760, freq='H')\n", (14262, 14308), True, 'import pandas as pd\n'), ((14427, 14456), 'pandas.Series', 'pd.Series', (['zeros'], {'index': 'index'}), '(zeros, index=index)\n', (14436, 14456), True, 'import pandas as pd\n'), ((14514, 14539), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (14522, 14539), False, 'from datetime import datetime, timedelta\n'), ((23732, 23773), 'pandas.Series', 'pd.Series', (['([False] * periods)'], {'index': 'index'}), '([False] * periods, index=index)\n', (23741, 23773), True, 'import pandas as pd\n'), ((23937, 23993), 'pandas.date_range', 
'pd.date_range', ([], {'start': 'start_date', 'periods': '(8760)', 'freq': '"""1H"""'}), "(start=start_date, periods=8760, freq='1H')\n", (23950, 23993), True, 'import pandas as pd\n'), ((24018, 24050), 'pandas.Series', 'pd.Series', (['([0] * 8760)'], {'index': 'idx'}), '([0] * 8760, index=idx)\n', (24027, 24050), True, 'import pandas as pd\n'), ((28886, 28911), 'numpy.array', 'np.array', (['self.all_values'], {}), '(self.all_values)\n', (28894, 28911), True, 'import numpy as np\n'), ((29057, 29103), 'numpy.unique', 'np.unique', (['values'], {'axis': '(0)', 'return_inverse': '(True)'}), '(values, axis=0, return_inverse=True)\n', (29066, 29103), True, 'import numpy as np\n'), ((31640, 31671), 'numpy.array', 'np.array', (['[6, 0, 1, 2, 3, 4, 5]'], {}), '([6, 0, 1, 2, 3, 4, 5])\n', (31648, 31671), True, 'import numpy as np\n'), ((31696, 31742), 'numpy.roll', 'np.roll', (['ordered_day_n', 'self.startDayOfTheWeek'], {}), '(ordered_day_n, self.startDayOfTheWeek)\n', (31703, 31742), True, 'import numpy as np\n'), ((32904, 32929), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (32912, 32929), False, 'from datetime import datetime, timedelta\n'), ((37196, 37250), 'calendar.Calendar', 'calendar.Calendar', ([], {'firstweekday': 'self.startDayOfTheWeek'}), '(firstweekday=self.startDayOfTheWeek)\n', (37213, 37250), False, 'import calendar\n'), ((37536, 37577), 'datetime.datetime', 'datetime', (['date.year', 'date.month', 'date.day'], {}), '(date.year, date.month, date.day)\n', (37544, 37577), False, 'from datetime import datetime, timedelta\n'), ((8080, 8135), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'periods': '(168)', 'freq': '"""1H"""'}), "(start=start_date, periods=168, freq='1H')\n", (8093, 8135), True, 'import pandas as pd\n'), ((25856, 25872), 'pandas.concat', 'pd.concat', (['weeks'], {}), '(weeks)\n', (25865, 25872), True, 'import pandas as pd\n'), ((25937, 25957), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (25946, 25957), False, 'from datetime import datetime, timedelta\n'), ((29305, 29411), 'archetypal.check_unique_name', 'archetypal.check_unique_name', (['"""d"""', 'count_day', 'name', 'archetypal.settings.unique_schedules'], {'suffix': '(True)'}), "('d', count_day, name, archetypal.settings.\n unique_schedules, suffix=True)\n", (29333, 29411), False, 'import archetypal\n'), ((29637, 29686), 'archetypal.settings.unique_schedules.append', 'archetypal.settings.unique_schedules.append', (['name'], {}), '(name)\n', (29680, 29686), False, 'import archetypal\n'), ((30693, 30803), 'archetypal.check_unique_name', 'archetypal.check_unique_name', (['"""w"""', 'count_week', 'week_id', 'archetypal.settings.unique_schedules'], {'suffix': '(True)'}), "('w', count_week, week_id, archetypal.settings.\n unique_schedules, suffix=True)\n", (30721, 30803), False, 'import archetypal\n'), ((31063, 31115), 'archetypal.settings.unique_schedules.append', 'archetypal.settings.unique_schedules.append', (['week_id'], {}), '(week_id)\n', (31106, 31115), False, 'import archetypal\n'), ((36546, 36576), 'datetime.datetime.strptime', 'datetime.strptime', (['month', '"""%B"""'], {}), "(month, '%B')\n", (36563, 36576), False, 'from datetime import datetime, timedelta\n'), ((36778, 36801), 're.findall', 're.findall', (['"""\\\\d+"""', 'nth'], {}), "('\\\\d+', nth)\n", (36788, 36801), False, 'import re\n'), ((9506, 9521), 'pandas.concat', 'pd.concat', (['days'], {}), '(days)\n', (9515, 9521), True, 'import pandas as pd\n'), 
((25131, 25150), 'datetime.timedelta', 'timedelta', ([], {'hours': '(23)'}), '(hours=23)\n', (25140, 25150), False, 'from datetime import datetime, timedelta\n'), ((25310, 25333), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""168H"""'}), "(freq='168H')\n", (25320, 25333), True, 'import pandas as pd\n'), ((33010, 33036), 'itertools.groupby', 'itertools.groupby', (['(nws + 1)'], {}), '(nws + 1)\n', (33027, 33036), False, 'import itertools\n'), ((33704, 33722), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (33713, 33722), False, 'from datetime import datetime, timedelta\n'), ((35454, 35490), 'datetime.datetime.strptime', 'datetime.strptime', (['field', 'format_str'], {}), '(field, format_str)\n', (35471, 35490), False, 'from datetime import datetime, timedelta\n'), ((35573, 35614), 'datetime.datetime', 'datetime', (['self.year', 'date.month', 'date.day'], {}), '(self.year, date.month, date.day)\n', (35581, 35614), False, 'from datetime import datetime, timedelta\n'), ((42965, 42990), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (42973, 42990), False, 'from datetime import datetime, timedelta\n'), ((9104, 9124), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""D"""'}), "(freq='D')\n", (9114, 9124), True, 'import pandas as pd\n'), ((21922, 21942), 'archetypal.log', 'log', (['msg', 'lg.WARNING'], {}), '(msg, lg.WARNING)\n', (21925, 21942), False, 'from archetypal import log\n'), ((25108, 25128), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (25117, 25128), False, 'from datetime import datetime, timedelta\n'), ((43146, 43178), 'datetime.timedelta', 'timedelta', ([], {'days': 'days_to_speciald'}), '(days=days_to_speciald)\n', (43155, 43178), False, 'from datetime import datetime, timedelta\n'), ((43244, 43263), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-1)'}), '(hours=-1)\n', (43253, 43263), False, 'from datetime import datetime, timedelta\n'), ((15309, 15326), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15318, 15326), False, 'from datetime import datetime, timedelta\n'), ((15549, 15568), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-1)'}), '(hours=-1)\n', (15558, 15568), False, 'from datetime import datetime, timedelta\n'), ((15776, 15794), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (15785, 15794), False, 'from datetime import datetime, timedelta\n'), ((43217, 43241), 'datetime.timedelta', 'timedelta', ([], {'days': 'duration'}), '(days=duration)\n', (43226, 43241), False, 'from datetime import datetime, timedelta\n'), ((15526, 15546), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (15535, 15546), False, 'from datetime import datetime, timedelta\n'), ((17753, 17829), 'pandas.date_range', 'pd.date_range', ([], {'start': 'self.startDate', 'periods': '(525600)', 'closed': '"""left"""', 'freq': '"""T"""'}), "(start=self.startDate, periods=525600, closed='left', freq='T')\n", (17766, 17829), True, 'import pandas as pd\n'), ((33139, 33165), 'itertools.groupby', 'itertools.groupby', (['(nws + 1)'], {}), '(nws + 1)\n', (33156, 33165), False, 'import itertools\n'), ((18958, 18978), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (18967, 18978), False, 'from datetime import datetime, timedelta\n')]
|
import os
# import torch
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
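    """Serialize a full PyTorch model to a base64-encoded string."""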
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
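    """Decode a base64-encoded string back into a full PyTorch model."""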
inputrpc = bytes(modbase64.encode())
inputrpc_ = base64.b64decode(inputrpc)
loadmodel = torch.load(io.BytesIO(inputrpc_))
return loadmodel
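# Read a comma-separated list of base64-encoded models from the file passed as
# sys.argv[1], average their parameters element-wise, and print the averaged
# model re-encoded as base64.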
model_list = []
f = open(sys.argv[1], "r")
models = f.read().split(",")
f.close()
print(models)
for m in models:
model_list.append(base642fullmodel(m))
new_model_state = model_list[0].state_dict()
# sum the weights of the models
for m in model_list[1:]:
state_m = m.state_dict()
for key in state_m:
new_model_state[key] += state_m[key]
# average the model weights
for key in new_model_state:
new_model_state[key] /= len(model_list)
new_model = model_list[0]
new_model.load_state_dict(new_model_state)
output = fullmodel2base64(new_model)
print(output)
|
[
"base64.b64encode",
"io.BytesIO",
"base64.b64decode",
"torch.save"
] |
[((288, 300), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (298, 300), False, 'import io\n'), ((305, 330), 'torch.save', 'torch.save', (['model', 'buffer'], {}), '(model, buffer)\n', (315, 330), False, 'import torch\n'), ((490, 516), 'base64.b64decode', 'base64.b64decode', (['inputrpc'], {}), '(inputrpc)\n', (506, 516), False, 'import base64\n'), ((544, 565), 'io.BytesIO', 'io.BytesIO', (['inputrpc_'], {}), '(inputrpc_)\n', (554, 565), False, 'import io\n'), ((369, 389), 'base64.b64encode', 'base64.b64encode', (['bg'], {}), '(bg)\n', (385, 389), False, 'import base64\n')]
|
# coding: utf-8
import io
import os
import shutil
import tempfile
import unittest
from edo_client import WoClient
class ContentApi_DownloadTestCase(unittest.TestCase):
'''
- Basically this is to ensure
all the facilities related to HTTP range headers are working properly;
'''
@classmethod
def setUpClass(cls):
cls.file_size = 10 * (2 ** 20)
cls.download_url = 'http://192.168.1.115/docker/unittest/10mb.test'
cls.api_url = 'https://httpbin.org/redirect-to?url={}'.format(
cls.download_url
)
cls.empty_file_url = 'http://19192.168.3.11/docker/unittest/empty_file.bin'
# We're just testing some basic util functions,
# and don't want a real WoClient instance
cls.client = WoClient(
cls.api_url + '#',
'', '', '', '',
account='', instance=''
)
cls.tmpdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
def test_01_get_download_url(self):
self.assertEqual(
self.client.content.get_download_url(uid=''),
self.download_url,
'Should be able to extract direct download URL from 302 redirect'
)
def test_11_download_to_stream_all(self):
        '''Test: download the complete file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url
)
self.assertEqual(
self.file_size,
stream.tell(),
'Cursor should be at the end of stream after download'
)
stream.seek(0, os.SEEK_SET)
self.assertEqual(
self.file_size,
len(stream.read()),
'File length should be 10240 bytes'
)
def test_12_download_stream_first_byte(self):
        '''Test: download the first byte of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=0,
)
self.assertEqual(1, stream.tell(), 'Download first byte of file')
def test_13_download_stream_head_part(self):
        '''Test: download a portion from the start of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=(5 * (2 ** 20) - 1),
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_14_download_stream_tail_part(self):
        '''Test: starting from the middle, download the second half of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=(5 * (2 ** 20)), end=None,
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_15_download_partial(self):
        '''Test: starting from the middle, download a portion of the file to a stream'''
stream = io.BytesIO()
start, end = 1234, 54321
self.client.content.download_to_stream(
stream, url=self.download_url, start=start, end=end,
)
self.assertEqual(stream.tell(), end - start + 1)
def test_21_get_data_full_size(self):
        '''Test: read the complete file content'''
self.assertEqual(
self.file_size,
len(self.client.content.get_data(url=self.download_url)),
            '.get_data should be able to download the whole file by default',
)
def test_22_get_data_first_byte(self):
        '''Test: read the first byte of the file'''
self.assertEqual(
1,
len(self.client.content.get_data(url=self.download_url, size=1)),
'.get_data should be able to download the 1st byte of given file',
)
def test_23_get_data_head_part(self):
        '''Test: read a portion of the file content from the start'''
size = 5432
self.assertEqual(
size,
len(self.client.content.get_data(url=self.download_url, size=size)), # noqa E501
'.get_data should download the first {} bytes'.format(size),
)
def test_24_get_data_tail_part(self):
        '''Test: starting from the middle, read the second half of the file content'''
start = 12345
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size
)),
            '.get_data should download the last {} bytes'.format(size),
)
def test_25_get_data_partial(self):
        '''Test: starting from the middle, read a portion of the file content'''
start = 23451
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size,
)),
'.get_data should download {} bytes starting from offset {}'.format(size, start), # noqa E501
)
def test_31_download_to_file(self):
        '''Test: download the complete file to a local file'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.download_url)
self.assertEqual(self.file_size, os.stat(fpath).st_size)
def test_41_download_empty_file(self):
        '''Test: download an empty file to a local file'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.empty_file_url)
self.assertEqual(0, os.stat(fpath).st_size)
|
[
"os.close",
"io.BytesIO",
"tempfile.mkdtemp",
"edo_client.WoClient",
"shutil.rmtree",
"os.stat",
"tempfile.mkstemp"
] |
[((777, 845), 'edo_client.WoClient', 'WoClient', (["(cls.api_url + '#')", '""""""', '""""""', '""""""', '""""""'], {'account': '""""""', 'instance': '""""""'}), "(cls.api_url + '#', '', '', '', '', account='', instance='')\n", (785, 845), False, 'from edo_client import WoClient\n'), ((913, 931), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (929, 931), False, 'import tempfile\n'), ((986, 1011), 'shutil.rmtree', 'shutil.rmtree', (['cls.tmpdir'], {}), '(cls.tmpdir)\n', (999, 1011), False, 'import shutil\n'), ((1346, 1358), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1356, 1358), False, 'import io\n'), ((1894, 1906), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1904, 1906), False, 'import io\n'), ((2192, 2204), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2202, 2204), False, 'import io\n'), ((2496, 2508), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2506, 2508), False, 'import io\n'), ((2787, 2799), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2797, 2799), False, 'import io\n'), ((4813, 4846), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': 'self.tmpdir'}), '(dir=self.tmpdir)\n', (4829, 4846), False, 'import tempfile\n'), ((4855, 4867), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (4863, 4867), False, 'import os\n'), ((5110, 5143), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': 'self.tmpdir'}), '(dir=self.tmpdir)\n', (5126, 5143), False, 'import tempfile\n'), ((5152, 5164), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (5160, 5164), False, 'import os\n'), ((4996, 5010), 'os.stat', 'os.stat', (['fpath'], {}), '(fpath)\n', (5003, 5010), False, 'import os\n'), ((5282, 5296), 'os.stat', 'os.stat', (['fpath'], {}), '(fpath)\n', (5289, 5296), False, 'import os\n')]
|
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
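                # with the language-model head enabled, also predict the target word index (cross entropy)
                # in addition to matching its embedding (negative cosine similarity)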
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
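                    # fmodel/diffopt come from `higher`: the inner-loop updates on the source language stay
                    # differentiable, so the target-language loss below backpropagates to the original parameters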
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
|
[
"higher.innerloop_ctx",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.average",
"torch.backends.cudnn.flags",
"torch.nn.functional.cosine_similarity",
"os.path.join",
"gc.collect",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"leap.Leap",
"numpy.arange"
] |
[((287, 416), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n', (329, 416), False, 'import torch\n'), ((519, 543), 'numpy.arange', 'np.arange', (['args.n_epochs'], {}), '(args.n_epochs)\n', (528, 543), True, 'import numpy as np\n'), ((3952, 4087), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['meta_optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(meta_optimizer, factor=args.\n lr_decay, patience=args.patience, threshold=args.threshold)\n', (3994, 4087), False, 'import torch\n'), ((4187, 4216), 'numpy.arange', 'np.arange', (['args.n_meta_epochs'], {}), '(args.n_meta_epochs)\n', (4196, 4216), True, 'import numpy as np\n'), ((7560, 7571), 'leap.Leap', 'Leap', (['model'], {}), '(model)\n', (7564, 7571), False, 'from leap import Leap\n'), ((7675, 7810), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['meta_optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(meta_optimizer, factor=args.\n lr_decay, patience=args.patience, threshold=args.threshold)\n', (7717, 7810), False, 'import torch\n'), ((7910, 7939), 'numpy.arange', 'np.arange', (['args.n_meta_epochs'], {}), '(args.n_meta_epochs)\n', (7919, 7939), True, 'import numpy as np\n'), ((637, 660), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (646, 660), True, 'import numpy as np\n'), ((3135, 3159), 'numpy.average', 'np.average', (['valid_cosine'], {}), '(valid_cosine)\n', (3145, 3159), True, 'import numpy as np\n'), ((4226, 4238), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4236, 4238), False, 'import gc\n'), ((8056, 8084), 'numpy.arange', 'np.arange', (['args.n_meta_batch'], {}), '(args.n_meta_batch)\n', (8065, 8084), True, 'import numpy as np\n'), ((1829, 1844), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1842, 1844), False, 'import torch\n'), ((1871, 1894), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (1880, 1894), True, 'import numpy as np\n'), ((3247, 3267), 'numpy.average', 'np.average', (['valid_ce'], {}), '(valid_ce)\n', (3257, 3267), True, 'import numpy as np\n'), ((4341, 4383), 'torch.backends.cudnn.flags', 'torch.backends.cudnn.flags', ([], {'benchmark': '(True)'}), '(benchmark=True)\n', (4367, 4383), False, 'import torch\n'), ((4415, 4443), 'numpy.arange', 'np.arange', (['args.n_meta_batch'], {}), '(args.n_meta_batch)\n', (4424, 4443), True, 'import numpy as np\n'), ((5805, 5820), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5818, 5820), False, 'import torch\n'), ((5847, 5870), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (5856, 5870), True, 'import numpy as np\n'), ((6894, 6925), 'numpy.average', 'np.average', (['source_valid_cosine'], {}), '(source_valid_cosine)\n', (6904, 6925), True, 'import numpy as np\n'), ((6927, 6958), 'numpy.average', 'np.average', (['target_valid_cosine'], {}), '(target_valid_cosine)\n', (6937, 6958), True, 'import numpy as np\n'), ((8308, 8336), 'numpy.arange', 'np.arange', (['args.n_task_steps'], {}), '(args.n_task_steps)\n', (8317, 8336), True, 'import numpy as np\n'), ((9065, 9093), 'numpy.arange', 'np.arange', (['args.n_task_steps'], {}), '(args.n_task_steps)\n', 
(9074, 9093), True, 'import numpy as np\n'), ((9812, 9827), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9825, 9827), False, 'import torch\n'), ((9854, 9877), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (9863, 9877), True, 'import numpy as np\n'), ((10901, 10932), 'numpy.average', 'np.average', (['source_valid_cosine'], {}), '(source_valid_cosine)\n', (10911, 10932), True, 'import numpy as np\n'), ((10934, 10965), 'numpy.average', 'np.average', (['target_valid_cosine'], {}), '(target_valid_cosine)\n', (10944, 10965), True, 'import numpy as np\n'), ((1420, 1469), 'torch.nn.functional.cross_entropy', 'nn.functional.cross_entropy', (['pred_ind', 'train_inds'], {}), '(pred_ind, train_inds)\n', (1447, 1469), True, 'import torch.nn as nn\n'), ((3558, 3597), 'os.path.join', 'os.path.join', (['args.save_dir', '"""model.pt"""'], {}), "(args.save_dir, 'model.pt')\n", (3570, 3597), False, 'import os\n'), ((7244, 7288), 'os.path.join', 'os.path.join', (['args.save_dir', '"""maml_model.pt"""'], {}), "(args.save_dir, 'maml_model.pt')\n", (7256, 7288), False, 'import os\n'), ((11251, 11295), 'os.path.join', 'os.path.join', (['args.save_dir', '"""leap_model.pt"""'], {}), "(args.save_dir, 'leap_model.pt')\n", (11263, 11295), False, 'import os\n'), ((4609, 4681), 'higher.innerloop_ctx', 'higher.innerloop_ctx', (['model', 'inner_optimizer'], {'copy_initial_weights': '(False)'}), '(model, inner_optimizer, copy_initial_weights=False)\n', (4629, 4681), False, 'import higher\n'), ((4743, 4772), 'numpy.arange', 'np.arange', (['args.n_inner_batch'], {}), '(args.n_inner_batch)\n', (4752, 4772), True, 'import numpy as np\n'), ((1495, 1551), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'train_targets'], {}), '(pred_emb, train_targets)\n', (1526, 1551), True, 'import torch.nn as nn\n'), ((1672, 1728), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'train_targets'], {}), '(pred_emb, train_targets)\n', (1703, 1728), True, 'import torch.nn as nn\n'), ((2765, 2814), 'torch.nn.functional.cross_entropy', 'nn.functional.cross_entropy', (['pred_ind', 'valid_inds'], {}), '(pred_ind, valid_inds)\n', (2792, 2814), True, 'import torch.nn as nn\n'), ((2997, 3053), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'valid_targets'], {}), '(pred_emb, valid_targets)\n', (3028, 3053), True, 'import torch.nn as nn\n'), ((6201, 6264), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_valid_targets'], {}), '(pred_emb, source_valid_targets)\n', (6232, 6264), True, 'import torch.nn as nn\n'), ((6717, 6780), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_valid_targets'], {}), '(pred_emb, target_valid_targets)\n', (6748, 6780), True, 'import torch.nn as nn\n'), ((8699, 8762), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_train_targets'], {}), '(pred_emb, source_train_targets)\n', (8730, 8762), True, 'import torch.nn as nn\n'), ((9507, 9570), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_train_targets'], {}), '(pred_emb, target_train_targets)\n', (9538, 9570), True, 'import torch.nn as nn\n'), ((10208, 10271), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_valid_targets'], {}), '(pred_emb, source_valid_targets)\n', (10239, 10271), 
True, 'import torch.nn as nn\n'), ((10724, 10787), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_valid_targets'], {}), '(pred_emb, target_valid_targets)\n', (10755, 10787), True, 'import torch.nn as nn\n'), ((5624, 5687), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_train_targets'], {}), '(pred_emb, target_train_targets)\n', (5655, 5687), True, 'import torch.nn as nn\n'), ((5120, 5183), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_train_targets'], {}), '(pred_emb, source_train_targets)\n', (5151, 5183), True, 'import torch.nn as nn\n')]
|
"""
pyexcel_xlsw
~~~~~~~~~~~~~~~~~~~
The lower level xls file format handler using xlwt
:copyright: (c) 2016-2021 by Onni Software Ltd
:license: New BSD License
"""
import datetime
import xlrd
from xlwt import XFStyle, Workbook
from pyexcel_io import constants
from pyexcel_io.plugin_api import IWriter, ISheetWriter
DEFAULT_DATE_FORMAT = "DD/MM/YY"
DEFAULT_TIME_FORMAT = "HH:MM:SS"
DEFAULT_LONGTIME_FORMAT = "[HH]:MM:SS"
DEFAULT_DATETIME_FORMAT = "%s %s" % (DEFAULT_DATE_FORMAT, DEFAULT_TIME_FORMAT)
EMPTY_SHEET_NOT_ALLOWED = "xlwt does not support a book without any sheets"
class XLSheetWriter(ISheetWriter):
"""
xls sheet writer
"""
def __init__(self, xls_book, xls_sheet, sheet_name):
if sheet_name is None:
sheet_name = constants.DEFAULT_SHEET_NAME
self._xls_book = xls_book
self._xls_sheet = xls_sheet
self._xls_sheet = self._xls_book.add_sheet(sheet_name)
self.current_row = 0
def write_row(self, array):
"""
write a row into the file
"""
for i, value in enumerate(array):
style = None
tmp_array = []
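            # convert date/time values to Excel serial numbers and attach a matching number-format style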
if isinstance(value, datetime.datetime):
tmp_array = [
value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
]
value = xlrd.xldate.xldate_from_datetime_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATETIME_FORMAT
elif isinstance(value, datetime.timedelta):
value = value.days + value.seconds / 86_400
style = XFStyle()
style.num_format_str = DEFAULT_LONGTIME_FORMAT
elif isinstance(value, datetime.date):
tmp_array = [value.year, value.month, value.day]
value = xlrd.xldate.xldate_from_date_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATE_FORMAT
elif isinstance(value, datetime.time):
tmp_array = [value.hour, value.minute, value.second]
value = xlrd.xldate.xldate_from_time_tuple(tmp_array)
style = XFStyle()
style.num_format_str = DEFAULT_TIME_FORMAT
if style:
self._xls_sheet.write(self.current_row, i, value, style)
else:
self._xls_sheet.write(self.current_row, i, value)
self.current_row += 1
def close(self):
pass
class XLSWriter(IWriter):
"""
xls writer
"""
def __init__(
self,
file_alike_object,
_, # file_type not used
encoding="ascii",
style_compression=2,
**keywords,
):
self.file_alike_object = file_alike_object
self.work_book = Workbook(
style_compression=style_compression, encoding=encoding
)
def create_sheet(self, name):
return XLSheetWriter(self.work_book, None, name)
def write(self, incoming_dict):
if incoming_dict:
IWriter.write(self, incoming_dict)
else:
raise NotImplementedError(EMPTY_SHEET_NOT_ALLOWED)
def close(self):
"""
        This call actually saves the file
"""
self.work_book.save(self.file_alike_object)
|
[
"xlrd.xldate.xldate_from_time_tuple",
"xlwt.XFStyle",
"pyexcel_io.plugin_api.IWriter.write",
"xlrd.xldate.xldate_from_datetime_tuple",
"xlrd.xldate.xldate_from_date_tuple",
"xlwt.Workbook"
] |
[((2968, 3032), 'xlwt.Workbook', 'Workbook', ([], {'style_compression': 'style_compression', 'encoding': 'encoding'}), '(style_compression=style_compression, encoding=encoding)\n', (2976, 3032), False, 'from xlwt import XFStyle, Workbook\n'), ((3222, 3256), 'pyexcel_io.plugin_api.IWriter.write', 'IWriter.write', (['self', 'incoming_dict'], {}), '(self, incoming_dict)\n', (3235, 3256), False, 'from pyexcel_io.plugin_api import IWriter, ISheetWriter\n'), ((1486, 1538), 'xlrd.xldate.xldate_from_datetime_tuple', 'xlrd.xldate.xldate_from_datetime_tuple', (['tmp_array', '(0)'], {}), '(tmp_array, 0)\n', (1524, 1538), False, 'import xlrd\n'), ((1563, 1572), 'xlwt.XFStyle', 'XFStyle', ([], {}), '()\n', (1570, 1572), False, 'from xlwt import XFStyle, Workbook\n'), ((1776, 1785), 'xlwt.XFStyle', 'XFStyle', ([], {}), '()\n', (1783, 1785), False, 'from xlwt import XFStyle, Workbook\n'), ((1989, 2037), 'xlrd.xldate.xldate_from_date_tuple', 'xlrd.xldate.xldate_from_date_tuple', (['tmp_array', '(0)'], {}), '(tmp_array, 0)\n', (2023, 2037), False, 'import xlrd\n'), ((2062, 2071), 'xlwt.XFStyle', 'XFStyle', ([], {}), '()\n', (2069, 2071), False, 'from xlwt import XFStyle, Workbook\n'), ((2275, 2320), 'xlrd.xldate.xldate_from_time_tuple', 'xlrd.xldate.xldate_from_time_tuple', (['tmp_array'], {}), '(tmp_array)\n', (2309, 2320), False, 'import xlrd\n'), ((2345, 2354), 'xlwt.XFStyle', 'XFStyle', ([], {}), '()\n', (2352, 2354), False, 'from xlwt import XFStyle, Workbook\n')]
|
# Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.
Inspired by the GLPK:s model color.mod
'''
COLOR, Graph Coloring Problem
Written in GNU MathProg by <NAME> <<EMAIL>>
Given an undirected loopless graph G = (V, E), where V is a set of
nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to
find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set
of colors whose cardinality is as small as possible, such that
F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must
be assigned different colors.
'''
This is a port of my old OR-tools CP solver coloring_ip.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
model = cp.CpModel()
# max number of colors
# [we know that 4 suffices for normal maps]
nc = 5
# number of nodes
n = 11
# set of nodes
V = list(range(n))
num_edges = 20
#
# Neighbours
#
# This data correspond to the instance myciel3.col from:
# http://mat.gsia.cmu.edu/COLOR/instances.html
#
# Note: 1-based (adjusted below)
E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
[3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
[8, 11], [9, 11], [10, 11]]
#
# declare variables
#
# x[i,c] = 1 means that node i is assigned color c
x = {}
for v in V:
for j in range(nc):
x[v, j] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, j))
# u[c] = 1 means that color c is used, i.e. assigned to some node
u = [model.NewIntVar(0, 1, 'u[%i]' % i) for i in range(nc)]
# number of colors used, to minimize
num_colors = model.NewIntVar(0,nc, "num_colors")
model.Add(num_colors == sum(u))
#
# constraints
#
# each node must be assigned exactly one color
for i in V:
model.Add(sum([x[i, c] for c in range(nc)]) == 1)
# adjacent nodes cannot be assigned the same color
# (and adjust to 0-based)
for i in range(num_edges):
for c in range(nc):
model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])
# objective
model.Minimize(num_colors)
#
# solution
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print()
print('number of colors:', solver.Value(num_colors))
print('colors used:', [solver.Value(u[i]) for i in range(nc)])
print()
for v in V:
print('v%i' % v, ' color ', end=' ')
for c in range(nc):
if solver.Value(x[v, c]) == 1:
print(c)
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
|
[
"ortools.sat.python.cp_model.CpSolver",
"ortools.sat.python.cp_model.CpModel"
] |
[((1497, 1509), 'ortools.sat.python.cp_model.CpModel', 'cp.CpModel', ([], {}), '()\n', (1507, 1509), True, 'from ortools.sat.python import cp_model as cp\n'), ((2902, 2915), 'ortools.sat.python.cp_model.CpSolver', 'cp.CpSolver', ([], {}), '()\n', (2913, 2915), True, 'from ortools.sat.python import cp_model as cp\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/27
# @Author : Jiaqi&Zecheng
# @File : sem_utils.py
# @Software: PyCharm
"""
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
query = [token.lemma_ for token in nlp(query)]
table_name = [nlp(token)[0].lemma_ for token in table_name]
if query in table_name:
return True
return False
def is_partial_match(query, table_names):
query = nlp(query)[0].lemma_
table_names = [[token.lemma_ for token in nlp(names)] for names in table_names]
same_count = 0
result = None
for names in table_names:
if query in names:
same_count += 1
result = names
return result if same_count == 1 else False
def multi_option(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
re = is_partial_match(question[i][0], names)
if re is not False:
return re
return False
def multi_equal(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
if question[i] == names:
return i
return False
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
# first try if there are other table
for t_ind, t_val in enumerate(question_arg_type):
if t_val == ['table']:
return names[origin_name.index(question_arg[t_ind])]
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question_arg):
if len(ground_col_labels) == 0:
for n in names:
if partial_match(question_arg[i][0], n) is True:
return n
else:
for n_id, n in enumerate(names):
if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
return n
if len(ground_col_labels) > 0:
return names[ground_col_labels[0]]
else:
return names[0]
def alter_column0(datas):
"""
    Attach the wildcard column C(0) ('*') to a concrete table inferred from the question
:return: model_result_replace
"""
zero_count = 0
count = 0
result = []
for d in datas:
if 'C(0)' in d['model_result']:
            pattern = regex.compile(r'C\(.*?\) T\(.*?\)')
result_pattern = list(set(pattern.findall(d['model_result'])))
ground_col_labels = []
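            # tables already paired with concrete (non-C(0)) columns in the predicted sketch constrain the table choice for '*'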
for pa in result_pattern:
pa = pa.split(' ')
if pa[0] != 'C(0)':
index = int(pa[1][2:-1])
ground_col_labels.append(index)
ground_col_labels = list(set(ground_col_labels))
question_arg_type = d['question_arg_type']
question_arg = d['question_arg']
table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
d['table_names']]
count += 1
easy_flag = False
for q_ind, q in enumerate(d['question_arg']):
q_str = " ".join(" ".join(x) for x in d['question_arg'])
if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
easy_flag = True
if easy_flag:
# check for the last one is a table word
for q_ind, q in enumerate(d['question_arg']):
if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
re = multi_equal(question_arg_type, q_ind, ['table'], 2)
if re is not False:
# This step work for the number of [table] example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 2)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
# This step work for the number of [table] example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
pass
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
zero_count += 1
break
else:
M_OP = False
for q_ind, q in enumerate(d['question_arg']):
if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
question_arg_type[q_ind] == ['M_OP']:
M_OP = True
re = multi_equal(question_arg_type, q_ind, ['table'], 3)
if re is not False:
# This step work for the number of [table] example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 3)
if re is not False:
table_result = re
# print(table_result)
result.append((d['query'], d['question'], table_result, d))
pass
else:
# zero_count += 1
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
# This step work for the number of [table] example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
pass
if M_OP is False:
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
for re in result:
table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
if re[2] in table_names:
re[3]['rule_count'] = table_names.index(re[2])
else:
re[3]['rule_count'] = origin_table_names.index(re[2])
for data in datas:
if 'rule_count' in data:
str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
            replace_result = regex.sub(r'C\(0\) T\(.\)', str_replace, data['model_result'])
data['model_result_replace'] = replace_result
else:
data['model_result_replace'] = data['model_result']
|
[
"spacy.load",
"re.sub",
"nltk.stem.WordNetLemmatizer",
"re.compile"
] |
[((321, 340), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (338, 340), False, 'from nltk.stem import WordNetLemmatizer\n'), ((347, 402), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {'disable': "['parser', 'ner']"}), "('en_core_web_sm', disable=['parser', 'ner'])\n", (357, 402), False, 'import spacy\n'), ((2535, 2573), 're.compile', 'regex.compile', (['"""C\\\\(.*?\\\\) T\\\\(.*?\\\\)"""'], {}), "('C\\\\(.*?\\\\) T\\\\(.*?\\\\)')\n", (2548, 2573), True, 'import re as regex\n'), ((9508, 9573), 're.sub', 'regex.sub', (['"""C\\\\(0\\\\) T\\\\(.\\\\)"""', 'str_replace', "data['model_result']"], {}), "('C\\\\(0\\\\) T\\\\(.\\\\)', str_replace, data['model_result'])\n", (9517, 9573), True, 'import re as regex\n')]
|
from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission
show_bp = Blueprint('show_bp', __name__)
show_bp.route('/', methods=['GET'])(shows)
show_bp.route('/create', methods=['GET'])(create_shows)
show_bp.route('/create', methods=['POST'])(create_show_submission)
|
[
"flask.Blueprint"
] |
[((113, 143), 'flask.Blueprint', 'Blueprint', (['"""show_bp"""', '__name__'], {}), "('show_bp', __name__)\n", (122, 143), False, 'from flask import Blueprint\n')]
|
#!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that finds and runs a binary by looking in the likely locations."""
import os
import subprocess
import sys
def run_command(args):
"""Runs a program from the command line and returns stdout.
Args:
args: Command line to run, as a list of string parameters. args[0] is the
binary to run.
Returns:
stdout from the program, as a single string.
Raises:
Exception: the program exited with a nonzero return code.
"""
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
  if proc.returncode != 0:
raise Exception('command "%s" failed: %s' % (args, stderr))
return stdout
def find_path_to_program(program):
"""Returns path to an existing program binary.
Args:
program: Basename of the program to find (e.g., 'render_pictures').
Returns:
Absolute path to the program binary, as a string.
Raises:
Exception: unable to find the program binary.
"""
trunk_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
possible_paths = [os.path.join(trunk_path, 'out', 'Release', program),
os.path.join(trunk_path, 'out', 'Debug', program),
os.path.join(trunk_path, 'out', 'Release',
program + '.exe'),
os.path.join(trunk_path, 'out', 'Debug',
program + '.exe')]
for try_path in possible_paths:
if os.path.isfile(try_path):
return try_path
raise Exception('cannot find %s in paths %s; maybe you need to '
'build %s?' % (program, possible_paths, program))
|
[
"os.path.isfile",
"subprocess.Popen",
"os.path.dirname",
"os.path.join"
] |
[((652, 722), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (668, 722), False, 'import subprocess\n'), ((1373, 1424), 'os.path.join', 'os.path.join', (['trunk_path', '"""out"""', '"""Release"""', 'program'], {}), "(trunk_path, 'out', 'Release', program)\n", (1385, 1424), False, 'import os\n'), ((1446, 1495), 'os.path.join', 'os.path.join', (['trunk_path', '"""out"""', '"""Debug"""', 'program'], {}), "(trunk_path, 'out', 'Debug', program)\n", (1458, 1495), False, 'import os\n'), ((1517, 1577), 'os.path.join', 'os.path.join', (['trunk_path', '"""out"""', '"""Release"""', "(program + '.exe')"], {}), "(trunk_path, 'out', 'Release', program + '.exe')\n", (1529, 1577), False, 'import os\n'), ((1632, 1690), 'os.path.join', 'os.path.join', (['trunk_path', '"""out"""', '"""Debug"""', "(program + '.exe')"], {}), "(trunk_path, 'out', 'Debug', program + '.exe')\n", (1644, 1690), False, 'import os\n'), ((1766, 1790), 'os.path.isfile', 'os.path.isfile', (['try_path'], {}), '(try_path)\n', (1780, 1790), False, 'import os\n'), ((1270, 1295), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1285, 1295), False, 'import os\n')]
|
import os
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud.rest import ApiException
from shutil import copyfile
words_api = asposewordscloud.WordsApi(client_id = '####-####-####-####-####', client_secret = '##################')
file_name = 'test_doc.docx'
# Upload original document to cloud storage.
my_var1 = open(file_name, 'rb')
my_var2 = file_name
upload_file_request = asposewordscloud.models.requests.UploadFileRequest(file_content=my_var1, path=my_var2)
words_api.upload_file(upload_file_request)
# Calls AcceptAllRevisions method for document in cloud.
my_var3 = file_name
request = asposewordscloud.models.requests.AcceptAllRevisionsRequest(name=my_var3)
words_api.accept_all_revisions(request)
|
[
"asposewordscloud.models.requests.UploadFileRequest",
"asposewordscloud.models.requests.AcceptAllRevisionsRequest"
] |
[((398, 488), 'asposewordscloud.models.requests.UploadFileRequest', 'asposewordscloud.models.requests.UploadFileRequest', ([], {'file_content': 'my_var1', 'path': 'my_var2'}), '(file_content=my_var1,\n path=my_var2)\n', (448, 488), False, 'import asposewordscloud\n'), ((616, 688), 'asposewordscloud.models.requests.AcceptAllRevisionsRequest', 'asposewordscloud.models.requests.AcceptAllRevisionsRequest', ([], {'name': 'my_var3'}), '(name=my_var3)\n', (674, 688), False, 'import asposewordscloud\n')]
|
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import logging.handlers
log = logging.getLogger('imc')
console = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
file_handler = logging.handlers.RotatingFileHandler(
filename, maxBytes=10*1024*1024, backupCount=5)
log.addHandler(file_handler)
def set_log_level(level=logging.DEBUG):
"""
Allows setting log level
Args:
level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..)
Returns:
None
Example:
from imcsdk import set_log_level
import logging
set_log_level(logging.INFO)
"""
log.setLevel(level)
console.setLevel(level)
set_log_level(logging.DEBUG)
log.addHandler(console)
if os.path.exists('/tmp/imcsdk_debug'):
enable_file_logging()
__author__ = 'Cisco Systems'
__email__ = '<EMAIL>'
__version__ = '0.9.11'
|
[
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"logging.Formatter",
"logging.handlers.RotatingFileHandler"
] |
[((636, 660), 'logging.getLogger', 'logging.getLogger', (['"""imc"""'], {}), "('imc')\n", (653, 660), False, 'import logging\n'), ((671, 694), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (692, 694), False, 'import logging\n'), ((707, 780), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (724, 780), False, 'import logging\n'), ((1464, 1499), 'os.path.exists', 'os.path.exists', (['"""/tmp/imcsdk_debug"""'], {}), "('/tmp/imcsdk_debug')\n", (1478, 1499), False, 'import os\n'), ((887, 979), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['filename'], {'maxBytes': '(10 * 1024 * 1024)', 'backupCount': '(5)'}), '(filename, maxBytes=10 * 1024 * 1024,\n backupCount=5)\n', (923, 979), False, 'import logging\n')]
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from datetime import datetime
from api_request import Weather
builder = Gtk.Builder()
builder.add_from_file('./glade/main.glade')
class Handler:
def __init__(self, *args, **kwargs):
super(Handler, self).__init__(*args, **kwargs)
self.weather_instance = Weather()
self.entry = builder.get_object('entry')
self.btn_search = builder.get_object('btn_search')
self.city_name = builder.get_object('city_name')
self.city_text = builder.get_object('city_text')
self.main_temp = builder.get_object('main_temp')
self.which_temp_simbol_is = 'Celsius'
self.weekday_name = builder.get_object('weekday_name')
self.weekday_name_today = builder.get_object('weekday_name_today')
self.temp_today_max = builder.get_object('today_max')
self.temp_today_min = builder.get_object('today_min')
self.hour_1_now = builder.get_object('hour_1_now')
self.hour_1_chance_of_rain = builder.get_object('hour_1_chance_of_rain')
self.hour_1_icon = builder.get_object('hour_1_icon')
self.hour_1_temp = builder.get_object('hour_1_temp')
self.hour_2_clock = builder.get_object('hour_2_clock')
self.hour_2_chance_of_rain = builder.get_object('hour_2_chance_of_rain')
self.hour_2_icon = builder.get_object('hour_2_icon')
self.hour_2_temp = builder.get_object('hour_2_temp')
self.hour_3_clock = builder.get_object('hour_3_clock')
self.hour_3_chance_of_rain = builder.get_object('hour_3_chance_of_rain')
self.hour_3_icon = builder.get_object('hour_3_icon')
self.hour_3_temp = builder.get_object('hour_3_temp')
self.hour_4_clock = builder.get_object('hour_4_clock')
self.hour_4_chance_of_rain = builder.get_object('hour_4_chance_of_rain')
self.hour_4_icon = builder.get_object('hour_4_icon')
self.hour_4_temp = builder.get_object('hour_4_temp')
self.hour_5_clock = builder.get_object('hour_5_clock')
self.hour_5_chance_of_rain = builder.get_object('hour_5_chance_of_rain')
self.hour_5_icon = builder.get_object('hour_5_icon')
self.hour_5_temp = builder.get_object('hour_5_temp')
self.day_1_name = builder.get_object('day_1_name')
self.day_1_icon = builder.get_object('day_1_icon')
self.day_1_temp_max = builder.get_object('day_1_temp_max')
self.day_1_temp_min = builder.get_object('day_1_temp_min')
self.day_2_name = builder.get_object('day_2_name')
self.day_2_icon = builder.get_object('day_2_icon')
self.day_2_temp_max = builder.get_object('day_2_temp_max')
self.day_2_temp_min = builder.get_object('day_2_temp_min')
def onDestroy(self, *args):
Gtk.main_quit()
def on_button_search_clicked(self, widget):
# now.strftime('%A') to know how weekday is
import re, unicodedata
word = unicodedata.normalize('NFD', self.entry.get_text())
word = re.sub('[\u0300-\u036f]', '', word)
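        # NFD normalization plus removal of combining marks strips accents from the city name before querying the API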
try:
now = datetime.now()
current_hour = int(now.strftime('%H'))
current_search = self.weather_instance.get_weather_info(word, current_hour=current_hour)
self.city_name.set_text(current_search['location']['name'] + '/' + current_search['location']['region'])
self.city_text.set_text(current_search['current']['condition']['text'])
self.main_temp.set_text(str(int(current_search['current']['temp_c'])) + '°')
weekday = now.strftime('%A')
self.weekday_name.set_text(weekday)
self.weekday_name_today.set_text('Today')
today_max_temp = str(int(current_search['forecast']['forecastday'][0]['day']['maxtemp_c']))
today_min_temp = str(int(current_search['forecast']['forecastday'][0]['day']['mintemp_c']))
self.temp_today_max.set_text(today_max_temp)
self.temp_today_min.set_text(today_min_temp)
### Hours informations ######################################################
def is_available(increase: int) -> bool:
return not (current_hour + increase > 23)
if is_available(0):
self.hour_1_now.set_text('Now')
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour]['chance_of_rain'])>0:
self.hour_1_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_1_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour]['temp_c'])))
else:
self.hour_1_now.set_text('unavailable')
self.hour_1_temp.set_text('tomorrow')
self.hour_1_icon.set_from_file('./images/hour_icon/1.png')
if is_available(1):
self.hour_2_clock.set_text(str(int(now.strftime('%I'))+1) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+1]['chance_of_rain'])>0:
                    self.hour_2_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_2_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+1]['temp_c'])))
else:
self.hour_2_clock.set_text('unavailable')
self.hour_2_temp.set_text('tomorrow')
self.hour_2_icon.set_from_file('./images/hour_icon/2.png')
if is_available(2):
self.hour_3_clock.set_text(str(int(now.strftime('%I'))+2) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['chance_of_rain'])>0:
self.hour_3_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_3_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['temp_c'])))
else:
self.hour_3_clock.set_text('unavailable')
self.hour_3_temp.set_text('tomorrow')
self.hour_3_icon.set_from_file('./images/hour_icon/3.png')
if is_available(3):
self.hour_4_clock.set_text(str(int(now.strftime('%I'))+3) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['chance_of_rain'])>0:
self.hour_4_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_4_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['temp_c'])))
else:
self.hour_4_clock.set_text('unavailable')
self.hour_4_temp.set_text('tomorrow')
self.hour_4_icon.set_from_file('./images/hour_icon/4.png')
if is_available(4):
self.hour_5_clock.set_text(str(int(now.strftime('%I'))+4) + now.strftime('%p'))
                if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['chance_of_rain'])>0:
self.hour_5_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_5_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['temp_c'])))
else:
self.hour_5_clock.set_text('unavailable')
self.hour_5_temp.set_text('tomorrow')
self.hour_5_icon.set_from_file('./images/hour_icon/5.png')
### days informations ######################################################
self.day_1_name.set_text(datetime.fromisoformat(current_search['forecast']['forecastday'][1]['date']).strftime('%A'))
self.day_1_icon.set_from_file('./images/days_icon/1.png')
self.day_1_temp_max.set_text(str(int(current_search['forecast']['forecastday'][1]['day']['maxtemp_c'])))
self.day_1_temp_min.set_text(str(int(current_search['forecast']['forecastday'][1]['day']['mintemp_c'])))
self.day_2_name.set_text(datetime.fromisoformat(current_search['forecast']['forecastday'][2]['date']).strftime('%A'))
self.day_2_icon.set_from_file('./images/days_icon/2.png')
self.day_2_temp_max.set_text(str(int(current_search['forecast']['forecastday'][2]['day']['maxtemp_c'])))
self.day_2_temp_min.set_text(str(int(current_search['forecast']['forecastday'][2]['day']['mintemp_c'])))
except Exception as error:
print(f'error {error}')
builder.connect_signals(Handler())
window = builder.get_object('window')
window.show_all()
Gtk.main()
|
[
"gi.repository.Gtk.main_quit",
"gi.repository.Gtk.Builder",
"api_request.Weather",
"gi.require_version",
"datetime.datetime.now",
"datetime.datetime.fromisoformat",
"re.sub",
"gi.repository.Gtk.main"
] |
[((10, 42), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (28, 42), False, 'import gi\n'), ((147, 160), 'gi.repository.Gtk.Builder', 'Gtk.Builder', ([], {}), '()\n', (158, 160), False, 'from gi.repository import Gtk\n'), ((8964, 8974), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (8972, 8974), False, 'from gi.repository import Gtk\n'), ((363, 372), 'api_request.Weather', 'Weather', ([], {}), '()\n', (370, 372), False, 'from api_request import Weather\n'), ((2916, 2931), 'gi.repository.Gtk.main_quit', 'Gtk.main_quit', ([], {}), '()\n', (2929, 2931), False, 'from gi.repository import Gtk\n'), ((3163, 3188), 're.sub', 're.sub', (['"""[̀-ͯ]"""', '""""""', 'word'], {}), "('[̀-ͯ]', '', word)\n", (3169, 3188), False, 'import re, unicodedata\n'), ((3230, 3244), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3242, 3244), False, 'from datetime import datetime\n'), ((7960, 8036), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["current_search['forecast']['forecastday'][1]['date']"], {}), "(current_search['forecast']['forecastday'][1]['date'])\n", (7982, 8036), False, 'from datetime import datetime\n'), ((8395, 8471), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["current_search['forecast']['forecastday'][2]['date']"], {}), "(current_search['forecast']['forecastday'][2]['date'])\n", (8417, 8471), False, 'from datetime import datetime\n')]
|
import re
from itertools import combinations
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if func(i) == int(r[0]):
print(f"test {test_counter} passed")
else:
print(func(i))
print(r[0])
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
addition = data[0]
for i in data[1:]:
addition = f"[{addition},{i}]"
while (t := self.reduction(addition)) != addition:
addition = t
return self.calc_magnitude(addition)
def reduction(self, s: str):
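        # one reduction step: explode the leftmost pair nested inside four pairs,
        # otherwise split the first regular number greater than 9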
# explode
depth = 0
for i, v in enumerate(s):
if v.isnumeric() and depth > 4:
pair_close_pos = s[i:].index("]")
before_pair, pair, after_pair = s[: i - 1], s[i : i + pair_close_pos], s[i + pair_close_pos + 1 :]
pair = [*map(int, pair.split(","))]
before_pair = self.add_exploded_pair(before_pair, pair, 0)
after_pair = self.add_exploded_pair(after_pair, pair, 1)
return before_pair + "0" + after_pair
else:
depth += [1, -1]["[]".index(v)] if v in "[]" else 0
# split
large_regulars = [i for i in re.findall(r"\d+", s) if int(i) > 9]
if len(large_regulars):
reg = large_regulars[0]
reg_pos = s.index(reg)
before_reg, after_reg = s[:reg_pos], s[reg_pos + len(reg) :]
reg = int(reg)
elem_left = reg // 2
elem_right = reg - elem_left
s = before_reg + f"[{elem_left},{elem_right}]" + after_reg
return s
def add_exploded_pair(self, line, pair, pair_index):
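        # add the exploded pair's element to the nearest regular number on that side
        # (pair_index 0 = text before the pair, 1 = text after the pair)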
all_regulars = re.findall(r"\d+", line)
if len(all_regulars):
reg = all_regulars[pair_index - 1]
reg_pos = [line.rindex, line.index][pair_index](reg)
line = line[:reg_pos] + str(int(reg) + pair[pair_index]) + line[reg_pos + len(reg) :]
return line
def calc_magnitude(self, s: str):
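        # repeatedly replace innermost pairs [a,b] with 3*a + 2*b until only a plain number remains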
while s.count("["):
pairs = re.findall(r"\[(\d+),(\d+)\]", s)
for a, b in pairs:
s = s.replace(f"[{a},{b}]", str(int(a) * 3 + int(b) * 2))
return int(s)
def part2(self, data):
return max(max(self.part1(i), self.part1(i[::-1])) for i in combinations(data, 2))
|
[
"itertools.combinations",
"re.findall"
] |
[((2333, 2357), 're.findall', 're.findall', (['"""\\\\d+"""', 'line'], {}), "('\\\\d+', line)\n", (2343, 2357), False, 'import re\n'), ((2705, 2741), 're.findall', 're.findall', (['"""\\\\[(\\\\d+),(\\\\d+)\\\\]"""', 's'], {}), "('\\\\[(\\\\d+),(\\\\d+)\\\\]', s)\n", (2715, 2741), False, 'import re\n'), ((1848, 1869), 're.findall', 're.findall', (['"""\\\\d+"""', 's'], {}), "('\\\\d+', s)\n", (1858, 1869), False, 'import re\n'), ((2962, 2983), 'itertools.combinations', 'combinations', (['data', '(2)'], {}), '(data, 2)\n', (2974, 2983), False, 'from itertools import combinations\n')]
|
import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_should_have_server(self):
self.assertEqual("https://api.bitfinex.com/v1", self.client.server())
def test_should_have_url_for_foo(self):
expected = "https://api.bitfinex.com/v1/foo"
self.assertEqual(expected, self.client.url_for("foo"))
def test_should_have_url_for_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
actual = self.client.url_for('foo/%s', path_arg="bar")
self.assertEqual(expected, actual)
def test_should_have_url_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
self.assertEqual(expected, actual)
def test_should_have_url_for(self):
expected = self.client.url_for("foo")
self.assertEqual("https://api.bitfinex.com/v1/foo", expected)
def test_should_have_url_for_with_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
self.assertEqual(expected, self.client.url_for(path, 'bar'))
def test_should_have_url_for_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1"
self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))
def test_should_have_url_for_with_path_arg_and_parameters(self):
expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))
@httpretty.activate
def test_should_have_symbols(self):
# mock out the request
mock_body = '["btcusd","ltcusd","ltcbtc"]'
url = self.client.url_for('symbols')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = ["btcusd","ltcusd","ltcbtc"]
self.assertEqual(expected, self.client.symbols())
@httpretty.activate
def test_should_have_ticker(self):
# mock out the request
mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
url = self.client.url_for('ticker/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"mid": 562.56495,
"bid": 562.15,
"ask": 562.9799,
"last_price": 562.25,
"timestamp": 1395552658.339936691
}
self.assertEqual(expected, self.client.ticker('btcusd'))
@httpretty.activate
def test_should_have_today(self):
# mock out the request
mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
url = self.client.url_for('today/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"low": 550.09,
"high": 572.2398,
"volume": 7305.33119836
}
self.assertEqual(expected, self.client.today('btcusd'))
@httpretty.activate
def test_should_have_stats(self):
# mock out the request
mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
url = self.client.url_for('stats/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = [
{"period": 1, "volume": 7410.27250155},
{"period": 7, "volume": 52251.37118006},
{"period": 30,"volume": 464505.07753251}
]
self.assertEqual(expected, self.client.stats('btcusd'))
@httpretty.activate
def test_should_have_lendbook(self):
# mock out the request
mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
url = self.client.url_for('lendbook/%s', 'btc')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
{"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
],
"asks": [
{"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
{"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
]
}
self.assertEqual(expected, self.client.lendbook('btc'))
@httpretty.activate
def test_should_have_lendbook_with_parameters(self):
# mock out the request
mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
parameters = {'limit_bids': 2, 'limit_asks': 0}
url = self.client.url_for('lendbook/%s', 'btc', parameters)
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
{"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
],
"asks": [
]
}
self.assertEqual(expected, self.client.lendbook('btc', parameters))
@httpretty.activate
def test_should_have_order_book(self):
# mock out the request
mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
url = self.client.url_for('book/%s', 'btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
],
"asks": [
{"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
]
}
self.assertEqual(expected, self.client.order_book('btcusd'))
@httpretty.activate
def test_should_have_order_book_with_parameters(self):
# mock out the request
mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
parameters = {'limit_asks': 0}
url = self.client.url_for('book/%s', 'btcusd', parameters)
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
],
"asks": []
}
self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
def setUp(self):
self.tc = TradeClient(API_KEY, API_SECRET)
def test_instantiate_tradeclient(self):
self.assertIsInstance(self.tc, TradeClient)
def test_get_active_orders_returns_json(self):
ao = self.tc.active_orders()
self.assertIsInstance(ao, list)
def test_get_active_positions_returns_json(self):
ap = self.tc.active_positions()
self.assertIsInstance(ap, list)
def test_get_full_history(self):
ap = self.tc.active_positions()
self.assertIsInstance(ap, list)
|
[
"bitfinex.client.TradeClient",
"bitfinex.client.Client",
"httpretty.register_uri"
] |
[((271, 279), 'bitfinex.client.Client', 'Client', ([], {}), '()\n', (277, 279), False, 'from bitfinex.client import Client, TradeClient\n'), ((2261, 2331), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (2283, 2331), False, 'import httpretty\n'), ((2740, 2810), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (2762, 2810), False, 'import httpretty\n'), ((3325, 3395), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (3347, 3395), False, 'import httpretty\n'), ((3896, 3966), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (3918, 3966), False, 'import httpretty\n'), ((4785, 4855), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (4807, 4855), False, 'import httpretty\n'), ((5932, 6002), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (5954, 6002), False, 'import httpretty\n'), ((6736, 6806), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (6758, 6806), False, 'import httpretty\n'), ((7477, 7547), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.GET', 'url'], {'body': 'mock_body', 'status': '(200)'}), '(httpretty.GET, url, body=mock_body, status=200)\n', (7499, 7547), False, 'import httpretty\n'), ((7885, 7917), 'bitfinex.client.TradeClient', 'TradeClient', (['API_KEY', 'API_SECRET'], {}), '(API_KEY, API_SECRET)\n', (7896, 7917), False, 'from bitfinex.client import Client, TradeClient\n')]
|
#!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from collections import Counter  # needed by Vocabulary.fit below
from util import read_file
from config import Config
# Configuration file
conf = Config()
class Vocabulary(object):
def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
self.voc2id = {}
self.id2voc = {}
self.unk = unk
self.pad = pad
self.max_len = max_len
self.allow_unk = allow_unk
with open(meta_file, encoding='utf-8') as f:
for i, line in enumerate(f):
line = convert_to_unicode(line.strip("\n"))
self.voc2id[line] = i
self.id2voc[i] = line
self.size = len(self.voc2id)
self.oov_num = self.size + 1
def fit(self, words_list):
"""
:param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
:return:
"""
word_lst = []
word_lst_append = word_lst.append
for words in words_list:
if not isinstance(words, list):
print(words)
continue
for word in words:
word = convert_to_unicode(word)
word_lst_append(word)
word_counts = Counter(word_lst)
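        # NOTE: self.max_num_word is assumed to be set elsewhere (it is not initialized in __init__); a negative value keeps every word.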
if self.max_num_word < 0:
self.max_num_word = len(word_counts)
sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
self.max_num_word = len(sorted_voc)
self.oov_index = self.max_num_word + 1
self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
return self
def _transform2id(self, word):
word = convert_to_unicode(word)
if word in self.voc2id:
return self.voc2id[word]
elif self.allow_unk:
return self.voc2id[self.unk]
else:
print(word)
raise ValueError("word:{} Not in voc2id, please check".format(word))
def _transform_seq2id(self, words, padding=0):
out_ids = []
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
for w in words:
out_ids.append(self._transform2id(w))
if padding and self.max_len:
while len(out_ids) < self.max_len:
out_ids.append(0)
return out_ids
def _transform_intent2ont_hot(self, words, padding=0):
        # Convert the multi-label intents into a one-hot vector
out_ids = np.zeros(self.size, dtype=np.float32)
words = convert_to_unicode(words)
for w in words:
out_ids[self._transform2id(w)] = 1.0
return out_ids
def _transform_seq2bert_id(self, words, padding=0):
out_ids, seq_len = [], 0
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
seq_len = len(words)
        # Insert [CLS], [SEP]
out_ids.append(self._transform2id("[CLS]"))
for w in words:
out_ids.append(self._transform2id(w))
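        # Attention mask: 1 for every real token ([CLS] + characters); padding positions appended below get 0.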
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids = [0 for _ in out_ids]
return out_ids, mask_ids, seg_ids, seq_len
@staticmethod
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
out_ids, seg_ids, seq_len = [], [1], 0
seq1 = [x for x in convert_to_unicode(seq1)]
seq2 = [x for x in convert_to_unicode(seq2)]
        # Truncate the pair so that it fits within max_len
self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        # Insert [CLS], [SEP]
out_ids.append(self._transform2id("[CLS]"))
for w in seq1:
out_ids.append(self._transform2id(w))
seg_ids.append(0)
out_ids.append(self._transform2id("[SEP]"))
seg_ids.append(0)
for w in seq2:
out_ids.append(self._transform2id(w))
seg_ids.append(1)
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids.append(0)
return out_ids, mask_ids, seg_ids, seq_len
def transform(self, seq_list, is_bert=0):
if is_bert:
return [self._transform_seq2bert_id(seq) for seq in seq_list]
else:
return [self._transform_seq2id(seq) for seq in seq_list]
def __len__(self):
return len(self.voc2id)
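# Illustrative usage sketch (not part of the original module); the vocab file path
# and the query string below are assumptions:
#   vocab = Vocabulary('./data/vocab.txt', max_len=conf.max_seq_len, allow_unk=1)
#   char_ids = vocab._transform_seq2id(u'example query', padding=1)  # fixed-length id list
#   bert_ids, mask_ids, seg_ids, seq_len = vocab._transform_seq2bert_id(u'example query', padding=1)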
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
word_set = set()
with open(file_path, encoding='utf-8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr = [prefix, title]
query_pred = json.loads(query_pred)
for w in prefix:
word_set.add(w)
for each in query_pred:
for w in each:
word_set.add(w)
    with open(out_path, 'w', encoding='utf-8') as o:
for w in word_set:
o.write(w + '\n')
pass
def convert_word2id(query, vocab_map):
ids = []
for w in query:
if w in vocab_map:
ids.append(vocab_map[w])
else:
ids.append(vocab_map[conf.unk])
while len(ids) < conf.max_seq_len:
ids.append(vocab_map[conf.pad])
return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
bow_ids = np.zeros(conf.nwords)
for w in query:
if w in vocab_map:
bow_ids[vocab_map[w]] += 1
else:
bow_ids[vocab_map[conf.unk]] += 1
return bow_ids
def get_data(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
# only 4 negative sample
for each in query_pred:
if each == title:
continue
cur_arr.append(convert_word2id(each, conf.vocab_map))
each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
pass
return data_map
def get_data_siamese_rnn(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_seq = convert_word2id(prefix, conf.vocab_map)
title_seq = convert_word2id(title, conf.vocab_map)
data_arr.append([prefix_seq, title_seq, int(label)])
return data_arr
def get_data_bow(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, prefix, label]], shape = [n, 3]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
title_ids = convert_seq2bow(title, conf.vocab_map)
data_arr.append([prefix_ids, title_ids, int(label)])
return data_arr
def trans_lcqmc(dataset):
"""
    Convert examples to word-id sequences, truncated to the maximum length.
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
t1_ids = convert_word2id(t1, conf.vocab_map)
t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
t2_ids = convert_word2id(t2, conf.vocab_map)
t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
# t2_len = len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
# out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc():
"""
    Load the LCQMC dataset and convert it to word ids.
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc(dataset.train_examples)
dev_set = trans_lcqmc(dataset.dev_examples)
test_set = trans_lcqmc(dataset.test_examples)
return train_set, dev_set, test_set
# return test_set, test_set, test_set
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
"""
    Convert examples to BERT-style id sequences, truncated to the maximum length.
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
text_len.extend([len(t1) + len(t2)])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
"""
    Load the LCQMC dataset and convert each query to BERT-style word ids.
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)
dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)
test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)
return train_set, dev_set, test_set
# test_set = test_set[:100]
# return test_set, test_set, test_set
def get_test(file_:str, vocab:Vocabulary):
test_arr = read_file(file_, '\t') # [[q1, q2],...]
out_arr = []
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
t1_ids = vocab._transform_seq2id(t1, padding=1)
t1_len = vocab.max_len if len(t1) > vocab.max_len else len(t1)
t2_ids = vocab._transform_seq2id(t2, padding=1)
t2_len = vocab.max_len if len(t2) > vocab.max_len else len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len])
return out_arr, test_arr
def get_test_bert(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_, '\t') # [[q1, q2],...]
out_arr, _ = get_test_bert_by_arr(test_arr, vocab, is_merge)
return out_arr, test_arr
def get_test_bert_by_arr(test_arr:list, vocab:Vocabulary, is_merge=0):
# test_arr # [[q1, q2],...]
out_arr = []
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])
return out_arr, test_arr
def get_test_bert_single(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_) # [q1,...]
out_arr = []
for line in test_arr:
t1 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
return out_arr, test_arr
def get_batch(dataset, batch_size=None, is_test=0):
    # tf.data.Dataset is awkward to use here, so batching is implemented by hand
    # https://stackoverflow.com/questions/50539342/getting-batches-in-tensorflow
    # dataset: each element is one sample, [[x1, x2, x3, ...], ...]; a test set may come without labels
if not batch_size:
batch_size = 32
if not is_test:
random.shuffle(dataset)
steps = int(math.ceil(float(len(dataset)) / batch_size))
for i in range(steps):
idx = i * batch_size
cur_set = dataset[idx: idx + batch_size]
cur_set = zip(*cur_set)
yield cur_set
if __name__ == '__main__':
# prefix, query_prediction, title, tag, label
    # query_prediction is a JSON-formatted string.
file_train = './data/oppo_round1_train_20180929.txt'
file_vali = './data/oppo_round1_vali_20180929.txt'
# data_train = get_data(file_train)
# data_train = get_data(file_vali)
# print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
dataset = get_lcqmc()
print(dataset[1][:3])
for each in get_batch(dataset[1][:3], batch_size=2):
t1_ids, t1_len, t2_ids, t2_len, label = each
print(each)
pass
|
[
"numpy.mean",
"json.loads",
"random.shuffle",
"util.read_file",
"config.Config",
"numpy.zeros",
"numpy.core.fromnumeric.mean",
"paddlehub.dataset.LCQMC"
] |
[((308, 316), 'config.Config', 'Config', ([], {}), '()\n', (314, 316), False, 'from config import Config\n'), ((6858, 6879), 'numpy.zeros', 'np.zeros', (['conf.nwords'], {}), '(conf.nwords)\n', (6866, 6879), True, 'import numpy as np\n'), ((10812, 10831), 'paddlehub.dataset.LCQMC', 'hub.dataset.LCQMC', ([], {}), '()\n', (10829, 10831), True, 'import paddlehub as hub\n'), ((12184, 12203), 'paddlehub.dataset.LCQMC', 'hub.dataset.LCQMC', ([], {}), '()\n', (12201, 12203), True, 'import paddlehub as hub\n'), ((12593, 12615), 'util.read_file', 'read_file', (['file_', '"""\t"""'], {}), "(file_, '\\t')\n", (12602, 12615), False, 'from util import read_file\n'), ((13234, 13256), 'util.read_file', 'read_file', (['file_', '"""\t"""'], {}), "(file_, '\\t')\n", (13243, 13256), False, 'from util import read_file\n'), ((14284, 14300), 'util.read_file', 'read_file', (['file_'], {}), '(file_)\n', (14293, 14300), False, 'from util import read_file\n'), ((2577, 2614), 'numpy.zeros', 'np.zeros', (['self.size'], {'dtype': 'np.float32'}), '(self.size, dtype=np.float32)\n', (2585, 2614), True, 'import numpy as np\n'), ((10634, 10648), 'numpy.core.fromnumeric.mean', 'mean', (['text_len'], {}), '(text_len)\n', (10638, 10648), False, 'from numpy.core.fromnumeric import mean\n'), ((10665, 10717), 'numpy.mean', 'np.mean', (['[(x <= conf.max_seq_len) for x in text_len]'], {}), '([(x <= conf.max_seq_len) for x in text_len])\n', (10672, 10717), True, 'import numpy as np\n'), ((11965, 11979), 'numpy.core.fromnumeric.mean', 'mean', (['text_len'], {}), '(text_len)\n', (11969, 11979), False, 'from numpy.core.fromnumeric import mean\n'), ((11996, 12048), 'numpy.mean', 'np.mean', (['[(x <= conf.max_seq_len) for x in text_len]'], {}), '([(x <= conf.max_seq_len) for x in text_len])\n', (12003, 12048), True, 'import numpy as np\n'), ((14906, 14929), 'random.shuffle', 'random.shuffle', (['dataset'], {}), '(dataset)\n', (14920, 14929), False, 'import random\n'), ((6189, 6211), 'json.loads', 'json.loads', (['query_pred'], {}), '(query_pred)\n', (6199, 6211), False, 'import json\n'), ((7697, 7719), 'json.loads', 'json.loads', (['query_pred'], {}), '(query_pred)\n', (7707, 7719), False, 'import json\n')]
|
import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
OD = Field(2030)
ID = Field(2031)
Weight = Field(2032)
Density = Field(2039)
E = Field(2040)
osaCasing_fields = FieldList()
osaCasing_fields.append( OD )
osaCasing_fields.append( ID )
osaCasing_fields.append( Weight )
osaCasing_fields.append( Density )
osaCasing_fields.append( E )
return osaCasing_fields
def get_osaCent_fields():
Type = Field(2049)
IPOD = Field(2009)
CentOD = Field(2011)
#CentID = Field(2012)
ResF_SO67 = Field(2018)
minResF = Field(2017)
SO_minResF = Field(2019)
ResF_SO67.set_representation('Res. Force @ SO=67%')
minResF.set_representation('minimum Res. Force')
SO_minResF.set_representation('StandOff @ min. Res. F.')
osaCent_fields = FieldList()
osaCent_fields.append( Type )
osaCent_fields.append( IPOD )
osaCent_fields.append( CentOD )
#osaCent_fields.append( CentID )
osaCent_fields.append( ResF_SO67 )
osaCent_fields.append( minResF )
osaCent_fields.append( SO_minResF )
return osaCent_fields
def get_osaWellbore_fields():
HoleID = Field(2010)
MaxSpan = Field(2061)
MudIPDensity = Field(2077)
MudOPDensity = Field(2077)
HoleID.set_representation('Hole ID')
HoleID.set_abbreviation('HoleID')
MaxSpan.set_representation('Max span')
MaxSpan.set_abbreviation('MaxSpan')
MudIPDensity.set_representation('Mud inside pipe')
MudIPDensity.set_abbreviation('MudIPDensity')
MudOPDensity.set_representation('Mud in annulus')
MudOPDensity.set_abbreviation('MudOPDensity')
osaWellbore_fields = FieldList()
osaWellbore_fields.append( HoleID )
osaWellbore_fields.append( MaxSpan )
osaWellbore_fields.append( MudIPDensity )
osaWellbore_fields.append( MudOPDensity )
return osaWellbore_fields
def get_osaOutputdata1_fields():
clearanceA = Field(2073, altBg=True, altFg=True)
clearanceB = Field(2073, altBg=True, altFg=True)
clearanceM = Field(2073, altBg=True, altFg=True)
sideForceA = Field(2074, altBg=True, altFg=True)
sideForceB = Field(2074, altBg=True, altFg=True)
sideForceM = Field(2074, altBg=True, altFg=True)
standoffA = Field(2078, altBg=True, altFg=True)
standoffB = Field(2078, altBg=True, altFg=True)
standoffM = Field(2078, altBg=True, altFg=True)
clearanceA.set_representation('Annular clearance @ cent. A')
clearanceA.set_abbreviation('ClearanceA')
clearanceB.set_representation('Annular clearance @ cent. B')
clearanceB.set_abbreviation('ClearanceB')
clearanceM.set_representation('Annular clearance @ mid span')
clearanceM.set_abbreviation('ClearanceM')
sideForceA.set_representation('Side force @ cent. A')
sideForceA.set_abbreviation('SideForceA')
sideForceB.set_representation('Side force @ cent. B')
sideForceB.set_abbreviation('SideForceB')
sideForceM.set_representation('Side force @ mid span')
sideForceM.set_abbreviation('SideForceM')
standoffA.set_representation('Standoff @ cent. A')
standoffA.set_abbreviation('StandoffA')
standoffB.set_representation('Standoff @ cent. B')
standoffB.set_abbreviation('StandoffB')
standoffM.set_representation('Standoff @ mid span')
standoffM.set_abbreviation('StandoffM')
osaOutputdata1_fields = FieldList()
osaOutputdata1_fields.append( clearanceA )
osaOutputdata1_fields.append( clearanceB )
osaOutputdata1_fields.append( clearanceM )
osaOutputdata1_fields.append( sideForceA )
osaOutputdata1_fields.append( sideForceB )
osaOutputdata1_fields.append( sideForceM )
osaOutputdata1_fields.append( standoffA )
osaOutputdata1_fields.append( standoffB )
osaOutputdata1_fields.append( standoffM )
return osaOutputdata1_fields
def get_osaOutputdata2_fields():
axialForce = Field(2075, altBg=True, altFg=True)
deflection = Field(2076, altBg=True, altFg=True)
wClearance = Field(2073, altBg=True, altFg=True)
wStandoff = Field(2078, altBg=True, altFg=True)
axialForce.set_representation('Axial extra force @ top')
axialForce.set_abbreviation('AxialForce')
deflection.set_representation('Max. pipe deflection')
deflection.set_abbreviation('MaxDeflection')
wClearance.set_representation('Mean wellbore clearance')
wClearance.set_abbreviation('WellboreClearance')
wStandoff.set_representation('Mean wellbore standoff')
wStandoff.set_abbreviation('WellboreStandoff')
osaOutputdata2_fields = FieldList()
osaOutputdata2_fields.append( axialForce )
osaOutputdata2_fields.append( deflection )
osaOutputdata2_fields.append( wClearance )
osaOutputdata2_fields.append( wStandoff )
return osaOutputdata2_fields
def get_casingDeflectionCurve(self):
# Equation(s) Reference 1:
# <NAME>, <NAME>. Casing Deflection and Centralizer Spacing Calculations.
# SPE Drilling Engineering (December 1992).
# Equation(s) Reference 2:
# <NAME>, <NAME>. Discussion of Optimal Spacing for Casing Centralizers.
# SPE Drilling Engineering (December 1988).
# Equation(s) Reference 3:
# <NAME>, <NAME>. Optimizing of Centralizer Distribution.
# SPE Latin American Petroleum Engineering Conference (October 1990).
self.osaCasing_fields.referenceUnitConvert_fields()
self.osaCentA_fields.referenceUnitConvert_fields()
self.osaCentB_fields.referenceUnitConvert_fields()
self.osaWellbore_fields.referenceUnitConvert_fields()
Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )
dH = self.osaWellbore_fields.HoleID[0]
L = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
ρe = self.osaWellbore_fields.MudOPDensity[0]
ρi = self.osaWellbore_fields.MudIPDensity[0]
ρs = self.osaCasing_fields.Density[0]
E = self.osaCasing_fields.E[0]
w = self.osaCasing_fields.PW[0]
D = self.osaCasing_fields.OD[0]
d = self.osaCasing_fields.ID[0]
Type_A = self.osaCentA_fields.Type[0]
F_So67_A = self.osaCentA_fields.ResF_SO67[0]
minF_A = self.osaCentA_fields.minResF[0]
So_minF_A = self.osaCentA_fields.SO_minResF[0]
DA = self.osaCentA_fields.COD[0]
dA = self.osaCentA_fields.IPOD[0]
Type_B = self.osaCentB_fields.Type[0]
F_So67_B = self.osaCentB_fields.ResF_SO67[0]
minF_B = self.osaCentB_fields.minResF[0]
So_minF_B = self.osaCentB_fields.SO_minResF[0]
DB = self.osaCentB_fields.COD[0]
dB = self.osaCentB_fields.IPOD[0]
	#kA = ResFA/(DA/2-0.335*(DA-D)) # This is how the spring coefficients are computed ( 0.335 = 0.67/2 )
#kB = ResFB/(DB/2-0.335*(DB-D))
for field in self.osaWellbore_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCasing_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentA_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentB_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
if dA!=D or dB!=D or dH<=D:
raise mdl.LogicalError('The selected devices are not size-consistent.')
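	# Wellbore inclination, converted from degrees (slider position) to radians.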
θ = np.pi*self.osaInclination_slider.sliderPosition()/180
	I = np.pi/64*(D**4-d**4) # [Ref.3] Area moment of inertia (not the polar moment of inertia).
F = 30000 # [Ref.1]
Radio = L*1e6
aspr = L*0.02
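	# Buoyed weight: the casing weight per unit length is scaled by the buoyancy factor for the inner and outer mud densities.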
buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]
w *= buoyancyFactor
fC = w*L*np.sin(θ)/2
if Type_A=='Resin': #mdl.isNoneEntry(ResFA):
yA = 0
dA = d
else:
kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
yA = fC/kA if (DA<dH) else fC/kA/2
if Type_B=='Resin': #mdl.isNoneEntry(ResFB):
yB = 0
dB = d
else:
kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
yB = fC/kB if (DB<dH) else fC/kB/2
R = D/2
rH = dH/2
rA_min = R+(DA/2-R)*0.1
rB_min = R+(DB/2-R)*0.1
rA = (DA/2-yA) if (DA<dH) else (rH-yA)
rB = (DB/2-yB) if (DB<dH) else (rH-yB)
rA = rA_min if (rA<=rA_min) else rA
rB = rB_min if (rB<=rB_min) else rB
α = np.arctan( (rB-rA)/L )
Lα = L/np.cos(α)
x = np.linspace( 0, Lα, 101 )
K = np.sqrt(F/E/I)
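	# Deflection of the casing between centralizers, modelled as a beam-column under axial force F and lateral load w*sin(θ).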
y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]
Rα = Rot(α)
xy = np.array([x,y])
x,y = np.dot(Rα,xy)
Δy = rH-rB
y += Δy
cH = rH-R
cA = rA-R
cB = rB-R
indexes = y>cH
y[indexes] = cH
indexes = y<-cH
y[indexes] =-cH
cy = cH-y
rM = rH-y[50]
if y[50]==cH:
fM = fC
fC = 0
else:
fM = 0
cM = rM-R
x -= L/2
yoh = y*0
ohc = np.array([x, yoh])
ohp = np.array([x, (yoh+rH)*aspr])
ohm = np.array([x, (yoh-rH)*aspr])
xyc = np.array([x, y*aspr])
xyp = np.array([x, (y+R)*aspr])
xym = np.array([x, (y-R)*aspr])
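	# Rotate the hole outline and the deflection curves into the inclined-wellbore frame for plotting.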
φ = θ + np.pi/2
Rφ = Rot(φ)
OHc = np.dot(Rφ,ohc)
OHp = np.dot(Rφ,ohp)
OHm = np.dot(Rφ,ohm)
XYc = np.dot(Rφ,xyc)
XYp = np.dot(Rφ,xyp)
XYm = np.dot(Rφ,xym)
SA = cA/cH
SB = cB/cH
SM = cM/cH
Sy = cy/cH
δ = (cA+cB)/2-cM
self.osaOutputdata1_fields.clear_content()
self.osaOutputdata2_fields.clear_content()
self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )
self.osaCasing_fields.inverseReferenceUnitConvert_fields()
self.osaCentA_fields.inverseReferenceUnitConvert_fields()
self.osaCentB_fields.inverseReferenceUnitConvert_fields()
self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()
lim = L/2*1.05
return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
|
[
"MdlUtilities.physicalValue",
"numpy.mean",
"numpy.sqrt",
"MdlUtilities.Field",
"MdlUtilities.LogicalError",
"numpy.tanh",
"numpy.sinh",
"numpy.array",
"numpy.linspace",
"numpy.dot",
"MdlUtilities.calculate_buoyancyFactor",
"numpy.cos",
"numpy.cosh",
"numpy.sin",
"MdlUtilities.FieldList",
"numpy.arctan"
] |
[((167, 178), 'MdlUtilities.Field', 'Field', (['(2030)'], {}), '(2030)\n', (172, 178), False, 'from MdlUtilities import Field, FieldList\n'), ((191, 202), 'MdlUtilities.Field', 'Field', (['(2031)'], {}), '(2031)\n', (196, 202), False, 'from MdlUtilities import Field, FieldList\n'), ((215, 226), 'MdlUtilities.Field', 'Field', (['(2032)'], {}), '(2032)\n', (220, 226), False, 'from MdlUtilities import Field, FieldList\n'), ((239, 250), 'MdlUtilities.Field', 'Field', (['(2039)'], {}), '(2039)\n', (244, 250), False, 'from MdlUtilities import Field, FieldList\n'), ((263, 274), 'MdlUtilities.Field', 'Field', (['(2040)'], {}), '(2040)\n', (268, 274), False, 'from MdlUtilities import Field, FieldList\n'), ((296, 307), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (305, 307), False, 'from MdlUtilities import Field, FieldList\n'), ((573, 584), 'MdlUtilities.Field', 'Field', (['(2049)'], {}), '(2049)\n', (578, 584), False, 'from MdlUtilities import Field, FieldList\n'), ((602, 613), 'MdlUtilities.Field', 'Field', (['(2009)'], {}), '(2009)\n', (607, 613), False, 'from MdlUtilities import Field, FieldList\n'), ((631, 642), 'MdlUtilities.Field', 'Field', (['(2011)'], {}), '(2011)\n', (636, 642), False, 'from MdlUtilities import Field, FieldList\n'), ((690, 701), 'MdlUtilities.Field', 'Field', (['(2018)'], {}), '(2018)\n', (695, 701), False, 'from MdlUtilities import Field, FieldList\n'), ((719, 730), 'MdlUtilities.Field', 'Field', (['(2017)'], {}), '(2017)\n', (724, 730), False, 'from MdlUtilities import Field, FieldList\n'), ((748, 759), 'MdlUtilities.Field', 'Field', (['(2019)'], {}), '(2019)\n', (753, 759), False, 'from MdlUtilities import Field, FieldList\n'), ((943, 954), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (952, 954), False, 'from MdlUtilities import Field, FieldList\n'), ((1304, 1315), 'MdlUtilities.Field', 'Field', (['(2010)'], {}), '(2010)\n', (1309, 1315), False, 'from MdlUtilities import Field, FieldList\n'), ((1334, 1345), 'MdlUtilities.Field', 'Field', (['(2061)'], {}), '(2061)\n', (1339, 1345), False, 'from MdlUtilities import Field, FieldList\n'), ((1364, 1375), 'MdlUtilities.Field', 'Field', (['(2077)'], {}), '(2077)\n', (1369, 1375), False, 'from MdlUtilities import Field, FieldList\n'), ((1394, 1405), 'MdlUtilities.Field', 'Field', (['(2077)'], {}), '(2077)\n', (1399, 1405), False, 'from MdlUtilities import Field, FieldList\n'), ((1784, 1795), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (1793, 1795), False, 'from MdlUtilities import Field, FieldList\n'), ((2059, 2094), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2064, 2094), False, 'from MdlUtilities import Field, FieldList\n'), ((2111, 2146), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2116, 2146), False, 'from MdlUtilities import Field, FieldList\n'), ((2163, 2198), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2168, 2198), False, 'from MdlUtilities import Field, FieldList\n'), ((2215, 2250), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2220, 2250), False, 'from MdlUtilities import Field, FieldList\n'), ((2267, 2302), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2272, 2302), False, 'from MdlUtilities import 
Field, FieldList\n'), ((2319, 2354), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2324, 2354), False, 'from MdlUtilities import Field, FieldList\n'), ((2371, 2406), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2376, 2406), False, 'from MdlUtilities import Field, FieldList\n'), ((2423, 2458), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2428, 2458), False, 'from MdlUtilities import Field, FieldList\n'), ((2475, 2510), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2480, 2510), False, 'from MdlUtilities import Field, FieldList\n'), ((3446, 3457), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (3455, 3457), False, 'from MdlUtilities import Field, FieldList\n'), ((3953, 3988), 'MdlUtilities.Field', 'Field', (['(2075)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2075, altBg=True, altFg=True)\n', (3958, 3988), False, 'from MdlUtilities import Field, FieldList\n'), ((4005, 4040), 'MdlUtilities.Field', 'Field', (['(2076)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2076, altBg=True, altFg=True)\n', (4010, 4040), False, 'from MdlUtilities import Field, FieldList\n'), ((4057, 4092), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (4062, 4092), False, 'from MdlUtilities import Field, FieldList\n'), ((4109, 4144), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (4114, 4144), False, 'from MdlUtilities import Field, FieldList\n'), ((4593, 4604), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (4602, 4604), False, 'from MdlUtilities import Field, FieldList\n'), ((7541, 7602), 'MdlUtilities.calculate_buoyancyFactor', 'mdl.calculate_buoyancyFactor', ([], {'OD': 'D', 'ID': 'd', 'ρs': 'ρs', 'ρe': 'ρe', 'ρi': 'ρi'}), '(OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi)\n', (7569, 7602), True, 'import MdlUtilities as mdl\n'), ((8246, 8270), 'numpy.arctan', 'np.arctan', (['((rB - rA) / L)'], {}), '((rB - rA) / L)\n', (8255, 8270), True, 'import numpy as np\n'), ((8293, 8316), 'numpy.linspace', 'np.linspace', (['(0)', 'Lα', '(101)'], {}), '(0, Lα, 101)\n', (8304, 8316), True, 'import numpy as np\n'), ((8327, 8345), 'numpy.sqrt', 'np.sqrt', (['(F / E / I)'], {}), '(F / E / I)\n', (8334, 8345), True, 'import numpy as np\n'), ((8498, 8514), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (8506, 8514), True, 'import numpy as np\n'), ((8522, 8536), 'numpy.dot', 'np.dot', (['Rα', 'xy'], {}), '(Rα, xy)\n', (8528, 8536), True, 'import numpy as np\n'), ((8801, 8819), 'numpy.array', 'np.array', (['[x, yoh]'], {}), '([x, yoh])\n', (8809, 8819), True, 'import numpy as np\n'), ((8828, 8860), 'numpy.array', 'np.array', (['[x, (yoh + rH) * aspr]'], {}), '([x, (yoh + rH) * aspr])\n', (8836, 8860), True, 'import numpy as np\n'), ((8865, 8897), 'numpy.array', 'np.array', (['[x, (yoh - rH) * aspr]'], {}), '([x, (yoh - rH) * aspr])\n', (8873, 8897), True, 'import numpy as np\n'), ((8904, 8927), 'numpy.array', 'np.array', (['[x, y * aspr]'], {}), '([x, y * aspr])\n', (8912, 8927), True, 'import numpy as np\n'), ((8934, 8963), 'numpy.array', 'np.array', (['[x, (y + R) * aspr]'], {}), '([x, (y + R) * aspr])\n', (8942, 8963), True, 'import numpy as np\n'), ((8968, 8997), 
'numpy.array', 'np.array', (['[x, (y - R) * aspr]'], {}), '([x, (y - R) * aspr])\n', (8976, 8997), True, 'import numpy as np\n'), ((9038, 9053), 'numpy.dot', 'np.dot', (['Rφ', 'ohc'], {}), '(Rφ, ohc)\n', (9044, 9053), True, 'import numpy as np\n'), ((9061, 9076), 'numpy.dot', 'np.dot', (['Rφ', 'ohp'], {}), '(Rφ, ohp)\n', (9067, 9076), True, 'import numpy as np\n'), ((9084, 9099), 'numpy.dot', 'np.dot', (['Rφ', 'ohm'], {}), '(Rφ, ohm)\n', (9090, 9099), True, 'import numpy as np\n'), ((9109, 9124), 'numpy.dot', 'np.dot', (['Rφ', 'xyc'], {}), '(Rφ, xyc)\n', (9115, 9124), True, 'import numpy as np\n'), ((9132, 9147), 'numpy.dot', 'np.dot', (['Rφ', 'xyp'], {}), '(Rφ, xyp)\n', (9138, 9147), True, 'import numpy as np\n'), ((9155, 9170), 'numpy.dot', 'np.dot', (['Rφ', 'xym'], {}), '(Rφ, xym)\n', (9161, 9170), True, 'import numpy as np\n'), ((7244, 7309), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""The selected devices are not size-consistent."""'], {}), "('The selected devices are not size-consistent.')\n", (7260, 7309), True, 'import MdlUtilities as mdl\n'), ((8278, 8287), 'numpy.cos', 'np.cos', (['α'], {}), '(α)\n', (8284, 8287), True, 'import numpy as np\n'), ((9386, 9460), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cA', 'self.osaOutputdata1_fields.ClearanceA.referenceUnit'], {}), '(cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit)\n', (9403, 9460), True, 'import MdlUtilities as mdl\n'), ((9513, 9587), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cB', 'self.osaOutputdata1_fields.ClearanceB.referenceUnit'], {}), '(cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit)\n', (9530, 9587), True, 'import MdlUtilities as mdl\n'), ((9640, 9714), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cM', 'self.osaOutputdata1_fields.ClearanceM.referenceUnit'], {}), '(cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit)\n', (9657, 9714), True, 'import MdlUtilities as mdl\n'), ((9769, 9843), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fC', 'self.osaOutputdata1_fields.SideForceA.referenceUnit'], {}), '(fC, self.osaOutputdata1_fields.SideForceA.referenceUnit)\n', (9786, 9843), True, 'import MdlUtilities as mdl\n'), ((9896, 9970), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fC', 'self.osaOutputdata1_fields.SideForceB.referenceUnit'], {}), '(fC, self.osaOutputdata1_fields.SideForceB.referenceUnit)\n', (9913, 9970), True, 'import MdlUtilities as mdl\n'), ((10023, 10097), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fM', 'self.osaOutputdata1_fields.SideForceM.referenceUnit'], {}), '(fM, self.osaOutputdata1_fields.SideForceM.referenceUnit)\n', (10040, 10097), True, 'import MdlUtilities as mdl\n'), ((10151, 10224), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SA', 'self.osaOutputdata1_fields.StandoffA.referenceUnit'], {}), '(SA, self.osaOutputdata1_fields.StandoffA.referenceUnit)\n', (10168, 10224), True, 'import MdlUtilities as mdl\n'), ((10276, 10349), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SB', 'self.osaOutputdata1_fields.StandoffB.referenceUnit'], {}), '(SB, self.osaOutputdata1_fields.StandoffB.referenceUnit)\n', (10293, 10349), True, 'import MdlUtilities as mdl\n'), ((10401, 10474), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SM', 'self.osaOutputdata1_fields.StandoffM.referenceUnit'], {}), '(SM, self.osaOutputdata1_fields.StandoffM.referenceUnit)\n', (10418, 10474), True, 'import MdlUtilities as mdl\n'), ((10670, 10746), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['δ', 
'self.osaOutputdata2_fields.MaxDeflection.referenceUnit'], {}), '(δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit)\n', (10687, 10746), True, 'import MdlUtilities as mdl\n'), ((6739, 6803), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (6755, 6803), True, 'import MdlUtilities as mdl\n'), ((6870, 6934), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (6886, 6934), True, 'import MdlUtilities as mdl\n'), ((7004, 7068), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (7020, 7068), True, 'import MdlUtilities as mdl\n'), ((7138, 7202), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (7154, 7202), True, 'import MdlUtilities as mdl\n'), ((7648, 7657), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (7654, 7657), True, 'import numpy as np\n'), ((10825, 10836), 'numpy.mean', 'np.mean', (['cy'], {}), '(cy)\n', (10832, 10836), True, 'import numpy as np\n'), ((10974, 10985), 'numpy.mean', 'np.mean', (['Sy'], {}), '(Sy)\n', (10981, 10985), True, 'import numpy as np\n'), ((8433, 8447), 'numpy.sinh', 'np.sinh', (['(K * x)'], {}), '(K * x)\n', (8440, 8447), True, 'import numpy as np\n'), ((10552, 10561), 'numpy.cos', 'np.cos', (['θ'], {}), '(θ)\n', (10558, 10561), True, 'import numpy as np\n'), ((5585, 5594), 'numpy.cos', 'np.cos', (['φ'], {}), '(φ)\n', (5591, 5594), True, 'import numpy as np\n'), ((5610, 5619), 'numpy.sin', 'np.sin', (['φ'], {}), '(φ)\n', (5616, 5619), True, 'import numpy as np\n'), ((5621, 5630), 'numpy.cos', 'np.cos', (['φ'], {}), '(φ)\n', (5627, 5630), True, 'import numpy as np\n'), ((5597, 5606), 'numpy.sin', 'np.sin', (['φ'], {}), '(φ)\n', (5603, 5606), True, 'import numpy as np\n'), ((8408, 8427), 'numpy.tanh', 'np.tanh', (['(K * Lα / 2)'], {}), '(K * Lα / 2)\n', (8415, 8427), True, 'import numpy as np\n'), ((8452, 8461), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (8458, 8461), True, 'import numpy as np\n'), ((8392, 8406), 'numpy.cosh', 'np.cosh', (['(K * x)'], {}), '(K * x)\n', (8399, 8406), True, 'import numpy as np\n'), ((8371, 8380), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (8377, 8380), True, 'import numpy as np\n')]
|
"""Test converting an image to a pyramid.
"""
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
|
[
"napari.gui_qt",
"numpy.random.randint",
"napari.view_points"
] |
[((90, 129), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(50000, 2)'}), '(100, size=(50000, 2))\n', (107, 129), True, 'import numpy as np\n'), ((137, 152), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (150, 152), False, 'import napari\n'), ((167, 211), 'napari.view_points', 'napari.view_points', (['points'], {'face_color': '"""red"""'}), "(points, face_color='red')\n", (185, 211), False, 'import napari\n')]
|
from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
def setUp(self):
super(mock_service_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(constants.mock_service_exe(), 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
def setUp(self):
super(provider_verifier_exeTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.mock_os = patch.object(constants, 'os', autospec=True).start()
def test_other(self):
self.mock_os.name = 'posix'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier')
def test_windows(self):
self.mock_os.name = 'nt'
self.assertEqual(
constants.provider_verifier_exe(), 'pact-provider-verifier.bat')
|
[
"mock.patch.object"
] |
[((261, 305), 'mock.patch.object', 'patch.object', (['constants', '"""os"""'], {'autospec': '(True)'}), "(constants, 'os', autospec=True)\n", (273, 305), False, 'from mock import patch\n'), ((786, 830), 'mock.patch.object', 'patch.object', (['constants', '"""os"""'], {'autospec': '(True)'}), "(constants, 'os', autospec=True)\n", (798, 830), False, 'from mock import patch\n')]
|
import pytest
from array import array
from game_map import GameMap
from tests.conftest import get_relative_path
sample_map_data = tuple(
reversed(
(
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
array("I", (1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)),
array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
)
)
)
def test_game_map_from_file(sample_game_map, sample_tiles):
assert sample_game_map.map_data == sample_map_data
assert sample_game_map.width == 21
assert sample_game_map.height == 21
assert sample_game_map.tile_data == sample_tiles
# Assert map is read right-up
assert sample_game_map.get(16, 2) == 0
assert sample_game_map.get(16, 18) == 1
def test_game_map_get_out_of_bounds(sample_game_map):
    # Each out-of-bounds access must raise on its own; a single `with` block
    # would stop checking after the first call that raises.
    for x, y in ((-1, 0), (0, -1), (-1, -1), (21, 0), (0, 21), (21, 21)):
        with pytest.raises(AssertionError):
            sample_game_map.get(x, y)
def test_game_map_load_mapfile_nonrectangular():
with pytest.raises(AssertionError):
GameMap.load_mapfile(get_relative_path("fixtures/map_nonrectangular.csv"))
def test_game_map_traversable(sample_game_map):
assert sample_game_map.traversable(2, 2)
assert not sample_game_map.traversable(1, 1)
assert sample_game_map.traversable(16, 2)
assert not sample_game_map.traversable(16, 18)
|
[
"pytest.raises",
"array.array",
"tests.conftest.get_relative_path"
] |
[((2487, 2516), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2500, 2516), False, 'import pytest\n'), ((2790, 2819), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2803, 2819), False, 'import pytest\n'), ((176, 251), 'array.array', 'array', (['"""I"""', '(0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)'], {}), "('I', (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0))\n", (181, 251), False, 'from array import array\n'), ((265, 340), 'array.array', 'array', (['"""I"""', '(0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)'], {}), "('I', (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0))\n", (270, 340), False, 'from array import array\n'), ((354, 429), 'array.array', 'array', (['"""I"""', '(1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1)'], {}), "('I', (1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1))\n", (359, 429), False, 'from array import array\n'), ((443, 518), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1))\n", (448, 518), False, 'from array import array\n'), ((532, 607), 'array.array', 'array', (['"""I"""', '(1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (537, 607), False, 'from array import array\n'), ((621, 696), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (626, 696), False, 'from array import array\n'), ((710, 785), 'array.array', 'array', (['"""I"""', '(1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (715, 785), False, 'from array import array\n'), ((799, 874), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (804, 874), False, 'from array import array\n'), ((888, 963), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (893, 963), False, 'from array import array\n'), ((977, 1052), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (982, 1052), False, 'from array import array\n'), ((1066, 1141), 'array.array', 'array', (['"""I"""', '(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)'], {}), "('I', (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))\n", (1071, 1141), False, 'from array import array\n'), ((1155, 1230), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1160, 1230), False, 'from array import array\n'), ((1244, 1319), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1249, 1319), False, 'from array import array\n'), ((1333, 1408), 'array.array', 'array', 
(['"""I"""', '(1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1338, 1408), False, 'from array import array\n'), ((1422, 1497), 'array.array', 'array', (['"""I"""', '(1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1427, 1497), False, 'from array import array\n'), ((1511, 1586), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1516, 1586), False, 'from array import array\n'), ((1600, 1675), 'array.array', 'array', (['"""I"""', '(1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1605, 1675), False, 'from array import array\n'), ((1689, 1764), 'array.array', 'array', (['"""I"""', '(1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)'], {}), "('I', (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n", (1694, 1764), False, 'from array import array\n'), ((1778, 1853), 'array.array', 'array', (['"""I"""', '(1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)'], {}), "('I', (1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1))\n", (1783, 1853), False, 'from array import array\n'), ((1867, 1942), 'array.array', 'array', (['"""I"""', '(0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)'], {}), "('I', (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0))\n", (1872, 1942), False, 'from array import array\n'), ((1956, 2031), 'array.array', 'array', (['"""I"""', '(0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)'], {}), "('I', (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0))\n", (1961, 2031), False, 'from array import array\n'), ((2850, 2902), 'tests.conftest.get_relative_path', 'get_relative_path', (['"""fixtures/map_nonrectangular.csv"""'], {}), "('fixtures/map_nonrectangular.csv')\n", (2867, 2902), False, 'from tests.conftest import get_relative_path\n')]
|
# encoding: utf-8
from __future__ import unicode_literals
import six
from django.db.models import Manager
from django.db.models.query import QuerySet
from .compat import (ANNOTATION_SELECT_CACHE_NAME, ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP, chain_query, chain_queryset,
ModelIterable, ValuesQuerySet)
from .exceptions import QueryablePropertyDoesNotExist, QueryablePropertyError
from .query import QueryablePropertiesQueryMixin
from .utils import get_queryable_property
from .utils.internal import InjectableMixin, QueryPath, QueryablePropertyReference
class QueryablePropertiesIterable(InjectableMixin):
"""
An iterable that yields the actual results of a queryset while correctly
processing columns of queryable properties. It is closely related to
Django's BaseIterable and will be used as a mixin for its subclasses in all
(recent) Django versions that have it. In all other (older) versions, this
class will be used as a standalone iterable instead.
"""
def __init__(self, queryset, *args, **kwargs):
"""
Initialize a new iterable for the given queryset. If an iterable is
given it will be used to retrieve the model instances before applying
queryable properties logic (standalone usage for older Django
versions). Otherwise, the __iter__ implementation of the base class
is used to get the model instances (usage as mixin).
:param QuerySet queryset: The queryset to perform the database query
for.
:param collections.Iterable iterable: The optional iterable to use for
standalone usage.
:param args: Positional arguments to pass through to the base class
initialization when used as a mixin.
:param kwargs: Keyword arguments to pass through to the base class
initialization when used as a mixin.
:keyword collections.Iterable iterable: The optional iterable to use
for standalone usage.
"""
self.queryset = queryset
# Only perform the super call if the class is used as a mixin
if self.__class__.__bases__ != (InjectableMixin,):
super(QueryablePropertiesIterable, self).__init__(queryset, *args, **kwargs)
self.iterable = kwargs.get('iterable') or super(QueryablePropertiesIterable, self).__iter__()
self.yields_model_instances = ((ModelIterable is not None and isinstance(self, ModelIterable)) or
(ValuesQuerySet is not None and not isinstance(self.queryset, ValuesQuerySet)))
def __iter__(self):
"""
Yield the model objects for the queryset associated with this iterator
with their correctly processed selected queryable properties.
:return: A generator that yields the model objects.
"""
original_query = self.queryset.query
try:
self.queryset.query = chain_query(original_query)
final_aliases = self._setup_queryable_properties()
for obj in self.iterable:
if self.yields_model_instances:
# Retrieve the annotation values from each renamed
# attribute and use it to populate the cache for the
# corresponding queryable property on each object while
# removing the weird, renamed attributes.
for changed_name, property_ref in six.iteritems(final_aliases):
value = getattr(obj, changed_name)
delattr(obj, changed_name)
if property_ref:
property_ref.descriptor.set_cached_value(obj, value)
yield obj
finally:
self.queryset.query = original_query
def _setup_queryable_properties(self):
"""
Perform the required setup to correctly process queryable property
values.
Change the internal aliases of the annotations that belong to queryable
properties in the query of the associated queryset to something unique
and return a dictionary mapping the queryable properties to the changed
aliases. This is necessary to allow Django to populate the annotation
attributes on the resulting model instances, which would otherwise call
the setter of the queryable properties. This way, Django can populate
attributes with different names and avoid using the setter methods.
Also make sure that ordering by queryable properties works in older
Django versions.
:return: A dictionary mapping the final aliases for queryable
properties to the corresponding references to be able to
retrieve the values from the DB and apply them to the correct
property. The property reference may be None, indicating that
the retrieved value should be discarded.
:rtype: dict[str, QueryablePropertyReference | None]
"""
query = self.queryset.query
final_aliases = {}
select = dict(query.annotation_select)
for property_ref in query._queryable_property_annotations:
annotation_name = six.text_type(property_ref.full_path)
# Older Django versions don't work with the annotation select dict
# when it comes to ordering, so queryable property annotations used
# for ordering need special treatment.
order_by_occurrences = []
if ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP: # pragma: no cover
order_by_occurrences = [index for index, field_name in enumerate(query.order_by)
if field_name in (annotation_name, '-{}'.format(annotation_name))]
if order_by_occurrences and annotation_name not in select and annotation_name in query.annotations:
select[annotation_name] = query.annotations[annotation_name]
final_aliases[annotation_name] = None
if not self.yields_model_instances or annotation_name not in select:
# The queryable property annotation does not require selection
# or no renaming needs to occur since the queryset doesn't
# yield model instances.
continue
# Suffix the original annotation name with the lookup separator to
            # create a non-clashing name: both model field and queryable
# property names are not allowed to contain the separator and a
# relation path ending with the separator would be invalid as well.
changed_name = six.text_type(property_ref.full_path + '')
final_aliases[changed_name] = final_aliases.pop(annotation_name, property_ref)
select[changed_name] = select.pop(annotation_name)
for index in order_by_occurrences: # pragma: no cover
# Apply the changed names to the ORDER BY clause.
query.order_by[index] = query.order_by[index].replace(annotation_name, changed_name)
# Patch the correct select property on the query with the new names,
# since this property is used by the SQL compiler to build the actual
# SQL query (which is where the changed names should be used).
setattr(query, ANNOTATION_SELECT_CACHE_NAME, select)
return final_aliases
class QueryablePropertiesQuerySetMixin(InjectableMixin):
"""
    A mixin for Django's :class:`django.db.models.QuerySet` objects that allows
    queryable properties to be used in filters, annotations and update queries.
"""
def init_injected_attrs(self):
# To work correctly, a query using the QueryablePropertiesQueryMixin is
# required. If the current query is not using the mixin already, it
# will be dynamically injected into the query. That way, other Django
# extensions using custom query objects are also supported.
class_name = 'QueryableProperties' + self.query.__class__.__name__
self.query = QueryablePropertiesQueryMixin.inject_into_object(chain_query(self.query), class_name)
@property
def _iterable_class(self):
# Override the regular _iterable_class attribute of recent Django
# versions with a property that also stores the value in the instance
# dict, but automatically mixes the QueryablePropertiesModelIterable
# into the base class on getter access if the base class yields model
        # instances. That way, the queryable properties extension stays
        # compatible with custom iterable classes while querysets can still be
# pickled due to the base class being in the instance dict.
cls = self.__dict__['_iterable_class']
return QueryablePropertiesIterable.mix_with_class(cls, 'QueryableProperties' + cls.__name__)
@_iterable_class.setter
def _iterable_class(self, value):
self.__dict__['_iterable_class'] = value
def _clone(self, klass=None, *args, **kwargs):
if klass: # pragma: no cover
# In older Django versions, the class of the queryset may be
# replaced with a dynamically created class based on the current
            # class and the value of klass while cloning (e.g. when using
# .values()). Therefore this needs to be re-injected to be on top
# of the MRO again to enable queryable properties functionality.
klass = QueryablePropertiesQuerySetMixin.mix_with_class(klass, 'QueryableProperties' + klass.__name__)
args = (klass,) + args
clone = super(QueryablePropertiesQuerySetMixin, self)._clone(*args, **kwargs)
# Since the _iterable_class property may return a dynamically created
# class, the value of a clone must be reset to the base class.
if '_iterable_class' in self.__dict__:
clone._iterable_class = self.__dict__['_iterable_class']
return clone
def _resolve_update_kwargs(self, **kwargs):
"""
Look for the names of queryable properties in the given keyword
arguments for an update query and correctly resolve them into their
actual keyword arguments.
:param kwargs: Keyword arguments of an update query.
:return: A dictionary containing the resolved arguments.
:rtype: dict
"""
original_names = set(kwargs)
for original_name in original_names:
try:
prop = get_queryable_property(self.model, original_name)
except QueryablePropertyDoesNotExist:
continue
if not prop.get_update_kwargs:
raise QueryablePropertyError('Queryable property "{}" does not implement queryset updating.'
.format(prop))
# Call the method recursively since queryable properties can build
# upon each other.
additional_kwargs = self._resolve_update_kwargs(
**prop.get_update_kwargs(self.model, kwargs.pop(original_name)))
# Make sure that there are no conflicting values after resolving
# the update keyword arguments of the queryable properties.
for additional_name, value in six.iteritems(additional_kwargs):
if additional_name in kwargs and kwargs[additional_name] != value:
raise QueryablePropertyError(
'Updating queryable property "{prop}" would change field "{field}", but a conflicting value '
'was set for this field by another queryable property or explicitly in the update arguments.'
.format(prop=prop, field=additional_name)
)
kwargs[additional_name] = value
return kwargs
def select_properties(self, *names):
"""
Add the annotations of the queryable properties with the specified
names to this query. The annotation values will be cached in the
properties of resulting model instances, regardless of the regular
caching behavior of the queried properties.
:param names: Names of queryable properties.
:return: A copy of this queryset with the added annotations.
:rtype: QuerySet
"""
queryset = chain_queryset(self)
for name in names:
property_ref = QueryablePropertyReference(get_queryable_property(self.model, name), self.model, QueryPath())
# A full GROUP BY is required if the query is not limited to
# certain fields. Since only certain types of queries had the
# _fields attribute in old Django versions, fall back to checking
# for existing selection, on which the GROUP BY would be based.
full_group_by = not getattr(self, '_fields', self.query.select)
with queryset.query._add_queryable_property_annotation(property_ref, full_group_by, select=True):
pass
return queryset
def iterator(self, *args, **kwargs):
# Recent Django versions use the associated iterable class for the
# iterator() implementation, where the QueryablePropertiesModelIterable
# will be already mixed in. In older Django versions, use a standalone
# QueryablePropertiesModelIterable instead to perform the queryable
# properties processing.
iterable = super(QueryablePropertiesQuerySetMixin, self).iterator(*args, **kwargs)
if '_iterable_class' not in self.__dict__: # pragma: no cover
return iter(QueryablePropertiesIterable(self, iterable=iterable))
return iterable
def update(self, **kwargs):
# Resolve any queryable properties into their actual update kwargs
# before calling the base update method.
kwargs = self._resolve_update_kwargs(**kwargs)
return super(QueryablePropertiesQuerySetMixin, self).update(**kwargs)
class QueryablePropertiesQuerySet(QueryablePropertiesQuerySetMixin, QuerySet):
"""
    A special queryset class that allows queryable properties to be used in its
    filter conditions, annotations and update queries.
"""
pass
if hasattr(Manager, 'from_queryset'):
QueryablePropertiesManager = Manager.from_queryset(QueryablePropertiesQuerySet)
else: # pragma: no cover
class QueryablePropertiesManager(Manager):
def get_queryset(self):
return QueryablePropertiesQuerySet(self.model, using=self._db)
get_query_set = get_queryset
def select_properties(self, *names):
return self.get_queryset().select_properties(*names)
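# Illustrative usage sketch (added note; not part of the original module, and
# the model/property names below are hypothetical):
#
#   class Book(models.Model):
#       ...
#       objects = QueryablePropertiesManager()
#
#   # select_properties() fetches the property in the same query and caches
#   # the value on each instance instead of re-evaluating it per object.
#   for book in Book.objects.select_properties('lowered_title'):
#       print(book.lowered_title)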
|
[
"six.text_type",
"six.iteritems",
"django.db.models.Manager.from_queryset"
] |
[((14474, 14524), 'django.db.models.Manager.from_queryset', 'Manager.from_queryset', (['QueryablePropertiesQuerySet'], {}), '(QueryablePropertiesQuerySet)\n', (14495, 14524), False, 'from django.db.models import Manager\n'), ((5366, 5403), 'six.text_type', 'six.text_type', (['property_ref.full_path'], {}), '(property_ref.full_path)\n', (5379, 5403), False, 'import six\n'), ((6824, 6866), 'six.text_type', 'six.text_type', (["(property_ref.full_path + '')"], {}), "(property_ref.full_path + '')\n", (6837, 6866), False, 'import six\n'), ((11454, 11486), 'six.iteritems', 'six.iteritems', (['additional_kwargs'], {}), '(additional_kwargs)\n', (11467, 11486), False, 'import six\n'), ((3569, 3597), 'six.iteritems', 'six.iteritems', (['final_aliases'], {}), '(final_aliases)\n', (3582, 3597), False, 'import six\n')]
|
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
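  # Note (added): as the assertions in these tests show, engineer() appends a
  # derived column named after the expression itself, prefixed with the
  # column-type marker ('c_' for categorical, 'n_' for numeric), e.g.
  # 'concat(c_1, c_2)' becomes column 'c_concat(c_1,c_2)'.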
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort());
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
|
[
"numpy.testing.assert_allclose",
"math.sqrt",
"math.log",
"numpy.array",
"pandas.DataFrame",
"numpy.testing.assert_array_equal"
] |
[((244, 306), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})\n", (256, 306), True, 'import pandas as pd\n'), ((509, 599), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h',\n 'i', 'j']})\n", (521, 599), True, 'import pandas as pd\n'), ((822, 878), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3]})\n", (834, 878), True, 'import pandas as pd\n'), ((1099, 1173), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})\n", (1111, 1173), True, 'import pandas as pd\n'), ((1387, 1483), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1399, 1483), True, 'import pandas as pd\n'), ((1675, 1771), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1687, 1771), True, 'import pandas as pd\n'), ((1981, 2077), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1993, 2077), True, 'import pandas as pd\n'), ((2358, 2454), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (2370, 2454), True, 'import pandas as pd\n'), ((2719, 2815), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (2731, 2815), True, 'import pandas as pd\n'), ((3165, 3261), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (3177, 3261), True, 'import pandas as pd\n'), ((3549, 3645), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (3561, 3645), True, 'import pandas as pd\n'), ((4007, 4103), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (4019, 4103), True, 'import pandas as pd\n'), ((4397, 4447), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4409, 4447), True, 'import pandas as pd\n'), ((4507, 4594), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 35, 40, 30, 29, 48]', "df['n_' + col]"], {}), "([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' +\n col])\n", (4536, 4594), True, 'import numpy as np\n'), 
((4649, 4699), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4661, 4699), True, 'import pandas as pd\n'), ((4760, 4865), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df\n ['n_' + col], rtol=0.001)\n", (4786, 4865), True, 'import numpy as np\n'), ((4920, 4970), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4932, 4970), True, 'import pandas as pd\n'), ((5033, 5120), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 12, 13, 13, 12, 12]', "df['n_' + col]"], {}), "([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' +\n col])\n", (5062, 5120), True, 'import numpy as np\n'), ((5174, 5224), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5186, 5224), True, 'import pandas as pd\n'), ((5284, 5369), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 10, 12, 2, 2, 2]', "df['n_' + col]"], {}), "([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col]\n )\n", (5313, 5369), True, 'import numpy as np\n'), ((5422, 5472), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5434, 5472), True, 'import pandas as pd\n'), ((5532, 5619), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 13, 15, 15, 15, 34]', "df['n_' + col]"], {}), "([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' +\n col])\n", (5561, 5619), True, 'import numpy as np\n'), ((5673, 5723), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5685, 5723), True, 'import pandas as pd\n'), ((5783, 5891), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371],\n df['n_' + col], rtol=0.001)\n", (5809, 5891), True, 'import numpy as np\n'), ((5944, 5994), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5956, 5994), True, 'import pandas as pd\n'), ((6054, 6161), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268],\n df['n_' + col], rtol=0.001)\n", (6080, 6161), True, 'import numpy as np\n'), ((6238, 6323), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (6250, 6323), True, 'import pandas as pd\n'), ((6374, 6474), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 35, 40, 30, 29, 48]', "df['n_rolling_sum(n_1,3)']"], {}), "([np.nan, np.nan, 35, 40, 30, 29, 48], df[\n 'n_rolling_sum(n_1,3)'])\n", (6403, 6474), True, 'import numpy as np\n'), ((6475, 6572), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 6, 10, 10, 9, 8]', "df['n_rolling_sum(n_2,3)']"], {}), "([np.nan, np.nan, 6, 10, 10, 9, 8], df[\n 
'n_rolling_sum(n_2,3)'])\n", (6504, 6572), True, 'import numpy as np\n'), ((6626, 6711), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (6638, 6711), True, 'import pandas as pd\n'), ((6763, 6881), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16]', "df['n_rolling_mean(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df\n ['n_rolling_mean(n_1,3)'], rtol=0.001)\n", (6789, 6881), True, 'import numpy as np\n'), ((6881, 6998), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666]', "df['n_rolling_mean(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df[\n 'n_rolling_mean(n_2,3)'], rtol=0.001)\n", (6907, 6998), True, 'import numpy as np\n'), ((7053, 7138), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7065, 7138), True, 'import pandas as pd\n'), ((7192, 7295), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 12, 13, 13, 12, 12]', "df['n_rolling_median(n_1,3)']"], {}), "([np.nan, np.nan, 12, 13, 13, 12, 12], df[\n 'n_rolling_median(n_1,3)'])\n", (7221, 7295), True, 'import numpy as np\n'), ((7296, 7394), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 2, 3, 3, 2, 2]', "df['n_rolling_median(n_2,3)']"], {}), "([np.nan, np.nan, 2, 3, 3, 2, 2], df[\n 'n_rolling_median(n_2,3)'])\n", (7325, 7394), True, 'import numpy as np\n'), ((7447, 7532), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7459, 7532), True, 'import pandas as pd\n'), ((7583, 7680), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 10, 12, 2, 2, 2]', "df['n_rolling_min(n_1,3)']"], {}), "([np.nan, np.nan, 10, 12, 2, 2, 2], df[\n 'n_rolling_min(n_1,3)'])\n", (7612, 7680), True, 'import numpy as np\n'), ((7681, 7776), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 1, 2, 2, 2, 2]', "df['n_rolling_min(n_2,3)']"], {}), "([np.nan, np.nan, 1, 2, 2, 2, 2], df[\n 'n_rolling_min(n_2,3)'])\n", (7710, 7776), True, 'import numpy as np\n'), ((7829, 7914), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7841, 7914), True, 'import pandas as pd\n'), ((7965, 8065), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 13, 15, 15, 15, 34]', "df['n_rolling_max(n_1,3)']"], {}), "([np.nan, np.nan, 13, 15, 15, 15, 34], df[\n 'n_rolling_max(n_1,3)'])\n", (7994, 8065), True, 'import numpy as np\n'), ((8066, 8161), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 3, 5, 5, 5, 4]', "df['n_rolling_max(n_2,3)']"], {}), "([np.nan, np.nan, 3, 5, 5, 5, 4], df[\n 'n_rolling_max(n_2,3)'])\n", (8095, 8161), True, 'import numpy as np\n'), ((8214, 8299), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 
12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (8226, 8299), True, 'import pandas as pd\n'), ((8350, 8470), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371]', "df['n_rolling_std(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371],\n df['n_rolling_std(n_1,3)'], rtol=0.001)\n", (8376, 8470), True, 'import numpy as np\n'), ((8471, 8591), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547]', "df['n_rolling_std(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547],\n df['n_rolling_std(n_2,3)'], rtol=0.001)\n", (8497, 8591), True, 'import numpy as np\n'), ((8644, 8729), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (8656, 8729), True, 'import pandas as pd\n'), ((8780, 8899), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268]', "df['n_rolling_var(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268],\n df['n_rolling_var(n_1,3)'], rtol=0.001)\n", (8806, 8899), True, 'import numpy as np\n'), ((8900, 9016), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333]', "df['n_rolling_var(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df[\n 'n_rolling_var(n_2,3)'], rtol=0.001)\n", (8926, 9016), True, 'import numpy as np\n'), ((9058, 9178), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (9070, 9178), True, 'import pandas as pd\n'), ((9714, 9834), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (9726, 9834), True, 'import pandas as pd\n'), ((10272, 10392), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (10284, 10392), True, 'import pandas as pd\n'), ((10832, 10882), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [1, 2, 3], 'n_2': [4, 5, 6]}"], {}), "({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})\n", (10844, 10882), True, 'import pandas as pd\n'), ((10898, 10948), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [1, 2, 3], 'n_2': [4, 5, 6]}"], {}), "({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})\n", (10910, 10948), True, 'import pandas as pd\n'), ((11275, 11371), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_mult(n_1,n_2)'].values", "df2['n_mult(n_1,n_2)'].values"], {}), "(df1['n_mult(n_1,n_2)'].values, df2[\n 'n_mult(n_1,n_2)'].values)\n", (11304, 11371), True, 'import numpy as np\n'), ((11373, 11444), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_pow(n_1,3)']", "df2['n_pow(n_1,3)']"], {}), "(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])\n", (11402, 11444), True, 'import numpy as np\n'), ((11451, 11530), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_lg(pow(n_1,3))']", "df2['n_lg(pow(n_1,3))']"], {}), "(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])\n", (11480, 11530), True, 'import numpy as np\n'), ((11537, 11627), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_lg(mult(n_1,n_2))']", "df2['n_lg(mult(n_1,n_2))']"], {}), "(df1['n_lg(mult(n_1,n_2))'], df2[\n 'n_lg(mult(n_1,n_2))'])\n", (11566, 11627), True, 'import numpy as np\n'), ((11629, 11766), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']", "df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']"], {}), "(df1[\n 'n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2[\n 'n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])\n", (11658, 11766), True, 'import numpy as np\n'), ((2158, 2293), 'numpy.array', 'np.array', (["[['a', 1, 4, 7, 1 * 1, 4 * 4, 7 * 7], ['b', 2, 5, 8, 2 * 2, 5 * 5, 8 * 8],\n ['c', 3, 6, 9, 3 * 3, 6 * 6, 9 * 9]]", '"""object"""'], {}), "([['a', 1, 4, 7, 1 * 1, 4 * 4, 7 * 7], ['b', 2, 5, 8, 2 * 2, 5 * 5,\n 8 * 8], ['c', 3, 6, 9, 3 * 3, 6 * 6, 9 * 9]], 'object')\n", (2166, 2293), True, 'import numpy as np\n'), ((2540, 2634), 'numpy.array', 'np.array', (["[['a', 1, 4, 7, 4 * 4], ['b', 2, 5, 8, 5 * 5], ['c', 3, 6, 9, 6 * 6]]", '"""object"""'], {}), "([['a', 1, 4, 7, 4 * 4], ['b', 2, 5, 8, 5 * 5], ['c', 3, 6, 9, 6 * \n 6]], 'object')\n", (2548, 2634), True, 'import numpy as np\n'), ((423, 461), 'numpy.array', 'np.array', (["['ad', 'be', 'cf']", '"""object"""'], {}), "(['ad', 'be', 'cf'], 'object')\n", (431, 461), True, 'import numpy as np\n'), ((721, 762), 'numpy.array', 'np.array', (["['had', 'ibe', 'jcf']", '"""object"""'], {}), "(['had', 'ibe', 'jcf'], 'object')\n", (729, 762), True, 'import numpy as np\n'), ((994, 1032), 'numpy.array', 'np.array', (["['a1', 'b2', 'c3']", '"""object"""'], {}), "(['a1', 'b2', 'c3'], 'object')\n", (1002, 1032), True, 'import numpy as np\n'), ((1297, 1338), 'numpy.array', 'np.array', (["['4a1', '5b2', '6c3']", '"""object"""'], {}), "(['4a1', '5b2', '6c3'], 'object')\n", (1305, 1338), True, 'import numpy as np\n'), ((1592, 1619), 'numpy.array', 'np.array', (['[4, 10, 18]', 'long'], {}), '([4, 10, 18], long)\n', (1600, 1619), True, 'import numpy as np\n'), ((1889, 1924), 'numpy.array', 'np.array', (['[4 * 7, 80, 18 * 9]', 'long'], {}), '([4 * 7, 80, 18 * 9], long)\n', (1897, 1924), True, 'import numpy as np\n'), ((2930, 2941), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (2938, 2941), False, 'import unittest, math\n'), ((2943, 2954), 'math.log', 'math.log', (['(4)'], {}), '(4)\n', (2951, 2954), False, 'import unittest, math\n'), ((2956, 2967), 'math.log', 'math.log', (['(7)'], {}), '(7)\n', (2964, 2967), False, 'import unittest, math\n'), ((2994, 3005), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (3002, 3005), False, 'import unittest, math\n'), ((3007, 3018), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (3015, 3018), False, 'import unittest, math\n'), ((3020, 3031), 'math.log', 'math.log', (['(8)'], {}), '(8)\n', (3028, 3031), False, 'import unittest, math\n'), ((3058, 3069), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (3066, 3069), False, 'import unittest, math\n'), ((3071, 3082), 'math.log', 'math.log', (['(6)'], {}), '(6)\n', (3079, 3082), False, 'import unittest, math\n'), ((3084, 3095), 'math.log', 'math.log', (['(9)'], {}), '(9)\n', (3092, 3095), False, 'import unittest, math\n'), ((3379, 3390), 'math.log', 'math.log', (['(4)'], {}), '(4)\n', (3387, 3390), 
False, 'import unittest, math\n'), ((3417, 3428), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (3425, 3428), False, 'import unittest, math\n'), ((3455, 3466), 'math.log', 'math.log', (['(6)'], {}), '(6)\n', (3463, 3466), False, 'import unittest, math\n'), ((3762, 3774), 'math.sqrt', 'math.sqrt', (['(1)'], {}), '(1)\n', (3771, 3774), False, 'import unittest, math\n'), ((3776, 3788), 'math.sqrt', 'math.sqrt', (['(4)'], {}), '(4)\n', (3785, 3788), False, 'import unittest, math\n'), ((3790, 3802), 'math.sqrt', 'math.sqrt', (['(7)'], {}), '(7)\n', (3799, 3802), False, 'import unittest, math\n'), ((3829, 3841), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (3838, 3841), False, 'import unittest, math\n'), ((3843, 3855), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (3852, 3855), False, 'import unittest, math\n'), ((3857, 3869), 'math.sqrt', 'math.sqrt', (['(8)'], {}), '(8)\n', (3866, 3869), False, 'import unittest, math\n'), ((3896, 3908), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (3905, 3908), False, 'import unittest, math\n'), ((3910, 3922), 'math.sqrt', 'math.sqrt', (['(6)'], {}), '(6)\n', (3919, 3922), False, 'import unittest, math\n'), ((3924, 3936), 'math.sqrt', 'math.sqrt', (['(9)'], {}), '(9)\n', (3933, 3936), False, 'import unittest, math\n'), ((4223, 4235), 'math.sqrt', 'math.sqrt', (['(4)'], {}), '(4)\n', (4232, 4235), False, 'import unittest, math\n'), ((4262, 4274), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4271, 4274), False, 'import unittest, math\n'), ((4301, 4313), 'math.sqrt', 'math.sqrt', (['(6)'], {}), '(6)\n', (4310, 4313), False, 'import unittest, math\n'), ((9479, 9490), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (9487, 9490), False, 'import unittest, math\n'), ((9543, 9554), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (9551, 9554), False, 'import unittest, math\n'), ((9607, 9618), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (9615, 9618), False, 'import unittest, math\n'), ((10042, 10053), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (10050, 10053), False, 'import unittest, math\n'), ((10106, 10117), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10114, 10117), False, 'import unittest, math\n'), ((10170, 10181), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (10178, 10181), False, 'import unittest, math\n'), ((10611, 10622), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (10619, 10622), False, 'import unittest, math\n'), ((10675, 10686), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10683, 10686), False, 'import unittest, math\n'), ((10739, 10750), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (10747, 10750), False, 'import unittest, math\n')]
|
import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
"""docstring for SimpleBase"""
def __init__(self, configs, simulator):
self.configs = configs
self.sim = simulator.sim
self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
self.transform = None
self.init_state = self.get_full_state()
def execute_action(self, action_name, actuation):
# actions = "turn_right" or "turn_left" or "move_forward"
# returns a bool showing if collided or not
return self._act(action_name, actuation)
def get_full_state(self):
# Returns habitat_sim.agent.AgentState
return self.agent.get_state()
def _rot_matrix(self, habitat_quat):
quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
return prutil.quat_to_rot_mat(quat_list)
def get_state(self, state_type="odom"):
# Returns (x, y, yaw)
assert state_type == "odom", "Error: Only Odom state is available"
cur_state = self.get_full_state()
init_rotation = self._rot_matrix(self.init_state.rotation)
# true position here refers to the relative position from
# where `self.init_state` is treated as origin
true_position = cur_state.position - self.init_state.position
true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
cur_rotation = self._rot_matrix(cur_state.rotation)
cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
(r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
        # Habitat has y perpendicular to the map whereas ROS has z perpendicular
        # to the map, while x is the same.
# Here ROS_X = -1 * habitat_z and ROS_Y = -1*habitat_x
return (-1 * true_position[2], -1 * true_position[0], yaw)
def stop(self):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def set_vel(self, fwd_speed, turn_speed, exe_time=1):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def go_to_relative(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
        Moves the robot to the given
        goal state relative to its initial pose.
:param xyt_position: The relative goal state of the form (x,y,t)
        :param use_map: When set to "True", ensures that the controller is
                        using only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller is
                           operating in closed loop by
                           taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
abs_yaw = cur_yaw + xyt_position[2]
return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)
def go_to_absolute(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
        Moves the robot to the given goal state in the world frame.
:param xyt_position: The goal state of the form (x,y,t)
in the world (map) frame.
        :param use_map: When set to "True", ensures that the controller is using
                        only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller is
                           operating in closed loop by
                           taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_X = xyt_position[0] - cur_x
rel_Y = xyt_position[1] - cur_y
abs_yaw = xyt_position[2]
# convert rel_X & rel_Y from global frame to current frame
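        # (Added note) R is the 2-D rotation by -cur_yaw, so the matmul below
        # re-expresses the world-frame displacement (rel_X, rel_Y) in the
        # robot's current frame before issuing the relative move.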
R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
[-np.sin(cur_yaw), np.cos(cur_yaw)]])
rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)
def _act(self, action_name, actuation):
"""Take the action specified by action_id
:param action_id: ID of the action. Retreives the action from
`agent_config.action_space <AgentConfiguration.action_space>`
:return: Whether or not the action taken resulted in a collision
"""
did_collide = False
act_spec = ActuationSpec(actuation)
did_collide = self.agent.controls.action(
self.agent.scene_node, action_name, act_spec, apply_filter=True
)
return did_collide
def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
# clip relative movements beyond 10 micrometer precision
# this is done to improve determinism, as habitat-sim doesn't
        # seem to precisely move the robot beyond sub-millimeter precision anyway
if abs(rel_x) < 1e-5:
rel_x = 0
if abs(rel_y) < 1e-5:
rel_y = 0
if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
# rotate to point to (x, y) point
action_name = "turn_left"
if rel_y < 0.0:
action_name = "turn_right"
v1 = np.asarray([1, 0], dtype=np.float64)
v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cosine_angle)
did_collide = self._act(action_name, math.degrees(angle))
if did_collide:
print("Error: Collision accured while 1st rotating!")
return False
# move to (x,y) point
did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
if did_collide:
print("Error: Collision accured while moving straight!")
return False
# rotate to match the final yaw!
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_yaw = abs_yaw - cur_yaw
# clip to micro-degree precision to preserve determinism
if abs(rel_yaw) < 1e-4:
rel_yaw = 0
action_name = "turn_left"
if rel_yaw < 0.0:
action_name = "turn_right"
rel_yaw *= -1
did_collide = self._act(action_name, math.degrees(rel_yaw))
if did_collide:
print("Error: Collision accured while rotating!")
return False
return True
def track_trajectory(self, states, controls, close_loop):
"""
State trajectory that the robot should track.
:param states: sequence of (x,y,t) states that the robot should track.
:param controls: optionally specify control sequence as well.
:param close_loop: whether to close loop on the
computed control sequence or not.
:type states: list
:type controls: list
:type close_loop: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
raise NotImplementedError
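# Illustrative usage sketch (added note; not part of the original module;
# `configs` and `simulator` are assumed to come from the usual PyRobot/Habitat
# setup code):
#
#   bot = LoCoBotBase(configs, simulator)
#   bot.go_to_relative((1.0, 0.0, 0.0))     # move ~1 m forward in the robot frame
#   x, y, yaw = bot.get_state("odom")       # pose relative to the initial state
#   collided = bot.execute_action("turn_left", 30.0)   # actuation in degrees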
|
[
"numpy.arccos",
"tf.transformations.euler_from_matrix",
"math.sqrt",
"numpy.asarray",
"habitat_sim.agent.controls.ActuationSpec",
"math.degrees",
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"pyrobot.utils.util.quat_to_rot_mat"
] |
[((1145, 1178), 'pyrobot.utils.util.quat_to_rot_mat', 'prutil.quat_to_rot_mat', (['quat_list'], {}), '(quat_list)\n', (1167, 1178), True, 'import pyrobot.utils.util as prutil\n'), ((1905, 1949), 'tf.transformations.euler_from_matrix', 'euler_from_matrix', (['cur_rotation'], {'axes': '"""sxzy"""'}), "(cur_rotation, axes='sxzy')\n", (1922, 1949), False, 'from tf.transformations import euler_from_quaternion, euler_from_matrix\n'), ((6431, 6455), 'habitat_sim.agent.controls.ActuationSpec', 'ActuationSpec', (['actuation'], {}), '(actuation)\n', (6444, 6455), False, 'from habitat_sim.agent.controls import ActuationSpec\n'), ((7013, 7047), 'math.sqrt', 'math.sqrt', (['(rel_x ** 2 + rel_y ** 2)'], {}), '(rel_x ** 2 + rel_y ** 2)\n', (7022, 7047), False, 'import math\n'), ((7228, 7264), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {'dtype': 'np.float64'}), '([1, 0], dtype=np.float64)\n', (7238, 7264), True, 'import numpy as np\n'), ((7282, 7326), 'numpy.asarray', 'np.asarray', (['[rel_x, rel_y]'], {'dtype': 'np.float64'}), '([rel_x, rel_y], dtype=np.float64)\n', (7292, 7326), True, 'import numpy as np\n'), ((7433, 7456), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (7442, 7456), True, 'import numpy as np\n'), ((8331, 8352), 'math.degrees', 'math.degrees', (['rel_yaw'], {}), '(rel_yaw)\n', (8343, 8352), False, 'import math\n'), ((7354, 7368), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (7360, 7368), True, 'import numpy as np\n'), ((7507, 7526), 'math.degrees', 'math.degrees', (['angle'], {}), '(angle)\n', (7519, 7526), False, 'import math\n'), ((7743, 7777), 'math.sqrt', 'math.sqrt', (['(rel_x ** 2 + rel_y ** 2)'], {}), '(rel_x ** 2 + rel_y ** 2)\n', (7752, 7777), False, 'import math\n'), ((5842, 5857), 'numpy.cos', 'np.cos', (['cur_yaw'], {}), '(cur_yaw)\n', (5848, 5857), True, 'import numpy as np\n'), ((5859, 5874), 'numpy.sin', 'np.sin', (['cur_yaw'], {}), '(cur_yaw)\n', (5865, 5874), True, 'import numpy as np\n'), ((5918, 5933), 'numpy.cos', 'np.cos', (['cur_yaw'], {}), '(cur_yaw)\n', (5924, 5933), True, 'import numpy as np\n'), ((5973, 5997), 'numpy.array', 'np.array', (['[rel_X, rel_Y]'], {}), '([rel_X, rel_Y])\n', (5981, 5997), True, 'import numpy as np\n'), ((7372, 7390), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (7386, 7390), True, 'import numpy as np\n'), ((7393, 7411), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (7407, 7411), True, 'import numpy as np\n'), ((5901, 5916), 'numpy.sin', 'np.sin', (['cur_yaw'], {}), '(cur_yaw)\n', (5907, 5916), True, 'import numpy as np\n')]
|
from django.contrib import admin
from .models import Chat
class ChatAdmin(admin.ModelAdmin):
list_display = ("pk",)
admin.site.register(Chat, ChatAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((125, 161), 'django.contrib.admin.site.register', 'admin.site.register', (['Chat', 'ChatAdmin'], {}), '(Chat, ChatAdmin)\n', (144, 161), False, 'from django.contrib import admin\n')]
|
# -*- coding: utf-8 -*-
from pythainlp.tokenize import etcc
print(etcc.etcc("คืนความสุข")) # /คืน/ความสุข
|
[
"pythainlp.tokenize.etcc.etcc"
] |
[((68, 91), 'pythainlp.tokenize.etcc.etcc', 'etcc.etcc', (['"""คืนความสุข"""'], {}), "('คืนความสุข')\n", (77, 91), False, 'from pythainlp.tokenize import etcc\n')]
|
"""
PyColourChooser
Copyright (C) 2002 <NAME> <<EMAIL>>
This file is part of PyColourChooser.
This version of PyColourChooser is open source; you can redistribute it
and/or modify it under the licensed terms.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
# 12/14/2003 - <NAME> (<EMAIL>)
#
# o 2.5 compatibility update.
#
# 12/21/2003 - <NAME> (<EMAIL>)
#
# o wxPyColorChooser -> PyColorChooser
# o wxPyColourChooser -> PyColourChooser
#
# Tags: phoenix-port
import wx
class BitmapBuffer(wx.MemoryDC):
"""A screen buffer class.
This class implements a screen output buffer. Data is meant to
be drawn in the buffer class and then blitted directly to the
output device, or on-screen window.
"""
def __init__(self, width, height, colour):
"""Initialize the empty buffer object."""
wx.MemoryDC.__init__(self)
self.width = width
self.height = height
self.colour = colour
self.bitmap = wx.Bitmap(self.width, self.height)
self.SelectObject(self.bitmap)
# Initialize the buffer to the background colour
self.SetBackground(wx.Brush(self.colour, wx.BRUSHSTYLE_SOLID))
self.Clear()
# Make each logical unit of the buffer equal to 1 pixel
self.SetMapMode(wx.MM_TEXT)
def GetBitmap(self):
"""Returns the internal bitmap for direct drawing."""
return self.bitmap
# GetPixel seems to always return (-1, -1, -1, 255)
# on OS X so this is a workaround for that issue.
def GetPixelColour(self, x, y):
"""Gets the color value of the pixel at the given
        coords.
"""
img = self.GetAsBitmap().ConvertToImage()
red = img.GetRed(x, y)
green = img.GetGreen(x, y)
blue = img.GetBlue(x, y)
return wx.Colour(red, green, blue)
class Canvas(wx.Window):
"""A canvas class for arbitrary drawing.
The Canvas class implements a window that allows for drawing
arbitrary graphics. It implements a double buffer scheme and
blits the off-screen buffer to the window during paint calls
by the windowing system for speed.
Some other methods for determining the canvas colour and size
are also provided.
"""
def __init__(self, parent, id,
pos=wx.DefaultPosition,
style=wx.SIMPLE_BORDER,
forceClientSize=None):
"""Creates a canvas instance and initializes the off-screen
buffer. Also sets the handler for rendering the canvas
automatically via size and paint calls from the windowing
system."""
wx.Window.__init__(self, parent, id, pos, style=style)
if forceClientSize:
self.SetMaxClientSize(forceClientSize)
self.SetMinClientSize(forceClientSize)
        # Perform an initial sizing
self.ReDraw()
# Register event handlers
self.Bind(wx.EVT_SIZE, self.onSize)
self.Bind(wx.EVT_PAINT, self.onPaint)
def MakeNewBuffer(self):
size = self.GetClientSize()
self.buffer = BitmapBuffer(size[0], size[1],
self.GetBackgroundColour())
def onSize(self, event):
"""Perform actual redraw to off-screen buffer only when the
size of the canvas has changed. This saves a lot of computation
since the same image can be re-used, provided the canvas size
hasn't changed."""
self.MakeNewBuffer()
self.DrawBuffer()
self.Refresh()
def ReDraw(self):
"""Explicitly tells the canvas to redraw it's contents."""
self.onSize(None)
def Refresh(self):
"""Re-draws the buffer contents on-screen."""
dc = wx.ClientDC(self)
self.Blit(dc)
def onPaint(self, event):
"""Renders the off-screen buffer on-screen."""
dc = wx.PaintDC(self)
self.Blit(dc)
def Blit(self, dc):
"""Performs the blit of the buffer contents on-screen."""
width, height = self.buffer.GetSize()
dc.Blit(0, 0, width, height, self.buffer, 0, 0)
def GetBoundingRect(self):
"""Returns a tuple that contains the co-ordinates of the
top-left and bottom-right corners of the canvas."""
x, y = self.GetPosition()
w, h = self.GetSize()
return(x, y + h, x + w, y)
def DrawBuffer(self):
"""Actual drawing function for drawing into the off-screen
        buffer. To be overridden in the implementing class. Do nothing
by default."""
pass
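# Illustrative usage sketch (added note; not part of the original module): a
# concrete canvas overrides DrawBuffer() and draws into self.buffer (the
# off-screen BitmapBuffer); the base class takes care of blitting it to the
# screen on paint and size events, e.g.
#
#   class CircleCanvas(Canvas):
#       def DrawBuffer(self):
#           self.buffer.SetPen(wx.Pen('BLACK', 2))
#           self.buffer.DrawCircle(25, 25, 20)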
|
[
"wx.MemoryDC.__init__",
"wx.Colour",
"wx.PaintDC",
"wx.Brush",
"wx.Bitmap",
"wx.Window.__init__",
"wx.ClientDC"
] |
[((967, 993), 'wx.MemoryDC.__init__', 'wx.MemoryDC.__init__', (['self'], {}), '(self)\n', (987, 993), False, 'import wx\n'), ((1103, 1137), 'wx.Bitmap', 'wx.Bitmap', (['self.width', 'self.height'], {}), '(self.width, self.height)\n', (1112, 1137), False, 'import wx\n'), ((1940, 1967), 'wx.Colour', 'wx.Colour', (['red', 'green', 'blue'], {}), '(red, green, blue)\n', (1949, 1967), False, 'import wx\n'), ((2753, 2807), 'wx.Window.__init__', 'wx.Window.__init__', (['self', 'parent', 'id', 'pos'], {'style': 'style'}), '(self, parent, id, pos, style=style)\n', (2771, 2807), False, 'import wx\n'), ((3855, 3872), 'wx.ClientDC', 'wx.ClientDC', (['self'], {}), '(self)\n', (3866, 3872), False, 'import wx\n'), ((3994, 4010), 'wx.PaintDC', 'wx.PaintDC', (['self'], {}), '(self)\n', (4004, 4010), False, 'import wx\n'), ((1262, 1304), 'wx.Brush', 'wx.Brush', (['self.colour', 'wx.BRUSHSTYLE_SOLID'], {}), '(self.colour, wx.BRUSHSTYLE_SOLID)\n', (1270, 1304), False, 'import wx\n')]
|
#!/usr/bin/env python
"""
generate-all-graphs.py
python generate-all-graphs.py | gzip -c > all-graphs.gz
"""
import sys
import json
import itertools
import numpy as np
from tqdm import tqdm
from nasbench.lib import graph_util
from joblib import delayed, Parallel
max_vertices = 7
num_ops = 3
max_edges = 9
def make_graphs(vertices, bits):
matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits), (vertices, vertices), dtype=np.int8)
if graph_util.num_edges(matrix) > max_edges:
return []
if not graph_util.is_full_dag(matrix):
return []
out = []
for labeling in itertools.product(*[range(num_ops) for _ in range(vertices-2)]):
labeling = [-1] + list(labeling) + [-2]
out.append({
"hash" : graph_util.hash_module(matrix, labeling),
"adj" : matrix.tolist(),
"labeling" : labeling,
})
return out
adjs = []
for vertices in range(2, max_vertices+1):
for bits in range(2 ** (vertices * (vertices-1) // 2)):
adjs.append((vertices, bits))
adjs = [adjs[i] for i in np.random.permutation(len(adjs))]
jobs = [delayed(make_graphs)(*adj) for adj in adjs]
res = Parallel(n_jobs=40, backend='multiprocessing', verbose=10)(jobs)
for r in res:
for rr in r:
print(json.dumps(rr))
|
[
"nasbench.lib.graph_util.is_full_dag",
"nasbench.lib.graph_util.num_edges",
"nasbench.lib.graph_util.gen_is_edge_fn",
"json.dumps",
"joblib.Parallel",
"nasbench.lib.graph_util.hash_module",
"joblib.delayed"
] |
[((1197, 1255), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(40)', 'backend': '"""multiprocessing"""', 'verbose': '(10)'}), "(n_jobs=40, backend='multiprocessing', verbose=10)\n", (1205, 1255), False, 'from joblib import delayed, Parallel\n'), ((387, 418), 'nasbench.lib.graph_util.gen_is_edge_fn', 'graph_util.gen_is_edge_fn', (['bits'], {}), '(bits)\n', (412, 418), False, 'from nasbench.lib import graph_util\n'), ((469, 497), 'nasbench.lib.graph_util.num_edges', 'graph_util.num_edges', (['matrix'], {}), '(matrix)\n', (489, 497), False, 'from nasbench.lib import graph_util\n'), ((545, 575), 'nasbench.lib.graph_util.is_full_dag', 'graph_util.is_full_dag', (['matrix'], {}), '(matrix)\n', (567, 575), False, 'from nasbench.lib import graph_util\n'), ((1146, 1166), 'joblib.delayed', 'delayed', (['make_graphs'], {}), '(make_graphs)\n', (1153, 1166), False, 'from joblib import delayed, Parallel\n'), ((1302, 1316), 'json.dumps', 'json.dumps', (['rr'], {}), '(rr)\n', (1312, 1316), False, 'import json\n'), ((791, 831), 'nasbench.lib.graph_util.hash_module', 'graph_util.hash_module', (['matrix', 'labeling'], {}), '(matrix, labeling)\n', (813, 831), False, 'from nasbench.lib import graph_util\n')]
|
from bs4 import BeautifulSoup as soup
from selenium import webdriver
class Scrapper:
def getArticles(self, cryptoName):
url = 'https://coinmarketcap.com/currencies/' + cryptoName + '/news/'
driver = webdriver.Firefox()
driver.get(url)
page = driver.page_source
page_soup = soup(page, 'html.parser')
headers = page_soup.findAll("h3", {"class": "sc-1q9q90x-0", "class": "gEZmSc"})
paragraphs = page_soup.findAll("p", {"class": "sc-1eb5slv-0", "class": "svowul-3", "class": "ddtKCV"})
print('Latest news about', cryptoName.capitalize(), end=':')
print()
for i in range(0, min(len(headers), len(paragraphs))):
print('Article', (i + 1), end=':')
print()
print(headers[i].text.strip(), '\n', 'More:', paragraphs[i].text.strip(), '\n')
|
[
"bs4.BeautifulSoup",
"selenium.webdriver.Firefox"
] |
[((221, 240), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (238, 240), False, 'from selenium import webdriver\n'), ((320, 345), 'bs4.BeautifulSoup', 'soup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (324, 345), True, 'from bs4 import BeautifulSoup as soup\n')]
|
#!/usr/bin/python3
"""qsubm -- generic queue submission for task-oriented batch scripts
Environment variables:
MCSCRIPT_DIR should specify the directory in which the mcscript package is
installed, i.e., the directory where the file qsubm.py is found. (Note that
qsubm uses this information to locate certain auxiliary script files used as
part of the job submission process.)
MCSCRIPT_RUN_HOME must specify the directory in which job files are found.
MCSCRIPT_WORK_HOME should specify the parent directory in which run scratch
directories should be made.
MCSCRIPT_INSTALL_HOME must specify the directory in which executables are found.
MCSCRIPT_LAUNCH_HOME (optional) should specify the parent directory in which
run subdirectories for qsub invocation and output logging should be made.
Otherwise, this will default to MCSCRIPT_WORK_HOME.
MCSCRIPT_PYTHON should give the full qualified filename (i.e., including
path) to the Python 3 executable for running run script files. A typical
value will simply be "python3", assuming the Python 3 executable is in the
shell's command search PATH. However, see note on "Availability of Python"
in INSTALL.md.
MCSCRIPT_RUN_PREFIX should specify the prefix for run names, e.g., set to
"run" if your scripts are to be named run<XXXX>.py.
Requires local definitions file config.py to translate options into
arguments for local batch server. See directions in readme.txt. Your local
definitions might not make use of or support all the parallel environment
options.
Language: Python 3
<NAME>
University of Notre Dame
+ 3/6/13 (mac): Based on earlier qsubm csh script.
+ 7/4/13 (mac): Support for multiple cluster flavors via qsubm_local.
+ 1/22/14 (mac): Python 3 update.
+ 10/27/14 (mac): Updates to --archive handling.
+ 5/14/15 (mac):
- Insert "future" statements for Python 2 legacy support.
- Add --noredirect switch.
- Mandatory environment variable QSUBM_PYTHON.
+ 8/4/15 (mac): Make user environment variable definitions into option.
+ 6/13/16 (mac): Rename environment variables to MCSCRIPT_*.
+ 6/22/16 (mac): Update to use config.py for local configuration.
+ 12/14/16 (mac): Add --here option.
+ 12/29/16 (mac):
- Add --spread option.
- Remove --pernode option.
- Make --opt option repeatable.
+ 1/16/17 (mac): Add --serialthreads option.
+ 2/23/17 (mac): Switch from os.mkdir to mcscript.utils.mkdir.
+ 3/16/17 (mac):
- Add --setup option.
- Change environment interface to pass MCSCRIPT_TASK_MODE.
+ 3/18/17 (mac):
- Revise to support updated hybrid run parameters.
- Rename option --setup to --prerun.
+ 5/22/17 (mac): Fix processing of boolean option --redirect.
+ 10/11/17 (pjf): Add --switchwaittime option.
+ 01/05/18 (pjf): Sort arguments into groups.
+ 02/11/18 (pjf):
- Pass through MCSCRIPT_INSTALL_HOME.
- Use job_environ for submission.
+ 07/06/18 (pjf):
- Pass queue via MCSCRIPT_RUN_QUEUE.
- Remove MCSCRIPT_HYBRID_NODESIZE.
+ 06/04/19 (pjf):
- Add hook for individual configurations to add command-line arguments.
- Move --switchwaittime option into config-slurm-nersc.py.
+ 09/11/19 (pjf): Add expert mode argument.
"""
import argparse
import os
import shutil
import subprocess
import sys
import mcscript.config # local configuration (usually symlink)
import mcscript.utils
################################################################
# argument parsing
################################################################
parser = argparse.ArgumentParser(
description="Queue submission for numbered run.",
usage=
"%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog=
"""Simply omit the queue name and leave off the wall time for a
local interactive run.
Environment variables for qsubm are described in INSTALL.md.
Note that qsubm relies upon code in the local `config.py`
configuration file for the system or cluster you are running on, in
order to interpret the following arguments and translate them into
arguments for your local batch system. Your local configuration
file might not make use of or support all the parallel environment
options listed below.
"""
)
# general arguments
parser.add_argument("run", help="Run number (e.g., 0000 for run0000)")
# latter arguments are made optional to simplify bare-bones syntax for --toc, etc., calls
parser.add_argument("queue", nargs='?', help="Submission queue, or RUN for direct interactive run", default="RUN")
parser.add_argument("wall", type=int, nargs='?', help="Wall time (minutes)", default=60)
##parser.add_argument("vars", nargs="?", help="Environment variables to pass to script, with optional values, comma delimited (e.g., METHOD2, PARAM=1.0)")
parser.add_argument("--here", action="store_true", help="Force run in current working directory")
parser.add_argument("--vars", help="Environment variables to pass to script, with optional values, comma delimited (e.g., --vars=METHOD2, PARAM=1.0)")
## parser.add_argument("--stat", action="store_true", help="Display queue status information")
parser.add_argument("--num", type=int, default=1, help="Number of repetitions")
parser.add_argument("--opt", action="append", help="Additional option arguments to be passed to job submission command (e.g., --opt=\"-m ae\" or --opt=\"--mail-type=END,FAIL\"), may be repeated (e.g., --opt=\"-A acct\" --opt=\"-a 1200\"); beware the spaces may be important to the job submission command")
parser.add_argument("--expert", action="store_true", help="Run mcscript in expert mode")
# serial run parallelization parameters
serial_group = parser.add_argument_group("serial run options (single-node, non-MPI)")
serial_group.add_argument("--serialthreads", type=int, default=1, help="OMP threads")
# hybrid run parallelization parameters
#
# Not all local configuration files need necessarily require or
# respect all of the following parameters.
hybrid_group = parser.add_argument_group("hybrid run options")
hybrid_group.add_argument("--nodes", type=int, default=1, help="number of nodes")
hybrid_group.add_argument("--ranks", type=int, default=1, help="number of MPI ranks")
hybrid_group.add_argument("--threads", type=int, default=1, help="OMP threads per rank)")
hybrid_group.add_argument("--nodesize", type=int, default=0, help="logical threads available per node"
" (might instead be interpreted physical CPUs depending on local config file)")
##hybrid_group.add_argument("--undersubscription", type=int, default=1, help="undersubscription factor (e.g., spread=2 requests twice the cores needed)")
# multi-task interface: invocation modes
task_mode_group = parser.add_mutually_exclusive_group()
task_mode_group.add_argument("--toc", action="store_true", help="Invoke run script to generate task table of contents")
task_mode_group.add_argument("--unlock", action="store_true", help="Delete any .lock or .fail flags for tasks")
task_mode_group.add_argument("--archive", action="store_true", help="Invoke archive-generation run")
task_mode_group.add_argument("--prerun", action="store_true", help="Invoke prerun mode, for argument validation and file staging only")
task_mode_group.add_argument("--offline", action="store_true", help="Invoke offline mode, to create batch scripts for later submission instead of running compute codes")
# multi-task interface: task selection
task_selection_group = parser.add_argument_group("multi-task run options")
task_selection_group.add_argument("--pool", help="Set task pool (or ALL) for task selection")
task_selection_group.add_argument("--phase", type=int, default=0, help="Set task phase for task selection")
task_selection_group.add_argument("--start", type=int, help="Set starting task number for task selection")
task_selection_group.add_argument("--limit", type=int, help="Set task count limit for task selection")
task_selection_group.add_argument("--redirect", default="True", choices=["True", "False"], help="Allow redirection of standard"
" output/error to file (may want to disable for interactive debugging)")
# some special options (deprecated?)
##parser.add_argument("--epar", type=int, default=None, help="Width for embarassingly parallel job")
##parser.add_argument("--nopar", action="store_true", help="Disable parallel resource requests (for use on special serial queues)")
# site-local options
try:
mcscript.config.qsubm_arguments(parser)
except AttributeError:
# local config doesn't provide arguments, ignore gracefully
pass
##parser.print_help()
##print
args = parser.parse_args()
##printargs
################################################################
# special mode: status display
################################################################
# TODO
# will have to modify argument processing to allow no arguments, local
# customization for qstat
# @ i = 0
# while (($i == 0) || ($loop))
# @ i++
# clear
# echo "****************************************************************"
# qstat -u $user
# if ($loop) sleep 5
# end
## if (args.stat):
## pass
################################################################
# environment processing
################################################################
if (args.here):
run_home = os.environ["PWD"]
elif ("MCSCRIPT_RUN_HOME" in os.environ):
run_home = os.environ["MCSCRIPT_RUN_HOME"]
else:
print("MCSCRIPT_RUN_HOME not found in environment")
exit(1)
if (args.here):
work_home = os.environ["PWD"]
elif ("MCSCRIPT_WORK_HOME" in os.environ):
work_home = os.environ["MCSCRIPT_WORK_HOME"]
else:
print("MCSCRIPT_WORK_HOME not found in environment")
exit(1)
if (args.here):
launch_home = os.environ["PWD"]
elif ("MCSCRIPT_LAUNCH_HOME" in os.environ):
launch_home = os.environ["MCSCRIPT_LAUNCH_HOME"]
else:
launch_home = work_home
if ("MCSCRIPT_RUN_PREFIX" in os.environ):
run_prefix = os.environ["MCSCRIPT_RUN_PREFIX"]
else:
print("MCSCRIPT_RUN_PREFIX not found in environment")
exit(1)
if ("MCSCRIPT_PYTHON" in os.environ):
python_executable = os.environ["MCSCRIPT_PYTHON"]
else:
print("MCSCRIPT_PYTHON not found in environment")
exit(1)
if ("MCSCRIPT_DIR" in os.environ):
qsubm_path = os.environ["MCSCRIPT_DIR"]
else:
print("MCSCRIPT_DIR not found in environment")
exit(1)
################################################################
# argument processing
################################################################
# set run name
run = run_prefix + args.run
print("Run:", run)
# ...and process run file
script_extensions = [".py", ".csh"]
job_file = None
for extension in script_extensions:
filename = os.path.join(run_home, run+extension)
    if (os.path.exists(filename)):
job_file = filename
job_extension = extension
break
print(" Run home:", run_home) # useful to report now, in case job file missing
if (job_file is None):
print("No job file %s.* found with an extension in the set %s." % (run, script_extensions))
exit(1)
print(" Job file:", job_file)
# set queue and flag batch or local mode
# force local run for task.py toc mode
if ((args.queue == "RUN") or args.toc or args.unlock):
run_mode = "local"
run_queue = "local"
print(" Mode:", run_mode)
else:
run_mode = "batch"
run_queue = args.queue
print(" Mode:", run_mode, "(%s)" % args.queue)
# set wall time
wall_time_min = args.wall
print(" Wall time (min): {:d}".format(wall_time_min))
wall_time_sec = wall_time_min*60
# environment definitions: general run parameters
environment_definitions = [
"MCSCRIPT_RUN={:s}".format(run),
"MCSCRIPT_JOB_FILE={:s}".format(job_file),
"MCSCRIPT_RUN_MODE={:s}".format(run_mode),
"MCSCRIPT_RUN_QUEUE={:s}".format(run_queue),
"MCSCRIPT_WALL_SEC={:d}".format(wall_time_sec)
]
# environment definitions: serial run parameters
environment_definitions += [
"MCSCRIPT_SERIAL_THREADS={:d}".format(args.serialthreads)
]
# environment definitions: hybrid run parameters
environment_definitions += [
"MCSCRIPT_HYBRID_NODES={:d}".format(args.nodes),
"MCSCRIPT_HYBRID_RANKS={:d}".format(args.ranks),
"MCSCRIPT_HYBRID_THREADS={:d}".format(args.threads),
]
# set multi-task run parameters
if (args.toc):
task_mode = mcscript.task.TaskMode.kTOC
elif (args.unlock):
task_mode = mcscript.task.TaskMode.kUnlock
elif (args.archive):
task_mode = mcscript.task.TaskMode.kArchive
elif (args.prerun):
task_mode = mcscript.task.TaskMode.kPrerun
elif (args.offline):
task_mode = mcscript.task.TaskMode.kOffline
else:
task_mode = mcscript.task.TaskMode.kRun
# TODO (mac): neaten up so that these arguments are always provided
# (and simplify this code to a simple list += as above)
environment_definitions.append("MCSCRIPT_TASK_MODE={:d}".format(task_mode.value))
if (args.pool is not None):
environment_definitions.append("MCSCRIPT_TASK_POOL={:s}".format(args.pool))
if (args.phase is not None):
environment_definitions.append("MCSCRIPT_TASK_PHASE={:d}".format(args.phase))
if (args.start is not None):
environment_definitions.append("MCSCRIPT_TASK_START_INDEX={:d}".format(args.start))
if (args.limit is not None):
environment_definitions.append("MCSCRIPT_TASK_COUNT_LIMIT={:d}".format(args.limit))
environment_definitions.append("MCSCRIPT_TASK_REDIRECT={:s}".format(args.redirect))
# pass through install directory
if os.environ.get("MCSCRIPT_INSTALL_HOME"):
environment_definitions += [
"MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_HOME"])
]
elif os.environ.get("MCSCRIPT_INSTALL_DIR"):
# TODO remove deprecated environment variable
print("****************************************************************")
print("MCSCRIPT_INSTALL_DIR is now MCSCRIPT_INSTALL_HOME.")
print("Please update your environment variables.")
print("****************************************************************")
environment_definitions += [
"MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_DIR"])
]
else:
print("MCSCRIPT_INSTALL_HOME not found in environment")
exit(1)
# include additional environment setup if defined
if os.environ.get("MCSCRIPT_SOURCE"):
environment_definitions += [
"MCSCRIPT_SOURCE={:s}".format(os.environ["MCSCRIPT_SOURCE"])
]
# set user-specified variable definitions
# Note conditional is required since "".split(", ") is [""] rather than [].
if (args.vars is None):
user_environment_definitions = []
else:
user_environment_definitions = args.vars.split(",")
print(" User environment definitions:", user_environment_definitions)
environment_definitions += user_environment_definitions
################################################################
# directory setup
################################################################
# set up scratch directory (for batch job work)
# name is defined here, but creation is left up to job script,
# in case scratch is local to the compute note
work_dir = os.path.join(work_home, run)
## if ( not os.path.exists(work_dir)):
## mcscript.utils.mkdir(work_dir)
environment_definitions.append("MCSCRIPT_WORK_DIR=%s" % work_dir)
# set up run launch directory (for batch job output logging)
launch_dir_parent = os.path.join(launch_home, run)
if ( not os.path.exists(launch_home)):
mcscript.utils.mkdir(launch_home)
if ( not os.path.exists(launch_dir_parent)):
mcscript.utils.mkdir(launch_dir_parent)
if (args.archive):
# archive mode
# launch in archive directory rather than usual batch job output directory
# (important since if batch job server directs output to the
# regular output directory while tar is archiving that directory,
# tar will return with an error code, torpedoing the archive task)
launch_dir = os.path.join(launch_home, run, "archive")
else:
# standard run mode
launch_dir = os.path.join(launch_home, run, "batch")
if ( not os.path.exists(launch_dir)):
mcscript.utils.mkdir(launch_dir)
environment_definitions.append("MCSCRIPT_LAUNCH_DIR=%s" % launch_dir)
################################################################
# job environment setup
################################################################
# construct job name
job_name = "%s" % run
##job_name += "-w%d" % args.width
if (args.pool is not None):
job_name += "-%s" % args.pool
job_name += "-%s" % args.phase
print(" Job name:", job_name)
# process environment definitions
# regularize environment definitions
# Convert all plain variable name definitions "VAR" into definition
# as null string "VAR=". Note that "VAR" would be an environment
# variable pass-through request to qsub, but it causes trouble with
# defining an environment for local execution. So doing this
# regularization simplifies further processing and ensures
# uniformity of the environment between batch and local runs.
for i in range(len(environment_definitions)):
if (not "=" in environment_definitions[i]):
environment_definitions[i] += "="
print()
print("Vars:", ",".join(environment_definitions))
# for local run
job_environ=os.environ
environment_keyvalues = [
entry.split("=")
for entry in environment_definitions
]
job_environ.update(dict(environment_keyvalues))
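# Note: job_environ aliases os.environ, so the definitions above are inherited
# both by local interpreter runs and by the batch submission command below.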
################################################################
# run invocation
################################################################
# flush script output before invoking job
print()
sys.stdout.flush()
# handle batch run
if (run_mode == "batch"):
# set local qsub arguments
(submission_args, submission_input_string, repetitions) = mcscript.config.submission(job_name, job_file, qsubm_path, environment_definitions, args)
# notes: options must come before command on some platforms (e.g., Univa)
print(" ".join(submission_args))
print(submission_input_string)
print()
print("-"*64)
for i in range(repetitions):
process = subprocess.Popen(
submission_args,
stdin=subprocess.PIPE, # to take input from communicate
stdout=subprocess.PIPE, # to send output to communicate -- default merged stderr
env=job_environ,
cwd=launch_dir
)
stdout_bytes = process.communicate(input=submission_input_string)[0]
stdout_string = stdout_bytes.decode("utf-8")
print(stdout_string)
# handle interactive run
# Note: We call interpreter rather than trying to directly execute
# job file since this saves us from bothering with execute permissions.
# But, beware the interpreter enforced by the script's shebang line might
# be different from the version of the interpreter found in the below invocation,
# especially in a "module" environment.
elif (run_mode == "local"):
if (extension == ".py"):
popen_args = [python_executable, job_file]
elif (extension == ".csh"):
popen_args = ["csh", job_file]
print()
print("-"*64)
process = subprocess.Popen(popen_args, cwd=launch_dir, env=job_environ)
process.wait()
|
[
"os.path.exists",
"argparse.ArgumentParser",
"subprocess.Popen",
"os.path.join",
"os.environ.get",
"sys.stdout.flush"
] |
[((3734, 4494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Queue submission for numbered run."""', 'usage': '"""%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'epilog': '"""Simply omit the queue name and leave off the wall time for a\n local interactive run.\n\n Environment variables for qsubm are described in INSTALL.md.\n\n Note that qsubm relies upon code in the local `config.py`\n configuration file for the system or cluster you are running on, in\n order to interpret the following arguments and translate them into\n arguments for your local batch system. Your local configuration\n file might not make use of or support all the parallel environment\n options listed below.\n """'}), '(description=\'Queue submission for numbered run.\',\n usage="""%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n""",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter, epilog=\n """Simply omit the queue name and leave off the wall time for a\n local interactive run.\n\n Environment variables for qsubm are described in INSTALL.md.\n\n Note that qsubm relies upon code in the local `config.py`\n configuration file for the system or cluster you are running on, in\n order to interpret the following arguments and translate them into\n arguments for your local batch system. Your local configuration\n file might not make use of or support all the parallel environment\n options listed below.\n """\n )\n', (3757, 4494), False, 'import argparse\n'), ((13722, 13761), 'os.environ.get', 'os.environ.get', (['"""MCSCRIPT_INSTALL_HOME"""'], {}), "('MCSCRIPT_INSTALL_HOME')\n", (13736, 13761), False, 'import os\n'), ((14504, 14537), 'os.environ.get', 'os.environ.get', (['"""MCSCRIPT_SOURCE"""'], {}), "('MCSCRIPT_SOURCE')\n", (14518, 14537), False, 'import os\n'), ((15346, 15374), 'os.path.join', 'os.path.join', (['work_home', 'run'], {}), '(work_home, run)\n', (15358, 15374), False, 'import os\n'), ((15600, 15630), 'os.path.join', 'os.path.join', (['launch_home', 'run'], {}), '(launch_home, run)\n', (15612, 15630), False, 'import os\n'), ((17813, 17831), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17829, 17831), False, 'import sys\n'), ((10998, 11037), 'os.path.join', 'os.path.join', (['run_home', '(run + extension)'], {}), '(run_home, run + extension)\n', (11010, 11037), False, 'import os\n'), ((13888, 13926), 'os.environ.get', 'os.environ.get', (['"""MCSCRIPT_INSTALL_DIR"""'], {}), "('MCSCRIPT_INSTALL_DIR')\n", (13902, 13926), False, 'import os\n'), ((15640, 15667), 'os.path.exists', 'os.path.exists', (['launch_home'], {}), '(launch_home)\n', (15654, 15667), False, 'import os\n'), ((15717, 15750), 'os.path.exists', 'os.path.exists', (['launch_dir_parent'], {}), '(launch_dir_parent)\n', (15731, 15750), False, 'import os\n'), ((16137, 16178), 'os.path.join', 'os.path.join', (['launch_home', 'run', '"""archive"""'], {}), "(launch_home, run, 'archive')\n", (16149, 16178), False, 'import os\n'), ((16226, 16265), 'os.path.join', 'os.path.join', (['launch_home', 'run', '"""batch"""'], {}), "(launch_home, run, 'batch')\n", (16238, 16265), False, 'import os\n'), ((16275, 16301), 'os.path.exists', 'os.path.exists', (['launch_dir'], {}), '(launch_dir)\n', (16289, 16301), False, 'import os\n'), ((18294, 18412), 'subprocess.Popen', 'subprocess.Popen', (['submission_args'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'env': 'job_environ', 'cwd': 'launch_dir'}), '(submission_args, 
stdin=subprocess.PIPE, stdout=subprocess.\n PIPE, env=job_environ, cwd=launch_dir)\n', (18310, 18412), False, 'import subprocess\n'), ((19322, 19383), 'subprocess.Popen', 'subprocess.Popen', (['popen_args'], {'cwd': 'launch_dir', 'env': 'job_environ'}), '(popen_args, cwd=launch_dir, env=job_environ)\n', (19338, 19383), False, 'import subprocess\n')]
|
from scripts.downloader import *
import fiona
from shapely.geometry import shape
import geopandas as gpd
import matplotlib.pyplot as plt
from pprint import pprint
import requests
import json
import time
import os
# Constant variables
input_min_lat = 50.751797561
input_min_lon = 5.726110232
input_max_lat = 50.938216069
input_max_lon = 6.121604582
route_search_url = "https://api.routeyou.com/2.0/json/Route/k-9aec2fc1705896b901c3ea17d6223f0a/mapSearch"
route_search_headers = {"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
"Connection": "keep-alive",
"Content-Length": "331",
"Content-Type": "text/plain;charset=UTF-8",
"DNT": "1",
"Host": "api.routeyou.com",
"Origin": "https://www.routeyou.com",
"Referer": "https://www.routeyou.com/route/search/2/walking-route-search",
"TE": "Trailers",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"}
default_headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
"Connection": "test",
"Cookie": "rtysid=5gf59rik6gf8o7b5an7nalcsh0; "
"_ga=GA1.2.1811204879.1553438381; _"
"gid=GA1.2.1815573989.1553438381; __"
"gads=ID=fab95f7aaf65227e:T=1553438384:S=ALNI_MaIjkdo1dKpYiyQKfWZEymqT7HgUQ",
"Host": "download.routeyou.com",
"Referer": "https://www.routeyou.com/nl-be/route/view/5653357/wandelroute/"
"in-het-spoor-van-napoleon-kasteel-reinhardstein-en-de-stuwdam-van-robertville",
"TE": "Trailers",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"}
# # Setup script
# bounding_boxes_list = create_bounding_boxes(input_min_lat, input_min_lon, input_max_lat, input_max_lon,
# nr_of_rows=12, nr_of_columns=12)
# for index, bounding_box in enumerate(bounding_boxes_list):
# route_search_data = '{"jsonrpc":"2.0","id":"3","method":"searchAdvanced","params":' \
# '[{"bounds":{"min":{"lat":%s,"lon":%s},"max":{"lat":%s,"lon":%s}},' \
# '"type.id":2,"score.min":0.5,"bounds.comparator":"geometry"},null,100,0,' \
# '{"clusters":false,"addLanguage":"en","media":false,"description":false}]}' \
# % (bounding_box['min_lat'], bounding_box['min_lon'], bounding_box['max_lat'], bounding_box['max_lon'])
# response = requests.post(url=route_search_url, headers=route_search_headers,
# data=route_search_data)
# with open("D:/Wandelroutes/Text/routes_{}.txt".format(index), "wb") as file:
# file.write(response.content)
# data = json.loads(response.content)
# print("Index / routes count / total routes: ", index, "/", len(data['result']['routes']), "/", data['result']['total'])
#
# for route in data['result']['routes']:
# time.sleep(0.5)
# route_url = "https://download.routeyou.com/k-9aec2fc1705896b901c3ea17d6223f0a/route/{}.gpx?language=nl".format(route['id'])
# filepath = "D:/Wandelroutes/GPX/{}.gpx".format(route['id'])
# download_to_file(route_url, default_headers, filepath)
dir_filepath = "D:/Wandelroutes/GPX"
filenames = os.listdir(dir_filepath)
rows_list = []
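# Read each downloaded GPX file as a fiona 'tracks' layer, convert the track
# geometry to a Shapely MultiLineString and collect one record per route so
# GeoPandas can write a single shapefile of all walking routes.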
for filename in filenames:
layer = fiona.open(os.path.join(dir_filepath, filename), layer='tracks')
geom = layer[0]
route_name = geom['properties']['name']
route_geodata = {'type': 'MultiLineString',
'coordinates': geom['geometry']['coordinates']}
route_geometry = shape(route_geodata)
route_id = os.path.splitext(os.path.basename(filename))[0]
route_dict = {'id': str(route_id),
'name': route_name,
'url': "https://www.routeyou.com/nl-nl/route/view/" + str(route_id),
'geometry': route_geometry}
rows_list.append(route_dict)
routes_gdf = gpd.GeoDataFrame(rows_list)
routes_gdf.crs = {'init': 'epsg:4326', 'no_defs': True}
routes_gdf.to_file("D:/Wandelroutes/walking_routes.shp")
|
[
"os.listdir",
"os.path.join",
"os.path.basename",
"shapely.geometry.shape",
"geopandas.GeoDataFrame"
] |
[((3860, 3884), 'os.listdir', 'os.listdir', (['dir_filepath'], {}), '(dir_filepath)\n', (3870, 3884), False, 'import os\n'), ((4547, 4574), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['rows_list'], {}), '(rows_list)\n', (4563, 4574), True, 'import geopandas as gpd\n'), ((4206, 4226), 'shapely.geometry.shape', 'shape', (['route_geodata'], {}), '(route_geodata)\n', (4211, 4226), False, 'from shapely.geometry import shape\n'), ((3950, 3986), 'os.path.join', 'os.path.join', (['dir_filepath', 'filename'], {}), '(dir_filepath, filename)\n', (3962, 3986), False, 'import os\n'), ((4259, 4285), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4275, 4285), False, 'import os\n')]
|
import discord.ext.commands as dec
import database.song
from commands.common import *
class Song:
"""Song insertion, querying and manipulation"""
def __init__(self, bot):
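        # Keep a reference to the bot for messaging and open the asynchronous
        # song database interface on the bot's event loop.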
self._bot = bot
self._db = database.song.SongInterface(bot.loop)
_help_messages = {
'group': 'Song information, querying and manipulation',
'blacklist': '* Puts the specified song to the blacklist\n\n'
'Song ID can be located in the square brackets just before the title. It is included in the status message '
'and all the listings.\nThis does not prevent users from including blacklisted song in their playlist, song '
'is skipped just before playing.',
'deduplicate': '* Marks a song as a duplicate of another song\n\n'
'This is a destructive operation. The duplicate is replaced by the "original" just before playing. All tests '
'(blacklist, length, overplay) are performed on the "original" song.\nThis function is useful for replacing '
'songs with a bad quality and is necessary for overplay protection to work correctly.\nSong IDs can be located '
'in the square brackets just before the title. It is included in the status message and all the listings. You '
'can also use \'search\' command to obtain the IDs.',
'failed_clear': '* Removes the songs from the failed list\n\n'
'Songs marked as duplicates are not affected. Individual songs can be removed by specifying their ID. You can '
'use the command to fix the automatic playlist after a service outage or bot connection problems.',
'failed_list': 'Lists all the songs that have failed to download\n\n'
'Up to 20 songs are returned. Songs marked as a duplicate are considered resolved and are excluded from the '
'list. Songs are automatically removed from this list after a successful download, or manually by using '
'\'clear\' subcommand.\n\nSongs that are marked as failed to download are excluded from the automatic '
'playlist. Bot operators are expected to investigate download issues and provide an alternative source for '
'the songs if necessary.',
'info': 'Displays information about the song stored in the database\n\n'
'Mainly for debugging purposes, as an aid for the bot operators.',
'permit': '* Removes the specified song from the blacklist\n\n'
'Song ID can be located in the square brackets just before the title. It is included in the status message '
'and all the listings.',
'rename': '* Changes the title of a specified song\n\n'
'This command can be used to rename the song stored in the database. It does not update the status message; '
'the new name is used next time the song is played.\nSong ID can be located in the square brackets just before '
'the title. It is included in the status message and all the listings.',
'search': 'Queries the database for songs\n\n'
'Title and UURI are matched against the specified keywords. All the keywords must match either the title or '
'UURI. Up to 20 results are returned.\nThis command can be used to lookup song IDs.',
'split': '* Marks a given song as an original\n\n'
'This command can be used to fix duplication status of the song. After this command is issued, the song '
'specified won\'t be marked as a duplicate anymore.\nThis is the inverse command to the \'deduplicate\'. '
'Just like the \'deduplicate\', this command does not manipulate with timestamps nor credit counts.\nSong ID '
'can be located in the square brackets just before the song title. It is included in the status message and '
'all the listings.'
}
@dec.group(invoke_without_command=True, aliases=['s'], help=_help_messages['group'])
async def song(self, subcommand: str, *arguments: str):
raise dec.UserInputError('Command *song* has no subcommand named {}. Please use `{}help song` to list all '
'the available subcommands.'
.format(subcommand, self._bot.config['ddmbot']['delimiter']))
@privileged
@song.command(ignore_extra=False, help=_help_messages['blacklist'])
async def blacklist(self, song_id: int):
await self._db.blacklist(song_id)
await self._bot.message('Song [{}] has been blacklisted'.format(song_id))
@privileged
@song.command(ignore_extra=False, help=_help_messages['deduplicate'])
async def deduplicate(self, which_id: int, target_id: int):
await self._db.merge(which_id, target_id)
await self._bot.message('Song [{}] has been marked as a duplicate of the song [{}]'.format(which_id, target_id))
@song.group(ignore_extra=False, invoke_without_command=True)
async def failed(self):
raise dec.UserInputError('You need to provide a subcommand to the *song failed* command')
@privileged
@failed.command(name='clear', ignore_extra=False, help=_help_messages['failed_clear'])
async def failed_clear(self, song_id: int = None):
raise dec.UserInputError('You need to provide a subcommand to the *song failed* command')
@failed.command(name='list', ignore_extra=False, aliases=['l'], help=_help_messages['failed_list'])
async def failed_list(self):
items, total = await self._db.list_failed(20)
if not items:
await self._bot.whisper('There are no songs flagged because of a download failure')
return
reply = '**{} songs (out of {}) flagged because of a download failure:**\n **>** '.format(len(items), total) + \
'\n **>** '.join(['[{}] {}'.format(*item) for item in items])
await self._bot.whisper(reply)
@song.command(ignore_extra=False, aliases=['i'], help=_help_messages['info'])
async def info(self, song_id: int):
info = await self._db.get_info(song_id)
reply = '**Song [{id}] information:**\n' \
' **Source URL:** [{url}]\n' \
' **Title:** {title}\n' \
' **Last played:** {last_played!s}\n' \
' **Listener count:** {total_listener_count} ({listener_count})\n' \
' **Skip vote count:** {total_skip_vote_count} ({skip_vote_count})\n' \
' **Duration:** {duration}s\n' \
' **Credits remaining:** {credit_count}\n\n' \
' **Blacklisted:** {is_blacklisted}\n' \
' **Has failed to download:** {has_failed}\n\n' \
' **Marked as a duplicate of:** {duplicates}\n' \
' **Is duplicated by:** {duplicated_by}'.format_map(info)
await self._bot.whisper(reply)
@privileged
@song.command(ignore_extra=False, help=_help_messages['permit'])
async def permit(self, song_id: int):
await self._db.permit(song_id)
await self._bot.message('Song [{}] has been removed from blacklist'.format(song_id))
@privileged
@song.command(ignore_extra=False, help=_help_messages['rename'])
async def rename(self, song_id: int, new_title: str):
await self._db.rename(song_id, new_title)
await self._bot.message('Song [{}] has been renamed to "{}"'.format(song_id, new_title))
@song.command(ignore_extra=False, aliases=['s'], help=_help_messages['search'])
async def search(self, *keywords: str):
items, total = await self._db.search(keywords, 20)
if not items:
await self._bot.whisper('Search for songs with keywords {} has not returned any result'.format(keywords))
return
reply = '**{} songs (out of {}) matching the keywords {}:**\n **>** '.format(len(items), total, keywords) + \
'\n **>** '.join(['[{}] {}'.format(*item) for item in items])
await self._bot.whisper(reply)
@privileged
@song.command(ignore_extra=False, help=_help_messages['split'])
async def split(self, song_id: int):
await self._db.merge(song_id, song_id)
await self._bot.message('Song [{}] has been marked as unique'.format(song_id))
|
[
"discord.ext.commands.group",
"discord.ext.commands.UserInputError"
] |
[((3790, 3878), 'discord.ext.commands.group', 'dec.group', ([], {'invoke_without_command': '(True)', 'aliases': "['s']", 'help': "_help_messages['group']"}), "(invoke_without_command=True, aliases=['s'], help=_help_messages[\n 'group'])\n", (3799, 3878), True, 'import discord.ext.commands as dec\n'), ((4899, 4987), 'discord.ext.commands.UserInputError', 'dec.UserInputError', (['"""You need to provide a subcommand to the *song failed* command"""'], {}), "(\n 'You need to provide a subcommand to the *song failed* command')\n", (4917, 4987), True, 'import discord.ext.commands as dec\n'), ((5160, 5248), 'discord.ext.commands.UserInputError', 'dec.UserInputError', (['"""You need to provide a subcommand to the *song failed* command"""'], {}), "(\n 'You need to provide a subcommand to the *song failed* command')\n", (5178, 5248), True, 'import discord.ext.commands as dec\n')]
|
"""
main.py
Main driver for the Linear Error Analysis program.
Can be run using `lea.sh`.
Can choose which plots to see by toggling on/off `show_fig` param.
Author(s): <NAME>, <NAME>, <NAME>
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import config
import libs.gta_xch4 as gta_xch4
import libs.photon_noise as pn
from errors import Errors
from forward import Forward
from isrf import ISRF
from optim import Optim
if __name__ == "__main__":
cfg = config.parse_config()
forward = Forward(cfg)
surface, molec, atm, sun_lbl = forward.get_atm_params()
optics = forward.opt_properties()
(
wave_meas,
rad_tot,
rad_ch4,
rad_co2,
rad_h2o,
d_rad_ch4,
d_rad_co2,
d_rad_h2o,
rad_conv_tot,
rad_conv_ch4,
rad_conv_co2,
rad_conv_h2o,
dev_conv_ch4,
dev_conv_co2,
dev_conv_h2o,
) = forward.plot_transmittance(show_fig=False)
state_vector = forward.produce_state_vec()
isrf = ISRF(cfg)
isrf_func = isrf.define_isrf(show_fig=False)
isrf_conv = isrf.convolve_isrf(rad_tot, show_fig=False)
lea = Errors(cfg, wave_meas)
sys_errors = lea.sys_errors()
rand_errors = lea.rand_errors()
# sys_nonlinearity = lea.sys_err_vector(1)
# sys_stray_light = lea.sys_err_vector(2)
# sys_crosstalk = lea.sys_err_vector(3)
# sys_flat_field = lea.sys_err_vector(4)
# sys_bad_px = lea.sys_err_vector(5)
# sys_key_smile = lea.sys_err_vector(6)
# sys_striping = lea.sys_err_vector(7)
# sys_memory = lea.sys_err_vector(8)
ecm = lea.error_covariance()
path_root = os.path.dirname(os.path.dirname(__file__))
np.savetxt(os.path.join(path_root, "outputs", "ecm.csv"), ecm, delimiter=",")
optim = Optim(cfg, wave_meas)
jacobian = optim.jacobian(dev_conv_ch4, dev_conv_co2, dev_conv_h2o, show_fig=False)
gain = optim.gain(ecm)
modified_meas_vector = optim.modify_meas_vector(state_vector, rad_conv_tot, ecm)
spectral_res, snr = optim.state_estimate(ecm, modified_meas_vector, sys_errors)
print("Estimated Solution: " + str(spectral_res))
print("Uncertainty of Solution: " + str(snr))
# plot interpolated photon noise
# plt.plot(lea.wave_meas, lea.photon_noise_interp)
# plt.title("Interpolated Photon Noise")
# plt.xlabel("Wavelength (nm)")
# plt.ylabel("Photon Noise (UNITS?)") # TODO
# plt.show()
|
[
"errors.Errors",
"optim.Optim",
"config.parse_config",
"os.path.join",
"os.path.dirname",
"forward.Forward",
"isrf.ISRF"
] |
[((478, 499), 'config.parse_config', 'config.parse_config', ([], {}), '()\n', (497, 499), False, 'import config\n'), ((515, 527), 'forward.Forward', 'Forward', (['cfg'], {}), '(cfg)\n', (522, 527), False, 'from forward import Forward\n'), ((1040, 1049), 'isrf.ISRF', 'ISRF', (['cfg'], {}), '(cfg)\n', (1044, 1049), False, 'from isrf import ISRF\n'), ((1170, 1192), 'errors.Errors', 'Errors', (['cfg', 'wave_meas'], {}), '(cfg, wave_meas)\n', (1176, 1192), False, 'from errors import Errors\n'), ((1803, 1824), 'optim.Optim', 'Optim', (['cfg', 'wave_meas'], {}), '(cfg, wave_meas)\n', (1808, 1824), False, 'from optim import Optim\n'), ((1681, 1706), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1696, 1706), False, 'import os\n'), ((1723, 1768), 'os.path.join', 'os.path.join', (['path_root', '"""outputs"""', '"""ecm.csv"""'], {}), "(path_root, 'outputs', 'ecm.csv')\n", (1735, 1768), False, 'import os\n')]
|
import logging
import os
import re
import sublime
# external dependencies (see dependencies.json)
import jsonschema
import yaml # pyyaml
# This plugin generates a hidden syntax file containing rules for additional
# chainloading commands defined by the user. The syntax is stored in the cache
# directory to avoid the possibility of it falling under user version control in
# the usual packages directory
userSyntaxName = 'execline-user-chainload.sublime-syntax'
pkgName = 'execline'
settingsName = 'execline.sublime-settings'
mainSyntaxPath = 'Packages/{}/execline.sublime-syntax'.format(pkgName)
schemaPath = 'Packages/{}/execline.sublime-settings.schema.json'.format(pkgName)
ruleNamespaces = {
'keyword': 'keyword.other',
'function': 'support.function',
}
ruleContexts = {
'argument': {
'generic': 'command-call-common-arg-aside-&pop',
'variable': 'command-call-common-variable-&pop',
'pattern': 'command-call-common-glob-&pop',
},
'block': {
'program': 'block-run-prog',
'arguments': 'block-run-arg',
'trap': 'block-trap',
'multidefine': 'block-multidefine',
},
'options': {
'list': 'command-call-common-opt-list-&pop',
'list-with-args': {
'match': '(?=-[{}])',
'push': 'command-call-common-opt-arg-&pop',
'include': 'command-call-common-opt-list-&pop',
},
},
}
logging.basicConfig()
logger = logging.getLogger(__name__)
# Fully resolve the name of a context in the main syntax file
def _resolve_context(context):
return mainSyntaxPath + '#' + context
# Create a match rule describing a command of a certain type, made of a list of
# elements
def _make_rule(cmd_name, cmd_elements, cmd_type):
try:
namespace = ruleNamespaces[cmd_type]
except KeyError:
logger.warning("Ignoring command of unrecognised type '{}'".format(cmd_type))
return
rule = {}
# Careful to sanitise user input. Only literal command names accepted here
rule['match'] = r'{{chain_pre}}' + re.escape(cmd_name) + r'{{chain_post}}'
rule['scope'] = ' '.join([
'meta.function-call.name.execline',
'{}.user.{}.execline'.format(namespace, cmd_name),
'meta.string.unquoted.execline',
])
contextSeq = []
for elem in cmd_elements:
context = None
# Resolve the element into a name and possible argument
elemType,elemSubtype = elem[0:2]
try:
elemArg = elem[2]
except IndexError:
elemArg = ''
# Look up the context named by this element
try:
contextData = ruleContexts[elemType][elemSubtype]
if isinstance(contextData, str):
contextData = { 'include': contextData }
except KeyError:
logger.warning("Ignoring key '{}' not found in context dictionary".format(elem))
continue
if len(contextData) > 1 and not elemArg:
logger.warning("Ignoring element '{}' with missing data".format(elem))
continue
if len(contextData) == 1:
# context = _resolve_context(contextData['include'])
# Although a basic include could be provided as the target context name
# directly to the 'push' list, this can break if there are a mix of other
# types of contexts being pushed to the stack. A context containing a sole
# include is safe from this
context = [ {'include': _resolve_context(contextData['include'])} ]
elif elemType == 'options':
# Careful to sanitise user input, this must behave as a list of characters
matchPattern = contextData['match'].format( re.escape(elemArg) )
context = [
{'match': matchPattern, 'push': _resolve_context(contextData['push'])},
{'include': _resolve_context(contextData['include'])},
]
if context:
contextSeq.append(context)
# Convert context sequence into context stack
if contextSeq:
rule['push'] = contextSeq
rule['push'].reverse()
return rule
def _validate_settings():
# Read the schema using Sublime Text's builtin JSON parser
try:
schema = sublime.decode_value( sublime.load_resource(schemaPath) )
except Exception as ex:
logger.error("Failed loading schema: {}".format(ex))
    return None
settings = sublime.load_settings(settingsName)
activeSets = settings.get('user_chainload_active')
if not activeSets:
return []
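  # Each name in 'user_chainload_active' refers to a rule set stored under a
  # 'user_chainload_set_<name>' key; only sets that pass schema validation are
  # returned for syntax generation.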
validSets = []
for setName in activeSets:
if not setName:
sublime.error_message("Error in {}: Set name cannot be the empty string".format(settingsName))
continue
setName = 'user_chainload_set_' + setName
setDict = settings.get(setName)
if setDict == None:
sublime.error_message("Error in {}: Couldn't find expected setting '{}'".format(settingsName, setName))
continue
try:
jsonschema.validate(setDict, schema)
logger.debug("Validation success for {}".format(setName))
validSets.append(setName)
except jsonschema.exceptions.SchemaError as ex:
# A problem in the schema itself for me as the developer to resolve
logger.error("Failed validating schema: {}".format(ex))
break
except jsonschema.exceptions.ValidationError as ex:
# A problem in the settings file for the user to resolve
sublime.error_message("Error in {} in setting '{}': \n{}".format(settingsName, setName, str(ex)))
continue
return validSets if validSets else None
def _write_user_chainload():
# Read settings file and validate
settings = sublime.load_settings(settingsName)
validSets = _validate_settings()
# Prepare output syntax file
cacheDir = os.path.join(sublime.cache_path(), pkgName)
if not os.path.isdir(cacheDir):
os.mkdir(cacheDir)
userSyntaxPath = os.path.join(cacheDir, userSyntaxName)
userSyntaxExists = os.path.isfile(userSyntaxPath)
# Skip writing the syntax if it already exists in a valid form and we don't
# have a valid set of rules for regenerating it
if userSyntaxExists:
if validSets == None:
logger.warning("Not regenerating syntax due to lack of any valid settings")
return
else:
logger.info("Regenerating syntax with sets: {}".format(validSets))
else:
logger.info("Generating syntax with sets: {}".format(validSets))
userSyntax = open(userSyntaxPath, 'w')
# Can't seem to get PyYAML to write a header, so do it manually
header = '\n'.join([
r'%YAML 1.2',
r'# THIS IS AN AUTOMATICALLY GENERATED FILE.',
r'# DO NOT EDIT. CHANGES WILL BE LOST.',
r'---',
'',
])
userSyntax.write(header)
yaml.dump({'hidden': True, 'scope': 'source.shell.execline'}, userSyntax)
# Repeat all the variables from the main syntax file, for convenience
mainDB = yaml.load(sublime.load_resource(mainSyntaxPath),
Loader = yaml.BaseLoader)
yaml.dump({'variables': mainDB['variables']}, userSyntax)
# Create list of rules from the sets of user settings which are currently
# valid
rulesList = []
for rule in [r for s in validSets for r in settings.get(s)]:
# Schema validation guarantees we can trust all the following inputs
# Read a name or list of names
cmdNames = rule['name']
if isinstance(cmdNames, str):
cmdNames = [cmdNames]
# Get type with 'function' being default if not provided
cmdType = rule.get('type', 'function')
cmdElements = []
for elem in rule['elements']:
      # Get the sole key/value pair without building an intermediate list
key,value = next( iter(elem.items()) )
if key in ruleContexts:
cmdElements.append( (key,value) )
elif 'options_then_' in key:
opts = ''.join( value.get('options_taking_arguments', []) )
if opts:
cmdElements.append( ('options', 'list-with-args', opts) )
else:
cmdElements.append( ('options', 'list') )
then = key.split('_')[-1]
if then == 'end':
# Ignore all further elements
break
else:
# Add the block, etc
cmdElements.append( (then, value[then]) )
for cmdName in cmdNames:
rulesList.append( _make_rule(cmdName, cmdElements, cmdType) )
# Only keep non-empty rules. Sublime doesn't mind if the list of rules ends up
# empty
content = {'contexts': {'main': [r for r in rulesList if r]}}
yaml.dump(content, userSyntax)
def plugin_loaded():
settings = sublime.load_settings(settingsName)
settings.clear_on_change(__name__)
settings.add_on_change(__name__, _write_user_chainload)
if settings.get('user_chainload_debugging'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
_write_user_chainload()
|
[
"logging.basicConfig",
"logging.getLogger",
"re.escape",
"sublime.cache_path",
"yaml.dump",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"jsonschema.validate",
"os.mkdir",
"sublime.load_resource",
"sublime.load_settings"
] |
[((1350, 1371), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1369, 1371), False, 'import logging\n'), ((1381, 1408), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1398, 1408), False, 'import logging\n'), ((4149, 4184), 'sublime.load_settings', 'sublime.load_settings', (['settingsName'], {}), '(settingsName)\n', (4170, 4184), False, 'import sublime\n'), ((5400, 5435), 'sublime.load_settings', 'sublime.load_settings', (['settingsName'], {}), '(settingsName)\n', (5421, 5435), False, 'import sublime\n'), ((5637, 5675), 'os.path.join', 'os.path.join', (['cacheDir', 'userSyntaxName'], {}), '(cacheDir, userSyntaxName)\n', (5649, 5675), False, 'import os\n'), ((5697, 5727), 'os.path.isfile', 'os.path.isfile', (['userSyntaxPath'], {}), '(userSyntaxPath)\n', (5711, 5727), False, 'import os\n'), ((6462, 6535), 'yaml.dump', 'yaml.dump', (["{'hidden': True, 'scope': 'source.shell.execline'}", 'userSyntax'], {}), "({'hidden': True, 'scope': 'source.shell.execline'}, userSyntax)\n", (6471, 6535), False, 'import yaml\n'), ((6701, 6758), 'yaml.dump', 'yaml.dump', (["{'variables': mainDB['variables']}", 'userSyntax'], {}), "({'variables': mainDB['variables']}, userSyntax)\n", (6710, 6758), False, 'import yaml\n'), ((8196, 8226), 'yaml.dump', 'yaml.dump', (['content', 'userSyntax'], {}), '(content, userSyntax)\n', (8205, 8226), False, 'import yaml\n'), ((8263, 8298), 'sublime.load_settings', 'sublime.load_settings', (['settingsName'], {}), '(settingsName)\n', (8284, 8298), False, 'import sublime\n'), ((5529, 5549), 'sublime.cache_path', 'sublime.cache_path', ([], {}), '()\n', (5547, 5549), False, 'import sublime\n'), ((5569, 5592), 'os.path.isdir', 'os.path.isdir', (['cacheDir'], {}), '(cacheDir)\n', (5582, 5592), False, 'import os\n'), ((5598, 5616), 'os.mkdir', 'os.mkdir', (['cacheDir'], {}), '(cacheDir)\n', (5606, 5616), False, 'import os\n'), ((6630, 6667), 'sublime.load_resource', 'sublime.load_resource', (['mainSyntaxPath'], {}), '(mainSyntaxPath)\n', (6651, 6667), False, 'import sublime\n'), ((1975, 1994), 're.escape', 're.escape', (['cmd_name'], {}), '(cmd_name)\n', (1984, 1994), False, 'import re\n'), ((3995, 4028), 'sublime.load_resource', 'sublime.load_resource', (['schemaPath'], {}), '(schemaPath)\n', (4016, 4028), False, 'import sublime\n'), ((4706, 4742), 'jsonschema.validate', 'jsonschema.validate', (['setDict', 'schema'], {}), '(setDict, schema)\n', (4725, 4742), False, 'import jsonschema\n'), ((3485, 3503), 're.escape', 're.escape', (['elemArg'], {}), '(elemArg)\n', (3494, 3503), False, 'import re\n')]
|
import os
import numpy as np
import sys
sys.path.append("../")
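# Sweep every combination of model, adversarial attack and mutation operator,
# retraining the mutated MNIST models for 50 epochs on GPU 0.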
for model in ['lenet1', 'lenet4', 'lenet5']:
for attack in ['fgsm', 'cw', 'jsma']:
for mu_var in ['gf', 'nai', 'ns', 'ws']:
os.system('CUDA_VISIBLE_DEVICES=0 python retrain_mu_mnist.py --datasets=mnist --attack=' + attack + ' --model_name=' + model + ' --mu_var=' + mu_var + ' --epochs=50')
|
[
"os.system",
"sys.path.append"
] |
[((40, 62), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (55, 62), False, 'import sys\n'), ((212, 392), 'os.system', 'os.system', (["(\n 'CUDA_VISIBLE_DEVICES=0 python retrain_mu_mnist.py --datasets=mnist --attack='\n + attack + ' --model_name=' + model + ' --mu_var=' + mu_var +\n ' --epochs=50')"], {}), "(\n 'CUDA_VISIBLE_DEVICES=0 python retrain_mu_mnist.py --datasets=mnist --attack='\n + attack + ' --model_name=' + model + ' --mu_var=' + mu_var +\n ' --epochs=50')\n", (221, 392), False, 'import os\n')]
|
#!/usr/bin/env python3
from __future__ import print_function
import gzip
import json
import re
import sys
# import time
from argparse import ArgumentParser
# from datetime import datetime
class Options:
def __init__(self):
self.usage = 'characterise_inauthentic_tweets.py -i <file of tweets> [-v|--verbose]'
self._init_parser()
def _init_parser(self):
self.parser = ArgumentParser(usage=self.usage, conflict_handler='resolve')
self.parser.add_argument(
'-i', '--tweets-file',
default='-',
required=True,
dest='tweets_file',
help='Tweets file (default: all)'
)
self.parser.add_argument(
'-v', '--verbose',
action='store_true',
default=False,
dest='verbose',
help='Turn on verbose logging (default: False)'
)
def parse(self, args=None):
return self.parser.parse_args(args)
TWITTER_TS_FORMAT = '%a %b %d %H:%M:%S +0000 %Y' #Tue Apr 26 08:57:55 +0000 2011
# def parse_ts(ts_str, fmt=TWITTER_TS_FORMAT):
# try:
# time_struct = time.strptime(ts_str, fmt)
# except TypeError:
# return int(ts_str) # epoch millis
# return datetime.fromtimestamp(time.mktime(time_struct))
def extract_text(tweet):
"""Gets the full text from a tweet if it's short or long (extended)."""
def get_available_text(t):
if t['truncated'] and 'extended_tweet' in t:
            # if a tweet is retrieved in 'compatible' mode, it may be
# truncated _without_ the associated extended_tweet
#eprint('#%s' % t['id_str'])
return t['extended_tweet']['full_text']
else:
return t['text'] if 'text' in t else t['full_text']
if 'retweeted_status' in tweet:
rt = tweet['retweeted_status']
return extract_text(rt)
# return 'RT @%s: %s' % (rt['user']['screen_name'], extract_text(rt))
# if 'quoted_status' in tweet:
# qt = tweet['quoted_status']
# return get_available_text(tweet) + " --> " + extract_text(qt)
return get_available_text(tweet)
def fetch_lines(file=None):
"""Gets the lines from the given file or stdin if it's None or '' or '-'."""
if file and file != '-':
with gzip.open(file, 'rt') if file[-1] in 'zZ' else open(file, 'r', encoding='utf-8') as f:
return [l.strip() for l in f.readlines()]
else:
return [l.strip() for l in sys.stdin]
def extract_tokens(pattern, str):
return list(
filter(
lambda t: len(t) > 0,
map(
lambda t: t.strip(),
re.findall(pattern, str)
)
)
)
def count_tokens_starting_with(chars, tokens):
return sum([1 for _ in tokens if _[0] in chars])
def eprint(*args, **kwargs):
"""Print to stderr"""
print(*args, file=sys.stderr, **kwargs)
DEBUG=False
def log(msg):
if DEBUG: eprint(msg)
if __name__=='__main__':
options = Options()
opts = options.parse(sys.argv[1:])
DEBUG=opts.verbose
tweets_file = opts.tweets_file
# pretty = opts.pretty
tweets = [json.loads(l) for l in fetch_lines(tweets_file)]
log(f'read: {len(tweets)} tweets')
hashtags_only = 0
hashtags_plus_url = 0
mentions_plus_hashtags = 0
mentions_hashtags_plus_url = 0
ht_splitter_re = '[a-zA-Z#]+'
me_splitter_re = '[a-zA-Z@]+'
htme_splitter_re = '[a-zA-Z#@]+'
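    # The splitter patterns reduce a tweet to runs of letters plus '#'/'@'
    # markers so hashtag-only and mention+hashtag tweets can be counted.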
X = 0
for t in tweets:
text = extract_text(t)
# hashtag(s) only
if '#' in text:
tokens = extract_tokens(ht_splitter_re, text)
if len(tokens) == count_tokens_starting_with('#', tokens):
hashtags_only += 1
log(tokens)
# hashtag(s) and URL
if '#' in text and 'http' in text:
tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
if len(tokens) == count_tokens_starting_with('#', tokens):
hashtags_plus_url += 1
# print(tokens)
log(text)
# mention(s) and hashtag(s)
if '#' in text and '@' in text:
tokens = extract_tokens(htme_splitter_re, text)
if len(tokens) == count_tokens_starting_with('#@', tokens):
mentions_plus_hashtags += 1
log(tokens)
# mention(s), hashtag(s) and URL
if '#' in text and '@' in text and 'http' in text:
tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
if len(tokens) == count_tokens_starting_with('#@', tokens):
mentions_hashtags_plus_url += 1
# print(tokens)
log(text)
print(f'All: {len(tweets):,}')
print(f'HT: {hashtags_only:>6} ({float(hashtags_only)/len(tweets):.1%})')
print(f'HT+URL: {hashtags_plus_url:>6} ({float(hashtags_plus_url)/len(tweets):.1%})')
print(f'@m+HT: {mentions_plus_hashtags:>6} ({float(mentions_plus_hashtags)/len(tweets):.1%})')
print(f'@m+HT+URL: {mentions_hashtags_plus_url:>6} ({float(mentions_hashtags_plus_url)/len(tweets):.1%})')
|
[
"re.findall",
"json.loads",
"argparse.ArgumentParser",
"gzip.open"
] |
[((403, 463), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'usage': 'self.usage', 'conflict_handler': '"""resolve"""'}), "(usage=self.usage, conflict_handler='resolve')\n", (417, 463), False, 'from argparse import ArgumentParser\n'), ((3187, 3200), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (3197, 3200), False, 'import json\n'), ((2312, 2333), 'gzip.open', 'gzip.open', (['file', '"""rt"""'], {}), "(file, 'rt')\n", (2321, 2333), False, 'import gzip\n'), ((2682, 2706), 're.findall', 're.findall', (['pattern', 'str'], {}), '(pattern, str)\n', (2692, 2706), False, 'import re\n')]
|
# coding=utf-8
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser.bika_listing import BikaListingTable
from bika.lims.browser.worksheet.views.analyses import AnalysesView
class AnalysesTransposedView(AnalysesView):
""" The view for displaying the table of manage_results transposed.
Analysis Requests are displayed in columns and analyses in rows.
Uses most of the logic provided by BikaListingView through
bika.lims.worksheet.views.AnalysesView to generate the items,
    but renders its own template, which is highly specific to
    displaying analysis results. Because of this, some generic
BikaListing functionalities, such as sorting, pagination,
contextual menus for columns, etc. will not work in this view.
"""
def contents_table(self, table_only = True):
""" Overrides contents_table method from the parent class
BikaListingView, using the transposed template instead
of the classic template.
"""
table = AnalysesTransposedTable(bika_listing = self, table_only = True)
return table.render(self)
class AnalysesTransposedTable(BikaListingTable):
""" The BikaListingTable that uses a transposed template for
displaying the results.
"""
render = ViewPageTemplateFile("../templates/analyses_transposed.pt")
render_cell = ViewPageTemplateFile("../templates/analyses_transposed_cell.pt")
def __init__(self, bika_listing = None, table_only = False):
BikaListingTable.__init__(self, bika_listing, True)
self.rows_headers = []
self.trans_items = {}
self.positions = []
self._transpose_data()
def _transpose_data(self):
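        # Builds the transposed layout: self.rows_headers lists the field and
        # analysis rows to render, and self.trans_items maps each worksheet
        # position ('Pos') to {service name: item} so render_row_cell() can
        # look a cell up by (row header, position).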
cached = []
index = 0
#ignore = ['Analysis', 'Service', 'Result', 'ResultDM']
include = ['Attachments', 'DetectionLimit', 'DueDate','Pos', 'ResultDM']
for col in self.bika_listing.review_state['columns']:
if col == 'Result':
# Further interims will be inserted in this position
resindex = index
if col not in include:
continue
lcol = self.bika_listing.columns[col]
self.rows_headers.append({'id': col,
'title': lcol['title'],
'type': lcol.get('type',''),
'row_type': 'field',
'hidden': not lcol.get('toggle', True),
'input_class': lcol.get('input_class',''),
'input_width': lcol.get('input_width','')})
cached.append(col)
index += 1
for item in self.items:
if item['Service'] not in cached:
self.rows_headers.insert(resindex,
{'id': item['Service'],
'title': item['title'],
'type': item.get('type',''),
'row_type': 'analysis',
'index': index})
resindex += 1
cached.append(item['Service'])
pos = item['Pos']
if pos in self.trans_items:
self.trans_items[pos][item['Service']] = item
else:
self.trans_items[pos] = {item['Service']: item}
if pos not in self.positions:
self.positions.append(pos)
def rendered_items(self, cat=None, **kwargs):
return ''
def render_row_cell(self, rowheader, position = ''):
self.current_rowhead = rowheader
self.current_position = position
if rowheader['row_type'] == 'field':
# Only the first item for this position contains common
# data for all the analyses with the same position
its = [i for i in self.items if i['Pos'] == position]
self.current_item = its[0] if its else {}
elif position in self.trans_items \
and rowheader['id'] in self.trans_items[position]:
self.current_item = self.trans_items[position][rowheader['id']]
else:
return ''
return self.render_cell()
|
[
"bika.lims.browser.bika_listing.BikaListingTable.__init__",
"Products.Five.browser.pagetemplatefile.ViewPageTemplateFile"
] |
[((1334, 1393), 'Products.Five.browser.pagetemplatefile.ViewPageTemplateFile', 'ViewPageTemplateFile', (['"""../templates/analyses_transposed.pt"""'], {}), "('../templates/analyses_transposed.pt')\n", (1354, 1393), False, 'from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n'), ((1412, 1476), 'Products.Five.browser.pagetemplatefile.ViewPageTemplateFile', 'ViewPageTemplateFile', (['"""../templates/analyses_transposed_cell.pt"""'], {}), "('../templates/analyses_transposed_cell.pt')\n", (1432, 1476), False, 'from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n'), ((1551, 1602), 'bika.lims.browser.bika_listing.BikaListingTable.__init__', 'BikaListingTable.__init__', (['self', 'bika_listing', '(True)'], {}), '(self, bika_listing, True)\n', (1576, 1602), False, 'from bika.lims.browser.bika_listing import BikaListingTable\n')]
|
from enum import Enum
from typing import Generator, Tuple, Iterable, Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.ndimage import label, generate_binary_structure
from scipy.ndimage.morphology import distance_transform_edt as dist_trans
import trainer.lib as lib
class ImageNormalizations(Enum):
UnitRange = 1
def duplicate_columns(data, minoccur=2):
ind = np.lexsort(data)
diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)
edges = np.where(diff)[0] + 1
result = np.split(ind, edges)
result = [group for group in result if len(group) >= minoccur]
return result
def pad(small_arr: np.ndarray, size=(30, 30)) -> np.ndarray:
# if small_arr.shape[0] < size[0] or small_arr.shape[1] < size[1]:
size = max(small_arr.shape[0], size[0]), max(small_arr.shape[1], size[1])
res = np.zeros(size, dtype=np.int32)
res[:small_arr.shape[0], :small_arr.shape[1]] = small_arr
return res
# else:
# return small_arr # There is no need for padding
def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
"""
Splits an array into its coherent regions.
:param mode: 0 for orthogonal connection, 1 for full connection
:param arr: Numpy array with shape [W, H]
:return: A list with length #NumberOfRegions of arrays with shape [W, H]
"""
res = []
if mode == 0:
rs, num_regions = label(arr)
elif mode == 1:
rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))
else:
raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
for i in range(1, num_regions + 1):
res.append(rs == i)
return res
def normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:
"""
Currently just normalizes an image with pixel intensities in range [0, 255] to [-1, 1]
:return: The normalized image
"""
if norm_type == ImageNormalizations.UnitRange:
return (im.astype(np.float32) / 127.5) - 1
else:
raise Exception("Unknown Normalization type")
def distance_transformed(mask: np.ndarray) -> np.ndarray:
    if mask.dtype != bool:
        mask = mask.astype(bool)
return dist_trans(np.invert(mask).astype(np.float32))
def one_hot_to_cont(x: np.ndarray) -> np.ndarray:
"""
Convert a one hot encoded image into the same image with integer representations.
:param x: np.ndarray with (C, W, H)
:return: np.ndarray with (W, H)
"""
return np.argmax(x, axis=len(x.shape) - 3)
def cont_to_ont_hot(arr: np.ndarray, n_values=-1) -> np.ndarray:
if n_values == -1:
n_values = np.max(arr) + 1
res = np.zeros((n_values,) + arr.shape)
for v in np.unique(arr):
res[v, :, :][arr == v] = 1
return res
def reduce_by_attention(arr: np.ndarray, att: np.ndarray):
"""
Reduce an array by a field of attention, such that the result is a rectangle with the empty borders cropped.
:param arr: Target array. The last two dimensions need to be of the same shape as the attention field
:param att: field of attention
:return: cropped array
"""
assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]
ones = np.argwhere(att)
lmost, rmost = np.min(ones[:, 0]), np.max(ones[:, 0]) + 1
bmost, tmost = np.min(ones[:, 1]), np.max(ones[:, 1]) + 1
grid_slice = [slice(None) for _ in range(len(arr.shape) - 2)]
grid_slice.extend([slice(lmost, rmost), slice(bmost, tmost)])
return arr[tuple(grid_slice)], att[lmost:rmost, bmost:tmost], (lmost, rmost, bmost, tmost)
def pair_augmentation(g: Iterable[Tuple[np.ndarray, np.ndarray]], aug_ls) -> Iterable[Tuple[np.ndarray, np.ndarray]]:
import imgaug.augmenters as iaa
seq = iaa.Sequential(aug_ls)
for im, gt, frame_number in g:
im_prep = im[frame_number] if im.shape[3] > 1 else im.squeeze()
gt_prep = np.expand_dims(gt, len(gt.shape))
images_aug = seq(images=[im_prep], segmentation_maps=[gt_prep])
yield images_aug[0][0].astype(np.float32), images_aug[1][0][:, :, 0].astype(np.float32), frame_number
def insert_np_at(a1: np.ndarray, a2: np.ndarray, pos: Tuple[int, int], filter_arr=None) -> np.ndarray:
assert len(a1.shape) == 2 and len(a2.shape) == 2
if filter_arr is None:
        filter_arr = np.ones_like(a2).astype(bool)
x, y = pos
res = np.copy(a1)
a1_x = slice(x, min(x + a2.shape[0], a1.shape[0]))
a1_y = slice(y, min(y + a2.shape[1], a1.shape[1]))
if x + a2.shape[0] <= a1.shape[0]:
a2_x = slice(0, a2.shape[0])
else:
a2_x = slice(0, a1.shape[0] - (x + a2.shape[0]))
if y + a2.shape[1] <= a1.shape[1]:
a2_y = slice(0, a2.shape[1])
else:
a2_y = slice(0, a1.shape[1] - (y + a2.shape[1]))
item_filter = filter_arr[(a2_x, a2_y)]
assert res[(a1_x, a1_y)].shape == a2[(a2_x, a2_y)].shape
res[(a1_x, a1_y)][item_filter] = a2[(a2_x, a2_y)][item_filter]
return res
if __name__ == '__main__':
fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))
too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))
too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))
# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:
# """
# Puts the small array into the big array. Ignores problems and does its best to fulfill the task
# """
# b, t =
# big_arr[]
# big_arr = np.putmask(big_arr, )
# if __name__ == '__main__':
# # a = np.zeros((10, 10))
# # b = np.random.random((4, 4))
# # c = put_array(a, b)
# # lib.logger.debug_var(c)
|
[
"numpy.copy",
"numpy.ones_like",
"numpy.unique",
"numpy.ones",
"numpy.where",
"scipy.ndimage.generate_binary_structure",
"scipy.ndimage.label",
"numpy.any",
"numpy.max",
"numpy.invert",
"numpy.lexsort",
"numpy.split",
"numpy.zeros",
"numpy.argwhere",
"imgaug.augmenters.Sequential",
"numpy.min"
] |
[((431, 447), 'numpy.lexsort', 'np.lexsort', (['data'], {}), '(data)\n', (441, 447), True, 'import numpy as np\n'), ((459, 510), 'numpy.any', 'np.any', (['(data.T[ind[1:]] != data.T[ind[:-1]])'], {'axis': '(1)'}), '(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)\n', (465, 510), True, 'import numpy as np\n'), ((558, 578), 'numpy.split', 'np.split', (['ind', 'edges'], {}), '(ind, edges)\n', (566, 578), True, 'import numpy as np\n'), ((886, 916), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (894, 916), True, 'import numpy as np\n'), ((2739, 2772), 'numpy.zeros', 'np.zeros', (['((n_values,) + arr.shape)'], {}), '((n_values,) + arr.shape)\n', (2747, 2772), True, 'import numpy as np\n'), ((2786, 2800), 'numpy.unique', 'np.unique', (['arr'], {}), '(arr)\n', (2795, 2800), True, 'import numpy as np\n'), ((3297, 3313), 'numpy.argwhere', 'np.argwhere', (['att'], {}), '(att)\n', (3308, 3313), True, 'import numpy as np\n'), ((3831, 3853), 'imgaug.augmenters.Sequential', 'iaa.Sequential', (['aug_ls'], {}), '(aug_ls)\n', (3845, 3853), True, 'import imgaug.augmenters as iaa\n'), ((4459, 4470), 'numpy.copy', 'np.copy', (['a1'], {}), '(a1)\n', (4466, 4470), True, 'import numpy as np\n'), ((1448, 1458), 'scipy.ndimage.label', 'label', (['arr'], {}), '(arr)\n', (1453, 1458), False, 'from scipy.ndimage import label, generate_binary_structure\n'), ((3333, 3351), 'numpy.min', 'np.min', (['ones[:, 0]'], {}), '(ones[:, 0])\n', (3339, 3351), True, 'import numpy as np\n'), ((3395, 3413), 'numpy.min', 'np.min', (['ones[:, 1]'], {}), '(ones[:, 1])\n', (3401, 3413), True, 'import numpy as np\n'), ((5107, 5124), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5114, 5124), True, 'import numpy as np\n'), ((5183, 5200), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5190, 5200), True, 'import numpy as np\n'), ((5259, 5276), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5266, 5276), True, 'import numpy as np\n'), ((523, 537), 'numpy.where', 'np.where', (['diff'], {}), '(diff)\n', (531, 537), True, 'import numpy as np\n'), ((2713, 2724), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (2719, 2724), True, 'import numpy as np\n'), ((3353, 3371), 'numpy.max', 'np.max', (['ones[:, 0]'], {}), '(ones[:, 0])\n', (3359, 3371), True, 'import numpy as np\n'), ((3415, 3433), 'numpy.max', 'np.max', (['ones[:, 1]'], {}), '(ones[:, 1])\n', (3421, 3433), True, 'import numpy as np\n'), ((5126, 5141), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5133, 5141), True, 'import numpy as np\n'), ((5202, 5218), 'numpy.ones', 'np.ones', (['(3, 10)'], {}), '((3, 10))\n', (5209, 5218), True, 'import numpy as np\n'), ((5278, 5295), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5285, 5295), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (2299, 2305), True, 'import numpy as np\n'), ((4401, 4417), 'numpy.ones_like', 'np.ones_like', (['a2'], {}), '(a2)\n', (4413, 4417), True, 'import numpy as np\n'), ((1526, 1557), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['(2)', '(2)'], {}), '(2, 2)\n', (1551, 1557), False, 'from scipy.ndimage import label, generate_binary_structure\n')]
|
from PyQt5.QtCore import Qt, pyqtSignal, QSize
from PyQt5.QtWidgets import (
QLabel, QWidget, QTreeWidgetItem, QHeaderView,
QVBoxLayout, QHBoxLayout,
)
from .ImageLabel import ImageLabel
from .AdaptiveTreeWidget import AdaptiveTreeWidget
class ImageDetailArea(QWidget):
# signal
imageLoaded = pyqtSignal()
imageCleared = pyqtSignal()
# static strings
strings = {
'filename': 'filename: %s (%s)',
'size': 'size: %d x %d',
'component': 'Components (%d in total)',
'quantization': 'Quantization tables (%d in total)',
'huffman': 'Huffman tables (%d for DC, %d for AC)',
'showedComponentsInfo': [
'dc_tbl_no',
'ac_tbl_no',
'quant_tbl_no',
'h_samp_factor',
'v_samp_factor',
],
}
def __init__(self, parent=None):
super().__init__(parent)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setObjectName("verticalLayout")
# title
self.lb_title = QLabel(self)
self.lb_title.setAlignment(Qt.AlignCenter)
self.lb_title.setObjectName("lb_title")
self.verticalLayout.addWidget(self.lb_title)
# filename && size
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lb_filename = QLabel(self)
self.lb_filename.setObjectName("lb_filename")
self.horizontalLayout.addWidget(self.lb_filename)
self.lb_size = QLabel(self)
self.lb_size.setObjectName("lb_size")
self.horizontalLayout.addWidget(self.lb_size)
self.verticalLayout.addLayout(self.horizontalLayout)
# image preview
self.lb_image = ImageLabel(self)
self.lb_image.setMinimumSize(QSize(250, 250))
self.lb_image.setAlignment(Qt.AlignCenter)
self.lb_image.setObjectName("lb_image")
self.verticalLayout.addWidget(self.lb_image)
# components
self.lb_components = QLabel(self)
self.lb_components.setObjectName("lb_components")
self.verticalLayout.addWidget(self.lb_components)
self.treeWidget_components = AdaptiveTreeWidget(self)
self.treeWidget_components.setUniformRowHeights(True)
self.treeWidget_components.setObjectName("treeWidget_components")
self.treeWidget_components.setColumnCount(3)
self.treeWidget_components.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_components.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_components.headerItem().setText(0, "ID")
self.treeWidget_components.headerItem().setText(1, "Property")
self.treeWidget_components.headerItem().setText(2, "Value")
self.treeWidget_components.header().setSectionResizeMode(
QHeaderView.ResizeToContents
)
self.verticalLayout.addWidget(self.treeWidget_components)
# quant tables
self.lb_quantTbls = QLabel(self)
self.lb_quantTbls.setObjectName("lb_quantTbls")
self.verticalLayout.addWidget(self.lb_quantTbls)
self.treeWidget_quantTbls = AdaptiveTreeWidget(self)
self.treeWidget_quantTbls.setObjectName("treeWidget_quantTbls")
self.treeWidget_quantTbls.setColumnCount(3)
self.treeWidget_quantTbls.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_quantTbls.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_quantTbls.headerItem().setText(0, "ID")
self.treeWidget_quantTbls.headerItem().setText(1, "Property")
self.treeWidget_quantTbls.headerItem().setText(2, "Value")
self.verticalLayout.addWidget(self.treeWidget_quantTbls)
# huffman tables
self.lb_huffTbls = QLabel(self)
self.lb_huffTbls.setObjectName("lb_huffTbls")
self.verticalLayout.addWidget(self.lb_huffTbls)
self.treeWidget_huffTbls = AdaptiveTreeWidget(self)
self.treeWidget_huffTbls.setObjectName("treeWidget_huffTbls")
self.treeWidget_huffTbls.setColumnCount(3)
self.treeWidget_huffTbls.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_huffTbls.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_huffTbls.headerItem().setText(0, "ID")
self.treeWidget_huffTbls.headerItem().setText(1, "Property")
self.treeWidget_huffTbls.headerItem().setText(2, "Value")
self.verticalLayout.addWidget(self.treeWidget_huffTbls)
self.setTitle('( None )')
self.clear()
def setTitle(self, title):
self.lb_title.setText(title)
def clear(self):
self.image = None
self.lb_filename.setText(
self.strings['filename'] % ('', 'NO image loaded')
)
self.lb_size.setText(
self.strings['size'] % (0, 0)
)
self.lb_image.clear()
self.lb_components.setText(
self.strings['component'] % 0
)
self.treeWidget_components.clear()
self.lb_quantTbls.setText(
self.strings['quantization'] % 0
)
self.treeWidget_quantTbls.clear()
self.lb_huffTbls.setText(
self.strings['huffman'] % (0, 0)
)
self.treeWidget_huffTbls.clear()
self.imageCleared.emit()
def setImage(self, image):
self.clear()
self.image = image
self.lb_filename.setText(
self.strings['filename'] % (image.filename, 'original')
)
self.lb_size.setText(
self.strings['size'] % image.size
)
self.lb_image.setImageMemSrc(image, 300, 300)
# components
for comp in image.comp_infos:
topItem = QTreeWidgetItem(
self.treeWidget_components,
[str(comp['component_id']), '', '']
)
for key in self.strings['showedComponentsInfo']:
QTreeWidgetItem(topItem, ['', key, str(comp[key])])
self.lb_components.setText(
self.strings['component'] % len(image.comp_infos)
)
# quantization tables
self.lb_quantTbls.setText(
self.strings['quantization'] % len(image.quant_tbls)
)
for i, quant_tbl in enumerate(image.quant_tbls):
topItem = QTreeWidgetItem(
self.treeWidget_quantTbls,
[str(i), '', '']
)
for key in quant_tbl:
QTreeWidgetItem(topItem, ['', key, str(quant_tbl[key])])
# huffman tables
self.lb_huffTbls.setText(
self.strings['huffman'] % (
len(image.dc_huff_tbls),
len(image.ac_huff_tbls)
)
)
for i, hufftbl in enumerate(image.dc_huff_tbls):
topItem = QTreeWidgetItem(
self.treeWidget_huffTbls,
[str(i), 'type', 'DC']
)
for key in hufftbl:
QTreeWidgetItem(topItem, ['', key, str(hufftbl[key])])
for i, hufftbl in enumerate(image.ac_huff_tbls):
topItem = QTreeWidgetItem(
self.treeWidget_huffTbls,
[str(i), 'type', 'AC']
)
for key in hufftbl:
QTreeWidgetItem(topItem, ['', key, str(hufftbl[key])])
self.imageLoaded.emit()
|
[
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QSize"
] |
[((313, 325), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (323, 325), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QSize\n'), ((345, 357), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (355, 357), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QSize\n'), ((932, 949), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self'], {}), '(self)\n', (943, 949), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((1050, 1062), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1056, 1062), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((1274, 1287), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1285, 1287), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((1379, 1391), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1385, 1391), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((1527, 1539), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1533, 1539), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((2022, 2034), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (2028, 2034), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((3084, 3096), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3090, 3096), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((3955, 3967), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3961, 3967), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QTreeWidgetItem, QHeaderView, QVBoxLayout, QHBoxLayout\n'), ((1803, 1818), 'PyQt5.QtCore.QSize', 'QSize', (['(250)', '(250)'], {}), '(250, 250)\n', (1808, 1818), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QSize\n')]
|
import argparse
import data_helper
from sklearn.model_selection import train_test_split
import re
import lstm
from lstm import *
import time
from viterbi import Viterbi
xrange = range
def simple_cut(text,dh,lm,viterbi):
"""对一个片段text(标点符号把句子划分为多个片段)进行预测。"""
if text:
#print("text: %s" %text)
text_len = len(text)
        X_batch = dh.text2ids(text)  # here each batch is a single sample
fetches = [lm.y_pred]
feed_dict = {lm.X_inputs:X_batch, lm.lr:1.0, lm.batch_size:1, lm.keep_prob:1.0}
        _y_pred = sess.run(fetches, feed_dict)[0][:text_len]  # drop the padded part directly
nodes = [dict(zip(['s','b','m','e'], each[1:])) for each in _y_pred]
#print(type(dh.labels))
#print(dh.labels)
tags = viterbi.viterbi(nodes)
words = []
for i in range(len(text)):
if tags[i] in ['s', 'b']:
words.append(text[i])
else:
words[-1] += text[i]
return words
else:
return []
def cut_word(sentence,dh,lm,viterbi):
"""首先将一个sentence根据标点和英文符号/字符串划分成多个片段text,然后对每一个片段分词。"""
not_cuts = re.compile(u'([0-9\da-zA-Z ]+)|[。,、?!.\.\?,!]')
result = []
start = 0
for seg_sign in not_cuts.finditer(sentence):
result.extend(simple_cut(sentence[start:seg_sign.start()],dh,lm,viterbi))
result.append(sentence[seg_sign.start():seg_sign.end()])
start = seg_sign.end()
result.extend(simple_cut(sentence[start:],dh,lm,viterbi))
return result
def predict(dh,lm,viterbi,sentence):
    # Example 1
result = cut_word(sentence,dh,lm,viterbi)
rss = ''
for each in result:
rss = rss + each + ' / '
print (rss)
def main():
parser = argparse.ArgumentParser(description = "lstm segment args.")
parser.add_argument("-a","--action",type=str,default="predict",help="train or predict")
parser.add_argument("-c","--corpus",type=str,default="data/msr_train.txt",help="train file")
parser.add_argument("-v","--vocab_model",type=str,default="model/vocab_model.pkl",help="vocab model file")
parser.add_argument("-m","--lstm_model",type=str,default="model/bi-lstm.ckpt-6",help="lstm model file")
args = parser.parse_args()
corpus = args.corpus
vocab_model = args.vocab_model
action = args.action
lstm_model = args.lstm_model
dh = data_helper.DataHelper(vocab_model)
dh.datahander(corpus)
#dh.loadmodel(vocab_model)
if action == "predict":
lm = lstm.LSTM(lstm_model)
viterbi = Viterbi(dh.labels)
saver = tf.train.Saver()
saver.restore(sess, lm.model_path)
sentence = u'人们思考问题往往不是从零开始的。就好像你现在阅读这篇文章一样,你对每个词的理解都会依赖于你前面看到的一些词,而不是把你前面看的内容全部抛弃了,忘记了,再去理解这个单词。也就是说,人们的思维总是会有延续性的。'
predict(dh,lm,viterbi,sentence)
while True:
sentence = input("input words for cut .EXIT for exit:\n")
if sentence == "EXIT":
break
predict(dh,lm,viterbi,sentence)
if __name__ == "__main__":
main()
|
[
"lstm.LSTM",
"viterbi.Viterbi",
"argparse.ArgumentParser",
"re.compile",
"data_helper.DataHelper"
] |
[((1117, 1167), 're.compile', 're.compile', (['u"""([0-9\\\\da-zA-Z ]+)|[。,、?!.\\\\.\\\\?,!]"""'], {}), "(u'([0-9\\\\da-zA-Z ]+)|[。,、?!.\\\\.\\\\?,!]')\n", (1127, 1167), False, 'import re\n'), ((1712, 1769), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""lstm segment args."""'}), "(description='lstm segment args.')\n", (1735, 1769), False, 'import argparse\n'), ((2339, 2374), 'data_helper.DataHelper', 'data_helper.DataHelper', (['vocab_model'], {}), '(vocab_model)\n', (2361, 2374), False, 'import data_helper\n'), ((2474, 2495), 'lstm.LSTM', 'lstm.LSTM', (['lstm_model'], {}), '(lstm_model)\n', (2483, 2495), False, 'import lstm\n'), ((2514, 2532), 'viterbi.Viterbi', 'Viterbi', (['dh.labels'], {}), '(dh.labels)\n', (2521, 2532), False, 'from viterbi import Viterbi\n')]
|
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
class HeightMap():
def __init__(self) -> None:
self._grid = np.array([])
def add_row(self, row):
np_row = np.array(row)
if self._grid.size != 0:
self._grid = np.vstack([self._grid, np_row])
else:
self._grid = np_row
def find_low_points(self, radius=1):
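        """Return the values of all points that are strictly lower than every orthogonal neighbor."""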
low_points = []
for index, point in np.ndenumerate(self._grid):
neighbor_points = self._neighbors(radius, coordinates=index)
if point < min(neighbor_points):
low_points.append(point)
return low_points
def _neighbors(self, radius, coordinates=(0, 0)):
neighbors = []
row = coordinates[0]
column = coordinates[1]
# Get UP neighbor value
if row >= 1:
neighbors.append(self._grid[row - radius, column])
# Get LEFT neighbor value
if column >= 1:
neighbors.append(self._grid[row, column - radius])
# Get RIGHT neighbor value
if column < len(self._grid[0]) - radius:
neighbors.append(self._grid[row, column + radius])
# Get DOWN neighbor value
if row < len(self._grid) - radius:
neighbors.append(self._grid[row + radius, column])
return neighbors
def __str__(self) -> str:
output = ""
for row in self._grid:
for elem in row:
output = output + f"{elem:>3}"
output = output + "\n"
return output
def calculate_risk(heights):
# Risk is 1 plus the height
return sum([height + 1 for height in heights])
def main():
filename = input("What is the input file name? ")
try:
with open(filename, "r") as file:
# Create a new board
area = HeightMap()
# Read the rows and setup the HeightMap
for line in file:
line = line.strip()
input_row = [int(x) for x in str(line)]
area.add_row(input_row)
print("The input grid: ")
print(area)
low_points = area.find_low_points()
sum_risk_levels = calculate_risk(
low_points) if low_points else None
if sum_risk_levels:
low_points_str = [str(point) for point in low_points]
print(f"Number of low points: {len(low_points)}")
print(f"Low points: {', '.join(low_points_str)}")
print(
f"\nThe sum of the risk levels of all low points is: {sum_risk_levels}\n")
else:
print("The sum of the risk levels of all low points not found.\n")
except FileNotFoundError:
print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
main()
|
[
"numpy.array",
"numpy.vstack",
"numpy.ndenumerate"
] |
[((211, 223), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (219, 223), True, 'import numpy as np\n'), ((270, 283), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (278, 283), True, 'import numpy as np\n'), ((514, 540), 'numpy.ndenumerate', 'np.ndenumerate', (['self._grid'], {}), '(self._grid)\n', (528, 540), True, 'import numpy as np\n'), ((342, 373), 'numpy.vstack', 'np.vstack', (['[self._grid, np_row]'], {}), '([self._grid, np_row])\n', (351, 373), True, 'import numpy as np\n')]
|
# Implementation of Randomised Selection
"""
Randomised Selection (quickselect)
---------
Parameters
---------
An array with n distinct numbers, its length, and the order i (1-indexed)
---------
Returns
---------
i(th) order statistic, i.e: i(th) smallest element of the input array
---------
Time Complexity
---------
O(n) expected, O(n^2) worst case
---------
Test Cases
---------
[1, 23, 3, 43, 5], i = 3
=> 5
"""
import random
def randomised_selection(unsorted_array, length_of_array, i_order_statistic):
    if length_of_array == 1:
        return unsorted_array[0]
    # pivot = random.choice(unsorted_array)
    pivot_range = random.randrange(length_of_array)
    pivot = unsorted_array[pivot_range]
    # Partition around the pivot (the input is assumed to hold distinct numbers).
    pivot_left = [value for value in unsorted_array if value < pivot]
    pivot_right = [value for value in unsorted_array if value > pivot]
    if len(pivot_left) == i_order_statistic - 1:
        return pivot
    if len(pivot_left) >= i_order_statistic:
        return randomised_selection(pivot_left, len(pivot_left), i_order_statistic)
    return randomised_selection(pivot_right, len(pivot_right),
                                 i_order_statistic - len(pivot_left) - 1)
if __name__ == "__main__":
# user_input = input("Enter the list of numbers: \n").strip()
# unsorted_array = [int(item) for item in user_input.split(",")]
print(randomised_selection([1, 23, 3, 43, 5], 5, 3))
|
[
"random.randrange"
] |
[((650, 683), 'random.randrange', 'random.randrange', (['length_of_array'], {}), '(length_of_array)\n', (666, 683), False, 'import random\n')]
|
#!/usr/bin/env python
import cgitb; cgitb.enable()
print('Content-type: text/html\n')
print(
"""<html>
<head>
<title>CGI 4 - CSS</title>
<link rel="stylesheet" type="text/css" href="../css/estilo1.css">
</head>
<body>
        <h1>Putting CSS in a separate script</h1>
<hr>
        <p>Hello CGI images!</p>
<div class="wraptocenter">
<img id="imagem" src="../imagens/evil.jpg" border=1 alt="Piadinha idiota" width=350 height=500>
</div>
<hr>
</body>
</html>
"""
)
|
[
"cgitb.enable"
] |
[((36, 50), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (48, 50), False, 'import cgitb\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
typname = "exchange_routes"
def __bool__(self):
return self.error != 'invalidated'
__nonzero__ = __bool__
@classmethod
def from_id(cls, id):
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE id = %(id)s
""", locals())
if r:
from gratipay.models.participant import Participant # XXX Red hot hack!
r.set_attributes(participant=Participant.from_id(r.participant))
return r
@classmethod
def from_network(cls, participant, network):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM current_exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
""", locals())
if r:
r.set_attributes(participant=participant)
return r
@classmethod
def from_address(cls, participant, network, address):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
AND address = %(address)s
""", locals())
if r:
r.set_attributes(participant=participant)
return r
@classmethod
def insert(cls, participant, network, address, error='', fee_cap=None, cursor=None):
participant_id = participant.id
r = (cursor or cls.db).one("""
INSERT INTO exchange_routes
(participant, network, address, error, fee_cap)
VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
RETURNING exchange_routes.*::exchange_routes
""", locals())
if network == 'braintree-cc':
participant.update_giving_and_teams()
r.set_attributes(participant=participant)
return r
def invalidate(self):
if self.network == 'braintree-cc':
braintree.PaymentMethod.delete(self.address)
# For Paypal, we remove the record entirely to prevent
# an integrity error if the user tries to add the route again
if self.network == 'paypal':
# XXX This doesn't sound right. Doesn't this corrupt history pages?
self.db.run("DELETE FROM exchange_routes WHERE id=%s", (self.id,))
else:
self.update_error('invalidated')
def update_error(self, new_error):
id = self.id
old_error = self.error
if old_error == 'invalidated':
return
self.db.run("""
UPDATE exchange_routes
SET error = %(new_error)s
WHERE id = %(id)s
""", locals())
self.set_attributes(error=new_error)
# Update cached amounts if requested and necessary
if self.network != 'braintree-cc':
return
if self.participant.is_suspicious or bool(new_error) == bool(old_error):
return
# XXX *White* hot hack!
# =====================
# During payday, participant is a record from a select of
# payday_participants (or whatever), *not* an actual Participant
# object. We need the real deal so we can use a method on it ...
from gratipay.models.participant import Participant
participant = Participant.from_username(self.participant.username)
participant.update_giving_and_teams()
|
[
"gratipay.models.participant.Participant.from_username",
"braintree.PaymentMethod.delete",
"gratipay.models.participant.Participant.from_id"
] |
[((3646, 3698), 'gratipay.models.participant.Participant.from_username', 'Participant.from_username', (['self.participant.username'], {}), '(self.participant.username)\n', (3671, 3698), False, 'from gratipay.models.participant import Participant\n'), ((2280, 2324), 'braintree.PaymentMethod.delete', 'braintree.PaymentMethod.delete', (['self.address'], {}), '(self.address)\n', (2310, 2324), False, 'import braintree\n'), ((633, 667), 'gratipay.models.participant.Participant.from_id', 'Participant.from_id', (['r.participant'], {}), '(r.participant)\n', (652, 667), False, 'from gratipay.models.participant import Participant\n')]
|
import os
from os.path import dirname
from unittest import TestCase
import src.superannotate as sa
class TestCloneProject(TestCase):
PROJECT_NAME_1 = "test create from full info1"
PROJECT_NAME_2 = "test create from full info2"
PROJECT_DESCRIPTION = "desc"
PROJECT_TYPE = "Vector"
TEST_FOLDER_PATH = "data_set/sample_project_vector"
@property
def folder_path(self):
return os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH)
@property
def classes_json(self):
return f"{self.folder_path}/classes/classes.json"
def setUp(self, *args, **kwargs):
self.tearDown()
self._project_1 = sa.create_project(
self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
)
def tearDown(self) -> None:
sa.delete_project(self.PROJECT_NAME_1)
sa.delete_project(self.PROJECT_NAME_2)
def test_clone_contributors_and_description(self):
team_users = sa.search_team_contributors()
sa.share_project(self.PROJECT_NAME_1, team_users[0], "QA")
first_project_metadata = sa.get_project_metadata(
self.PROJECT_NAME_1, include_contributors=True
)
first_project_contributors = first_project_metadata["contributors"]
sa.clone_project(self.PROJECT_NAME_2, self.PROJECT_NAME_1, "DESCRIPTION", copy_contributors=True)
second_project_metadata = sa.get_project_metadata(
self.PROJECT_NAME_2, include_contributors=True
)
second_project_contributors = second_project_metadata["contributors"]
self.assertEqual(first_project_contributors[0]["user_id"], second_project_contributors[0]["user_id"])
self.assertEqual("DESCRIPTION", second_project_metadata["description"])
|
[
"src.superannotate.clone_project",
"os.path.dirname",
"src.superannotate.search_team_contributors",
"src.superannotate.create_project",
"src.superannotate.get_project_metadata",
"src.superannotate.share_project",
"src.superannotate.delete_project"
] |
[((667, 755), 'src.superannotate.create_project', 'sa.create_project', (['self.PROJECT_NAME_1', 'self.PROJECT_DESCRIPTION', 'self.PROJECT_TYPE'], {}), '(self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.\n PROJECT_TYPE)\n', (684, 755), True, 'import src.superannotate as sa\n'), ((814, 852), 'src.superannotate.delete_project', 'sa.delete_project', (['self.PROJECT_NAME_1'], {}), '(self.PROJECT_NAME_1)\n', (831, 852), True, 'import src.superannotate as sa\n'), ((861, 899), 'src.superannotate.delete_project', 'sa.delete_project', (['self.PROJECT_NAME_2'], {}), '(self.PROJECT_NAME_2)\n', (878, 899), True, 'import src.superannotate as sa\n'), ((977, 1006), 'src.superannotate.search_team_contributors', 'sa.search_team_contributors', ([], {}), '()\n', (1004, 1006), True, 'import src.superannotate as sa\n'), ((1015, 1073), 'src.superannotate.share_project', 'sa.share_project', (['self.PROJECT_NAME_1', 'team_users[0]', '"""QA"""'], {}), "(self.PROJECT_NAME_1, team_users[0], 'QA')\n", (1031, 1073), True, 'import src.superannotate as sa\n'), ((1107, 1178), 'src.superannotate.get_project_metadata', 'sa.get_project_metadata', (['self.PROJECT_NAME_1'], {'include_contributors': '(True)'}), '(self.PROJECT_NAME_1, include_contributors=True)\n', (1130, 1178), True, 'import src.superannotate as sa\n'), ((1285, 1386), 'src.superannotate.clone_project', 'sa.clone_project', (['self.PROJECT_NAME_2', 'self.PROJECT_NAME_1', '"""DESCRIPTION"""'], {'copy_contributors': '(True)'}), "(self.PROJECT_NAME_2, self.PROJECT_NAME_1, 'DESCRIPTION',\n copy_contributors=True)\n", (1301, 1386), True, 'import src.superannotate as sa\n'), ((1417, 1488), 'src.superannotate.get_project_metadata', 'sa.get_project_metadata', (['self.PROJECT_NAME_2'], {'include_contributors': '(True)'}), '(self.PROJECT_NAME_2, include_contributors=True)\n', (1440, 1488), True, 'import src.superannotate as sa\n'), ((434, 451), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (441, 451), False, 'from os.path import dirname\n')]
|
import logging
from rhasspy_weather.data_types.request import WeatherRequest
from rhasspy_weather.parser import rhasspy_intent
from rhasspyhermes.nlu import NluIntent
log = logging.getLogger(__name__)
def parse_intent_message(intent_message: NluIntent) -> WeatherRequest:
"""
Parses any of the rhasspy weather intents.
Args:
intent_message: a Hermes NluIntent
Returns: WeatherRequest object
"""
return rhasspy_intent.parse_intent_message(intent_message.to_rhasspy_dict())
def get_template_values(intent_message: NluIntent) -> dict:
return rhasspy_intent.get_template_values(intent_message.to_rhasspy_dict())
|
[
"logging.getLogger"
] |
[((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n')]
|
import pytest
def vprintf_test(vamos):
if vamos.flavor == "agcc":
pytest.skip("vprintf not supported")
vamos.run_prog_check_data("vprintf")
|
[
"pytest.skip"
] |
[((80, 116), 'pytest.skip', 'pytest.skip', (['"""vprintf not supported"""'], {}), "('vprintf not supported')\n", (91, 116), False, 'import pytest\n')]
|
#!/usr/bin/env python3
import socket
HOST = '127.0.0.1'  # the server's hostname or IP address
PORT = 10009        # the port used by the server
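# Note: this client assumes some server is already listening on HOST:PORT
# (started separately); otherwise connect() below raises ConnectionRefusedError.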
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print(s)
s.connect((HOST, PORT))
s.sendall(b'Hello, world')
print(s)
data = s.recv(1024)
print('Received', repr(data))
|
[
"socket.socket"
] |
[((108, 157), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (121, 157), False, 'import socket\n')]
|
import requests
import Mark
from colorama import Fore
from util.plugins.common import print_slow, getheaders, proxy
def StatusChanger(token, Status):
#change status
CustomStatus = {"custom_status": {"text": Status}} #{"text": Status, "emoji_name": "☢"} if you want to add an emoji to the status
try:
r = requests.patch("https://discord.com/api/v9/users/@me/settings", proxies={"ftp": f'{proxy()}'}, headers=getheaders(token), json=CustomStatus)
print_slow(f"\n{Fore.GREEN}Status changed to {Fore.WHITE}{Status}{Fore.GREEN} ")
except Exception as e:
print(f"{Fore.RED}Error:\n{e}\nOccurred while trying to change the status :/")
print("Enter anything to continue. . . ", end="")
input()
Mark.main()
|
[
"util.plugins.common.print_slow",
"util.plugins.common.getheaders",
"Mark.main",
"util.plugins.common.proxy"
] |
[((741, 752), 'Mark.main', 'Mark.main', ([], {}), '()\n', (750, 752), False, 'import Mark\n'), ((476, 564), 'util.plugins.common.print_slow', 'print_slow', (['f"""\n{Fore.GREEN}Status changed to {Fore.WHITE}{Status}{Fore.GREEN} """'], {}), '(\n f"""\n{Fore.GREEN}Status changed to {Fore.WHITE}{Status}{Fore.GREEN} """)\n', (486, 564), False, 'from util.plugins.common import print_slow, getheaders, proxy\n'), ((430, 447), 'util.plugins.common.getheaders', 'getheaders', (['token'], {}), '(token)\n', (440, 447), False, 'from util.plugins.common import print_slow, getheaders, proxy\n'), ((410, 417), 'util.plugins.common.proxy', 'proxy', ([], {}), '()\n', (415, 417), False, 'from util.plugins.common import print_slow, getheaders, proxy\n')]
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from hashlib import sha256
from recipe_engine import recipe_test_api
class CloudBuildHelperTestApi(recipe_test_api.RecipeTestApi):
def build_success_output(self, image, target='target', canonical_tag=None):
if not image:
img = 'example.com/fake-registry/%s' % target
digest = 'sha256:'+sha256(target).hexdigest()[:16]+'...'
tag = canonical_tag
if tag == ':inputs-hash':
tag = 'cbh-inputs-deadbead...'
else:
img = image.image
digest = image.digest
tag = image.tag
out = {'view_build_url': 'https://example.com/build/%s' % target}
if img:
out['image'] = {'image': img, 'digest': digest, 'tag': tag}
out['view_image_url'] = 'https://example.com/image/%s' % target
return self.m.json.output(out)
def build_error_output(self, message, target='target'):
return self.m.json.output({
'error': message,
'view_build_url': 'https://example.com/build/%s' % target,
})
def upload_success_output(self, tarball, target='target', canonical_tag=None):
if not tarball:
name = 'example/%s' % target
digest = sha256(name).hexdigest()[:16]+'...'
bucket = 'example'
path = 'tarballs/example/%s/%s.tar.gz' % (target, digest)
tag = canonical_tag or '11111-deadbeef'
else:
name = tarball.name
digest = tarball.sha256
bucket = tarball.bucket
path = tarball.path
tag = tarball.version
return self.m.json.output({
'name': name,
'sha256': digest,
'gs': {
'bucket': bucket,
'name': path,
},
'canonical_tag': tag,
})
def upload_error_output(self, message):
return self.m.json.output({'error': message})
def update_pins_output(self, updated):
return self.m.json.output({'updated': updated or []})
|
[
"hashlib.sha256"
] |
[((1283, 1295), 'hashlib.sha256', 'sha256', (['name'], {}), '(name)\n', (1289, 1295), False, 'from hashlib import sha256\n'), ((470, 484), 'hashlib.sha256', 'sha256', (['target'], {}), '(target)\n', (476, 484), False, 'from hashlib import sha256\n')]
|
import sklearn.mixture
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
import matplotlib.patheffects as mpatheffects
def get_gmm_and_pos_label(
array, n_components=2, n_steps=5000
):
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
gmm.fit(array.reshape(-1, 1))
label = np.argmax(gmm.means_)
# low = array.min()
# high = array.max()
low = gmm.means_.min() - 2*np.sqrt(gmm.covariances_[np.argmin(gmm.means_)])
high = gmm.means_.max() + 2*np.sqrt(gmm.covariances_[np.argmax(gmm.means_)])
ref_space = np.linspace(low, high, n_steps)
result = gmm.predict(ref_space.reshape(-1, 1))
idx = np.where(np.ediff1d(result) != 0)
cutoffs = ref_space[idx]
return gmm, label, cutoffs
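# Minimal usage sketch (illustrative only; `intensities` is a hypothetical 1-D
# array of marker values):
#   gmm, pos_label, cutoffs = get_gmm_and_pos_label(intensities, n_components=2)
#   positive_mask = intensities > cutoffs.max()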
def _get_gmm_and_pos_label(array, n_components=2):
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
gmm.fit(array.reshape(-1, 1))
label = np.argmax(gmm.means_)
low = np.expm1(array.min())
high = np.expm1(array.max())
ref_space = np.arange(low, high)
ref_space = np.log1p(ref_space)
result = gmm.predict(ref_space.reshape(-1, 1))
idx = np.where(np.ediff1d(result) != 0)
_cutoffs = ref_space[idx]
diff_mean = np.absolute(_cutoffs - np.mean(array))
diff_high = np.absolute(_cutoffs - np.log1p(high))
cutoffs = _cutoffs[diff_mean < diff_high]
cutoff = np.expm1(cutoffs.max())
# cutoff = cutoffs[np.argmin(diff_mean < diff_high)]
# return gmm, label, cutoff
return gmm, label, _cutoffs
diff_mean = np.absolute(_cutoffs - np.mean(np.expm1(array)))
diff_high = np.absolute(_cutoffs - high)
diff_low = np.absolute(_cutoffs - low)
between = (diff_mean < diff_high) & (diff_mean < diff_low)
cutoffs = _cutoffs[between]
cutoff = cutoffs[np.argmax(between)]
return gmm, label, cutoff
def plot_gmm_fitting(array, gmm, ax):
plt.sca(ax)
_ = plt.hist(array.flatten(), color='lightgray', bins=200, density=True)
x = np.linspace(array.min(), array.max(), 200)
log_prob = gmm.score_samples(x.reshape(-1, 1))
responsibilities = gmm.predict_proba(x.reshape(-1, 1))
pdf = np.exp(log_prob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
mean_index = np.argmax(pdf_individual, axis=0)
rank_map = mean_index.argsort().argsort()
ax.set_prop_cycle(
color=plt.get_cmap('Dark2')(rank_map)
)
ax.plot(x, pdf_individual)
ax.plot(x, pdf, '--k')
return ax
def auto_gate_func(array, n_components=3, n_stds=3, log_transform=True):
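    """Return a lower gate for `array`: mean minus n_stds standard deviations of
    the highest-mean mixture component, mapped back via expm1 when the fit was
    done on log1p-transformed values."""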
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
if log_transform:
gmm.fit(np.log1p(array).reshape(-1, 1))
else:
gmm.fit(array.reshape(-1, 1))
means = gmm.means_
stds = np.sqrt(gmm.covariances_)
idx = np.argmax(means)
lower_bound = means[idx] - n_stds * stds[idx]
if log_transform:
return np.expm1(lower_bound)
else:
return lower_bound
def plot_cumulative(array, ax, hist_kwargs={}):
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
_ = ax.hist(array, histtype='step', bins=300, cumulative=1, **hist_kwargs)
return ax
def gmm_label_map_by_mean(gmm):
return {
o:n
for o, n in zip(
range(len(gmm.means_)),
sorted(range(len(gmm.means_)), key=lambda x: gmm.means_[x][0])
)
}
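# gmm_label_map_by_mean example: for component means [5.0, 1.0, 3.0] it returns
# {0: 1, 1: 2, 2: 0} -- the key is the rank (0 = smallest mean) and the value is
# the original component index holding that rank.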
def sort_predict_label(gmm, labels):
mapping = gmm_label_map_by_mean(gmm)
sorted_labels = labels.copy()
    for k, v in mapping.items():  # .iteritems() was Python 2 only
sorted_labels[labels==k] = v
return sorted_labels
def plot_hist_gmm(
df,
markers,
n_components=2,
subplot_grid_shape=None,
transform_log=True,
xlim_percentiles=(0, 100),
cum_density=False,
hide_yaxis_left=True
):
if transform_log:
df = df.transform(np.log1p)
revert_func = np.expm1
else:
revert_func = np.array
if subplot_grid_shape is None:
subplot_grid_shape = (1, len(markers))
n_rows, n_cols = subplot_grid_shape
fig, axes = plt.subplots(n_rows, n_cols, sharex=True)
axes = np.array(axes)
for m, ax in zip(markers, axes.ravel()):
gmm, _, cutoffs = get_gmm_and_pos_label(
df[m].values, n_components=n_components
)
plot_gmm_fitting(df[m].values, gmm, ax)
ax.title.set_text(m)
if hide_yaxis_left:
ax.yaxis.set_visible(False)
p1, p2 = np.array(xlim_percentiles) / 100
axis_min = df.loc[:, markers].quantile(p1).min()
axis_max = df.loc[:, markers].quantile(p2).max()
color_cum = 'gray'
pax = ax.twinx()
pax = plot_cumulative(
df[m].values, pax,
hist_kwargs=dict(color=color_cum, density=cum_density)
)
pax.tick_params(axis='y', labelsize=8, colors=color_cum)
print(cutoffs)
cutoff_range = np.ptp(cutoffs)
if cutoff_range == 0: cutoff_range = 1
cutoff_colors = plt.get_cmap('plasma')(
(cutoffs - np.min(cutoffs)) / cutoff_range
)
for co, cc in zip(cutoffs, cutoff_colors):
ax.axvline(x=co, c=cc, alpha=0.2)
ax.annotate(
'',
xy=(co, 0), xytext=(co, -0.05),
xycoords=('data', 'axes fraction'),
arrowprops=dict(arrowstyle='wedge, tail_width=0.7, shrink_factor=0.5', color=cc)
)
ax.set_xlim(axis_min, axis_max)
# cutoff_string = np.round(revert_func(cutoffs)).astype(int)
for i, (co, cc) in enumerate(
zip(revert_func(cutoffs)[::-1], cutoff_colors[::-1])
):
text = ax.text(
ax.get_xlim()[0] + 0.02*np.diff(ax.get_xlim()),
ax.get_ylim()[1] - 0.05*(i+1)*np.diff(ax.get_ylim()),
f'{np.round(co).astype(int)}',
fontsize=10, c=cc
)
text_outline = mpatheffects.Stroke(linewidth=1, foreground='#000')
text.set_path_effects(
[text_outline, mpatheffects.Normal()]
)
plt.tight_layout()
for aax in fig.axes:
aax.spines['right'].set_color(color_cum)
power_label = aax.yaxis.get_offset_text()
power_label.set_visible(False)
aax.annotate(
power_label.get_text(), xy=(1.02, 1.01),
xycoords='axes fraction', fontsize=10,
color=color_cum
)
plt.sca(ax)
|
[
"numpy.ptp",
"numpy.sqrt",
"matplotlib.patheffects.Normal",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"numpy.arange",
"numpy.mean",
"numpy.exp",
"numpy.linspace",
"numpy.min",
"numpy.argmin",
"numpy.round",
"numpy.ediff1d",
"numpy.argmax",
"matplotlib.pyplot.get_cmap",
"numpy.absolute",
"numpy.expm1",
"matplotlib.pyplot.sca",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.tight_layout",
"numpy.log1p",
"matplotlib.pyplot.subplots"
] |
[((410, 431), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (419, 431), True, 'import numpy as np\n'), ((669, 700), 'numpy.linspace', 'np.linspace', (['low', 'high', 'n_steps'], {}), '(low, high, n_steps)\n', (680, 700), True, 'import numpy as np\n'), ((1099, 1120), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (1108, 1120), True, 'import numpy as np\n'), ((1207, 1227), 'numpy.arange', 'np.arange', (['low', 'high'], {}), '(low, high)\n', (1216, 1227), True, 'import numpy as np\n'), ((1245, 1264), 'numpy.log1p', 'np.log1p', (['ref_space'], {}), '(ref_space)\n', (1253, 1264), True, 'import numpy as np\n'), ((1807, 1835), 'numpy.absolute', 'np.absolute', (['(_cutoffs - high)'], {}), '(_cutoffs - high)\n', (1818, 1835), True, 'import numpy as np\n'), ((1852, 1879), 'numpy.absolute', 'np.absolute', (['(_cutoffs - low)'], {}), '(_cutoffs - low)\n', (1863, 1879), True, 'import numpy as np\n'), ((2102, 2113), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (2109, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2385), 'numpy.exp', 'np.exp', (['log_prob'], {}), '(log_prob)\n', (2375, 2385), True, 'import numpy as np\n'), ((2466, 2499), 'numpy.argmax', 'np.argmax', (['pdf_individual'], {'axis': '(0)'}), '(pdf_individual, axis=0)\n', (2475, 2499), True, 'import numpy as np\n'), ((3069, 3094), 'numpy.sqrt', 'np.sqrt', (['gmm.covariances_'], {}), '(gmm.covariances_)\n', (3076, 3094), True, 'import numpy as np\n'), ((3106, 3122), 'numpy.argmax', 'np.argmax', (['means'], {}), '(means)\n', (3115, 3122), True, 'import numpy as np\n'), ((3344, 3384), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (3366, 3384), False, 'from matplotlib import ticker\n'), ((4539, 4580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'sharex': '(True)'}), '(n_rows, n_cols, sharex=True)\n', (4551, 4580), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4607), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (4601, 4607), True, 'import numpy as np\n'), ((6638, 6656), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6654, 6656), True, 'import matplotlib.pyplot as plt\n'), ((6998, 7009), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (7005, 7009), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2021), 'numpy.argmax', 'np.argmax', (['between'], {}), '(between)\n', (2012, 2021), True, 'import numpy as np\n'), ((3213, 3234), 'numpy.expm1', 'np.expm1', (['lower_bound'], {}), '(lower_bound)\n', (3221, 3234), True, 'import numpy as np\n'), ((5408, 5423), 'numpy.ptp', 'np.ptp', (['cutoffs'], {}), '(cutoffs)\n', (5414, 5423), True, 'import numpy as np\n'), ((775, 793), 'numpy.ediff1d', 'np.ediff1d', (['result'], {}), '(result)\n', (785, 793), True, 'import numpy as np\n'), ((1339, 1357), 'numpy.ediff1d', 'np.ediff1d', (['result'], {}), '(result)\n', (1349, 1357), True, 'import numpy as np\n'), ((1437, 1451), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (1444, 1451), True, 'import numpy as np\n'), ((1493, 1507), 'numpy.log1p', 'np.log1p', (['high'], {}), '(high)\n', (1501, 1507), True, 'import numpy as np\n'), ((4939, 4965), 'numpy.array', 'np.array', (['xlim_percentiles'], {}), '(xlim_percentiles)\n', (4947, 4965), True, 'import numpy as np\n'), ((5497, 5519), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (5509, 5519), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6526), 
'matplotlib.patheffects.Stroke', 'mpatheffects.Stroke', ([], {'linewidth': '(1)', 'foreground': '"""#000"""'}), "(linewidth=1, foreground='#000')\n", (6494, 6526), True, 'import matplotlib.patheffects as mpatheffects\n'), ((1772, 1787), 'numpy.expm1', 'np.expm1', (['array'], {}), '(array)\n', (1780, 1787), True, 'import numpy as np\n'), ((2588, 2609), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Dark2"""'], {}), "('Dark2')\n", (2600, 2609), True, 'import matplotlib.pyplot as plt\n'), ((544, 565), 'numpy.argmin', 'np.argmin', (['gmm.means_'], {}), '(gmm.means_)\n', (553, 565), True, 'import numpy as np\n'), ((626, 647), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (635, 647), True, 'import numpy as np\n'), ((2951, 2966), 'numpy.log1p', 'np.log1p', (['array'], {}), '(array)\n', (2959, 2966), True, 'import numpy as np\n'), ((5545, 5560), 'numpy.min', 'np.min', (['cutoffs'], {}), '(cutoffs)\n', (5551, 5560), True, 'import numpy as np\n'), ((6595, 6616), 'matplotlib.patheffects.Normal', 'mpatheffects.Normal', ([], {}), '()\n', (6614, 6616), True, 'import matplotlib.patheffects as mpatheffects\n'), ((6368, 6380), 'numpy.round', 'np.round', (['co'], {}), '(co)\n', (6376, 6380), True, 'import numpy as np\n')]
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : upsert subquery feature
Case Name   : verify the upsert subquery FOR UPDATE syntax
Description :
    1. Create the initial test data
    2. Start session1 in a thread; inside its transaction the correlated subquery does FOR UPDATE and then waits 20s
    3. 5s after session1 starts, session2 updates the rows locked by session1
    4. Verify that session2 commits only after session1 has committed
    5. Verify the result of session1's transaction
    6. Verify that session2's update only happened after session1's transaction committed
Expect      :
    1. The initial test data is created
    2. Session1 runs in a thread; inside its transaction the correlated subquery does FOR UPDATE and then waits 20s
    3. 5s after session1 starts, session2 starts updating the rows locked by session1
    4. Session2 commits only after session1 commits; session2's total execution time exceeds 10s
    5. Session1's transaction commits normally
    6. Verified via SELECT: session1's update is based on the data as it was before session2's commit
History     :
"""
import time
import unittest
from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class UpsertCase131(unittest.TestCase):
def setUp(self):
self.log = Logger()
        self.log.info('----Opengauss_Function_DML_Upsert_Case0131: setup----')
self.pri_sh = CommonSH('PrimaryDbUser')
self.constant = Constant()
self.t1 = 't_dml_upsert_sub0131'
self.t2 = 't_dml_upsert0131'
def test_main(self):
self.log.info("--1、初始创建测试数据--")
sql = f"drop table if exists {self.t1};" \
f"create table {self.t1} (a int,b text);" \
f"insert into {self.t1} values(generate_series(1,10)," \
f"'b-'||generate_series(1,10));" \
f"drop table if exists {self.t2};" \
f"create table {self.t2} (a int primary key,b text,c text);" \
f"insert into {self.t2} values (1,1,1),(2,2,2),(3,3,3);" \
f"select * from {self.t2};select * from {self.t1};"
result = self.pri_sh.execut_db_sql(sql)
self.log.info(result)
self.assertTrue("INSERT 0 10" in result and 'INSERT 0 3' in result)
self.log.info("--2、session1以线程方式启动,事务内部相关子查询for update后等待20s--")
sql1 = f"begin;" \
f"insert into {self.t2} values(2) on duplicate key update " \
f"b= (select b from {self.t1} where a = excluded.a for update);" \
f"select pg_sleep(20);" \
f"end;"
self.log.info(sql1)
session1_excute = ComThread(self.pri_sh.execut_db_sql, args=(sql1,))
session1_excute.setDaemon(True)
session1_excute.start()
time.sleep(5)
self.log.info("--3、session1开始后等待5s,session2对session1相关的锁定行进行update--")
sql2 = f"begin;" \
f"update {self.t1} set b ='bb-2' where a =2;" \
f"end;"
self.log.info(sql2)
start_time = time.time()
session2_result = self.pri_sh.execut_db_sql(sql2)
self.assertIn(self.constant.COMMIT_SUCCESS_MSG, session2_result)
self.log.info("--4、验证session1提交后,session2才提交;session2事务提交总时长大于10s--")
self.log.info(session2_result)
end_time = time.time()
self.log.info('start_time:' + str(start_time) +
';end_time:' + str(end_time))
        self.log.info('session2 wait time: ' + str(end_time - start_time))
self.assertTrue(end_time - start_time > 10)
self.log.info("--5、session1事务执行结果--")
session1_excute.join()
session1_result = session1_excute.get_result()
self.log.info(session1_result)
self.assertIn(self.constant.COMMIT_SUCCESS_MSG, session1_result)
        self.log.info("--6. Verify session2 performed its update only after "
                      "the session1 transaction committed--")
sql3 = f"select * from {self.t2} where a = 2;"
result3 = self.pri_sh.execut_db_sql(sql3)
self.log.info(result3)
self.assertIn("b-2", result3)
sql4 = f"select * from {self.t1} where a = 2;"
result4 = self.pri_sh.execut_db_sql(sql4)
self.log.info(result4)
self.assertIn("bb-2", result4)
def tearDown(self):
        self.log.info("--Clean up the test data--")
clean_sql = f"drop table if exists {self.t1};" \
f"drop table if exists {self.t2};"
clean_result = self.pri_sh.execut_db_sql(clean_sql)
self.log.info(clean_result)
        self.log.info('----Opengauss_Function_DML_Upsert_Case0131: test case finished----')
|
[
"time.sleep",
"testcase.utils.Logger.Logger",
"testcase.utils.Constant.Constant",
"testcase.utils.CommonSH.CommonSH",
"time.time",
"testcase.utils.ComThread.ComThread"
] |
[((1408, 1416), 'testcase.utils.Logger.Logger', 'Logger', ([], {}), '()\n', (1414, 1416), False, 'from testcase.utils.Logger import Logger\n'), ((1515, 1540), 'testcase.utils.CommonSH.CommonSH', 'CommonSH', (['"""PrimaryDbUser"""'], {}), "('PrimaryDbUser')\n", (1523, 1540), False, 'from testcase.utils.CommonSH import CommonSH\n'), ((1565, 1575), 'testcase.utils.Constant.Constant', 'Constant', ([], {}), '()\n', (1573, 1575), False, 'from testcase.utils.Constant import Constant\n'), ((2722, 2772), 'testcase.utils.ComThread.ComThread', 'ComThread', (['self.pri_sh.execut_db_sql'], {'args': '(sql1,)'}), '(self.pri_sh.execut_db_sql, args=(sql1,))\n', (2731, 2772), False, 'from testcase.utils.ComThread import ComThread\n'), ((2853, 2866), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2863, 2866), False, 'import time\n'), ((3103, 3114), 'time.time', 'time.time', ([], {}), '()\n', (3112, 3114), False, 'import time\n'), ((3383, 3394), 'time.time', 'time.time', ([], {}), '()\n', (3392, 3394), False, 'import time\n')]
|
"""
This is a django-split-settings main file.
For more information read this:
https://github.com/sobolevn/django-split-settings
Default environment is `development`.
To change settings file:
`DJANGO_ENV=production python manage.py runserver`
"""
import django_heroku
from split_settings.tools import include
base_settings = [
'components/middleware.py', # middleware configuration
'components/apps.py', # installed applications
'components/database.py', # database settings
'components/pyuploadcare.py', # pyuploadcare settings
'components/rest_framework.py', # rest framework settings
'components/allauth.py', # allauth rest_auth settings
'components/currency.py', # currency settings
'components/email.py', # email settings
'components/common.py', # standard django settings
    'components/cors_configuration.py',  # configuration for Access-Control-Allow-Origin
    'components/graphene.py',
    'components/sendy.py',  # sendy config
]
# Include settings:
include(*base_settings)
django_heroku.settings(locals())
|
[
"split_settings.tools.include"
] |
[((1080, 1103), 'split_settings.tools.include', 'include', (['*base_settings'], {}), '(*base_settings)\n', (1087, 1103), False, 'from split_settings.tools import include\n')]
|
from django.conf.urls import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .views import template_test
urlpatterns = patterns(
'',
url(r'^test/', template_test, name='template_test'),
url(r'^test2/', include('testapp.another_urls', namespace='foo', app_name='faa'))
)
admin.autodiscover()
urlpatterns += patterns(
'',
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
|
[
"django.conf.urls.include",
"django.conf.urls.url",
"django.contrib.staticfiles.urls.staticfiles_urlpatterns",
"django.contrib.admin.autodiscover"
] |
[((401, 421), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (419, 421), False, 'from django.contrib import admin\n'), ((258, 308), 'django.conf.urls.url', 'url', (['"""^test/"""', 'template_test'], {'name': '"""template_test"""'}), "('^test/', template_test, name='template_test')\n", (261, 308), False, 'from django.conf.urls import patterns, url, include\n'), ((544, 569), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ([], {}), '()\n', (567, 569), False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((331, 395), 'django.conf.urls.include', 'include', (['"""testapp.another_urls"""'], {'namespace': '"""foo"""', 'app_name': '"""faa"""'}), "('testapp.another_urls', namespace='foo', app_name='faa')\n", (338, 395), False, 'from django.conf.urls import patterns, url, include\n'), ((476, 500), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (483, 500), False, 'from django.conf.urls import patterns, url, include\n'), ((652, 679), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (659, 679), False, 'from django.conf.urls import patterns, url, include\n')]
|