code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars)
---|---|---|
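Each row below pairs a source file (code column) with the fully qualified calls it makes (apis column) and per-call span metadata (extract_api column). A minimal sketch of how one such row could be consumed; the field names follow the header above, but the loading mechanism and the assumption that extract_api is a Python-literal string are mine:

import ast

def parse_row(row):
    """Split one dataset row into its three columns (assumed layout)."""
    source = row["code"]            # raw source text
    api_names = row["apis"]         # list of dotted call names
    # extract_api is assumed to hold a Python-literal list of
    # ((start, end), qualified_name, short_name, (args, kwargs), ...) tuples
    call_records = ast.literal_eval(row["extract_api"])
    return source, api_names, call_records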
from django.http import HttpResponseRedirect
from django.views import View
from ..core import app_settings
class AppLoginView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_LOGIN_URL + next)
class AppSignupView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_SIGNUP_URL + next)
class AppLogoutView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_LOGOUT_URL + next)
|
[
"django.http.HttpResponseRedirect"
] |
[((240, 298), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['(app_settings.REMOTE_LOGIN_URL + next)'], {}), '(app_settings.REMOTE_LOGIN_URL + next)\n', (260, 298), False, 'from django.http import HttpResponseRedirect\n'), ((432, 491), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['(app_settings.REMOTE_SIGNUP_URL + next)'], {}), '(app_settings.REMOTE_SIGNUP_URL + next)\n', (452, 491), False, 'from django.http import HttpResponseRedirect\n'), ((625, 684), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['(app_settings.REMOTE_LOGOUT_URL + next)'], {}), '(app_settings.REMOTE_LOGOUT_URL + next)\n', (645, 684), False, 'from django.http import HttpResponseRedirect\n')]
|
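The three redirect views above only become reachable once they are routed. A minimal wiring sketch: the urls.py below is hypothetical and assumes the views live in a sibling views module, with only the REMOTE_*_URL settings referenced above needing to exist.

from django.urls import path
from .views import AppLoginView, AppLogoutView, AppSignupView

urlpatterns = [
    path("login/", AppLoginView.as_view(), name="app_login"),
    path("signup/", AppSignupView.as_view(), name="app_signup"),
    path("logout/", AppLogoutView.as_view(), name="app_logout"),
]

Note that each view simply appends the raw next parameter to the remote URL, so callers are expected to pass an already URL-encoded value.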
from scenarios import helper
from scenarios.builder import Builder
from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType
from model.knowledge_base import kb
from model.entities import Entity, CausalFactor
from model.utils import BoundingBox
from model import rule
class Controller:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
print('Controller object has been created.')
cls._instance = super(Controller, cls).__new__(cls)
# Put any initialization here.
return cls._instance
def __init__(self, v_obj, m_obj):
self._v_obj = v_obj
self._m_obj = m_obj
def __str__(self):
return "Controller"
def withdraw_btn_callback(self):
val = float(self._v_obj.rmb_text.displayText())
self._m_obj.withdraw(val)
def deposit_btn_callback(self):
val = float(self._v_obj.rmb_text.displayText())
self._m_obj.deposit(val)
def get_experience_causal_factor(self):
res = e_ExperienceFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def get_mental_or_emotional_causal_factor(self):
res = e_MentalOrEmotionalFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def get_phy_or_phy_causal_factor(self):
res = e_PhyOrPhyFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def list_of_vehicle_causal_factor(self):
_ = {
"cf_driver": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
"cf_fellow_passenger": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
"cf_vehicle": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
}
return _
def list_of_pedestrian_causal_factor(self):
pass
def list_of_scenarios(self):
# todo: Load list of scenarios from a dedicated module.
_ = {
"0": {
"name": "scenario7",
"description": "Description of scenario 01 here",
"sub_scenarios": {
"0": {
"name": "scenario01_20211016-100245",
"description": "Description of scenario01_20211016-100245",
},
"1": {
"name": "scenario01_20211016-101607",
"description": "Description of scenario01_20211016-101607",
},
"2": {
"name": "scenario01_20211021-101607",
"description": "Description of scenario01_20211021-101607",
}
},
"ego_settings": {},
"vehicles_settings": {
"0": {
"cf_vehicle": {
"causal_factor": {}
},
"cf_driver": {
"causal_factor": {
"id": "1",
"value": "distraction",
"description": "Description of causal factor.",
}
},
"cf_fellow_passenger": {}
},
},
"pedestrian_settings": {},
},
"1": {"name": "Scenario02", "description": "Description of scenario 02 here"},
}
return helper.get_scenarios()
def run_scenario_callback(self, scenario_name: str):
print("pushButton_run_scenario {}".format(scenario_name))
helper.run_scenario(scenario_name)
def run_sub_scenario_callback(self, scenario):
import os
from scenariogeneration import esmini
from config import ESMINI_DIR
esmini(scenario, os.path.join(ESMINI_DIR))
def action_exit_callback(self):
return True
def update_scenario_callback(self, vehicle_id):
print("pushButton_update_scenario")
# done: create Driver (AndreA) instance in ontology
# done: link Driver to Vehicle (JamCar)
# done: assign CausalFactor to Driver (AndreA) instance
# done: SPARQL: Given a CausalFactor, give me DrivingError
# todo: SPARQL: Given current Action + DrivingError, give me next Actions.
# todo: For each actions returned, link each one to an alternative behavior in library
# todo: Present alternative to UI
entity = Entity('Andrea', 68, e_EntityType.Driver, BoundingBox(0.5, 0.6, 1.8, 1.3, 0.0, 0.8))
andrea = kb.insert_entity(entity)
vehicle = kb.get_entity_from_cache(vehicle_id)
kb.add_relation(andrea, vehicle, e_Relation.isOn.isOnVehicle.driverIsOnVehicle)
current_action = kb.get_current_action(vehicle)
cf = CausalFactor("test_name", e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction)
cf_i = kb.insert_entity(cf)
kb.add_relation(andrea, cf_i, e_Relation.isImpaired.driverIsImpaired)
driving_errors = rule.get_driving_error_to_causal_factor_rule(e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction)
builder = Builder()
sub_scenarios = builder.get_sub_scenario_foo()
import re
for d_error in driving_errors:
_ = re.sub(".*#", "", d_error['x'])
# builder.build("scenario10", _, current_action)
return sub_scenarios
|
[
"model.enumerations.e_PhyOrPhyFactor.all",
"scenarios.helper.get_scenarios",
"model.knowledge_base.kb.get_current_action",
"model.enumerations.e_MentalOrEmotionalFactor.all",
"scenarios.builder.Builder",
"model.entities.CausalFactor",
"os.path.join",
"scenarios.helper.run_scenario",
"model.utils.BoundingBox",
"model.rule.get_driving_error_to_causal_factor_rule",
"model.enumerations.e_ExperienceFactor.all",
"re.sub",
"model.knowledge_base.kb.get_entity_from_cache",
"model.knowledge_base.kb.insert_entity",
"model.knowledge_base.kb.add_relation"
] |
[((1119, 1143), 'model.enumerations.e_ExperienceFactor.all', 'e_ExperienceFactor.all', ([], {}), '()\n', (1141, 1143), False, 'from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType\n'), ((1332, 1363), 'model.enumerations.e_MentalOrEmotionalFactor.all', 'e_MentalOrEmotionalFactor.all', ([], {}), '()\n', (1361, 1363), False, 'from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType\n'), ((1543, 1565), 'model.enumerations.e_PhyOrPhyFactor.all', 'e_PhyOrPhyFactor.all', ([], {}), '()\n', (1563, 1565), False, 'from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType\n'), ((5535, 5557), 'scenarios.helper.get_scenarios', 'helper.get_scenarios', ([], {}), '()\n', (5555, 5557), False, 'from scenarios import helper\n'), ((5690, 5724), 'scenarios.helper.run_scenario', 'helper.run_scenario', (['scenario_name'], {}), '(scenario_name)\n', (5709, 5724), False, 'from scenarios import helper\n'), ((6662, 6686), 'model.knowledge_base.kb.insert_entity', 'kb.insert_entity', (['entity'], {}), '(entity)\n', (6678, 6686), False, 'from model.knowledge_base import kb\n'), ((6706, 6742), 'model.knowledge_base.kb.get_entity_from_cache', 'kb.get_entity_from_cache', (['vehicle_id'], {}), '(vehicle_id)\n', (6730, 6742), False, 'from model.knowledge_base import kb\n'), ((6751, 6830), 'model.knowledge_base.kb.add_relation', 'kb.add_relation', (['andrea', 'vehicle', 'e_Relation.isOn.isOnVehicle.driverIsOnVehicle'], {}), '(andrea, vehicle, e_Relation.isOn.isOnVehicle.driverIsOnVehicle)\n', (6766, 6830), False, 'from model.knowledge_base import kb\n'), ((6856, 6886), 'model.knowledge_base.kb.get_current_action', 'kb.get_current_action', (['vehicle'], {}), '(vehicle)\n', (6877, 6886), False, 'from model.knowledge_base import kb\n'), ((6900, 6998), 'model.entities.CausalFactor', 'CausalFactor', (['"""test_name"""', 'e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction'], {}), "('test_name', e_CausalFactorType.HumanFactor.\n MentalOrEmotionalFactor.Distraction)\n", (6912, 6998), False, 'from model.entities import Entity, CausalFactor\n'), ((7009, 7029), 'model.knowledge_base.kb.insert_entity', 'kb.insert_entity', (['cf'], {}), '(cf)\n', (7025, 7029), False, 'from model.knowledge_base import kb\n'), ((7038, 7107), 'model.knowledge_base.kb.add_relation', 'kb.add_relation', (['andrea', 'cf_i', 'e_Relation.isImpaired.driverIsImpaired'], {}), '(andrea, cf_i, e_Relation.isImpaired.driverIsImpaired)\n', (7053, 7107), False, 'from model.knowledge_base import kb\n'), ((7133, 7250), 'model.rule.get_driving_error_to_causal_factor_rule', 'rule.get_driving_error_to_causal_factor_rule', (['e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction'], {}), '(e_CausalFactorType.HumanFactor\n .MentalOrEmotionalFactor.Distraction)\n', (7177, 7250), False, 'from model import rule\n'), ((7265, 7274), 'scenarios.builder.Builder', 'Builder', ([], {}), '()\n', (7272, 7274), False, 'from scenarios.builder import Builder\n'), ((5904, 5928), 'os.path.join', 'os.path.join', (['ESMINI_DIR'], {}), '(ESMINI_DIR)\n', (5916, 5928), False, 'import os\n'), ((6602, 6643), 'model.utils.BoundingBox', 'BoundingBox', (['(0.5)', '(0.6)', '(1.8)', '(1.3)', '(0.0)', '(0.8)'], {}), '(0.5, 0.6, 1.8, 1.3, 0.0, 0.8)\n', (6613, 6643), False, 'from model.utils import BoundingBox\n'), ((7405, 7436), 
're.sub', 're.sub', (['""".*#"""', '""""""', "d_error['x']"], {}), "('.*#', '', d_error['x'])\n", (7411, 7436), False, 'import re\n')]
|
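The __new__ override in the Controller above caches a single instance, but __init__ still runs on every construction and rebinds _v_obj/_m_obj on that shared instance. A short illustration of the resulting behaviour (view_a, view_b, model_a, model_b are hypothetical stand-ins):

first = Controller(view_a, model_a)
second = Controller(view_b, model_b)

assert first is second           # __new__ always hands back the cached instance
assert first._v_obj is view_b    # ...but the second __init__ replaced the stored view object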
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from Bio import AlignIO
def concat_msa(msas, output):
"""concatenate msas together"""
alignments = []
for msa in msas:
align = AlignIO.read(msa, "fasta")
# shorten id so the concatenated alignment keeps it
for record in align._records:
record.id = record.id.split("|")[0]
if len(align._records) == 3:
alignments.append(align)
concatenated_alignment = alignments[0]
for alignment in alignments[1:]:
concatenated_alignment += alignment
with open(output, "w") as outfile:
AlignIO.write(concatenated_alignment, outfile, "fasta")
def main():
parser = argparse.ArgumentParser(
prog="concat_msa.py"
)
parser.add_argument(
"--msa",
type=str,
required=True,
nargs="*",
help="multiple sequence alignment to concatenate"
)
parser.add_argument(
"--output",
type=str,
required=True,
help="output file"
)
args = parser.parse_args()
concat_msa(args.msa, args.output)
if __name__ == "__main__":
main()
|
[
"Bio.AlignIO.read",
"Bio.AlignIO.write",
"argparse.ArgumentParser"
] |
[((733, 778), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""concat_msa.py"""'}), "(prog='concat_msa.py')\n", (756, 778), False, 'import argparse\n'), ((213, 239), 'Bio.AlignIO.read', 'AlignIO.read', (['msa', '"""fasta"""'], {}), "(msa, 'fasta')\n", (225, 239), False, 'from Bio import AlignIO\n'), ((650, 705), 'Bio.AlignIO.write', 'AlignIO.write', (['concatenated_alignment', 'outfile', '"""fasta"""'], {}), "(concatenated_alignment, outfile, 'fasta')\n", (663, 705), False, 'from Bio import AlignIO\n')]
|
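concat_msa above keeps only alignments with exactly three records and relies on Biopython's MultipleSeqAlignment addition, which joins alignments record by record (row-wise), so every input must contain the same taxa in the same order. A minimal invocation sketch; the file names and the "taxon|gene" id convention are assumptions:

concat_msa(
    msas=["geneA.aln.fasta", "geneB.aln.fasta", "geneC.aln.fasta"],
    output="concatenated.fasta",
)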
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from os import path
import re
import json
import time
import datetime
import xlsxwriter
print("Start: " + str(datetime.datetime.now()))
options = Options()
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
# driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.implicitly_wait(0.5)
url_brands = "https://www.rendez-vous.ru/catalog/brands/"
brands = [
"A<NAME>", "AGL", "BANU", "Bally", 'Bresciani', 'Brimarts', '<NAME>', 'Casadei', 'Casheart,',
'Cerruti 1881', '<NAME>', 'Coccinelle', 'DKNY', 'Doria Maria', 'Doucal\'s', 'F_WD', 'Fabi', '<NAME>',
'<NAME>', 'Flower Mountain', 'Franceschetti', 'Frasconi', '<NAME>', 'Fratelli Rossetti One',
'<NAME>', 'Goose Tech', 'GUM', 'HIDE&JACK', 'Ice Play', 'Iceberg', 'In The Box', 'Inuikii',
'<NAME>', '<NAME>', 'Kalliste', '<NAME>', '<NAME>', 'Lancaster', 'Landi', 'Le Silla',
'Lemon Jelly', "L'Impermeabile", 'Marsell', '<NAME>', 'Moose Knuckles', 'Moreschi', 'Moschino', 'Panchic',
'Pantanetti', 'Parajumpers', 'Pasotti', 'Pertini', '<NAME>', 'Pollini', 'Portolano', 'Premiata',
'<NAME>', 'RBRSL', "Reptile's House", '<NAME>', '<NAME>', '<NAME>', 'SPRAYGROUND',
'Stemar', '<NAME>', 'V SEASON', "VIC MATIE'", "<NAME>", '<NAME>', 'What For', 'Wolford', '3JUIN',
'Premiata will be', 'Sprayground', 'Domrebel', 'GIUSEPPE ZANOTTI DESIGN', 'Giuseppe Zanotti Design',
'GIUSEPPE ZANOTTI', '<NAME>'
]
search_values = ['Wolford', 'RBRSL', "Rocco P", "DKNY", 'Flower Mountain', 'HIDE&JACK', 'Inuikii', 'Lancaster']
categories = [
"Женское",
'Мужское',
"Детское"
]
iframe_ids = ['fl-545545']
show = "//li[@class='next']/a"
pagination_class_selected = 'page selected'
last_page = '//ul[@id="pagination_bottom"]/li[@class="last"]'
search_btn = '//*[@id="search-toggle"]'
search_bar = '//*[@id="Search_q"]'
failed_pages = {'pages': []}
output = xlsxwriter.Workbook('C:\\Users\\admin\\Documents\\outputs\\Rendez-vous {}.xlsx'.format(str(datetime.date.today())))
sheet = output.add_worksheet('Rendez-vous')
sheet.write('A1', 'Артикул')
sheet.write('B1', 'Цена')
sheet.write('C1', 'Старая цена')
sheet.write('D1', 'Скидка')
sheet.write('E1', 'Бренд')
sheet.write('F1', 'Артикул производителя')
sheet.write('G1', 'Ссылка')
tables = {}
count = 0
row = 2
closed = False
scrolled = False
def open_brands():
driver.get(url_brands)
for el in brands:
global scrolled
scrolled = False
scroll_brands(el)
def open_brand(el):
driver.find_element(By.XPATH, '//div[@class="js-quick-search-source brand-popover"]'
'//a[contains(text(), "{}")]'.format(el.upper())).click()
write_data()
def scroll_brands(el):
driver.get(url_brands)
driver.execute_script('window.scrollBy(0, -7000)')
try:
actions = ActionChains(driver)
actions.move_to_element(driver.find_element_by_xpath('//div[@class="js-quick-search-source brand-popover"]'
'//a[contains(text(), "{}")]'.format(el.upper()))).perform()
open_brand(el)
except Exception as e:
print(el.upper() + " not found in the list, skipping.")
print(e)
global scrolled
scrolled = True
def search():
driver.get(url_brands)
for b in search_values:
# WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, '//button[@class="search-mobile__open-button"]')))
driver.find_element(By.XPATH, search_btn).click()
driver.find_element(By.XPATH, search_bar).click()
driver.find_element(By.XPATH, search_bar).clear()
driver.find_element(By.XPATH, search_bar).send_keys(b)
driver.find_element(By.XPATH, search_bar).send_keys(Keys.ENTER)
time.sleep(2)
try:
write_data()
except Exception as e:
print("Failure in finding elements")
def change_page():
try:
# driver.execute_script('window.scrollBy(0, -800)')
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, show)))
change_page = driver.find_element(By.XPATH, show)
actions = ActionChains(driver)
actions.move_to_element(change_page).perform()
change_page.click()
time.sleep(2)
except Exception as e:
for iframe in iframe_ids:
try:
print("Attempting to close iframe")
frame = driver.find_element(By.XPATH, iframe)
driver.switch_to.frame(frame)
driver.find_element_by_xpath('//div[@class="widget__close"]').click()
driver.switch_to.default_content()
driver.find_element_by_xpath(show).click()
except Exception as e:
print(e)
print("Attempting to close lead form")
try:
driver.execute_script("document.querySelector('.lead-form__close').click();")
driver.find_element_by_xpath(show).click()
except Exception as e:
print(e)
print("Attempting to refresh the page")
driver.refresh()
time.sleep(1)
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, show)))
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.find_element_by_xpath(show).click()
time.sleep(2)
def get_data():
# driver.execute_script('window.scrollBy(0, -7000)')
print('Get prices')
elems_var = '//ul[@class="list-items list-view-1 js-list-items"]/li'
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, elems_var)))
elems = driver.find_elements(By.XPATH, elems_var)
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((
By.XPATH, '//ul[@class="list-items list-view-1 js-list-items"]/li[last()]'
)))
counter = 0
print('[begin gather loop]')
print(elems)
for el in elems:
counter += 1
driver.execute_script('window.scrollBy(0, {})'.format(counter * 20))
try:
title, price, brand, link, price_old = [None] * 5 # assign None to 5 variables
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH,
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]'.format(counter))))
productinfo = json.loads(str(el.find_element_by_xpath(
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]'
.format(counter)).get_attribute('data-productinfo')).replace('\'', '"'))
try:
title = productinfo['name'].replace(productinfo['brand'] + ' ', '')
except:
print('Failed to obtain title')
# id = productinfo['id']
try:
price = float(productinfo['price'])
except:
print('Failed to obtain price')
try:
brand = productinfo['brand']
except:
print("Failed to obtain brand")
try:
link = str(el.find_element(
By.XPATH,
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]//a[@class="item-link"]'.format(counter))
.get_attribute('href'))
except:
print("Failed to obtain link")
try:
WebDriverWait(driver, 10).until(EC.visibility_of_element_located)
price_old = el.find_element(By.XPATH, '//ul[@class="list-items list-view-1 js-list-items"]'
'/li[{}]//span[@class="item-price-old"]/span'.format(counter)).get_attribute(
'content')
except Exception as e:
print("No discount for element {}".format(counter))
print(e)
tables[title] = [price, price_old, brand, link]
global row
sheet.write('A' + str(row), title)
sheet.write('B' + str(row), price)
sheet.write('C' + str(row), price_old)
sheet.write('E' + str(row), brand)
sheet.write('G' + str(row), link)
row += 1
except Exception as e:
print("Exception detected while parsing: ")
print(e)
global failed_pages
failed_pages['pages'].append(re.sub('[^0-9]', '', str(driver.current_url)[-3:]).replace('=', ''))
print("Page {}".format(str(re.sub('[^0-9]', '', str(driver.current_url)[-3:]).replace('=', ''))))
print('Prices obtained')
def write_data():
try:
while driver.find_element(By.XPATH, last_page).get_attribute('class') != pagination_class_selected:
get_data()
change_page()
except:
get_data()
def write_file(url, filename, params=0):
try:
if params == 0:
""" ==== FULL ==== """
driver.get(url)
write_data()
driver.quit()
elif params == 1:
""" ==== BRANDS ==== """
open_brands()
driver.quit()
elif params == 2:
""" ==== SEARCH ==== """
search()
driver.quit()
output.close()
except Exception as e:
print("Error caught, terminating: " + str(e))
print('Writing file...')
if not path.exists('{}.json'.format(filename)):
with open('{}.json'.format(filename), 'w') as t:
json.dump({}, t)
t.close()
with open('{}.json'.format(filename), 'r+', encoding='utf-8') as t:
info = json.load(t)
t.seek(0)
info.update(tables)
json.dump(info, t, ensure_ascii=False, indent=4)
t.truncate()
print('...Completed writing')
t.close()
with open('{}_failed_pages.json'.format(filename), 'w', encoding='utf-8') as p:
json.dump(failed_pages, p, ensure_ascii=False, indent=4)
p.close()
def run():
write_file(url_brands, 'C:\\Users\\admin\\Documents\\outputs\\rendez-vous_brands_full', params=1)
write_file(url_brands, 'C:\\Users\\admin\\Documents\\outputs\\rendez-vous_brands_full', params=2)
print("End: " + str(datetime.datetime.now()))
if __name__ == '__main__':
run()
|
[
"selenium.webdriver.chrome.options.Options",
"webdriver_manager.chrome.ChromeDriverManager",
"selenium.webdriver.support.wait.WebDriverWait",
"json.dump",
"time.sleep",
"selenium.webdriver.common.action_chains.ActionChains",
"datetime.datetime.now",
"json.load",
"datetime.date.today",
"selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"selenium.webdriver.support.expected_conditions.element_to_be_clickable"
] |
[((570, 579), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (577, 579), False, 'from selenium.webdriver.chrome.options import Options\n'), ((3372, 3392), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (3384, 3392), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((4327, 4340), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4337, 4340), False, 'import time\n'), ((4717, 4737), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (4729, 4737), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((4829, 4842), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4839, 4842), False, 'import time\n'), ((6261, 6316), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (['(By.XPATH, elems_var)'], {}), '((By.XPATH, elems_var))\n', (6293, 6316), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6408, 6522), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (['(By.XPATH, \'//ul[@class="list-items list-view-1 js-list-items"]/li[last()]\')'], {}), '((By.XPATH,\n \'//ul[@class="list-items list-view-1 js-list-items"]/li[last()]\'))\n', (6440, 6522), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((10283, 10295), 'json.load', 'json.load', (['t'], {}), '(t)\n', (10292, 10295), False, 'import json\n'), ((10350, 10398), 'json.dump', 'json.dump', (['info', 't'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(info, t, ensure_ascii=False, indent=4)\n', (10359, 10398), False, 'import json\n'), ((10570, 10626), 'json.dump', 'json.dump', (['failed_pages', 'p'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(failed_pages, p, ensure_ascii=False, indent=4)\n', (10579, 10626), False, 'import json\n'), ((533, 556), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (554, 556), False, 'import datetime\n'), ((2526, 2547), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2545, 2547), False, 'import datetime\n'), ((4589, 4639), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (['(By.XPATH, show)'], {}), '((By.XPATH, show))\n', (4621, 4639), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6229, 6254), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (6242, 6254), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((6376, 6401), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (6389, 6401), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((10156, 10172), 'json.dump', 'json.dump', (['{}', 't'], {}), '({}, t)\n', (10165, 10172), False, 'import json\n'), ((733, 754), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (752, 754), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((4557, 4582), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (4570, 4582), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((6039, 6052), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6049, 6052), False, 'import time\n'), ((10887, 
10910), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10908, 10910), False, 'import datetime\n'), ((6836, 6861), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (6849, 6861), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((8082, 8107), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (8095, 8107), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((5759, 5772), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5769, 5772), False, 'import time\n'), ((5825, 5869), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, show)'], {}), '((By.XPATH, show))\n', (5851, 5869), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((5793, 5818), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (5806, 5818), False, 'from selenium.webdriver.support.wait import WebDriverWait\n')]
|
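The scraper above combines an implicit wait with many repeated find_element calls. A small helper sketch (the 10-second timeout is an arbitrary assumption) that the look-ups in search() and get_data() could be funnelled through, using the same Selenium imports already present:

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

def wait_for(driver, xpath, timeout=10):
    """Block until the element located by xpath is visible, then return it."""
    return WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((By.XPATH, xpath))
    )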
import re
from django.db import models
unacceptable_chars = r"[^a-z0-9._]"
duplicate_spaces_and_dots = r"[ .]+"
class ShortCodeField(models.CharField):
description = "A short string representing a glyph name"
def pre_save(self, model_instance, add):
model_instance.short_code = sanitize_short_code(model_instance.short_code)
return model_instance.short_code
def sanitize_short_code(input):
"""
We want to filter-out the undesirable characters.
"""
# Turn spaces and dots into single dots
new_code = re.sub(duplicate_spaces_and_dots, '.', input.strip().lower())
# Filter out everything bad
new_code = replace_common_words(re.sub(unacceptable_chars, '', new_code))
# Duplicates once more
return re.sub(duplicate_spaces_and_dots, '.', new_code)
def replace_common_words(input):
# Neumes that we will shorten
replacements = [
("torculus", "torc"),
("tractulus", "trac"),
("punctum", "pun"),
("stropha", "stro"),
("virga", "vir"),
("porrectus", "por"),
("ancus", "anc"),
("status", "stra"),
("quadratus", "q"),
("quassus", "quas"),
("oriscus", "ori"),
("episema", "e"),
("clivis", "cli"),
("rotundus", "r"),
("liquescent", "l"),
("quilismapes", "pes.quil"),
("two", "2"),
("three", "3"),
# Important to strip simple
(".simple", ""),
# Some other language stuff
("langer", "long"),
(".zweiter", ""),
(".abstrich", "")
]
return replace_words(input, replacements)
def replace_words(input, replacements):
for replacement in replacements:
old, new = replacement
input = re.sub(re.escape(old), new, input)
return input
|
[
"re.sub"
] |
[((759, 807), 're.sub', 're.sub', (['duplicate_spaces_and_dots', '"""."""', 'new_code'], {}), "(duplicate_spaces_and_dots, '.', new_code)\n", (765, 807), False, 'import re\n'), ((679, 719), 're.sub', 're.sub', (['unacceptable_chars', '""""""', 'new_code'], {}), "(unacceptable_chars, '', new_code)\n", (685, 719), False, 'import re\n'), ((1756, 1779), 're.sub', 're.sub', (['old', 'new', 'input'], {}), '(old, new, input)\n', (1762, 1779), False, 'import re\n')]
|
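With the raw-string patterns and literal replacements above, sanitize_short_code lower-cases the input, collapses runs of spaces and dots into single dots, strips disallowed characters and shortens the known neume names. A worked example, chosen purely for illustration:

assert sanitize_short_code("Punctum  Quadratus") == "pun.q"  # spaces collapse, names shorten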
# coding=utf-8
# date: 2019/1/1, 19:38
# name: smz
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from LinearModel.modules.model3 import ModelThreeClasses
from LinearModel.configuration.options import opts
from LinearModel.scripts.gen_data import generate_data
def gen_train_data():
np.random.seed(10)
fields_num = 2
num_classes = 3
sample_size = 2000
mean = np.random.randn(fields_num)
cov = np.eye(fields_num)
diffs = [[3.0], [3.0, 0.0]] # the third-class centre differs from the second-class centre only in the y direction; the second class is offset from the first by 3.0 in both x and y
train_X, train_Y = generate_data(num_classes=num_classes, sample_size=sample_size, mean=mean, cov=cov, diffs=diffs)
np.save("../data/train_data_X3.npy", train_X)
np.save("../data/train_data_Y3.npy", train_Y)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
colors = ['r' if np.argmax(label) == 0 else 'b' if np.argmax(label) == 1 else 'y' for label in train_Y]
ax.scatter(train_X[:, 0], train_X[:, 1], c=colors)
ax.set_xlabel("Scaled age(in years)")
ax.set_ylabel("Tumor size(in cm)")
plt.show()
def train_3_classes():
"""这个有问题,因为使用softmax表示的结果和使用sigmoid的那个模型是不同的,需要重写模型"""
model3 = ModelThreeClasses(opts)
model3.build()
train_x3 = np.load("../data/train_data_X3.npy")
train_y3 = np.load("../data/train_data_Y3.npy")
model_name = "model3s.ckpt"
num_samples = len(train_x3)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(opts["epochs"]):
start_pointer = 0
train_x, train_y = shuffle(train_x3, train_y3)
while start_pointer < num_samples:
end_pointer = start_pointer + opts["batch_size"]
batch_x = train_x[start_pointer:end_pointer]
batch_y = train_y[start_pointer:end_pointer]
start_pointer = end_pointer
feed_dict = {model3.inputs: batch_x, model3.labels: batch_y}
loss_value, glob_step_value, merge_str, _ = sess.run(
fetches=[model3.loss, model3.global_step, model3.merge_op,model3.train_step],
feed_dict=feed_dict)
model3.writer.add_summary(merge_str, global_step=glob_step_value)
print("epoch:%d, step:%d, loss:%.6f"%(epoch, glob_step_value, loss_value))
if (epoch + 1) % 10 == 0:
model3.saver.save(sess, opts["checkpoints_dir"] + model_name, global_step=model3.global_step)
if __name__ == "__main__":
# gen_train_data()
train_3_classes()
|
[
"LinearModel.modules.model3.ModelThreeClasses",
"numpy.eye",
"sklearn.utils.shuffle",
"tensorflow.Session",
"numpy.argmax",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.figure",
"LinearModel.scripts.gen_data.generate_data",
"numpy.random.seed",
"numpy.load",
"numpy.random.randn",
"numpy.save",
"matplotlib.pyplot.show"
] |
[((352, 370), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (366, 370), True, 'import numpy as np\n'), ((444, 471), 'numpy.random.randn', 'np.random.randn', (['fields_num'], {}), '(fields_num)\n', (459, 471), True, 'import numpy as np\n'), ((482, 500), 'numpy.eye', 'np.eye', (['fields_num'], {}), '(fields_num)\n', (488, 500), True, 'import numpy as np\n'), ((612, 712), 'LinearModel.scripts.gen_data.generate_data', 'generate_data', ([], {'num_classes': 'num_classes', 'sample_size': 'sample_size', 'mean': 'mean', 'cov': 'cov', 'diffs': 'diffs'}), '(num_classes=num_classes, sample_size=sample_size, mean=mean,\n cov=cov, diffs=diffs)\n', (625, 712), False, 'from LinearModel.scripts.gen_data import generate_data\n'), ((713, 758), 'numpy.save', 'np.save', (['"""../data/train_data_X3.npy"""', 'train_X'], {}), "('../data/train_data_X3.npy', train_X)\n", (720, 758), True, 'import numpy as np\n'), ((763, 808), 'numpy.save', 'np.save', (['"""../data/train_data_Y3.npy"""', 'train_Y'], {}), "('../data/train_data_Y3.npy', train_Y)\n", (770, 808), True, 'import numpy as np\n'), ((819, 831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1123, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1246), 'LinearModel.modules.model3.ModelThreeClasses', 'ModelThreeClasses', (['opts'], {}), '(opts)\n', (1240, 1246), False, 'from LinearModel.modules.model3 import ModelThreeClasses\n'), ((1282, 1318), 'numpy.load', 'np.load', (['"""../data/train_data_X3.npy"""'], {}), "('../data/train_data_X3.npy')\n", (1289, 1318), True, 'import numpy as np\n'), ((1334, 1370), 'numpy.load', 'np.load', (['"""../data/train_data_Y3.npy"""'], {}), "('../data/train_data_Y3.npy')\n", (1341, 1370), True, 'import numpy as np\n'), ((1446, 1458), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1456, 1458), True, 'import tensorflow as tf\n'), ((1483, 1516), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1514, 1516), True, 'import tensorflow as tf\n'), ((1646, 1673), 'sklearn.utils.shuffle', 'shuffle', (['train_x3', 'train_y3'], {}), '(train_x3, train_y3)\n', (1653, 1673), False, 'from sklearn.utils import shuffle\n'), ((887, 903), 'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (896, 903), True, 'import numpy as np\n'), ((921, 937), 'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (930, 937), True, 'import numpy as np\n')]
|
##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import tests
from common import TestCommon
from results import PassFailResult
@tests.add_test
class MemTest(TestCommon):
'''prints out free and total memory after system boot up'''
name = "freemem"
def get_modules(self, build, machine):
modules = super(MemTest, self).get_modules(build, machine)
modules.add_module("freemem")
return modules
def get_finish_string(self):
return "freemem done!"
def process_data(self, testdir, rawiter):
# the test passed iff the last line is the finish string
lastline = ''
for line in rawiter:
lastline = line
passed = lastline.startswith(self.get_finish_string())
return PassFailResult(passed)
|
[
"results.PassFailResult"
] |
[((1154, 1176), 'results.PassFailResult', 'PassFailResult', (['passed'], {}), '(passed)\n', (1168, 1176), False, 'from results import PassFailResult\n')]
|
import argparse
import os
import sys
import numpy as np
from scipy import misc
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg16, vgg19
from torchvision.utils import save_image
from lib.gradients import GradCam, GuidedBackpropGrad
from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image
from lib.labels import IMAGENET_LABELS
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--img', type=str, default='',
help='Input image path')
parser.add_argument('--out_dir', type=str, default='./result/cam/',
help='Result directory path')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if args.cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
if args.img:
print('Input image: {}'.format(args.img))
else:
print('Input image: raccoon face (scipy.misc.face())')
print('Output directory: {}'.format(args.out_dir))
print()
return args
def main():
args = parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
target_layer_names = ['35']
target_index = None
# Prepare input image
if args.img:
img = cv2.imread(args.img, 1)
else:
img = misc.face()
img = np.float32(cv2.resize(img, (224, 224))) / 255
preprocessed_img = preprocess_image(img, args.cuda)
model = vgg19(pretrained=True)
if args.cuda:
model.cuda()
# Prediction
output = model(preprocessed_img)
pred_index = np.argmax(output.data.cpu().numpy())
print('Prediction: {}'.format(IMAGENET_LABELS[pred_index]))
# Prepare grad cam
grad_cam = GradCam(
pretrained_model=model,
target_layer_names=target_layer_names,
cuda=args.cuda)
# Compute grad cam
mask = grad_cam(preprocessed_img, target_index)
save_cam_image(img, mask, os.path.join(args.out_dir, 'grad_cam.jpg'))
print('Saved Grad-CAM image')
# Reload preprocessed image
preprocessed_img = preprocess_image(img)
# Compute guided backpropagation
guided_backprop = GuidedBackpropGrad(
pretrained_model=model, cuda=args.cuda)
guided_backprop_saliency = guided_backprop(preprocessed_img, index=target_index)
cam_mask = np.zeros(guided_backprop_saliency.shape)
for i in range(guided_backprop_saliency.shape[0]):
cam_mask[i, :, :] = mask
cam_guided_backprop = np.multiply(cam_mask, guided_backprop_saliency)
save_as_gray_image(
cam_guided_backprop,
os.path.join(args.out_dir, 'guided_grad_cam.jpg'))
print('Saved Guided Grad-CAM image')
if __name__ == '__main__':
main()
|
[
"os.path.exists",
"numpy.multiply",
"lib.gradients.GradCam",
"argparse.ArgumentParser",
"torchvision.models.vgg19",
"lib.gradients.GuidedBackpropGrad",
"os.makedirs",
"lib.image_utils.preprocess_image",
"os.path.join",
"numpy.zeros",
"torch.cuda.is_available",
"cv2.resize",
"cv2.imread",
"scipy.misc.face"
] |
[((455, 480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (478, 480), False, 'import argparse\n'), ((1634, 1666), 'lib.image_utils.preprocess_image', 'preprocess_image', (['img', 'args.cuda'], {}), '(img, args.cuda)\n', (1650, 1666), False, 'from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image\n'), ((1680, 1702), 'torchvision.models.vgg19', 'vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1685, 1702), False, 'from torchvision.models import vgg16, vgg19\n'), ((1954, 2045), 'lib.gradients.GradCam', 'GradCam', ([], {'pretrained_model': 'model', 'target_layer_names': 'target_layer_names', 'cuda': 'args.cuda'}), '(pretrained_model=model, target_layer_names=target_layer_names, cuda\n =args.cuda)\n', (1961, 2045), False, 'from lib.gradients import GradCam, GuidedBackpropGrad\n'), ((2311, 2332), 'lib.image_utils.preprocess_image', 'preprocess_image', (['img'], {}), '(img)\n', (2327, 2332), False, 'from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image\n'), ((2393, 2451), 'lib.gradients.GuidedBackpropGrad', 'GuidedBackpropGrad', ([], {'pretrained_model': 'model', 'cuda': 'args.cuda'}), '(pretrained_model=model, cuda=args.cuda)\n', (2411, 2451), False, 'from lib.gradients import GradCam, GuidedBackpropGrad\n'), ((2562, 2602), 'numpy.zeros', 'np.zeros', (['guided_backprop_saliency.shape'], {}), '(guided_backprop_saliency.shape)\n', (2570, 2602), True, 'import numpy as np\n'), ((2718, 2765), 'numpy.multiply', 'np.multiply', (['cam_mask', 'guided_backprop_saliency'], {}), '(cam_mask, guided_backprop_saliency)\n', (2729, 2765), True, 'import numpy as np\n'), ((902, 927), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (925, 927), False, 'import torch\n'), ((1316, 1344), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (1330, 1344), False, 'import os\n'), ((1354, 1379), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (1365, 1379), False, 'import os\n'), ((1495, 1518), 'cv2.imread', 'cv2.imread', (['args.img', '(1)'], {}), '(args.img, 1)\n', (1505, 1518), False, 'import cv2\n'), ((1543, 1554), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (1552, 1554), False, 'from scipy import misc\n'), ((2177, 2219), 'os.path.join', 'os.path.join', (['args.out_dir', '"""grad_cam.jpg"""'], {}), "(args.out_dir, 'grad_cam.jpg')\n", (2189, 2219), False, 'import os\n'), ((2827, 2876), 'os.path.join', 'os.path.join', (['args.out_dir', '"""guided_grad_cam.jpg"""'], {}), "(args.out_dir, 'guided_grad_cam.jpg')\n", (2839, 2876), False, 'import os\n'), ((1576, 1603), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1586, 1603), False, 'import cv2\n')]
|
from floodsystem import stationdata, datafetcher, station
stations = stationdata.build_station_list()
stationdata.update_water_levels(stations)
#Empty lists for each of the risk categories
severe_level_station = []
high_level_station = []
moderate_level_station = []
low_level_station = []
for station in stations: #Sorts out stations into different levels
level = station.relative_water_level()
if level is not None:
if level > 1.2:
severe_level_station.append(station)
elif level > 0.9:
high_level_station.append(station)
elif level > 0.7:
moderate_level_station.append(station)
else:
low_level_station.append(station)
#sets for the different categories
severe_town = {x.town for x in severe_level_station}
high_town = {x.town for x in high_level_station}
moderate_town = {x.town for x in moderate_level_station}
low_town = {x.town for x in low_level_station}
for town in severe_town:
#xx
print(town)
|
[
"floodsystem.stationdata.build_station_list",
"floodsystem.station.relative_water_level",
"floodsystem.stationdata.update_water_levels"
] |
[((70, 102), 'floodsystem.stationdata.build_station_list', 'stationdata.build_station_list', ([], {}), '()\n', (100, 102), False, 'from floodsystem import stationdata, datafetcher, station\n'), ((103, 144), 'floodsystem.stationdata.update_water_levels', 'stationdata.update_water_levels', (['stations'], {}), '(stations)\n', (134, 144), False, 'from floodsystem import stationdata, datafetcher, station\n'), ((372, 402), 'floodsystem.station.relative_water_level', 'station.relative_water_level', ([], {}), '()\n', (400, 402), False, 'from floodsystem import stationdata, datafetcher, station\n')]
|
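The classification loop above uses the thresholds 1.2 / 0.9 / 0.7 with strict greater-than comparisons. A minimal sketch that keeps those cut-offs in one place and reproduces the same banding, including the boundary values:

from bisect import bisect_left

RISK_THRESHOLDS = (0.7, 0.9, 1.2)
RISK_BANDS = ("low", "moderate", "high", "severe")

def risk_band(relative_level):
    """Map a relative water level to the same four bands used above."""
    return RISK_BANDS[bisect_left(RISK_THRESHOLDS, relative_level)]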
#//-------------------------------------------------------------------
#/*/{Protheus.doc} ACDA035 -
#
#@author <NAME>
#@since 23/09/2019
#@version P12
#
# CT001 - Inclusion of an inventory entry
# CT002 - View of an inventory entry
# CT003 - Viewing the legend
# CT004 - Modification of an inventory entry
# CT005 - Deletion of an inventory entry
# CT007 - Modification of an inventory entry without finalizing the count
#
#/*/
#//-------------------------------------------------------------------
from tir import Webapp
import unittest
import time
class ACDA035(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAEST','11/07/2019','T1','D MG 01')
inst.oHelper.Program('ACDA035')
inst.oHelper.AddParameter("MV_CBPE012", "", ".T.", ".T.", ".T.")
inst.oHelper.SetParameters()
#CT001 - Generation of a picking order from a production order
#@author: <NAME>
#@date: 18/09/2019
def test_ACDA035_CT001(self):
self.oHelper.SetButton("Incluir")
self.oHelper.SetButton('Ok')
self.oHelper.SetValue('Codigo Inv.', '000000005')
self.oHelper.SetValue('Usuario', '000010')
self.oHelper.SetValue('Quantidade', '1', grid=True)
self.oHelper.SetValue('Endereco', 'ENDSE01', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Não')
self.oHelper.SetButton('Cancelar')
#Definition of the operation
self.oHelper.AssertTrue()
def test_ACDA035_CT002(self):
self.oHelper.SearchBrowse("D MG 01 000000003")
self.oHelper.SetButton("Visão")
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
def test_ACDA035_CT003(self):
self.oHelper.SetButton("Outras Ações", "Legenda")
self.oHelper.SetButton('Ok')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_ACDA035_CT004(self):
self.oHelper.SearchBrowse("D MG 01 000000030")
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue('Quantidade', '3', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Não')
self.oHelper.AssertTrue()
def test_ACDA035_CT005(self):
self.oHelper.SearchBrowse("D MG 01 000000005")
self.oHelper.SetButton("Outras Ações", "Excluir")
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Sim')
self.oHelper.AssertTrue()
def test_ACDA035_CT006(self):
self.oHelper.AddParameter("MV_WMSNEW ", "", ".F.", ".T.", ".T.")
self.oHelper.SetParameters()
self.oHelper.SearchBrowse("D MG 01 000000029")
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue('Qtd.Original', '3', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Sim')
self.oHelper.AssertTrue()
def test_ACDA035_CT007(self):
self.oHelper.SearchBrowse("D MG 01 000000032")
self.oHelper.SetButton("Alterar")
self.oHelper.SetButton("Salvar")
self.oHelper.WaitShow("Deseja finalizar a contagem?")
self.oHelper.SetButton("Não")
self.oHelper.WaitHide("Deseja finalizar a contagem?")
time.sleep(3)
self.oHelper.SetButton("Visão")
self.oHelper.CheckResult("Produto","ACDACDA03500000000000000000001",grid=True, line=1)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"tir.Webapp",
"time.sleep"
] |
[((3423, 3438), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3436, 3438), False, 'import unittest\n'), ((685, 693), 'tir.Webapp', 'Webapp', ([], {}), '()\n', (691, 693), False, 'from tir import Webapp\n'), ((3097, 3110), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3107, 3110), False, 'import time\n')]
|
from pathlib import Path
from jina import Flow, Document
from jina.executors import BaseExecutor
from jina.parsers import set_pea_parser
from jina.peapods.peas import BasePea
cur_dir = Path(__file__).parent
def test_load_executor_with_custom_driver():
with BaseExecutor.load_config(str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')) as be:
assert be._drivers['IndexRequest'][0].__class__.__name__ == 'DummyEncodeDriver'
def test_load_pod_with_custom_driver():
args = set_pea_parser().parse_args(['--uses', str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')])
with BasePea(args):
# load success with no error
pass
def validate(req):
assert len(req.docs) == 1
assert req.docs[0].text == 'hello from DummyEncodeDriver'
def test_load_flow_with_custom_driver():
with Flow().add(uses=str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')) as f:
f.index([Document()], on_done=validate)
|
[
"pathlib.Path",
"jina.Flow",
"jina.Document",
"jina.parsers.set_pea_parser",
"jina.peapods.peas.BasePea"
] |
[((187, 201), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'from pathlib import Path\n'), ((607, 620), 'jina.peapods.peas.BasePea', 'BasePea', (['args'], {}), '(args)\n', (614, 620), False, 'from jina.peapods.peas import BasePea\n'), ((498, 514), 'jina.parsers.set_pea_parser', 'set_pea_parser', ([], {}), '()\n', (512, 514), False, 'from jina.parsers import set_pea_parser\n'), ((837, 843), 'jina.Flow', 'Flow', ([], {}), '()\n', (841, 843), False, 'from jina import Flow, Document\n'), ((936, 946), 'jina.Document', 'Document', ([], {}), '()\n', (944, 946), False, 'from jina import Flow, Document\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cgi
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import tensorflow as tf
from ricga import configuration
from ricga import inference_wrapper
from ricga.inference_utils import caption_generator
from ricga.inference_utils import vocabulary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "/home/meteorshub/code/RICGA/ricga/model/train",
"Model checkpoint file or directory containing a "
"model checkpoint file.")
tf.flags.DEFINE_string("vocab_file", "/home/meteorshub/code/RICGA/ricga/data/mscoco/word_counts.txt",
"Text file containing the vocabulary.")
tf.flags.DEFINE_string("server_ip", "192.168.3.11", "Server address")
tf.flags.DEFINE_integer("server_port", 8080, "server port")
tf.logging.set_verbosity(tf.logging.INFO)
class InferenceModel(object):
def __init__(self):
g = tf.Graph()
with g.as_default():
model = inference_wrapper.InferenceWrapper()
restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
FLAGS.checkpoint_path)
g.finalize()
# Create the vocabulary.
vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
sess = tf.Session(graph=g)
restore_fn(sess)
generator = caption_generator.CaptionGenerator(model, vocab)
self.vocab = vocab
self.sess = sess
self.generator = generator
def run_inf(self, image_data):
captions = self.generator.beam_search(self.sess, image_data)
caption = captions[0]
sentence = [self.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
return sentence
inf_model = InferenceModel()
class GetHandler(BaseHTTPRequestHandler):
def do_GET(self):
form_message = """<p>RICGA:please upload a picture(jpeg)</p>
<form method="post" action="http://%s:%s" enctype="multipart/form-data">
<input name="file" type="file" accept="image/jpeg" />
<input name="token" type="hidden" />
<input type="submit" value="upload" /></form>""" % (FLAGS.server_ip, FLAGS.server_port)
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(form_message.encode('utf-8'))
def do_POST(self):
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']
})
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
for field in form.keys():
if field == 'file':
image_file = form[field]
if image_file.filename:
image_data = image_file.file.read()
caption = inf_model.run_inf(image_data)
# caption = "success"
del image_data
message = "Caption: %s" % caption
self.wfile.write(message.encode("utf-8"))
return
self.wfile.write("failure!!".encode('utf-8'))
def main(_):
server = HTTPServer(('0.0.0.0', FLAGS.server_port), GetHandler)
print('Starting server, use <ctrl-c> to stop')
server.serve_forever()
if __name__ == "__main__":
tf.app.run()
|
[
"tensorflow.flags.DEFINE_string",
"tensorflow.Graph",
"cgi.FieldStorage",
"ricga.inference_wrapper.InferenceWrapper",
"tensorflow.Session",
"tensorflow.logging.set_verbosity",
"BaseHTTPServer.HTTPServer",
"tensorflow.flags.DEFINE_integer",
"ricga.configuration.ModelConfig",
"ricga.inference_utils.vocabulary.Vocabulary",
"ricga.inference_utils.caption_generator.CaptionGenerator",
"tensorflow.app.run"
] |
[((399, 571), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_path"""', '"""/home/meteorshub/code/RICGA/ricga/model/train"""', '"""Model checkpoint file or directory containing a model checkpoint file."""'], {}), "('checkpoint_path',\n '/home/meteorshub/code/RICGA/ricga/model/train',\n 'Model checkpoint file or directory containing a model checkpoint file.')\n", (421, 571), True, 'import tensorflow as tf\n'), ((613, 762), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""vocab_file"""', '"""/home/meteorshub/code/RICGA/ricga/data/mscoco/word_counts.txt"""', '"""Text file containing the vocabulary."""'], {}), "('vocab_file',\n '/home/meteorshub/code/RICGA/ricga/data/mscoco/word_counts.txt',\n 'Text file containing the vocabulary.')\n", (635, 762), True, 'import tensorflow as tf\n'), ((779, 848), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""server_ip"""', '"""192.168.3.11"""', '"""Server address"""'], {}), "('server_ip', '192.168.3.11', 'Server address')\n", (801, 848), True, 'import tensorflow as tf\n'), ((849, 908), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""server_port"""', '(8080)', '"""server port"""'], {}), "('server_port', 8080, 'server port')\n", (872, 908), True, 'import tensorflow as tf\n'), ((910, 951), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (934, 951), True, 'import tensorflow as tf\n'), ((3634, 3688), 'BaseHTTPServer.HTTPServer', 'HTTPServer', (["('0.0.0.0', FLAGS.server_port)", 'GetHandler'], {}), "(('0.0.0.0', FLAGS.server_port), GetHandler)\n", (3644, 3688), False, 'from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\n'), ((3800, 3812), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3810, 3812), True, 'import tensorflow as tf\n'), ((1020, 1030), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1028, 1030), True, 'import tensorflow as tf\n'), ((1350, 1389), 'ricga.inference_utils.vocabulary.Vocabulary', 'vocabulary.Vocabulary', (['FLAGS.vocab_file'], {}), '(FLAGS.vocab_file)\n', (1371, 1389), False, 'from ricga.inference_utils import vocabulary\n'), ((1405, 1424), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (1415, 1424), True, 'import tensorflow as tf\n'), ((1470, 1518), 'ricga.inference_utils.caption_generator.CaptionGenerator', 'caption_generator.CaptionGenerator', (['model', 'vocab'], {}), '(model, vocab)\n', (1504, 1518), False, 'from ricga.inference_utils import caption_generator\n'), ((2634, 2774), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {'fp': 'self.rfile', 'headers': 'self.headers', 'environ': "{'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}"}), "(fp=self.rfile, headers=self.headers, environ={\n 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']})\n", (2650, 2774), False, 'import cgi\n'), ((1080, 1116), 'ricga.inference_wrapper.InferenceWrapper', 'inference_wrapper.InferenceWrapper', ([], {}), '()\n', (1114, 1116), False, 'from ricga import inference_wrapper\n'), ((1172, 1199), 'ricga.configuration.ModelConfig', 'configuration.ModelConfig', ([], {}), '()\n', (1197, 1199), False, 'from ricga import configuration\n')]
|
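To exercise the POST handler above from a script instead of the HTML form, a client along these lines should work; the requests dependency and the image path are assumptions, while the host, port and the 'file' field name come from the server code:

import requests

with open("example.jpg", "rb") as f:
    resp = requests.post(
        "http://192.168.3.11:8080",
        files={"file": ("example.jpg", f, "image/jpeg")},  # form field must be named 'file'
    )
print(resp.text)  # expected to start with "Caption: " on success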
import os
import sys
if sys.platform.startswith('linux'):
from OpenGL import GL
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtWidgets import QApplication
from analysis.probe.gui.backend_classes import PythonBackendClass1, Logger
from analysis.probe.gui.image_providers import PyplotImageProvider
DEBUG = False
if __name__ == '__main__':
app = QApplication(sys.argv)
appEngine = QQmlApplicationEngine()
context = appEngine.rootContext()
analysis_image_provider1 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider1", analysis_image_provider1)
analysis_image_provider2 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider2", analysis_image_provider2)
# ALL THE ADDIMAGEPROVIDER LINES BELOW ARE REQUIRED TO MAKE QML BELIEVE THE PROVIDER IS VALID BEFORE ITS CREATION
# appEngine.addImageProvider('viewerprovider', CvImageProvider())
# analysis_image_provider = PyplotImageProvider(fig=None)
# appEngine.addImageProvider("analysisprovider", analysis_image_provider)
conf = {
'shared_directory': './' # FIXME: this is obviously BS
}
qml_source_path = os.path.join(conf['shared_directory'], 'qml', 'gui_qtquick', 'gui_qtquick.qml')
if not os.path.isfile(qml_source_path):
raise ValueError("Qml code not found at {}, please verify your installation".format(qml_source_path))
appEngine.load(qml_source_path)
try:
win = appEngine.rootObjects()[0]
except IndexError:
raise ValueError("Could not start the QT GUI")
if not DEBUG:
logger = Logger(context, win, "log")
sys.stdout = logger
print('Hello world')
# icon = QIcon(os.path.join(conf.shared_directory, 'resources', 'icons', 'pyper.png'))
# win.setIcon(icon)
backend = PythonBackendClass1(app, context, win, analysis_image_provider1, analysis_image_provider2) # create instance of backend
context.setContextProperty('py_iface', backend) # register backend python object with qml code under variable name py_iface
win.show()
sys.exit(app.exec_())
|
[
"analysis.probe.gui.backend_classes.Logger",
"PyQt5.QtQml.QQmlApplicationEngine",
"analysis.probe.gui.backend_classes.PythonBackendClass1",
"os.path.join",
"sys.platform.startswith",
"os.path.isfile",
"PyQt5.QtWidgets.QApplication",
"analysis.probe.gui.image_providers.PyplotImageProvider"
] |
[((28, 60), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (51, 60), False, 'import sys\n'), ((386, 408), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (398, 408), False, 'from PyQt5.QtWidgets import QApplication\n'), ((426, 449), 'PyQt5.QtQml.QQmlApplicationEngine', 'QQmlApplicationEngine', ([], {}), '()\n', (447, 449), False, 'from PyQt5.QtQml import QQmlApplicationEngine\n'), ((523, 552), 'analysis.probe.gui.image_providers.PyplotImageProvider', 'PyplotImageProvider', ([], {'fig': 'None'}), '(fig=None)\n', (542, 552), False, 'from analysis.probe.gui.image_providers import PyplotImageProvider\n'), ((664, 693), 'analysis.probe.gui.image_providers.PyplotImageProvider', 'PyplotImageProvider', ([], {'fig': 'None'}), '(fig=None)\n', (683, 693), False, 'from analysis.probe.gui.image_providers import PyplotImageProvider\n'), ((1224, 1303), 'os.path.join', 'os.path.join', (["conf['shared_directory']", '"""qml"""', '"""gui_qtquick"""', '"""gui_qtquick.qml"""'], {}), "(conf['shared_directory'], 'qml', 'gui_qtquick', 'gui_qtquick.qml')\n", (1236, 1303), False, 'import os\n'), ((1891, 1985), 'analysis.probe.gui.backend_classes.PythonBackendClass1', 'PythonBackendClass1', (['app', 'context', 'win', 'analysis_image_provider1', 'analysis_image_provider2'], {}), '(app, context, win, analysis_image_provider1,\n analysis_image_provider2)\n', (1910, 1985), False, 'from analysis.probe.gui.backend_classes import PythonBackendClass1, Logger\n'), ((1316, 1347), 'os.path.isfile', 'os.path.isfile', (['qml_source_path'], {}), '(qml_source_path)\n', (1330, 1347), False, 'import os\n'), ((1670, 1697), 'analysis.probe.gui.backend_classes.Logger', 'Logger', (['context', 'win', '"""log"""'], {}), "(context, win, 'log')\n", (1676, 1697), False, 'from analysis.probe.gui.backend_classes import PythonBackendClass1, Logger\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HMBBF', '0014_theme'),
]
operations = [
migrations.AddField(
model_name='theme',
name='time',
field=models.CharField(default='\u4e0d\u5fc5\u586b\u5199', max_length=256, verbose_name='\u5177\u4f53\u65f6\u95f4\u6bb5'),
),
migrations.AlterField(
model_name='theme',
name='date',
field=models.DateField(verbose_name='\u65f6\u95f4(\u54ea\u4e00\u5929)'),
),
migrations.AlterField(
model_name='theme',
name='time_end',
field=models.DateTimeField(verbose_name='\u7ed3\u675f\u65f6\u95f4'),
),
]
|
[
"django.db.models.DateTimeField",
"django.db.models.DateField",
"django.db.models.CharField"
] |
[((332, 402), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""不必填写"""', 'max_length': '(256)', 'verbose_name': '"""具体时间段"""'}), "(default='不必填写', max_length=256, verbose_name='具体时间段')\n", (348, 402), False, 'from django.db import migrations, models\n'), ((566, 606), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""时间(哪一天)"""'}), "(verbose_name='时间(哪一天)')\n", (582, 606), False, 'from django.db import migrations, models\n'), ((754, 795), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""结束时间"""'}), "(verbose_name='结束时间')\n", (774, 795), False, 'from django.db import migrations, models\n')]
|
from rb.core.lang import Lang
from rb.core.document import Document
from rb.complexity.complexity_index import ComplexityIndex, compute_indices
from rb.similarity.word2vec import Word2Vec
from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel
from rb.similarity.vector_model_factory import VECTOR_MODELS, create_vector_model
from typing import Tuple, List
from sklearn.svm import SVR
from collections import Counter
from sklearn import svm
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
import matplotlib.pyplot as plt
import pickle
import os
import csv
import random
from werkzeug import secure_filename
import uuid
from rb.cna.cna_graph import CnaGraph
from rb.utils.rblogger import Logger
logger = Logger.get_logger()
class TextClassifier:
def __init__(self):
pass
def get_vector_model(self, lang: Lang = Lang.RO) -> VectorModel:
global logger
if lang is Lang.RO:
vector_model = create_vector_model(Lang.RO, VectorModelType.from_str('word2vec'), "readme")
elif lang is Lang.EN:
vector_model = create_vector_model(Lang.EN, VectorModelType.from_str("word2vec"), "coca")
else:
logger.error(f'Language {lang.value} is not supported for essay scoring task')
vector_model = None
return vector_model
def read_indices(self, base_folder: str = 'categories_readme', lang=Lang.RO) -> List[List[float]]:
        categories = ['general_stats.csv', 'literature_stats.csv', 'science_stats.csv']
results = []
indices = []
if lang is Lang.RO:
with open('rb/processings/text_classifier/indices_ro_class.txt', 'rt', encoding='utf-8') as f:
for line in f:
indices.append(line.strip())
        for j, cat in enumerate(categories):
essay_r = csv.reader(open(os.path.join(base_folder, cat), 'rt', encoding='utf-8'))
""" first col is the score """
for i, row in enumerate(essay_r):
if i == 0:
indices_row = row
continue
res = [j]
for findex in indices:
for k, rr in enumerate(row):
if indices_row[k].strip() == findex:
res.append(rr)
break
results.append(res)
return results
def train_svm(self, results: List[List], save_model_file=None):
total = len(results)
random.shuffle(results)
train_samples = int(total * 0.8)
train = results[:train_samples]
test = results[train_samples:]
y = [int(r[0]) for r in train]
X = [r[1:] for r in train]
clf = svm.SVC(kernel='poly', degree=14, class_weight={0: 0.1, 1: 0.6, 2: 0.3}).fit(X, y)
if save_model_file:
pickle.dump(clf, open(save_model_file, 'wb'))
dev_out, dev_in = [], []
for sample_x in test:
if int(sample_x[0]) == 0 and random.random() < 0.7:
continue
dev_out.append(int(sample_x[0]))
Xx = sample_x[1:]
dev_in.append(Xx)
print(Counter(dev_out))
disp = plot_confusion_matrix(clf, dev_in, dev_out, display_labels=['general', 'science', 'literature', ])
res = clf.predict(dev_in)
disp.ax_.set_title('Confusion Matrix')
right, wrong = 0, 0
for r, clss in zip(res, dev_out):
if r != clss:
wrong += 1
else:
right += 1
logger.info('Acc for classification : {}'.format(right/(wrong + right)))
plt.show()
def predict(self, content: str, file_to_svr_model: str, lang=Lang.RO) -> int:
svr = pickle.load(open(file_to_svr_model, "rb"))
doc = Document(lang=lang, text=content)
vector_model = self.get_vector_model(lang=lang)
cna_graph = CnaGraph(docs=doc, models=[vector_model])
compute_indices(doc=doc, cna_graph=cna_graph)
indices = []
if lang is Lang.RO:
with open('rb/processings/text_classifier/indices_ro_class.txt', 'rt', encoding='utf-8') as f:
for line in f:
indices.append(line.strip())
values_indices = []
for ind in indices:
for key, v in doc.indices.items():
if repr(key) == ind:
values_indices.append(v)
break
class_txt = svr.predict([values_indices])[0]
return class_txt
|
[
"sklearn.svm.SVC",
"rb.cna.cna_graph.CnaGraph",
"random.shuffle",
"rb.utils.rblogger.Logger.get_logger",
"os.path.join",
"collections.Counter",
"rb.similarity.vector_model.VectorModelType.from_str",
"rb.complexity.complexity_index.compute_indices",
"rb.core.document.Document",
"random.random",
"sklearn.metrics.plot_confusion_matrix",
"matplotlib.pyplot.show"
] |
[((749, 768), 'rb.utils.rblogger.Logger.get_logger', 'Logger.get_logger', ([], {}), '()\n', (766, 768), False, 'from rb.utils.rblogger import Logger\n'), ((2569, 2592), 'random.shuffle', 'random.shuffle', (['results'], {}), '(results)\n', (2583, 2592), False, 'import random\n'), ((3279, 3379), 'sklearn.metrics.plot_confusion_matrix', 'plot_confusion_matrix', (['clf', 'dev_in', 'dev_out'], {'display_labels': "['general', 'science', 'literature']"}), "(clf, dev_in, dev_out, display_labels=['general',\n 'science', 'literature'])\n", (3300, 3379), False, 'from sklearn.metrics import confusion_matrix, plot_confusion_matrix\n'), ((3738, 3748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3746, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3938), 'rb.core.document.Document', 'Document', ([], {'lang': 'lang', 'text': 'content'}), '(lang=lang, text=content)\n', (3913, 3938), False, 'from rb.core.document import Document\n'), ((4016, 4057), 'rb.cna.cna_graph.CnaGraph', 'CnaGraph', ([], {'docs': 'doc', 'models': '[vector_model]'}), '(docs=doc, models=[vector_model])\n', (4024, 4057), False, 'from rb.cna.cna_graph import CnaGraph\n'), ((4066, 4111), 'rb.complexity.complexity_index.compute_indices', 'compute_indices', ([], {'doc': 'doc', 'cna_graph': 'cna_graph'}), '(doc=doc, cna_graph=cna_graph)\n', (4081, 4111), False, 'from rb.complexity.complexity_index import ComplexityIndex, compute_indices\n'), ((3246, 3262), 'collections.Counter', 'Counter', (['dev_out'], {}), '(dev_out)\n', (3253, 3262), False, 'from collections import Counter\n'), ((1012, 1048), 'rb.similarity.vector_model.VectorModelType.from_str', 'VectorModelType.from_str', (['"""word2vec"""'], {}), "('word2vec')\n", (1036, 1048), False, 'from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel\n'), ((2804, 2882), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(14)', 'class_weight': '{(0): 0.1, (1): 0.6, (2): 0.3}'}), "(kernel='poly', degree=14, class_weight={(0): 0.1, (1): 0.6, (2): 0.3})\n", (2811, 2882), False, 'from sklearn import svm\n'), ((1146, 1182), 'rb.similarity.vector_model.VectorModelType.from_str', 'VectorModelType.from_str', (['"""word2vec"""'], {}), "('word2vec')\n", (1170, 1182), False, 'from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel\n'), ((1895, 1925), 'os.path.join', 'os.path.join', (['base_folder', 'cat'], {}), '(base_folder, cat)\n', (1907, 1925), False, 'import os\n'), ((3079, 3094), 'random.random', 'random.random', ([], {}), '()\n', (3092, 3094), False, 'import random\n')]
|
"""
enclosure_tags
~~~~~~~~~~~~~~
Fix tags for MP3 enclosures (e.g. podcasts).
Adds a "with tags" link to a version of the file with tags set as follows:
* the entry title as title
* the feed title as album
* the entry/feed author as author
This plugin needs additional dependencies, use the ``unstable-plugins`` extra
to install them:
.. code-block:: bash
pip install reader[unstable-plugins]
To load::
READER_APP_PLUGIN='reader._plugins.enclosure_tags:init' \\
python -m reader serve
Implemented for https://github.com/lemon24/reader/issues/50.
Became a plugin in https://github.com/lemon24/reader/issues/52.
"""
import tempfile
from urllib.parse import urlparse
import mutagen.mp3
import requests
from flask import Blueprint
from flask import request
from flask import Response
from flask import stream_with_context
from flask import url_for
blueprint = Blueprint('enclosure_tags', __name__)
ALL_TAGS = ('album', 'title', 'artist')
SET_ONLY_IF_MISSING_TAGS = {'artist'}
@blueprint.route('/enclosure-tags', defaults={'filename': None})
@blueprint.route('/enclosure-tags/<filename>')
def enclosure_tags(filename):
def update_tags(file):
emp3 = mutagen.mp3.EasyMP3(file)
changed = False
for key in ALL_TAGS:
if key in SET_ONLY_IF_MISSING_TAGS and emp3.get(key):
continue
value = request.args.get(key)
if not value:
continue
emp3[key] = [value]
changed = True
if changed:
emp3.save(file)
file.seek(0)
def chunks(req):
# Send the headers as soon as possible.
# Some browsers wait for the headers before showing the "Save As" dialog.
yield ''
tmp = tempfile.TemporaryFile()
for chunk in req.iter_content(chunk_size=2 ** 20):
tmp.write(chunk)
tmp.seek(0)
update_tags(tmp)
try:
while True:
data = tmp.read(2 ** 20)
if not data:
break
yield data
finally:
tmp.close()
url = request.args['url']
req = requests.get(url, stream=True)
headers = {}
for name in ('Content-Type', 'Content-Disposition'):
if name in req.headers:
headers[name] = req.headers[name]
return Response(stream_with_context(chunks(req)), headers=headers)
def enclosure_tags_filter(enclosure, entry):
filename = urlparse(enclosure.href).path.split('/')[-1]
if not filename.endswith('.mp3'):
return []
args = {'url': enclosure.href, 'filename': filename}
if entry.title:
args['title'] = entry.title
if entry.feed.title:
args['album'] = entry.feed.title
if entry.author or entry.feed.author:
args['artist'] = entry.author or entry.feed.author
return [('with tags', url_for('enclosure_tags.enclosure_tags', **args))]
def init(app):
app.register_blueprint(blueprint)
app.reader_additional_enclosure_links.append(enclosure_tags_filter)
|
[
"flask.request.args.get",
"urllib.parse.urlparse",
"requests.get",
"flask.url_for",
"tempfile.TemporaryFile",
"flask.Blueprint"
] |
[((881, 918), 'flask.Blueprint', 'Blueprint', (['"""enclosure_tags"""', '__name__'], {}), "('enclosure_tags', __name__)\n", (890, 918), False, 'from flask import Blueprint\n'), ((2164, 2194), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (2176, 2194), False, 'import requests\n'), ((1762, 1786), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (1784, 1786), False, 'import tempfile\n'), ((1376, 1397), 'flask.request.args.get', 'request.args.get', (['key'], {}), '(key)\n', (1392, 1397), False, 'from flask import request\n'), ((2891, 2939), 'flask.url_for', 'url_for', (['"""enclosure_tags.enclosure_tags"""'], {}), "('enclosure_tags.enclosure_tags', **args)\n", (2898, 2939), False, 'from flask import url_for\n'), ((2482, 2506), 'urllib.parse.urlparse', 'urlparse', (['enclosure.href'], {}), '(enclosure.href)\n', (2490, 2506), False, 'from urllib.parse import urlparse\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import types
import unittest
import inspect
from lxml import etree
from . import XSL, XML, VERSION_LIST
from .core import get_module, get_stylesheet, get_source_version, get_migration_path, list_versions
from .main import parse_args
from .migrate import migrate_by_stylesheet, do_migration, get_params
from .utils import _print, _check, _decode_data
replace_list = [
('\n', ''),
('\t', ''),
(' ', ''),
]
def _replace(s, vals=replace_list):
if s is None:
return ''
_s = s
for u, v in vals:
_s = _s.replace(u, v)
return _s
def compare_elements(el1, el2):
"""Compare two elements and all their children
:return: True or False
"""
_check(el1, (etree._Element), TypeError)
_check(el2, (etree._Element), TypeError)
# https://stackoverflow.com/questions/7905380/testing-equivalence-of-xml-etree-elementtree
if el1.tag != el2.tag:
return False
if _replace(el1.text) != _replace(el2.text):
return False
if _replace(el1.tail) != _replace(el2.tail):
return False
if el1.attrib != el2.attrib:
return False
if len(el1) != len(el2):
return False
return all(compare_elements(e1, e2) for e1, e2 in zip(el1, el2))
class TestUtils(unittest.TestCase):
def test_check(self):
"""Test that _check works"""
with self.assertRaisesRegex(TypeError, r"object '1' is not of class <class 'str'>"):
_check(1, str, TypeError)
with self.assertRaises(TypeError):
_check(1, str, TypeError, message="")
def test_migrate(self):
"""Test that migrate works"""
# exceptions
with self.assertRaises(TypeError):
migrate_by_stylesheet(1, 2)
with self.assertRaises(IOError):
migrate_by_stylesheet('file.xml', 'file.xsl')
def test_parse_args(self):
"""Test correct arguments"""
# default with -t/--target-version
args = parse_args("file.xml -v -t 1.0")
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.target_version, "1.0")
self.assertEqual(args.outfile, "file_v1.0.xml")
self.assertFalse(args.list_versions)
# specify outfile
args = parse_args("file.xml -v -t 1.0 -o my_output.xml")
self.assertEqual(args.outfile, "my_output.xml")
# list valid versions
args = parse_args("-l")
self.assertEqual(args.infile, '')
self.assertEqual(args.target_version, VERSION_LIST[-1])
self.assertIsNone(args.outfile)
self.assertTrue(args.list_versions)
self.assertFalse(args.show_version)
# show version in file
args = parse_args("-v -s file.xml")
self.assertEqual(args.infile, 'file.xml')
self.assertEqual(args.target_version, VERSION_LIST[-1])
# self.assertEqual(args.outfile, 'file_v0.8.0.dev1.xml')
self.assertIsNone(args.outfile)
self.assertFalse(args.list_versions)
self.assertTrue(args.show_version)
# show package version
args = parse_args("-v -V")
self.assertEqual(args.infile, '')
self.assertIsNone(args.outfile)
self.assertTrue(args.version)
self.assertFalse(args.list_versions)
self.assertFalse(args.show_version)
def test_get_stylesheet(self):
"""Given versions return the correct stylesheet to use"""
stylesheet = get_stylesheet("1", "2")
self.assertEqual(os.path.basename(stylesheet), 'migrate_v1_to_v2.xsl')
self.assertTrue(os.path.exists(stylesheet))
original = os.path.join(XML, 'original.xml')
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_details="Nothing much")
migrated = etree.ElementTree(etree.XML(_migrated))
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
# self.assertTrue(False)
with self.assertRaises(OSError):
get_stylesheet("nothing", "something")
def test_get_source_version(self):
"""Obtain the version in the original"""
source_version = get_source_version(os.path.join(XML, 'original.xml'))
self.assertEqual(source_version, '1')
fn_v07 = os.path.join(XML, 'test2.sff')
source_version_v07 = get_source_version(fn_v07)
self.assertEqual(source_version_v07, '0.7.0.dev0')
fn_v08 = os.path.join(XML, 'test2_v0.8.0.dev1.sff')
source_version_v08 = get_source_version(fn_v08)
self.assertEqual(source_version_v08, '0.8.0.dev1')
def test_get_migration_path(self):
"""Determine the sequence of migrations to perform"""
version_list = ['1', '2', '3', '4', '5', '6']
migration_path = get_migration_path('2', '6', version_list=version_list)
self.assertEqual(migration_path, [('2', '3'), ('3', '4'), ('4', '5'), ('5', '6')])
# cannot find start
with self.assertRaisesRegex(ValueError, r".*invalid migration start.*"):
get_migration_path('0', '6', version_list=version_list)
# cannot find end
with self.assertRaisesRegex(ValueError, r".*invalid migration end.*"):
get_migration_path('1', '9', version_list=version_list)
def test_do_migration_example(self):
"""Toy migration example"""
version_list = ['1', '2']
cmd = "{infile} -v --target-version 2 --outfile {outfile}".format(
infile=os.path.join(XML, "original.xml"),
outfile=os.path.join(XML, "my_output.xml")
)
args = parse_args(cmd)
_text = "48ec3e2ab568763658fc3f5430b851ceaf1593d6" # secrets.token_hex(20)
status = do_migration(
args,
value_list=[_text],
version_list=version_list,
)
_output = os.path.join(XML, "original_v2.xml")
self.assertTrue(os.path.exists(_output))
self.assertEqual(status, os.EX_OK)
output = etree.parse(_output)
self.assertEqual(output.xpath('/segmentation/details/text()')[0], _text)
os.remove(args.outfile)
def test_do_migration(self):
"""Do an actual migration using the convenience function"""
# try a null migration
target_version = "0.8.0.dev1"
outfile = os.path.join(XML, 'my_file_out.sff')
cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
infile=os.path.join(XML, 'test2_v0.8.0.dev1.sff'),
target_version=target_version,
outfile=outfile,
)
args = parse_args(cmd)
status = do_migration(args)
self.assertEqual(status, os.EX_OK)
self.assertFalse(os.path.exists(outfile)) # the file was not created
# try an actual migrations
cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
infile=os.path.join(XML, 'test2.sff'),
target_version=target_version,
outfile=outfile
)
args = parse_args(cmd)
status = do_migration(args)
self.assertEqual(status, os.EX_OK)
        self.assertTrue(os.path.exists(outfile))  # the file was created
in_version = get_source_version(args.infile)
out_version = get_source_version(outfile)
self.assertNotEqual(in_version, out_version)
self.assertEqual(out_version, target_version)
os.remove(outfile)
def test_get_module(self):
"""Check that we can get the right module for this migration"""
module = get_module('1', '2')
self.assertIsInstance(module, types.ModuleType)
def test_get_params(self):
"""Test getting params"""
module = get_module('1', '2')
_text = "ce3c90151bb3c803c8e6570ee7d5845ac3c96c38" # secrets.token_hex(20)
params = get_params(module.PARAM_LIST, value_list=[_text])
self.assertIsInstance(params, dict)
self.assertEqual(len(params), 1)
with self.assertRaises(ValueError):
get_params(module.PARAM_LIST, value_list=[_text, _text])
def test_list_versions(self):
"""Test that we can list the supported versions"""
args = parse_args("-v -l")
status, version_count = list_versions()
self.assertEqual(status, os.EX_OK)
self.assertEqual(version_count, 2)
class TestMigrations(unittest.TestCase):
def test_original_to_add_field(self):
"""Test adding a field to the original"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'add_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_add_field.xsl')
# we pass the value of the `details` param as follows:
# A = reference.xpath(<xpath>)[0]
# etree.XSLT.strparam(A) - handle a possibly quoted string
details_text = reference.xpath('/segmentation/details/text()')[0]
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_details=details_text) # bytes
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_to_drop_field(self):
"""Test dropping a field from the original"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'drop_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_drop_field.xsl')
with self.assertWarns(UserWarning):
_migrated = migrate_by_stylesheet(original, stylesheet, verbose=True)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_rename_field(self):
"""Test changing a field by renaming it"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_rename_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_rename_field.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
# sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
# sys.stderr.write('\n')
# sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_add_attribute(self):
"""Test changing a field by adding an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_add_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_add_attribute.xsl')
lang_text = reference.xpath('/segmentation/name/@lang')[0]
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_name_lang=lang_text)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
# sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
# sys.stderr.write('\n')
# sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_drop_attribute(self):
"""Test changing a field by dropping an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_drop_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_drop_attribute.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_change_value(self):
"""Test changing a field by changing the value"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_change_value.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_change_value.xsl')
_segment_name = reference.xpath('/segmentation/segment[@id=1]/name/text()')[0]
_migrated = migrate_by_stylesheet(original, stylesheet, segment_name=_segment_name)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_to_change_field_rename_attribute(self):
"""Test changing a field by renaming an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_rename_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_rename_attribute.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_list_to_change_value_list(self):
"""Test changing all the values for a list"""
original = os.path.join(XML, 'original_list.xml')
reference = etree.parse(os.path.join(XML, 'change_value_list.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_value_list.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
class TestEMDBSFFMigrations(unittest.TestCase):
def test_migrate_mesh_exceptions(self):
"""Test that we capture exceptions"""
module = get_module('0.7.0.dev0', '0.8.0.dev1')
# create an empty mesh
mesh = etree.Element("mesh")
with self.assertRaisesRegex(ValueError, r".*invalid endianness.*"):
module.migrate_mesh(mesh, endianness='other')
with self.assertRaisesRegex(ValueError, r".*invalid triangles mode.*"):
module.migrate_mesh(mesh, triangles_mode='other')
with self.assertRaisesRegex(ValueError, r".*invalid vertices mode.*"):
module.migrate_mesh(mesh, vertices_mode='other')
# no geometry
verts, norms, tris = module.migrate_mesh(mesh)
self.assertIsInstance(verts, etree._Element)
self.assertEqual(int(verts.get("num_vertices")), 0)
# let's get the signature of the migrate_mesh function to get the default values for kwargs
signature = inspect.signature(module.migrate_mesh)
# verts
self.assertEqual(verts.get("mode"), signature.parameters['vertices_mode'].default)
self.assertEqual(verts.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(verts.get("data"), "")
# norms
self.assertEqual(norms.get("mode"), signature.parameters['vertices_mode'].default)
self.assertEqual(norms.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(norms.get("data"), "")
# tris
self.assertEqual(tris.get("mode"), signature.parameters['triangles_mode'].default)
self.assertEqual(tris.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(tris.get("data"), "")
def test_v0_7_0_dev0_to_v0_8_0_dev0(self):
"""Test migration from v0.7.0.dev0 to v0.8.0.dev1"""
original = os.path.join(XML, 'test2.sff')
stylesheet = get_stylesheet("0.7.0.dev0", "0.8.0.dev1")
# phase I migration using stylesheet
_migrated = migrate_by_stylesheet(original, stylesheet)
# convert migration to an ElementTree object
migrated = etree.ElementTree(etree.XML(_migrated))
_original = etree.parse(original)
segments = _original.xpath('/segmentation/segmentList/segment')
_print(segments)
segment_meshes = dict()
module = get_module('0.7.0.dev0', '0.8.0.dev1')
for segment in segments:
segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('meshList/mesh'):
_vertices, _normals, _triangles = module.migrate_mesh(
mesh)
segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = _vertices, _normals, _triangles
migrated_segments = migrated.xpath('/segmentation/segment_list/segment')
for migrated_segment in migrated_segments:
for migrated_mesh in migrated_segment.xpath('mesh_list/mesh'):
_vertices, _normals, _triangles = segment_meshes[int(migrated_segment.get("id"))][
int(migrated_mesh.get("id"))]
migrated_mesh.insert(0, _vertices)
migrated_mesh.insert(1, _normals)
migrated_mesh.insert(2, _triangles)
# let's see what it looks like
migrated_decoded = etree.tostring(migrated, xml_declaration=True, encoding='UTF-8', pretty_print=True).decode(
'utf-8')
# sys.stderr.write('migrated:\n' + migrated_decoded)
# with open(os.path.join(XML, 'test2_v0.8.0.dev1.sff'), 'w') as f:
# f.write(migrated_decoded)
def test_meshes_equal_v0_7_0_dev0_vs_v0_8_0_dev0(self):
"""Test that the mesh data is the same
We only compare surface vertices. Normal vertices correspond one-to-one to surface vertices and are not relevant
to triangles.
"""
v7 = os.path.join(XML, 'test7.sff')
v8 = os.path.join(XML, 'test7_v0.8.0.dev1.sff')
fv7 = etree.parse(v7)
fv8 = etree.parse(v8)
fv7_segments = fv7.xpath('/segmentation/segmentList/segment')
# extract vertices, normals and triangles
fv7_segment_meshes = dict()
for segment in fv7_segments:
fv7_segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('meshList/mesh'):
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = {
'surface_vertices': dict(),
'normal_vertices': dict(),
'triangles': dict(),
}
vertex_list = next(mesh.iter('vertexList'))
for vertex in vertex_list:
if vertex.get('designation') == 'surface' or vertex.get('designation') is None:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['surface_vertices'][
int(vertex.get('vID'))] = tuple(map(lambda v: float(v.text), vertex.xpath('*')))
elif vertex.get('designation') == 'normal':
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['normal_vertices'][
int(vertex.get('vID'))] = tuple(map(lambda v: float(v.text), vertex.xpath('*')))
triangle_list = next(mesh.iter('polygonList'))
for triangle in triangle_list:
# _print(tuple(map(lambda t: t.text, triangle.xpath('v'))))
vertex_ids = list(map(lambda p: int(p.text), triangle.xpath('v')))
if len(vertex_ids) == 3:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'][
int(triangle.get('PID'))] = tuple(vertex_ids), tuple()
elif len(vertex_ids) == 6:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'][
int(triangle.get('PID'))] = tuple(vertex_ids[::2]), tuple(vertex_ids[1::2])
else:
pass
# _print(fv7_segment_meshes)
fv8_segments = fv8.xpath('/segmentation/segment_list/segment')
# extract vertices, normals and triangles
fv8_segment_meshes = dict()
for segment in fv8_segments:
fv8_segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('mesh_list/mesh'):
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = dict()
vertices = next(mesh.iter('vertices'))
# _print(vertices.keys())
# _print(vertices.get("data").encode("ASCII"))
vertex_list = _decode_data(vertices.get("data").encode('ASCII'),
int(vertices.get("num_vertices")), vertices.get("mode"),
vertices.get("endianness"))
vertex_tuples = list(zip(vertex_list[::3], vertex_list[1::3], vertex_list[2::3]))
# _print(data_vectors)
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['surface_vertices'] = dict(
zip(range(len(vertex_tuples)), vertex_tuples))
# _print(data_dict)
normals = next(mesh.iter('normals'))
normal_list = _decode_data(normals.get("data").encode('ASCII'), int(normals.get("num_normals")),
normals.get("mode"), normals.get('endianness'))
normal_tuples = list(zip(normal_list[::3], normal_list[1::3], normal_list[2::3]))
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['normal_vertices'] = dict(
zip(range(len(normal_tuples)), normal_tuples))
triangles = next(mesh.iter('triangles'))
triangle_list = _decode_data(triangles.get("data").encode('ASCII'),
int(triangles.get("num_triangles")),
triangles.get("mode"), triangles.get('endianness'))
triangle_tuples = list(zip(triangle_list[::3], triangle_list[1::3], triangle_list[2::3]))
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'] = dict(
zip(range(len(triangle_tuples)), triangle_tuples))
# _print(fv8_segment_meshes)
# compare
fv7_surface_vertices = list()
for segment_id in fv7_segment_meshes:
for mesh_id in fv7_segment_meshes[segment_id]:
for triangle_id in fv7_segment_meshes[segment_id][mesh_id]['triangles']:
triangle = fv7_segment_meshes[segment_id][mesh_id]['triangles'][triangle_id]
# _print(triangle)
# _print(triangle)
s0, s1, s2 = triangle[0]
# n0, n1, n2 = triangle[1]
fv7_surface_vertices += [fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s0],
fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s1],
fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s2]]
fv8_surface_vertices = list()
for segment_id in fv8_segment_meshes:
for mesh_id in fv8_segment_meshes[segment_id]:
for triangle_id in fv8_segment_meshes[segment_id][mesh_id]['triangles']:
triangle = fv8_segment_meshes[segment_id][mesh_id]['triangles'][triangle_id]
# _print(triangle)
s0, s1, s2 = triangle
fv8_surface_vertices += [fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s0],
fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s1],
fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s2]]
# _print(fv7_surface_vertices[1283])
# _print(fv8_surface_vertices[1283])
self.assertEqual(len(fv7_surface_vertices), len(fv8_surface_vertices))
for u, v in zip(fv7_surface_vertices, fv8_surface_vertices):
self.assertAlmostEqual(u[0], v[0])
self.assertAlmostEqual(u[1], v[1])
self.assertAlmostEqual(u[2], v[2])
def test_v0_7_0_dev0_to_v0_8_0_dev0_shapes(self):
"""Test that we can migrate shapes"""
original = os.path.join(XML, 'test_shape_segmentation.sff')
stylesheet = get_stylesheet("0.7.0.dev0", "0.8.0.dev1")
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
migrated_decoded = etree.tostring(migrated, xml_declaration=True, encoding='UTF-8', pretty_print=True).decode(
'utf-8')
sys.stderr.write(migrated_decoded)
with open(os.path.join(XML, 'test_shape_segmentation_v0.8.0.dev1.sff'), 'w') as f:
f.write(migrated_decoded)
class TestMain(unittest.TestCase):
def test_parse_args(self):
"""Test parse_args function"""
cmd = "file.xml -v"
args = parse_args(cmd)
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.outfile, "file_v{}.xml".format(VERSION_LIST[-1]))
def test_parse_args_outfile(self):
"""Test that outfile arg is honoured"""
cmd = "file.xml -v -o nothing.xml"
args = parse_args(cmd)
self.assertEqual(args.outfile, "nothing.xml")
def test_no_shlex(self):
"""Test not using shlex"""
cmd = ["file.xml", "-v", "-o", "nothing.xml"]
args = parse_args(cmd, use_shlex=False)
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.outfile, "nothing.xml")
|
[
"lxml.etree.Element",
"os.path.exists",
"lxml.etree.XML",
"lxml.etree.parse",
"os.path.join",
"inspect.signature",
"sys.stderr.write",
"os.path.basename",
"lxml.etree.tostring",
"os.remove"
] |
[((3640, 3673), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (3652, 3673), False, 'import os\n'), ((4315, 4345), 'os.path.join', 'os.path.join', (['XML', '"""test2.sff"""'], {}), "(XML, 'test2.sff')\n", (4327, 4345), False, 'import os\n'), ((4478, 4520), 'os.path.join', 'os.path.join', (['XML', '"""test2_v0.8.0.dev1.sff"""'], {}), "(XML, 'test2_v0.8.0.dev1.sff')\n", (4490, 4520), False, 'import os\n'), ((5883, 5919), 'os.path.join', 'os.path.join', (['XML', '"""original_v2.xml"""'], {}), "(XML, 'original_v2.xml')\n", (5895, 5919), False, 'import os\n'), ((6029, 6049), 'lxml.etree.parse', 'etree.parse', (['_output'], {}), '(_output)\n', (6040, 6049), False, 'from lxml import etree\n'), ((6139, 6162), 'os.remove', 'os.remove', (['args.outfile'], {}), '(args.outfile)\n', (6148, 6162), False, 'import os\n'), ((6352, 6388), 'os.path.join', 'os.path.join', (['XML', '"""my_file_out.sff"""'], {}), "(XML, 'my_file_out.sff')\n", (6364, 6388), False, 'import os\n'), ((7473, 7491), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (7482, 7491), False, 'import os\n'), ((8560, 8593), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (8572, 8593), False, 'import os\n'), ((8683, 8729), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_add_field.xsl"""'], {}), "(XSL, 'original_to_add_field.xsl')\n", (8695, 8729), False, 'import os\n'), ((9351, 9373), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (9367, 9373), False, 'import sys\n'), ((9604, 9637), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (9616, 9637), False, 'import os\n'), ((9728, 9775), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_drop_field.xsl"""'], {}), "(XSL, 'original_to_drop_field.xsl')\n", (9740, 9775), False, 'import os\n'), ((10157, 10179), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (10173, 10179), False, 'import sys\n'), ((10392, 10425), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (10404, 10425), False, 'import os\n'), ((10531, 10593), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_field_rename_field.xsl"""'], {}), "(XSL, 'original_to_change_field_rename_field.xsl')\n", (10543, 10593), False, 'import os\n'), ((11163, 11196), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (11175, 11196), False, 'import os\n'), ((11303, 11366), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_field_add_attribute.xsl"""'], {}), "(XSL, 'original_to_change_field_add_attribute.xsl')\n", (11315, 11366), False, 'import os\n'), ((12082, 12115), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (12094, 12115), False, 'import os\n'), ((12223, 12287), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_field_drop_attribute.xsl"""'], {}), "(XSL, 'original_to_change_field_drop_attribute.xsl')\n", (12235, 12287), False, 'import os\n'), ((12607, 12629), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (12623, 12629), False, 'import sys\n'), ((12849, 12882), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (12861, 12882), False, 'import os\n'), ((12988, 13050), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_field_change_value.xsl"""'], {}), "(XSL, 
'original_to_change_field_change_value.xsl')\n", (13000, 13050), False, 'import os\n'), ((13455, 13477), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (13471, 13477), False, 'import sys\n'), ((13734, 13767), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (13746, 13767), False, 'import os\n'), ((13877, 13943), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_field_rename_attribute.xsl"""'], {}), "(XSL, 'original_to_change_field_rename_attribute.xsl')\n", (13889, 13943), False, 'import os\n'), ((14233, 14255), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (14249, 14255), False, 'import sys\n'), ((14498, 14536), 'os.path.join', 'os.path.join', (['XML', '"""original_list.xml"""'], {}), "(XML, 'original_list.xml')\n", (14510, 14536), False, 'import os\n'), ((14634, 14688), 'os.path.join', 'os.path.join', (['XSL', '"""original_to_change_value_list.xsl"""'], {}), "(XSL, 'original_to_change_value_list.xsl')\n", (14646, 14688), False, 'import os\n'), ((14978, 15000), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (14994, 15000), False, 'import sys\n'), ((15356, 15377), 'lxml.etree.Element', 'etree.Element', (['"""mesh"""'], {}), "('mesh')\n", (15369, 15377), False, 'from lxml import etree\n'), ((16104, 16142), 'inspect.signature', 'inspect.signature', (['module.migrate_mesh'], {}), '(module.migrate_mesh)\n', (16121, 16142), False, 'import inspect\n'), ((17015, 17045), 'os.path.join', 'os.path.join', (['XML', '"""test2.sff"""'], {}), "(XML, 'test2.sff')\n", (17027, 17045), False, 'import os\n'), ((17352, 17373), 'lxml.etree.parse', 'etree.parse', (['original'], {}), '(original)\n', (17363, 17373), False, 'from lxml import etree\n'), ((19059, 19089), 'os.path.join', 'os.path.join', (['XML', '"""test7.sff"""'], {}), "(XML, 'test7.sff')\n", (19071, 19089), False, 'import os\n'), ((19103, 19145), 'os.path.join', 'os.path.join', (['XML', '"""test7_v0.8.0.dev1.sff"""'], {}), "(XML, 'test7_v0.8.0.dev1.sff')\n", (19115, 19145), False, 'import os\n'), ((19160, 19175), 'lxml.etree.parse', 'etree.parse', (['v7'], {}), '(v7)\n', (19171, 19175), False, 'from lxml import etree\n'), ((19190, 19205), 'lxml.etree.parse', 'etree.parse', (['v8'], {}), '(v8)\n', (19201, 19205), False, 'from lxml import etree\n'), ((25725, 25773), 'os.path.join', 'os.path.join', (['XML', '"""test_shape_segmentation.sff"""'], {}), "(XML, 'test_shape_segmentation.sff')\n", (25737, 25773), False, 'import os\n'), ((26109, 26143), 'sys.stderr.write', 'sys.stderr.write', (['migrated_decoded'], {}), '(migrated_decoded)\n', (26125, 26143), False, 'import sys\n'), ((3515, 3543), 'os.path.basename', 'os.path.basename', (['stylesheet'], {}), '(stylesheet)\n', (3531, 3543), False, 'import os\n'), ((3593, 3619), 'os.path.exists', 'os.path.exists', (['stylesheet'], {}), '(stylesheet)\n', (3607, 3619), False, 'import os\n'), ((3854, 3874), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (3863, 3874), False, 'from lxml import etree\n'), ((4217, 4250), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (4229, 4250), False, 'import os\n'), ((5944, 5967), 'os.path.exists', 'os.path.exists', (['_output'], {}), '(_output)\n', (5958, 5967), False, 'import os\n'), ((6759, 6782), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (6773, 6782), False, 'import os\n'), ((7202, 7225), 'os.path.exists', 'os.path.exists', (['outfile'], {}), 
'(outfile)\n', (7216, 7225), False, 'import os\n'), ((8626, 8660), 'os.path.join', 'os.path.join', (['XML', '"""add_field.xml"""'], {}), "(XML, 'add_field.xml')\n", (8638, 8660), False, 'import os\n'), ((9163, 9183), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (9172, 9183), False, 'from lxml import etree\n'), ((9670, 9705), 'os.path.join', 'os.path.join', (['XML', '"""drop_field.xml"""'], {}), "(XML, 'drop_field.xml')\n", (9682, 9705), False, 'import os\n'), ((9939, 9959), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (9948, 9959), False, 'from lxml import etree\n'), ((10458, 10508), 'os.path.join', 'os.path.join', (['XML', '"""change_field_rename_field.xml"""'], {}), "(XML, 'change_field_rename_field.xml')\n", (10470, 10508), False, 'import os\n'), ((10695, 10715), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (10704, 10715), False, 'from lxml import etree\n'), ((11229, 11280), 'os.path.join', 'os.path.join', (['XML', '"""change_field_add_attribute.xml"""'], {}), "(XML, 'change_field_add_attribute.xml')\n", (11241, 11280), False, 'import os\n'), ((11611, 11631), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (11620, 11631), False, 'from lxml import etree\n'), ((12148, 12200), 'os.path.join', 'os.path.join', (['XML', '"""change_field_drop_attribute.xml"""'], {}), "(XML, 'change_field_drop_attribute.xml')\n", (12160, 12200), False, 'import os\n'), ((12389, 12409), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (12398, 12409), False, 'from lxml import etree\n'), ((12915, 12965), 'os.path.join', 'os.path.join', (['XML', '"""change_field_change_value.xml"""'], {}), "(XML, 'change_field_change_value.xml')\n", (12927, 12965), False, 'import os\n'), ((13267, 13287), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (13276, 13287), False, 'from lxml import etree\n'), ((13800, 13854), 'os.path.join', 'os.path.join', (['XML', '"""change_field_rename_attribute.xml"""'], {}), "(XML, 'change_field_rename_attribute.xml')\n", (13812, 13854), False, 'import os\n'), ((14045, 14065), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (14054, 14065), False, 'from lxml import etree\n'), ((14569, 14611), 'os.path.join', 'os.path.join', (['XML', '"""change_value_list.xml"""'], {}), "(XML, 'change_value_list.xml')\n", (14581, 14611), False, 'import os\n'), ((14790, 14810), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (14799, 14810), False, 'from lxml import etree\n'), ((17309, 17329), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (17318, 17329), False, 'from lxml import etree\n'), ((25939, 25959), 'lxml.etree.XML', 'etree.XML', (['_migrated'], {}), '(_migrated)\n', (25948, 25959), False, 'from lxml import etree\n'), ((5520, 5553), 'os.path.join', 'os.path.join', (['XML', '"""original.xml"""'], {}), "(XML, 'original.xml')\n", (5532, 5553), False, 'import os\n'), ((5575, 5609), 'os.path.join', 'os.path.join', (['XML', '"""my_output.xml"""'], {}), "(XML, 'my_output.xml')\n", (5587, 5609), False, 'import os\n'), ((6498, 6540), 'os.path.join', 'os.path.join', (['XML', '"""test2_v0.8.0.dev1.sff"""'], {}), "(XML, 'test2_v0.8.0.dev1.sff')\n", (6510, 6540), False, 'import os\n'), ((6955, 6985), 'os.path.join', 'os.path.join', (['XML', '"""test2.sff"""'], {}), "(XML, 'test2.sff')\n", (6967, 6985), False, 'import os\n'), ((18493, 18580), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {'xml_declaration': '(True)', 
'encoding': '"""UTF-8"""', 'pretty_print': '(True)'}), "(migrated, xml_declaration=True, encoding='UTF-8',\n pretty_print=True)\n", (18507, 18580), False, 'from lxml import etree\n'), ((25988, 26075), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {'xml_declaration': '(True)', 'encoding': '"""UTF-8"""', 'pretty_print': '(True)'}), "(migrated, xml_declaration=True, encoding='UTF-8',\n pretty_print=True)\n", (26002, 26075), False, 'from lxml import etree\n'), ((26162, 26222), 'os.path.join', 'os.path.join', (['XML', '"""test_shape_segmentation_v0.8.0.dev1.sff"""'], {}), "(XML, 'test_shape_segmentation_v0.8.0.dev1.sff')\n", (26174, 26222), False, 'import os\n'), ((3917, 3941), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (3931, 3941), False, 'from lxml import etree\n'), ((9300, 9325), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (9314, 9325), False, 'from lxml import etree\n'), ((9415, 9439), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (9429, 9439), False, 'from lxml import etree\n'), ((10106, 10131), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (10120, 10131), False, 'from lxml import etree\n'), ((10221, 10245), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (10235, 10245), False, 'from lxml import etree\n'), ((12556, 12581), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (12570, 12581), False, 'from lxml import etree\n'), ((12671, 12695), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (12685, 12695), False, 'from lxml import etree\n'), ((13404, 13429), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (13418, 13429), False, 'from lxml import etree\n'), ((13519, 13543), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (13533, 13543), False, 'from lxml import etree\n'), ((14182, 14207), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (14196, 14207), False, 'from lxml import etree\n'), ((14297, 14321), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (14311, 14321), False, 'from lxml import etree\n'), ((14927, 14952), 'lxml.etree.tostring', 'etree.tostring', (['reference'], {}), '(reference)\n', (14941, 14952), False, 'from lxml import etree\n'), ((15042, 15066), 'lxml.etree.tostring', 'etree.tostring', (['migrated'], {}), '(migrated)\n', (15056, 15066), False, 'from lxml import etree\n')]
|
"""Shaft element module for STOCHASTIC ROSS.
This module creates an instance of a random shaft element for stochastic
analysis.
"""
from ross.shaft_element import ShaftElement
from ross.stochastic.st_materials import ST_Material
from ross.stochastic.st_results_elements import plot_histogram
from ross.units import Q_, check_units
__all__ = ["ST_ShaftElement", "st_shaft_example"]
class ST_ShaftElement:
"""Random shaft element.
Creates an object containing a generator with random instances of
ShaftElement.
Parameters
----------
L : float, pint.Quantity, list
Element length.
Input a list to make it random.
idl : float, pint.Quantity, list
Inner diameter of the element at the left position.
Input a list to make it random.
odl : float, pint.Quantity, list
Outer diameter of the element at the left position.
Input a list to make it random.
idr : float, pint.Quantity, list, optional
Inner diameter of the element at the right position
Default is equal to idl value (cylindrical element)
Input a list to make it random.
odr : float, pint.Quantity, list, optional
Outer diameter of the element at the right position.
Default is equal to odl value (cylindrical element)
Input a list to make it random.
material : ross.material, list of ross.material
Shaft material.
Input a list to make it random.
n : int, optional
        Element number (coincident with its first node).
        If not given, it will be set when the rotor is assembled
        according to the element's position in the list supplied to
        the rotor constructor.
axial_force : float, list, optional
Axial force.
Input a list to make it random.
Default is 0.
torque : float, list, optional
Torque
Input a list to make it random.
Default is 0.
shear_effects : bool, optional
Determine if shear effects are taken into account.
Default is True.
rotary_inertia : bool, optional
Determine if rotary_inertia effects are taken into account.
Default is True.
gyroscopic : bool, optional
Determine if gyroscopic effects are taken into account.
Default is True.
shear_method_calc : str, optional
Determines which shear calculation method the user will adopt.
Default is 'cowper'
is_random : list
List of the object attributes to become random.
Possibilities:
["L", "idl", "odl", "idr", "odr", "material", "axial_force", "torque"]
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> len(list(iter(elms)))
5
"""
@check_units
def __init__(
self,
L,
idl,
odl,
idr=None,
odr=None,
material=None,
n=None,
axial_force=0,
torque=0,
shear_effects=True,
rotary_inertia=True,
gyroscopic=True,
shear_method_calc="cowper",
is_random=None,
):
if idr is None:
idr = idl
if "idl" in is_random and "idr" not in is_random:
is_random.append("idr")
if odr is None:
odr = odl
if "odl" in is_random and "odr" not in is_random:
is_random.append("odr")
if isinstance(material, ST_Material):
material = list(iter(material))
attribute_dict = dict(
L=L,
idl=idl,
odl=odl,
idr=idr,
odr=odr,
material=material,
n=n,
axial_force=axial_force,
torque=torque,
shear_effects=shear_effects,
rotary_inertia=rotary_inertia,
gyroscopic=gyroscopic,
shear_method_calc=shear_method_calc,
tag=None,
)
self.is_random = is_random
self.attribute_dict = attribute_dict
def __iter__(self):
"""Return an iterator for the container.
Returns
-------
An iterator over random shaft elements.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> len(list(iter(elm)))
2
"""
return iter(self.random_var(self.is_random, self.attribute_dict))
def __getitem__(self, key):
"""Return the value for a given key from attribute_dict.
Parameters
----------
key : str
A class parameter as string.
Raises
------
KeyError
Raises an error if the parameter doesn't belong to the class.
Returns
-------
Return the value for the given key.
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> elms["L"]
1
"""
if key not in self.attribute_dict.keys():
raise KeyError("Object does not have parameter: {}.".format(key))
return self.attribute_dict[key]
def __setitem__(self, key, value):
"""Set new parameter values for the object.
Function to change a parameter value.
It's not allowed to add new parameters to the object.
Parameters
----------
key : str
A class parameter as string.
        value : The corresponding value for the attribute_dict's key.
***check the correct type for each key in ST_ShaftElement
docstring.
Raises
------
KeyError
Raises an error if the parameter doesn't belong to the class.
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> elms["odl"] = np.linspace(0.1, 0.2, 5)
>>> elms["odl"]
array([0.1 , 0.125, 0.15 , 0.175, 0.2 ])
"""
if key not in self.attribute_dict.keys():
raise KeyError("Object does not have parameter: {}.".format(key))
self.attribute_dict[key] = value
def random_var(self, is_random, *args):
"""Generate a list of objects as random attributes.
This function creates a list of objects with random values for selected
attributes from ross.ShaftElement.
Parameters
----------
is_random : list
List of the object attributes to become stochastic.
*args : dict
            Dictionary instantiating the ross.ShaftElement class.
The attributes that are supposed to be stochastic should be
set as lists of random variables.
Returns
-------
f_list : generator
Generator of random objects.
"""
args_dict = args[0]
new_args = []
for i in range(len(args_dict[is_random[0]])):
arg = []
for key, value in args_dict.items():
if key in is_random:
arg.append(value[i])
else:
arg.append(value)
new_args.append(arg)
f_list = (ShaftElement(*arg) for arg in new_args)
return f_list
def plot_random_var(self, var_list=None, histogram_kwargs=None, plot_kwargs=None):
"""Plot histogram and the PDF.
This function creates a histogram to display the random variable
distribution.
Parameters
----------
var_list : list, optional
List of random variables, in string format, to plot.
Default is plotting all the random variables.
histogram_kwargs : dict, optional
            Additional keyword arguments can be passed to change
            the plotly.go.histogram (e.g. histnorm="probability density", nbinsx=20...).
            *See Plotly API for more information.
plot_kwargs : dict, optional
            Additional keyword arguments can be passed to change the plotly go.figure
            (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...).
            *See Plotly API for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
A figure with the histogram plots.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> fig = elm.plot_random_var(["odl"])
>>> # fig.show()
"""
label = dict(
L="Length",
idl="Left inner diameter",
odl="Left outer diameter",
idr="Right inner diameter",
odr="Right outer diameter",
)
is_random = self.is_random
if "material" in is_random:
is_random.remove("material")
if var_list is None:
var_list = is_random
elif not all(var in is_random for var in var_list):
raise ValueError(
"Random variable not in var_list. Select variables from {}".format(
is_random
)
)
return plot_histogram(
self.attribute_dict, label, var_list, histogram_kwargs={}, plot_kwargs={}
)
def st_shaft_example():
"""Return an instance of a simple random shaft element.
The purpose is to make available a simple model so that doctest can be
written using it.
Returns
-------
elm : ross.stochastic.ST_ShaftElement
An instance of a random shaft element object.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> len(list(iter(elm)))
2
"""
from ross.materials import steel
elm = ST_ShaftElement(
L=[1.0, 1.1],
idl=0.0,
odl=[0.1, 0.2],
material=steel,
is_random=["L", "odl"],
)
return elm
|
[
"ross.shaft_element.ShaftElement",
"ross.stochastic.st_results_elements.plot_histogram"
] |
[((10531, 10624), 'ross.stochastic.st_results_elements.plot_histogram', 'plot_histogram', (['self.attribute_dict', 'label', 'var_list'], {'histogram_kwargs': '{}', 'plot_kwargs': '{}'}), '(self.attribute_dict, label, var_list, histogram_kwargs={},\n plot_kwargs={})\n', (10545, 10624), False, 'from ross.stochastic.st_results_elements import plot_histogram\n'), ((8585, 8603), 'ross.shaft_element.ShaftElement', 'ShaftElement', (['*arg'], {}), '(*arg)\n', (8597, 8603), False, 'from ross.shaft_element import ShaftElement\n')]
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_create_group"
class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'instance_create_runners'
_runner_cls = 'InstanceCreateRunner'
@test(depends_on_groups=["services.initialize"],
runs_after_groups=[PRE_INSTANCES],
groups=[GROUP, groups.INST_CREATE])
class InstanceCreateGroup(TestGroup):
"""Test Instance Create functionality."""
def __init__(self):
super(InstanceCreateGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def create_empty_instance(self):
"""Create an empty instance."""
self.test_runner.run_empty_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_INIT_CREATE])
class InstanceInitCreateGroup(TestGroup):
"""Test Instance Init Create functionality."""
def __init__(self):
super(InstanceInitCreateGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def create_initial_configuration(self):
"""Create a configuration group for a new initialized instance."""
self.test_runner.run_initial_configuration_create()
@test(runs_after=[create_initial_configuration])
def create_initialized_instance(self):
"""Create an instance with initial properties."""
self.test_runner.run_initialized_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_CREATE_WAIT],
runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE,
groups.INST_ERROR_DELETE])
class InstanceCreateWaitGroup(TestGroup):
"""Test that Instance Create Completes."""
def __init__(self):
super(InstanceCreateWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_instance(self):
"""Waiting for main instance to become active."""
self.test_runner.run_wait_for_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE],
groups=[GROUP, groups.INST_INIT_CREATE_WAIT],
runs_after_groups=[groups.INST_CREATE_WAIT])
class InstanceInitCreateWaitGroup(TestGroup):
"""Test that Instance Init Create Completes."""
def __init__(self):
super(InstanceInitCreateWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_init_instance(self):
"""Waiting for init instance to become active."""
self.test_runner.run_wait_for_init_instance()
@test(depends_on=[wait_for_init_instance])
def add_initialized_instance_data(self):
"""Add data to the initialized instance."""
self.test_runner.run_add_initialized_instance_data()
@test(runs_after=[add_initialized_instance_data])
def validate_initialized_instance(self):
"""Validate the initialized instance data and properties."""
self.test_runner.run_validate_initialized_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE_WAIT],
groups=[GROUP, groups.INST_INIT_DELETE])
class InstanceInitDeleteGroup(TestGroup):
"""Test Initialized Instance Delete functionality."""
def __init__(self):
super(InstanceInitDeleteGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def delete_initialized_instance(self):
"""Delete the initialized instance."""
self.test_runner.run_initialized_instance_delete()
@test(depends_on_groups=[groups.INST_INIT_DELETE],
runs_after_groups=[groups.INST_ERROR_DELETE],
groups=[GROUP, groups.INST_INIT_DELETE_WAIT])
class InstanceInitDeleteWaitGroup(TestGroup):
"""Test that Initialized Instance Delete Completes."""
def __init__(self):
super(InstanceInitDeleteWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_init_delete(self):
"""Wait for the initialized instance to be gone."""
self.test_runner.run_wait_for_init_delete()
@test(runs_after=[wait_for_init_delete])
def delete_initial_configuration(self):
"""Delete the initial configuration group."""
self.test_runner.run_initial_configuration_delete()
|
[
"proboscis.test"
] |
[((1044, 1167), 'proboscis.test', 'test', ([], {'depends_on_groups': "['services.initialize']", 'runs_after_groups': '[PRE_INSTANCES]', 'groups': '[GROUP, groups.INST_CREATE]'}), "(depends_on_groups=['services.initialize'], runs_after_groups=[\n PRE_INSTANCES], groups=[GROUP, groups.INST_CREATE])\n", (1048, 1167), False, 'from proboscis import test\n'), ((1531, 1621), 'proboscis.test', 'test', ([], {'depends_on_groups': '[groups.INST_CREATE]', 'groups': '[GROUP, groups.INST_INIT_CREATE]'}), '(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.\n INST_INIT_CREATE])\n', (1535, 1621), False, 'from proboscis import test\n'), ((2255, 2440), 'proboscis.test', 'test', ([], {'depends_on_groups': '[groups.INST_CREATE]', 'groups': '[GROUP, groups.INST_CREATE_WAIT]', 'runs_after_groups': '[groups.MODULE_CREATE, groups.CFGGRP_CREATE, groups.INST_ERROR_DELETE]'}), '(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.\n INST_CREATE_WAIT], runs_after_groups=[groups.MODULE_CREATE, groups.\n CFGGRP_CREATE, groups.INST_ERROR_DELETE])\n', (2259, 2440), False, 'from proboscis import test\n'), ((2843, 2988), 'proboscis.test', 'test', ([], {'depends_on_groups': '[groups.INST_INIT_CREATE]', 'groups': '[GROUP, groups.INST_INIT_CREATE_WAIT]', 'runs_after_groups': '[groups.INST_CREATE_WAIT]'}), '(depends_on_groups=[groups.INST_INIT_CREATE], groups=[GROUP, groups.\n INST_INIT_CREATE_WAIT], runs_after_groups=[groups.INST_CREATE_WAIT])\n', (2847, 2988), False, 'from proboscis import test\n'), ((3830, 3929), 'proboscis.test', 'test', ([], {'depends_on_groups': '[groups.INST_INIT_CREATE_WAIT]', 'groups': '[GROUP, groups.INST_INIT_DELETE]'}), '(depends_on_groups=[groups.INST_INIT_CREATE_WAIT], groups=[GROUP,\n groups.INST_INIT_DELETE])\n', (3834, 3929), False, 'from proboscis import test\n'), ((4327, 4473), 'proboscis.test', 'test', ([], {'depends_on_groups': '[groups.INST_INIT_DELETE]', 'runs_after_groups': '[groups.INST_ERROR_DELETE]', 'groups': '[GROUP, groups.INST_INIT_DELETE_WAIT]'}), '(depends_on_groups=[groups.INST_INIT_DELETE], runs_after_groups=[groups\n .INST_ERROR_DELETE], groups=[GROUP, groups.INST_INIT_DELETE_WAIT])\n', (4331, 4473), False, 'from proboscis import test\n'), ((2044, 2091), 'proboscis.test', 'test', ([], {'runs_after': '[create_initial_configuration]'}), '(runs_after=[create_initial_configuration])\n', (2048, 2091), False, 'from proboscis import test\n'), ((3397, 3438), 'proboscis.test', 'test', ([], {'depends_on': '[wait_for_init_instance]'}), '(depends_on=[wait_for_init_instance])\n', (3401, 3438), False, 'from proboscis import test\n'), ((3603, 3651), 'proboscis.test', 'test', ([], {'runs_after': '[add_initialized_instance_data]'}), '(runs_after=[add_initialized_instance_data])\n', (3607, 3651), False, 'from proboscis import test\n'), ((4887, 4926), 'proboscis.test', 'test', ([], {'runs_after': '[wait_for_init_delete]'}), '(runs_after=[wait_for_init_delete])\n', (4891, 4926), False, 'from proboscis import test\n')]
|
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses
# 1. Prepare the dataset
(x, y), (x_val, y_val) = datasets.mnist.load_data()  # load MNIST; returns two tuples: the training set and the test set
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # convert to a tensor and scale to the 0~1 range
y = tf.convert_to_tensor(y, dtype=tf.int32)  # convert the labels to a tensor
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))  # build the Dataset object
train_dataset = train_dataset.batch(32).repeat(10)  # batch size of 32; repeat the training set for 10 epochs
# 2. Build the network
network = Sequential([  # network container
    layers.Conv2D(6, kernel_size=3, strides=1),  # first convolution layer, six 3*3*1 kernels
    layers.MaxPooling2D(pool_size=2, strides=2),  # pooling layer, 2*2 window, stride 2
    layers.ReLU(),  # activation function
    layers.Conv2D(16, kernel_size=3, strides=1),  # second convolution layer, sixteen 3*3*6 kernels
    layers.MaxPooling2D(pool_size=2, strides=2),  # pooling layer
    layers.ReLU(),  # activation function
    layers.Flatten(),  # flatten so the fully connected layers can process the output
    layers.Dense(120, activation='relu'),  # fully connected layer, 120 units
    layers.Dense(84, activation='relu'),  # fully connected layer, 84 units
    layers.Dense(10)  # output layer, 10 units
])
network.build(input_shape=(None, 28, 28, 1))  # define the input: 28*28 images with a single channel
network.summary()  # print the number of trainable parameters per layer
# 3. Train the model (compute gradients and iteratively update the parameters)
optimizer = optimizers.SGD(lr=0.01)  # mini-batch stochastic gradient descent with learning rate 0.01
acc_meter = metrics.Accuracy()  # create an accuracy meter
for step, (x, y) in enumerate(train_dataset):  # feed one batch per step
    with tf.GradientTape() as tape:  # open a gradient-recording context
        x = tf.reshape(x, (32, 28, 28, 1))  # add the channel dimension: [b, 28, 28] -> [b, 28, 28, 1]
        # x = tf.expand_dims(x, axis=3)
        out = network(x)  # output shape [b, 10]
        y_onehot = tf.one_hot(y, depth=10)  # one-hot encoding
        loss = tf.square(out - y_onehot)
        loss = tf.reduce_sum(loss) / 32  # mean squared error; the 32 is the batch size
    grads = tape.gradient(loss, network.trainable_variables)  # compute the gradient of every network parameter
    optimizer.apply_gradients(zip(grads, network.trainable_variables))  # update the network parameters
    acc_meter.update_state(tf.argmax(out, axis=1), y)  # compare predictions with labels to accumulate the accuracy
    if step % 200 == 0:  # print a result every 200 steps
        print('Step', step, ': Loss is: ', float(loss), ' Accuracy: ', acc_meter.result().numpy())  # read out the metric
        acc_meter.reset_states()  # reset the meter
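# --- Hedged addition (not part of the original script): a minimal evaluation pass
# over the held-out split (x_val, y_val), which is loaded above but otherwise unused.
# The names val_dataset, val_acc, x_b and y_b are illustrative.
x_val = tf.convert_to_tensor(x_val, dtype=tf.float32) / 255.  # same preprocessing as the training set
y_val = tf.convert_to_tensor(y_val, dtype=tf.int32)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(32, drop_remainder=True)
val_acc = metrics.Accuracy()
for x_b, y_b in val_dataset:
    x_b = tf.reshape(x_b, (-1, 28, 28, 1))  # add the channel dimension
    out = network(x_b)
    val_acc.update_state(tf.argmax(out, axis=1), y_b)
print('Validation accuracy: ', val_acc.result().numpy())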
|
[
"tensorflow.one_hot",
"tensorflow.keras.metrics.Accuracy",
"tensorflow.keras.layers.Conv2D",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.ReLU",
"tensorflow.reduce_sum",
"tensorflow.keras.optimizers.SGD",
"tensorflow.GradientTape",
"tensorflow.argmax",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.Flatten",
"tensorflow.square"
] |
[((147, 173), 'tensorflow.keras.datasets.mnist.load_data', 'datasets.mnist.load_data', ([], {}), '()\n', (171, 173), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((278, 317), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {'dtype': 'tf.int32'}), '(y, dtype=tf.int32)\n', (298, 317), True, 'import tensorflow as tf\n'), ((371, 413), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x, y)'], {}), '((x, y))\n', (405, 413), True, 'import tensorflow as tf\n'), ((1212, 1235), 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (1226, 1235), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((1275, 1293), 'tensorflow.keras.metrics.Accuracy', 'metrics.Accuracy', ([], {}), '()\n', (1291, 1293), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((208, 249), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (228, 249), True, 'import tensorflow as tf\n'), ((555, 597), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(6)'], {'kernel_size': '(3)', 'strides': '(1)'}), '(6, kernel_size=3, strides=1)\n', (568, 597), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((624, 667), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (643, 667), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((691, 704), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (702, 704), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((718, 761), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16)'], {'kernel_size': '(3)', 'strides': '(1)'}), '(16, kernel_size=3, strides=1)\n', (731, 761), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((789, 832), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (808, 832), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((845, 858), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (856, 858), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((872, 888), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (886, 888), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((909, 945), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(120)'], {'activation': '"""relu"""'}), "(120, activation='relu')\n", (921, 945), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((966, 1001), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(84)'], {'activation': '"""relu"""'}), "(84, activation='relu')\n", (978, 1001), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((1021, 1037), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (1033, 1037), False, 'from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses\n'), ((1386, 1403), 'tensorflow.GradientTape', 
'tf.GradientTape', ([], {}), '()\n', (1401, 1403), True, 'import tensorflow as tf\n'), ((1437, 1467), 'tensorflow.reshape', 'tf.reshape', (['x', '(32, 28, 28, 1)'], {}), '(x, (32, 28, 28, 1))\n', (1447, 1467), True, 'import tensorflow as tf\n'), ((1593, 1616), 'tensorflow.one_hot', 'tf.one_hot', (['y'], {'depth': '(10)'}), '(y, depth=10)\n', (1603, 1616), True, 'import tensorflow as tf\n'), ((1645, 1670), 'tensorflow.square', 'tf.square', (['(out - y_onehot)'], {}), '(out - y_onehot)\n', (1654, 1670), True, 'import tensorflow as tf\n'), ((1686, 1705), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (1699, 1705), True, 'import tensorflow as tf\n'), ((1940, 1962), 'tensorflow.argmax', 'tf.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (1949, 1962), True, 'import tensorflow as tf\n')]
|
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
class DialogueEntry(tk.Toplevel):
"""
DialogueEntry : tkinter.Toplevel
    Dialogue box that allows the user to enter text in a field.
kwargs :
title : title of the dialogue box
text : text displayed in the label of the dialogue box
ok_button_callback : callable that is called when the ok button is pressed
textvariable : tkinter.StringVar that is used in the Entry widget
width : 300 by default, width of the window
height : 70 by default, height of the window
xpos, ypos : screen coordinates. By default, these coordinates place the window in the middle of the screen
methods :
get(self) : gets the string in the entry widget
set(self, value) : sets the string in the entry widget
"""
def __init__(self, *args, title="Please Enter a value", text="Enter a value", ok_button_callback=None, textvariable=None, width=300, height=70, xpos=None, ypos=None, **kwargs):
super().__init__(*args, **kwargs)
w, h = width, height
if xpos is None:
ws = self.winfo_screenwidth() # width of the screen
x = (ws // 2) - (w // 2)
else:
x = xpos
if ypos is None:
hs = self.winfo_screenheight() # height of the screen
y = (hs // 2) - (h // 2)
else:
y = ypos
self.title(title)
self.geometry(f"{w}x{h}+{x}+{y}")
self.resizable(False, False)
self.update()
self.textvar = textvariable or tk.StringVar()
self.ok_button_callback = ok_button_callback
self.entry = tk.Entry(self, textvariable=self.textvar, width=w // 6)
self.ok_btn = tk.Button(self, text="Ok", command=self.on_ok_btn)
self.cancel_btn = tk.Button(self, text="Cancel", command=self.on_cancel_btn)
self.label = tk.Label(self, text=text)
self.protocol("WM_DELETE_WINDOW", self.on_cancel_btn)
self.label.grid(row=0, column=0, columnspan=2, sticky="ew")
self.entry.grid(row=1, column=0, columnspan=2, sticky="ew")
self.ok_btn.grid(row=2, column=0, sticky="ew")
self.cancel_btn.grid(row=2, column=1, sticky="ew")
self.mainloop()
def on_ok_btn(self):
if callable(self.ok_button_callback):
self.ok_button_callback()
self.on_cancel_btn()
    def on_cancel_btn(self):
        # quit() ends the mainloop() started in __init__, so the constructor can
        # return once the dialogue is confirmed, cancelled or closed
        self.quit()
        self.destroy()
def get(self):
return self.textvar.get()
def set(self, value):
self.textvar.set(value)
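# --- Hedged usage sketch (added; not part of the original module). It assumes a
# hidden Tk root drives the dialogue and that the constructor returns once the
# nested mainloop ends (see on_cancel_btn above). Names are illustrative.
if __name__ == "__main__":
    root = tk.Tk()
    root.withdraw()  # keep the empty main window out of sight
    dialog = DialogueEntry(root, title="Name", text="Please enter your name")
    print("You entered:", dialog.get())
    root.destroy()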
|
[
"Tkinter.Entry",
"Tkinter.Label",
"Tkinter.Button",
"Tkinter.StringVar"
] |
[((1748, 1803), 'Tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.textvar', 'width': '(w // 6)'}), '(self, textvariable=self.textvar, width=w // 6)\n', (1756, 1803), True, 'import Tkinter as tk\n'), ((1826, 1876), 'Tkinter.Button', 'tk.Button', (['self'], {'text': '"""Ok"""', 'command': 'self.on_ok_btn'}), "(self, text='Ok', command=self.on_ok_btn)\n", (1835, 1876), True, 'import Tkinter as tk\n'), ((1903, 1961), 'Tkinter.Button', 'tk.Button', (['self'], {'text': '"""Cancel"""', 'command': 'self.on_cancel_btn'}), "(self, text='Cancel', command=self.on_cancel_btn)\n", (1912, 1961), True, 'import Tkinter as tk\n'), ((1983, 2008), 'Tkinter.Label', 'tk.Label', (['self'], {'text': 'text'}), '(self, text=text)\n', (1991, 2008), True, 'import Tkinter as tk\n'), ((1659, 1673), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1671, 1673), True, 'import Tkinter as tk\n')]
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from lava.lib.dl.slayer.neuron import alif
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 590
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
current_decay = np.random.random()
voltage_decay = np.random.random()
threshold_decay = np.random.random()
refractory_decay = np.random.random()
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]), requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape(
[1, 1, spike_input.shape[-1]]
).to(device)
# initialize neuron
neuron = alif.Neuron(
threshold,
threshold_step=0.5 * threshold,
current_decay=current_decay,
voltage_decay=voltage_decay,
threshold_decay=threshold_decay,
refractory_decay=refractory_decay,
persistent_state=True,
).to(device)
quantized_weight = neuron.quantize_8bit(weight)
neuron.debug = True
# get the neuron response for full input
current, voltage, th, ref = neuron.dynamics(quantized_weight * spike_input)
spike = neuron.spike(voltage, th, ref)
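# Note (added): current, voltage, th, ref and spike are expected to share the input
# shape (batch=5, neuron=4, time=200); the tests below slice and compare them along
# the batch and time dimensions.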
class TestALIF(unittest.TestCase):
def test_input_output_range(self):
if verbose:
print(spike_input.sum(), spike_input.flatten())
if verbose:
print(spike.sum(), spike.flatten())
self.assertTrue(
spike_input.sum().item() > 0,
'There was zero input spike. Check the test setting.'
)
self.assertTrue(
spike.sum().item() > 0,
            'There was zero output spike. Check the test setting.'
)
def test_properties(self):
_ = neuron.weight_exponent
_ = neuron.v_th_mant
_ = neuron.cx_current_decay
_ = neuron.cx_voltage_decay
_ = neuron.cx_threshold_decay
_ = neuron.cx_refractory_decay
_ = neuron.scale
_ = neuron.shape
_ = neuron.device
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_batch_consistency(self):
spike_var = torch.norm(torch.var(spike, dim=0)).item()
voltage_var = torch.norm(torch.var(voltage, dim=0)).item()
current_var = torch.norm(torch.var(current, dim=0)).item()
th_var = torch.norm(torch.var(th, dim=0)).item()
ref_var = torch.norm(torch.var(ref, dim=0)).item()
self.assertTrue(
spike_var < 1e-5,
f'Spike variation across batch dimension is inconsistent. '
f'Variance was {spike_var}. Expected 0.'
)
self.assertTrue(
current_var < 1e-5,
f'Current variation across batch dimension is inconsistent. '
f'Variance was {current_var}. Expected 0.'
)
self.assertTrue(
voltage_var < 1e-5,
f'Voltage variation across batch dimension is inconsistent. '
f'Variance was {voltage_var}. Expected 0.'
)
self.assertTrue(
th_var < 1e-5,
f'Threshold variation across batch dimension is inconsistent. '
f'Variance was {th_var}. Expected 0.'
)
self.assertTrue(
ref_var < 1e-5,
f'Refractory variation across batch dimension is inconsistent. '
f'Variance was {ref_var}. Expected 0.'
)
def test_integer_states(self):
# there should be no quantization error when
# states are scaled with s_scale
voltage_error = torch.norm(
torch.floor(voltage * neuron.s_scale)
- voltage * neuron.s_scale
)
current_error = torch.norm(
torch.floor(current * neuron.s_scale)
- current * neuron.s_scale
)
th_error = torch.norm(
torch.floor(th * neuron.s_scale)
- th * neuron.s_scale
)
ref_error = torch.norm(
torch.floor(ref * neuron.s_scale)
- ref * neuron.s_scale
)
self.assertTrue(
voltage_error < 1e-5,
f'Voltage calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {voltage_error}'
)
self.assertTrue(
current_error < 1e-5,
f'Current calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {current_error}'
)
self.assertTrue(
th_error < 1e-5,
f'Threshold calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {th_error}'
)
self.assertTrue(
ref_error < 1e-5,
f'Refractory calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {ref_error}'
)
def test_persistent_state(self):
# clear previous persistent state
neuron.current_state *= 0
neuron.voltage_state *= 0
neuron.threshold_state *= 0
neuron.threshold_state += neuron.threshold # stable at th0
neuron.refractory_state *= 0
# break the calculation into two parts: before ind and after ind
ind = int(np.random.random() * (spike_input.shape[-1] - 1)) + 1
current0, voltage0, th0, ref0 = neuron.dynamics(
quantized_weight[..., :ind] * spike_input[..., :ind]
)
spike0 = neuron.spike(voltage0, th0, ref0)
current1, voltage1, th1, ref1 = neuron.dynamics(
quantized_weight[..., ind:] * spike_input[..., ind:]
)
spike1 = neuron.spike(voltage1, th1, ref1)
spike_error = (
torch.norm(spike[..., :ind] - spike0)
+ torch.norm(spike[..., ind:] - spike1)
).item()
voltage_error = (
torch.norm(voltage[..., :ind] - voltage0)
+ torch.norm(voltage[..., ind:] - voltage1)
).item()
current_error = (
torch.norm(current[..., :ind] - current0)
+ torch.norm(current[..., ind:] - current1)
).item()
th_error = (
torch.norm(th[..., :ind] - th0)
+ torch.norm(th[..., ind:] - th1)
).item()
ref_error = (
torch.norm(ref[..., :ind] - ref0)
+ torch.norm(ref[..., ind:] - ref1)
).item()
if verbose:
print(ind)
if spike_error >= 1e-5:
print('Persistent spike states')
print(
spike[0, 0, ind - 10:ind + 10].cpu().data.numpy().tolist()
)
print(spike0[0, 0, -10:].cpu().data.numpy().tolist())
print(spike1[0, 0, :10].cpu().data.numpy().tolist())
if voltage_error >= 1e-5:
print('Persistent voltage states')
print((
neuron.s_scale * voltage[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if current_error >= 1e-5:
print('Persistent current states')
print((
neuron.s_scale * current[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if th_error >= 1e-5:
print('Persistent threshold states')
print((
neuron.s_scale * th[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if ref_error >= 1e-5:
print('Persistent refractory states')
print((
neuron.s_scale * ref[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if verbose:
if bool(os.environ.get('DISPLAY', None)):
plt.figure()
plt.plot(
time.cpu().data.numpy(),
current[0, 0].cpu().data.numpy(),
label='current'
)
plt.plot(
time[:ind].cpu().data.numpy(),
current0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
current1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
voltage[0, 0].cpu().data.numpy(),
label='voltage'
)
plt.plot(
time[:ind].cpu().data.numpy(),
voltage0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
voltage1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.plot(
time[spike[0, 0] > 0].cpu().data.numpy(),
0 * spike[0, 0][spike[0, 0] > 0].cpu().data.numpy(),
'.', markersize=12, label='spike'
)
plt.plot(
time[:ind][spike0[0, 0] > 0].cpu().data.numpy(),
0 * spike0[0, 0][spike0[0, 0] > 0].cpu().data.numpy(),
'.', label=':ind'
)
plt.plot(
time[ind:][spike1[0, 0] > 0].cpu().data.numpy(),
0 * spike1[0, 0][spike1[0, 0] > 0].cpu().data.numpy(),
'.', label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
th[0, 0].cpu().data.numpy(),
label='threshold'
)
plt.plot(
time[:ind].cpu().data.numpy(),
th0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
th1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
ref[0, 0].cpu().data.numpy(),
label='refractory'
)
plt.plot(
time[:ind].cpu().data.numpy(),
ref0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
ref1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.show()
self.assertTrue(
spike_error < 1e-5,
f'Persistent state has errors in spike calculation. '
f'Error was {spike_error}.'
f'{seed=}'
)
self.assertTrue(
voltage_error < 1e-5,
f'Persistent state has errors in voltage calculation. '
f'Error was {voltage_error}.'
f'{seed=}'
)
self.assertTrue(
current_error < 1e-5,
f'Persistent state has errors in current calculation. '
f'Error was {current_error}.'
f'{seed=}'
)
self.assertTrue(
th_error < 1e-5,
f'Persistent state has errors in threshold calculation. '
f'Error was {th_error}.'
f'{seed=}'
)
self.assertTrue(
ref_error < 1e-5,
f'Persistent state has errors in refractory calculation. '
f'Error was {ref_error}.'
f'{seed=}'
)
def test_backward(self):
spike_target = spike.clone().detach()
current_target = current.clone().detach()
voltage_target = voltage.clone().detach()
spike_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] = 1
current_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= 1
voltage_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= -1
loss = F.mse_loss(spike, spike_target) \
+ F.mse_loss(current, current_target) \
+ F.mse_loss(voltage, voltage_target)
loss.backward()
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_graded_spikes(self):
# TODO: after further study of network behavior with graded spikes.
pass
|
[
"lava.lib.dl.slayer.neuron.alif.Neuron",
"torch.nn.functional.mse_loss",
"matplotlib.pyplot.show",
"numpy.random.random",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"torch.floor",
"os.environ.get",
"numpy.random.randint",
"torch.cuda.is_available",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"torch.norm",
"torch.var",
"numpy.arange",
"torch.device"
] |
[((346, 369), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (363, 369), True, 'import numpy as np\n'), ((383, 403), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (397, 403), True, 'import numpy as np\n'), ((442, 467), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (465, 467), False, 'import torch\n'), ((730, 748), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (746, 748), True, 'import numpy as np\n'), ((765, 783), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (781, 783), True, 'import numpy as np\n'), ((802, 820), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (818, 820), True, 'import numpy as np\n'), ((840, 858), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (856, 858), True, 'import numpy as np\n'), ((482, 502), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (494, 502), False, 'import torch\n'), ((659, 678), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (671, 678), False, 'import torch\n'), ((1097, 1145), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (1114, 1145), True, 'import numpy as np\n'), ((1327, 1540), 'lava.lib.dl.slayer.neuron.alif.Neuron', 'alif.Neuron', (['threshold'], {'threshold_step': '(0.5 * threshold)', 'current_decay': 'current_decay', 'voltage_decay': 'voltage_decay', 'threshold_decay': 'threshold_decay', 'refractory_decay': 'refractory_decay', 'persistent_state': '(True)'}), '(threshold, threshold_step=0.5 * threshold, current_decay=\n current_decay, voltage_decay=voltage_decay, threshold_decay=\n threshold_decay, refractory_decay=refractory_decay, persistent_state=True)\n', (1338, 1540), False, 'from lava.lib.dl.slayer.neuron import alif\n'), ((900, 914), 'numpy.arange', 'np.arange', (['(200)'], {}), '(200)\n', (909, 914), True, 'import numpy as np\n'), ((14489, 14524), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['voltage', 'voltage_target'], {}), '(voltage, voltage_target)\n', (14499, 14524), True, 'import torch.nn.functional as F\n'), ((4204, 4241), 'torch.floor', 'torch.floor', (['(voltage * neuron.s_scale)'], {}), '(voltage * neuron.s_scale)\n', (4215, 4241), False, 'import torch\n'), ((4339, 4376), 'torch.floor', 'torch.floor', (['(current * neuron.s_scale)'], {}), '(current * neuron.s_scale)\n', (4350, 4376), False, 'import torch\n'), ((4469, 4501), 'torch.floor', 'torch.floor', (['(th * neuron.s_scale)'], {}), '(th * neuron.s_scale)\n', (4480, 4501), False, 'import torch\n'), ((4590, 4623), 'torch.floor', 'torch.floor', (['(ref * neuron.s_scale)'], {}), '(ref * neuron.s_scale)\n', (4601, 4623), False, 'import torch\n'), ((9575, 9606), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', 'None'], {}), "('DISPLAY', None)\n", (9589, 9606), False, 'import os\n'), ((9625, 9637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9635, 9637), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10217), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (10209, 10217), True, 'import matplotlib.pyplot as plt\n'), ((10234, 10246), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10244, 10246), True, 'import matplotlib.pyplot as plt\n'), ((10264, 10276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10274, 10276), True, 'import matplotlib.pyplot as plt\n'), ((11524, 11542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), 
"('time')\n", (11534, 11542), True, 'import matplotlib.pyplot as plt\n'), ((11559, 11571), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11569, 11571), True, 'import matplotlib.pyplot as plt\n'), ((11589, 11601), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11599, 11601), True, 'import matplotlib.pyplot as plt\n'), ((12150, 12168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (12160, 12168), True, 'import matplotlib.pyplot as plt\n'), ((12185, 12197), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12195, 12197), True, 'import matplotlib.pyplot as plt\n'), ((12215, 12227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12225, 12227), True, 'import matplotlib.pyplot as plt\n'), ((12780, 12798), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (12790, 12798), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12827), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12825, 12827), True, 'import matplotlib.pyplot as plt\n'), ((12844, 12854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12852, 12854), True, 'import matplotlib.pyplot as plt\n'), ((14075, 14123), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14092, 14123), True, 'import numpy as np\n'), ((14191, 14239), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14208, 14239), True, 'import numpy as np\n'), ((14308, 14356), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14325, 14356), True, 'import numpy as np\n'), ((14389, 14420), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['spike', 'spike_target'], {}), '(spike, spike_target)\n', (14399, 14420), True, 'import torch.nn.functional as F\n'), ((14437, 14472), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current', 'current_target'], {}), '(current, current_target)\n', (14447, 14472), True, 'import torch.nn.functional as F\n'), ((2783, 2806), 'torch.var', 'torch.var', (['spike'], {'dim': '(0)'}), '(spike, dim=0)\n', (2792, 2806), False, 'import torch\n'), ((2848, 2873), 'torch.var', 'torch.var', (['voltage'], {'dim': '(0)'}), '(voltage, dim=0)\n', (2857, 2873), False, 'import torch\n'), ((2915, 2940), 'torch.var', 'torch.var', (['current'], {'dim': '(0)'}), '(current, dim=0)\n', (2924, 2940), False, 'import torch\n'), ((2977, 2997), 'torch.var', 'torch.var', (['th'], {'dim': '(0)'}), '(th, dim=0)\n', (2986, 2997), False, 'import torch\n'), ((3035, 3056), 'torch.var', 'torch.var', (['ref'], {'dim': '(0)'}), '(ref, dim=0)\n', (3044, 3056), False, 'import torch\n'), ((5954, 5972), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5970, 5972), True, 'import numpy as np\n'), ((6411, 6448), 'torch.norm', 'torch.norm', (['(spike[..., :ind] - spike0)'], {}), '(spike[..., :ind] - spike0)\n', (6421, 6448), False, 'import torch\n'), ((6463, 6500), 'torch.norm', 'torch.norm', (['(spike[..., ind:] - spike1)'], {}), '(spike[..., ind:] - spike1)\n', (6473, 6500), False, 'import torch\n'), ((6556, 6597), 'torch.norm', 'torch.norm', (['(voltage[..., :ind] - voltage0)'], {}), '(voltage[..., :ind] - voltage0)\n', (6566, 6597), False, 'import torch\n'), ((6612, 6653), 'torch.norm', 'torch.norm', (['(voltage[..., ind:] - voltage1)'], {}), '(voltage[..., ind:] - voltage1)\n', (6622, 6653), False, 'import 
torch\n'), ((6709, 6750), 'torch.norm', 'torch.norm', (['(current[..., :ind] - current0)'], {}), '(current[..., :ind] - current0)\n', (6719, 6750), False, 'import torch\n'), ((6765, 6806), 'torch.norm', 'torch.norm', (['(current[..., ind:] - current1)'], {}), '(current[..., ind:] - current1)\n', (6775, 6806), False, 'import torch\n'), ((6857, 6888), 'torch.norm', 'torch.norm', (['(th[..., :ind] - th0)'], {}), '(th[..., :ind] - th0)\n', (6867, 6888), False, 'import torch\n'), ((6903, 6934), 'torch.norm', 'torch.norm', (['(th[..., ind:] - th1)'], {}), '(th[..., ind:] - th1)\n', (6913, 6934), False, 'import torch\n'), ((6986, 7019), 'torch.norm', 'torch.norm', (['(ref[..., :ind] - ref0)'], {}), '(ref[..., :ind] - ref0)\n', (6996, 7019), False, 'import torch\n'), ((7034, 7067), 'torch.norm', 'torch.norm', (['(ref[..., ind:] - ref1)'], {}), '(ref[..., ind:] - ref1)\n', (7044, 7067), False, 'import torch\n'), ((1187, 1231), 'numpy.random.random', 'np.random.random', ([], {'size': 'spike_input.shape[-1]'}), '(size=spike_input.shape[-1])\n', (1203, 1231), True, 'import numpy as np\n')]
|
import decimal
import pytest
from amount import Amount as A
def test_basic():
a = A(("0.30", "$"))
assert '$ 0.30' == str(a)
a = A({"$": decimal.Decimal(4)})
    assert '$ 4' == str(a)
def test_add():
a = A(("2.34", "$"))
b = A(("5.97", "$"))
assert "$ 8.31" == str(a+b)
c = A(("9.01", "CAD")) + A(("15.56", "$"))
assert "CAD 9.01" == str(c.get("CAD"))
assert "$ 15.56" == str(c.get("$"))
d = a + c
assert "$ 17.90" == str(d.get("$"))
assert "CAD 9.01" == str(d.get("CAD"))
|
[
"decimal.Decimal",
"amount.Amount"
] |
[((88, 104), 'amount.Amount', 'A', (["('0.30', '$')"], {}), "(('0.30', '$'))\n", (89, 104), True, 'from amount import Amount as A\n'), ((223, 239), 'amount.Amount', 'A', (["('2.34', '$')"], {}), "(('2.34', '$'))\n", (224, 239), True, 'from amount import Amount as A\n'), ((248, 264), 'amount.Amount', 'A', (["('5.97', '$')"], {}), "(('5.97', '$'))\n", (249, 264), True, 'from amount import Amount as A\n'), ((306, 324), 'amount.Amount', 'A', (["('9.01', 'CAD')"], {}), "(('9.01', 'CAD'))\n", (307, 324), True, 'from amount import Amount as A\n'), ((327, 344), 'amount.Amount', 'A', (["('15.56', '$')"], {}), "(('15.56', '$'))\n", (328, 344), True, 'from amount import Amount as A\n'), ((152, 170), 'decimal.Decimal', 'decimal.Decimal', (['(4)'], {}), '(4)\n', (167, 170), False, 'import decimal\n')]
|
from django.views.generic import TemplateView
from django_tables2.config import RequestConfig
from django_tables2_column_shifter.tables import (
ColumnShiftTableBootstrap2,
ColumnShiftTableBootstrap3,
ColumnShiftTableBootstrap4,
ColumnShiftTableBootstrap5,
)
from .models import Author, Book
from .tables import get_author_table_class, get_book_table_class
class Index(TemplateView):
template_name = "testproject/index.html"
class Base(object):
container_css = "span10 offset1"
template_name = "testproject/test_bootstrap2.html"
table_class_version = ColumnShiftTableBootstrap2
def get_context_data(self, **kwargs):
context = super(Base, self).get_context_data(**kwargs)
# Build tabels
author_queryset = Author.objects.all()
author_table1 = get_author_table_class(
self.table_class_version
)(author_queryset)
author_table2 = get_author_table_class(
self.table_class_version
)(author_queryset, prefix="authors2")
book_queryset = Book.objects.all()
book_table = get_book_table_class(
self.table_class_version
)(book_queryset, prefix="books")
# Turn on sorting and pagination
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table1)
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table2)
RequestConfig(self.request, paginate={'per_page': 2}).configure(book_table)
context['container_css'] = self.container_css
context['author_table1'] = author_table1
context['author_table2'] = author_table2
context['book_table'] = book_table
context['book_queryset'] = book_queryset
return context
class Bootstrap2(Base, TemplateView):
pass
class Bootstrap3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap3.html"
table_class_version = ColumnShiftTableBootstrap3
class Bootstrap4(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap4_1_3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.1.3.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap5(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap5.html"
table_class_version = ColumnShiftTableBootstrap5
|
[
"django_tables2.config.RequestConfig"
] |
[((1252, 1305), 'django_tables2.config.RequestConfig', 'RequestConfig', (['self.request'], {'paginate': "{'per_page': 2}"}), "(self.request, paginate={'per_page': 2})\n", (1265, 1305), False, 'from django_tables2.config import RequestConfig\n'), ((1339, 1392), 'django_tables2.config.RequestConfig', 'RequestConfig', (['self.request'], {'paginate': "{'per_page': 2}"}), "(self.request, paginate={'per_page': 2})\n", (1352, 1392), False, 'from django_tables2.config import RequestConfig\n'), ((1426, 1479), 'django_tables2.config.RequestConfig', 'RequestConfig', (['self.request'], {'paginate': "{'per_page': 2}"}), "(self.request, paginate={'per_page': 2})\n", (1439, 1479), False, 'from django_tables2.config import RequestConfig\n')]
|
import numpy as np
EXPERIMENT_NAME = 'EXP_12'
CORPUS_PATH = '/home/dddhiraj/Documents/stuff/data/wiki_en.txt'
TRAINING_WINDOW = 5
CONTEXT_DIMENSION = 64
LEANING_RATE = 1
DROPOUT = 0.05
CONTEXT_DECAY = 1 - TRAINING_WINDOW ** -0.5
CONTRASTIVE_WEIGHT = 1#0.1
NEGATIVE_SAMPLE_SIZE = TRAINING_WINDOW ** 2
CONEXT_INERTIA = np.sqrt(TRAINING_WINDOW)
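# Derived values for TRAINING_WINDOW = 5 (added note): CONTEXT_DECAY ~= 0.553,
# NEGATIVE_SAMPLE_SIZE = 25 and CONEXT_INERTIA = sqrt(5) ~= 2.236.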
THREADS = 6
CHUNK_SIZE = 5000
DB = 'REDIS'
if DB == 'MONGO':
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.train_1#neighbour_aware_context_initilization_train_window_8
if DB == 'REDIS':
import redis
collection = redis.Redis(db=1) #11
key_collection= redis.Redis(db=2) #12
#import redisai
# collection = redisai.Client(db=14)
# key_collection = redisai.Client(db=15)
'''
Experiment details:
Trained on wiki data with 51 million words.
'''
|
[
"pymongo.MongoClient",
"numpy.sqrt",
"redis.Redis"
] |
[((367, 391), 'numpy.sqrt', 'np.sqrt', (['TRAINING_WINDOW'], {}), '(TRAINING_WINDOW)\n', (374, 391), True, 'import numpy as np\n'), ((496, 544), 'pymongo.MongoClient', 'pymongo.MongoClient', (['"""mongodb://localhost:27017"""'], {}), "('mongodb://localhost:27017')\n", (515, 544), False, 'import pymongo\n'), ((721, 738), 'redis.Redis', 'redis.Redis', ([], {'db': '(1)'}), '(db=1)\n', (732, 738), False, 'import redis\n'), ((763, 780), 'redis.Redis', 'redis.Redis', ([], {'db': '(2)'}), '(db=2)\n', (774, 780), False, 'import redis\n')]
|
#!/usr/bin/env python
# coding: utf-8
""" yasi
Date: 20th November 2013
Author: nkmathew <<EMAIL>>
Dialect aware s-expression indenter
"""
from __future__ import print_function
import argparse
import hashlib
import os
import re
import shutil
import sys
import time
import collections
import json
import difflib
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
# pylint: disable=unused-import
from pprint import pprint # noqa
__version__ = '2.1.2'
@lru_cache(maxsize=None)
def create_args_parser():
""" Return command line parser """
parser = argparse.ArgumentParser(
description='Dialect-aware s-expression indenter', prog='yasi')
parser.add_argument('files', help='List of files to be indented. '
'Will indent from standard input if no files are specified',
nargs='*')
parser.add_argument(
'-nc', '--no-compact', '--nc', dest='compact',
help='Do not compact the code, just indent', action='store_false')
parser.add_argument(
'-nb', '--no-backup', '--nb', dest='backup', action='store_false',
help='Do not create a backup file even if --backup-dir is specified ')
parser.add_argument(
'-nm', '--no-modify', '--nm', dest='modify',
help='Do not modify the file', action='store_false')
parser.add_argument(
'--diff', '-diff', dest='output_diff',
help='Prints unified diff of the initial and final result',
action='store_true')
parser.add_argument(
'-nw', '--no-warning', '--nw', dest='warning',
help='Do not display warnings', action='store_false')
parser.add_argument(
'-nr', '--no-rc', '--nr', dest='read_rc',
help='Ignore any rc files in the current or home folder',
action='store_false')
parser.add_argument(
'--no-output', '-no-output', dest='output',
help='Suppress output of the indented code', action='store_false')
parser.add_argument(
'-c', '--color', '-color', dest='colour_diff',
help='Display diff text in color', action='store_true')
parser.add_argument(
'-ne', '--no-exit', '--ne', dest='exit', action='store_false',
help='Instructs the program not to exit when a warning is raised.')
parser.add_argument(
'-o', dest='output_file',
help='Path/name of output file', type=str, default='')
parser.add_argument(
'--tab', '-tab', dest='tab_size',
help='Indent with tabs using the specified tabwidth. A tab is assumed \
equal to 4 spaces by default when expanding the tabs in the input file',
type=int, default=-1)
parser.add_argument(
'--dialect', '-dialect',
help='Use Scheme keywords', type=str, default='')
parser.add_argument(
'-v', '--version', action='version',
help='Prints script version', version='yasi v%s' % __version__)
parser.add_argument(
'-suffix', '--suffix', dest='backup_suffix', help='Backup file suffix',
type=str, default='.yasi.bak~')
parser.add_argument(
'-bd', '--backup-dir', '--bd', '-backup-dir',
help='The directory where the backup file is to be written',
type=str, default=os.getcwd())
parser.add_argument(
'-is', '--indent-size', '--is',
help='The number of spaces per indent',
type=int, default=2)
parser.add_argument(
'-di', '--default-indent', '--di',
help='The indent level to be used in case a '
"function's argument is in the next line. Vim uses 2, the most common being 1.",
type=int, default=1)
parser.add_argument(
'-ic', '--indent-comments', '--ic',
help='If true, comment lines will be indented possibly '
'messing with any deliberate comment layout', action='store_true')
parser.add_argument(
'-uni', '--uniform', '-uniform', '--uni',
        help='Dictates whether the if-clause and else-clause of an if-like '
'block should have the same indent level.',
action='store_true')
parser.add_argument(
'-parallel', '--parallel',
help='Process the given files in parallel',
action='store_true')
return parser
def parse_args(arguments=None):
""" Reads command-line arguments
>>> parse_args('--indent-comments')
"""
if arguments is None:
arguments = sys.argv[1:]
if isinstance(arguments, str):
arguments = arguments.split()
if isinstance(arguments, argparse.Namespace):
return arguments
parser = create_args_parser()
args = parser.parse_args(arguments)
args.dialect = args.dialect.lower()
if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
parser.error("`{0}' is not a recognized dialect".format(args.dialect))
args.backup_dir = os.path.expanduser(args.backup_dir)
if not os.path.exists(args.backup_dir):
parser.error("Directory `{0}' does not exist".format(args.backup_dir))
if len(args.files) > 1 and args.output_file:
parser.error('Cannot use the -o flag when more than one file is specified')
if not args.files:
# Indentation from standard input
if args.modify and not args.output_file:
args.modify = False
args.backup = False
args.warning = False
if args.output_diff:
# If someone requests a diff we assume they don't want the file to be
# modified
args.modify = False
return args
def read_file(fname):
""" read_file(fname : str) -> str
>>> read_file(r'C:\\mine\\test.lisp')
r'(print "No, no, there\'s \\r\\nlife in him!. ")\\r\\n\\r\\n'
The file is read in binary mode in order to preserve original line endings.
Line ending Binary mode Text mode
CRLF CRLF LF
CR CR LF
"""
assert os.path.exists(fname), "\n--%s-- Warning: File `%s' does not exist..." \
% (current_time(), fname)
with open(fname, 'rb') as fp:
return fp.read().decode('utf-8')
def current_time():
""" current_time() -> str
>>> current_time()
14:28:04
Returns the current local time in 24 clock system.
"""
return time.strftime('%X', (time.localtime()))
def backup_source_file(fname, args=None):
""" backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file.
"""
args = parse_args(args)
backup_dir = args.backup_dir
assert os.path.exists(fname), \
("\n--%s-- Warning: File `%s' does not exist..." % (current_time(), fname))
assert os.path.exists(os.path.abspath(backup_dir)), \
("\n--%s-- Warning: Directory `%s' does not exist..." % (current_time(), fname))
backup_name = backup_dir + os.sep + os.path.split(fname)[1] + args.backup_suffix
try:
shutil.copyfile(fname, backup_name)
except IOError:
message = "\n--%s-- Warning: Couldn't backup the file `%s' in `%s', check if you have enough permissions. "
tpl = (current_time(), fname, backup_dir)
sys.stderr.write(message % tpl)
def md5sum(content):
""" md5sum(content : str) -> str
>>> md5sum('Keyboard not found!! Press F1 to continue...')
'ad98cde09016d2e99a726966a4291acf'
Returns a checksum to be used to determine whether the file has changed.
A simple textual comparison can still do the work
"""
return hashlib.md5(content).hexdigest()
def find_line_ending(string):
""" find_line_ending(string : str) -> str
>>> find_line_ending('Elementary my dear Watson. \\r')
'\\r'
Find the line ending in the file so that we can try to preserve it.
"""
if CRLF in string:
return CRLF
if CR in string:
return CR
return LF
@lru_cache(maxsize=None)
def trim(string):
""" trim(string : str) -> str
Uses every usefull hack to try and reduce extra whitespace without
messing with character literals
"""
# Trailing whitespace
string = re.sub('[ \t]*$', '', string)
# turn '(print(+ 1 1))' to '(print (+ 1 1))'
    string = re.sub(r'''([^\\(\[, {@~`'#^])(\(|\[|{)''', r'\1 \2', string, flags=re.X)
# turn ')(' to ') ('
string = re.sub(r'(\)|\]|})(\[|\(|{)', r'\1 \2', string)
# Remove any space before closing brackets '(print 12 )' ==> '(print 12)'
string = re.sub('[ \t]*(\)|\]|})', r'\1', string)
# remove extra whitespace "(print 'this)" ==> "(print 'this)"
string = re.sub('[ \t]{2,}', ' ', string)
# turn ') ) ) ' into '))) '
string = re.sub(r'(\))[ \t]*(?=(\)))', r'\1', string)
string = re.sub(r'(\])[ \t]*(?=(\]))', r'\1', string)
string = re.sub(r'(})[ \t]*(?=(}))', r'\1', string)
# turn '( ( ( ' into '((( '
string = re.sub(r'(\()[ \t]*(?=(\())', r'\1', string)
string = re.sub(r'(\[)[ \t]*(?=(\[))', r'\1', string)
string = re.sub(r'({)[ \t]*(?=({))', r'\1', string)
# remove leading whitespace ' print' ==> 'print'
string = re.sub('^[ \t]*', '', string)
# Remove space between quote and opening bracket, "' (1 2 3)" ==> "'(1 2 3)"
string = re.sub("('|`)[ \t]+(\(|\[|{)", r'\1\2', string)
return string
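# Illustrative note (added): e.g. trim('(print  ( +  1 1 ) )') collapses the
# stray whitespace to '(print ( + 1 1))'.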
def find_trim_limit(string, args=None):
""" find_trim_limit(string : str) -> int
>>> find_trim_limit(r'(list #\; #\")')
14
>>> find_trim_limit(r'(list ; ")')
6
>>> find_trim_limit(r'(list " ;)')
7
The function attempts to identify upto which point we are supposed to trim
so that we don't mess with strings or any aligned comments.
It does this by comparing the positions of semicolons and double
quotes. It doesn't consider the multiline comment marker. If your
code uses multiline comments(#| ... |#), you'll have to use --no-compact mode
"""
args = parse_args(args)
# Find position of the first unescaped semi colon
comment_start = re.search(r'([^\\];)|(^;)', string)
# Find position of the first unescaped double quote
string_start = re.search(r'([^\\]")|(^")', string)
# Assign -1 if there's no match
limit = string_start.end() if string_start else -1
comment_start = comment_start.end() if comment_start else -1
if comment_start != -1:
# If a semi colon is found, include all the whitespace before it to preserve
# any aligned comments
comment_start = re.search('[ \t]*;', string).start()
if args.dialect == 'newlisp':
# Find out which string type comes first(normal, tag or brace strings)
brace_string_start = re.search('{', string)
tag_string_start = re.search('\[text\]', string)
brace_string_start = brace_string_start.end() if brace_string_start else -1
tag_string_start = tag_string_start.end() if tag_string_start else -1
pos_lst = [limit, brace_string_start, tag_string_start]
pos_lst = [x for x in pos_lst if x != -1]
if pos_lst:
limit = min(pos_lst)
if comment_start != -1 and limit != -1:
if comment_start < limit:
# If the semicolon comes before the comma, it means the string has been
# commented out
limit = comment_start
elif comment_start != -1 and limit == -1:
# If there's a semicolon but no quote, use the semicolon position as the
# limit
limit = comment_start
elif limit == -1:
# If neither a semicolon nor a double quote has been found, use the length
# of the string as the limit
limit = len(string)
return limit
@lru_cache(maxsize=None)
def is_macro_name(func_name, dialect):
""" is_macro_name(func_name : str, dialect : str) -> bool
    >>> is_macro_name('yacc:define-parser', 'lisp')
True
Tests if a word is a macro using the language's/dialect's convention,
e.g macros in Lisp usually start with 'def' and 'with' in Scheme. Saves
the effort of finding all the macros in Lisp/Scheme/Clojure/newLISP and storing
them in a list.
"""
if not func_name:
return False
if dialect == 'lisp':
return re.search('^(macro|def|do|with-)', func_name, re.I)
if dialect == 'scheme':
return re.search('^(call-|def|with-)', func_name)
if dialect == 'clojure':
return re.search('^(def|with)', func_name)
if dialect == 'newlisp':
return re.search('^(macro|def)', func_name)
return False
@lru_cache(maxsize=None)
def split_preserve(string, sep):
""" split_preserve(string : str, sep : str) -> [str]
>>> split_preserve('''
"My dear Holmes, " said I, "this is too much. You would certainly
have been burned, had you lived a few centuries ago.
''', '\\n')
['\\n',
' "My dear Holmes, " said I, "this is too much. You would certainly\\n',
' have been burned, had you lived a few centuries ago.\\n',
' ']
Splits the string and sticks the separator back to every string in the list.
"""
# split the whole string into a list so that you can iterate line by line.
str_list = string.split(sep)
if str_list[-1] == '':
# If you split 'this\nthat\n' you get ['this', 'that', ''] if
# you add newlines to every string in the list you get
# ['this\n', 'that\n', '\n']. You've just added
# another newline at the end of the file.
del str_list[-1]
str_list = [x + sep for x in str_list]
else:
# ['this', 'that'] will become ['this\n', 'that\n'] when
# mapped. A newline has been added to the file. We don't want
# this, so we strip it below.
str_list = [x + sep for x in str_list]
str_list[-1] = str_list[-1].rstrip(sep)
return str_list
@lru_cache(maxsize=None)
def all_whitespace(string):
""" all_whitespace(string : str) -> bool
>>> all_whitespace(' ')
True
Returns True if a string has only whitespace.
"""
return re.search('^[ \t]*(\r|\n|$)', string)
def detabify(text, args):
""" tabify(text : str, args : argparse.Namespace|str) -> str
Expands tabs
"""
args = parse_args(args)
if args.tab_size < 1:
return text.expandtabs(4)
return text.expandtabs(args.tab_size)
def tabify(text, args):
""" tabify(text : str, args : argparse.Namespace|str) -> str
>>> tabify(' (println "hello world")', '--tab=3')
'\t\t (println "hello world")'
Replace spaces with tabs
"""
args = parse_args(args)
if args.tab_size < 1:
return text
tab_equiv = ' ' * args.tab_size
return text.replace(tab_equiv, '\t')
def pad_leading_whitespace(string, zero_level, blist, args=None):
""" pad_leading_whitespace(string : str, current_level : int,
zero_level : int) -> str
>>> pad_leading_whitespace("(print 'Yello)")
" (print 'Yello)"
Takes a string and indents it using the current indentation level
and the zero level.
"""
args = parse_args(args)
if args.compact:
# if compact mode is on, split the string into two, trim the first
# position and merge the two portions.
trim_limit = find_trim_limit(string, args)
comment_line = re.search('^[ \t]*;', string, re.M)
if comment_line and args.indent_comments:
trim_limit = comment_line.end()
substr1 = string[0:trim_limit]
substr2 = string[trim_limit:]
substr1 = trim(substr1)
string = substr1 + substr2
else:
# If in nocompact mode, remove leading spaces only
string = re.sub('^[ \t]+', '', string, count=0)
indent_level = zero_level
if blist:
indent_level = blist[-1]['indent_level']
padding = ' ' * indent_level
padding = tabify(padding, args)
return padding + string, indent_level
def indent_line(zerolevel, bracket_list, line, in_comment, in_symbol_region,
args=None):
""" indent_line(zerolevel : int, bracket_list : list, line : str, in_comment : bool,
in_symbol_region : bool, args : string|list)
Most important function in the indentation process. It uses the bracket
locations stored in the list to indent the line.
"""
args = parse_args(args)
comment_line = re.search('^[ \t]*;', line, re.M)
if args.indent_comments:
# We are allowed to indent comment lines
comment_line = False
if not args.compact and bracket_list == [] and not in_comment:
# If nocompact mode is on and there are no unclosed blocks, try to
# find the zero level by simply counting spaces before a line that
# is not empty or has a comment
_line = detabify(line, args)
leading_spaces = re.search('^[ \t]+[^; )\n\r]', _line)
if leading_spaces:
# NOTE: If you don't subtract one here, the zero level will increase
# every time you indent the file because the character at the end of
# the regex is part of the capture.
zerolevel = leading_spaces.end() - 1
else:
zerolevel = 0
if in_symbol_region:
# No processing done in strings and comments
return zerolevel, line, 0
if not comment_line and not all_whitespace(line):
# If this is not a comment line indent the line.
# If the list is empty, then the current_level defaults
# to zero
curr_line, current_level = pad_leading_whitespace(line, zerolevel,
bracket_list, args)
return zerolevel, curr_line, current_level
return zerolevel, line, 0
# ---------------------------------------------------------------------------------
# GLOBAL CONSTANTS::
CR = '\r'
LF = '\n'
CRLF = CR + LF
KEYWORD0 = 0 # Non-keyword
KEYWORD1 = 1 # Indents uniformly by 1 unit
KEYWORD2 = 2 # Distinguishes subforms
KEYWORD3 = 3 # Indents uniformly by 2 units
KEYWORD4 = 4 # A 1-keyword used mostly for defining local functions e.g flets
# Keywords that indent by two spaces
SCHEME_KEYWORDS = \
['define', 'local-odd?', 'when', 'begin', 'case',
'local-even?', 'do', 'call-with-bytevector-output-port',
'call-with-input-file', 'call-with-port',
'call-with-current-continuation', 'open-file-input-port',
'call-with-port', 'call-with-values', 'call-with-output-file',
'call-with-string-output-port', 'define-syntax', 'if', 'let', 'let*',
'library', 'unless', 'lambda', 'syntax-rules', 'syntax-case',
'let-syntax', 'letrec*', 'letrec', 'let-values', 'let*-values',
'with-exception-handler', 'with-input-from-file',
'with-interrupts-disabled', 'with-input-from-string',
'with-output-to-file', 'with-input-from-port',
'with-output-to-string', 'with-source-path', 'with-syntax',
'with-implicit',
'with-error-handler', 'module', 'parameterize']
CLOJURE_KEYWORDS = \
['defn', 'fn', 'dorun', 'doseq', 'loop', 'when',
'let', 'defmacro', 'binding', 'doto', 'ns', ':import', 'defstruct',
'condp', 'comment', 'when', 'when-let', '->', '->>',
'extend-type', 'reify', 'binding', 'when-not', 'proxy', 'dotimes',
'try', 'finally', 'for', 'letfn', 'catch', 'iterate', 'while',
'with-local-vars', 'locking', 'defmulti', 'defmethod', 'extend'
]
LISP_KEYWORDS = \
[':implementation', ':method', 'case', 'defclass',
'defconstant', 'defgeneric', 'defimplementation',
'define-condition', 'define-implementation-package',
'definterface', 'defmacro', 'defmethod', 'defpackage',
'defproject', 'deftype', 'defun', 'defvar', 'do-external-symbols',
'dolist', 'dotimes', 'ecase', 'etypecase', 'flet', 'handler-bind',
'if', 'lambda', 'let', 'let*', 'print-unreadable-object',
'macrolet', 'defparameter', 'with-slots', 'typecase', 'loop', 'when', 'prog1',
'unless', 'with-open-file', 'with-output-to-string', 'with-input-from-string',
'block', 'handler-case', 'defstruct', 'eval-when', 'tagbody', 'ignore-errors',
'labels', 'multiple-value-bind', 'progn', 'unwind-protect', 'collect'
]
NEWLISP_KEYWORDS = \
['while', 'if', 'case', 'dotimes', 'define', 'dolist', 'catch',
'throw', 'lambda', 'lambda-macro', 'when', 'unless', 'letex', 'begin',
'dostring', 'let', 'letn', 'doargs', 'define-macro', 'until', 'do-until',
'do-while', 'for-all', 'find-all', 'for'
]
# The 'if' and 'else' parts of an if block should have different indent levels so
# that they can stand out, since there's no else keyword in Lisp/Scheme to make
# this explicit. The list IF_LIKE helps us track these keywords.
IF_LIKE = ['if']
@lru_cache(maxsize=None)
def parse_rc_json():
""" Reads the json configuration file(.yasirc.json), parses it and returns the
dictionary
"""
fname = '.yasirc.json'
path = os.path.expanduser('~/' + fname)
if os.path.exists(fname):
path = os.path.abspath(fname)
elif not os.path.exists(path):
path = ''
content = ''
if path:
with open(path) as f:
content = f.read()
ret = {}
if content:
ret = json.loads(content)
return collections.defaultdict(dict, ret)
def assign_indent_numbers(lst, inum, dic):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic
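# e.g. assign_indent_numbers(['flet', 'labels'], KEYWORD4, keywords) sets
# keywords['flet'] and keywords['labels'] to KEYWORD4.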
def add_keywords(args):
""" add_keywords(dialect : str) -> [str, str]
Takes a lisp dialect name and returns a list of keywords that increase
indentation by two spaces and those that can be one-armed like 'if'
"""
dialect = args.dialect
keywords = collections.defaultdict(int)
two_spacers = []
two_armed = IF_LIKE
local_binders = []
if dialect == 'lisp': # Lisp
two_spacers = LISP_KEYWORDS
two_armed += ['multiple-value-bind', 'destructuring-bind', 'do', 'do*']
local_binders += ['flet', 'macrolet', 'labels']
elif dialect == 'scheme': # Scheme
two_spacers = SCHEME_KEYWORDS
two_armed += ['with-slots', 'do', 'do*']
local_binders += []
elif dialect == 'clojure': # Clojure
two_spacers = CLOJURE_KEYWORDS
two_armed += []
local_binders += ['letfn']
elif dialect == 'newlisp': # newLISP
two_spacers = NEWLISP_KEYWORDS
two_armed += []
local_binders += []
elif dialect == 'all':
two_spacers = LISP_KEYWORDS + SCHEME_KEYWORDS + CLOJURE_KEYWORDS + \
NEWLISP_KEYWORDS
keywords = assign_indent_numbers(two_spacers, KEYWORD1, keywords)
keywords = assign_indent_numbers(two_armed, KEYWORD2, keywords)
keywords = assign_indent_numbers(local_binders, KEYWORD4, keywords)
if args.read_rc:
rc_keywords = parse_rc_json()
keywords.update(rc_keywords[dialect])
return keywords
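# For example, with args.dialect == 'lisp' the returned table maps 'defun' to
# KEYWORD1, 'if' to KEYWORD2 and 'flet' to KEYWORD4.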
# ---------------------------------------------------------------------------------
def find_first_arg_pos(bracket_offset, curr_line, args=None):
""" find_first_arg_pos(bracket_offset : int, curr_line : str) -> [int, int]
Arguments:
bracket_offset - The position of the bracket in the current line e.g
" ( list 'timey 'wimey )" --> 4
" ( list 'timey 'wimey )" --> 1
"( list 'timey 'wimey )" --> 0
>>> find_first_arg_pos(0, "( list 'one-sheep 'two-sheep )")
[11, 5]
Returns the position of the first argument to the function relative to the
position of the opening bracket and the number of spaces between the opening
bracket and the function name.
    The two values will be used to align the other arguments in the subsequent lines.
"""
args = parse_args(args)
spaces_before_func = 0
subline = curr_line[bracket_offset + 1:]
if re.search('^[ \t]*($|\r)', subline):
# whitespace extending to the end of the line means there's no
# function in this line. The indentation level defaults to one.
arg_pos = 1
else:
if bracket_offset != len(curr_line) - 1 and curr_line[bracket_offset + 1] == ' ':
# control reaches here if we are not at the end of the line
# and whitespace follows. We must first find the position of the
# function and then the arguments position
match = re.search(' +[^)\]]| \)', subline) # Find the first non whitespace/bracket character
if match:
spaces_before_func = match.end() - match.start() - 1
end = match.end()
else:
end = 0
# Then use the end of the whitespace group as the first argument
arg_pos = re.search(' +([^)])|( *(\(|\[))', subline[end:])
if arg_pos:
arg_pos = arg_pos.end() + spaces_before_func + 1
else:
arg_pos = spaces_before_func + 1
if re.match('^[ \t]*(#\||;|$|\r)',
subline[(end - 1 + subline[end - 1:].find(' ')):]):
                # But, if a comment is found after the function name, the
# indent level becomes one
arg_pos = spaces_before_func + args.default_indent
else:
# If there's no space after the bracket, simply find the end of the
# whitespace group
match = re.search(' +([^)}\n\r])|( *(\(|\[|{))', subline)
if match: # found the argument
arg_pos = match.end()
else: # Either empty list or argument is in the next line
arg_pos = 1
if re.match('^[\t ]*(;|$|\r)', subline[subline.find(' '):]):
# Again if a comment is found after the function name, the
# indent level defaults to 1
arg_pos = spaces_before_func + args.default_indent
return [arg_pos, spaces_before_func]
def _pop_from_list(bracket, lst, line, real_pos, offset, msg_stack):
""" _pop_from_list(char : str, lst : [str], line : str,
real_pos : int, offset : int)
The function is called when a closing bracket is encountered. The function
simply pops the last pushed item and issues a warning if an error is
encountered.
"""
# Try to spot a case when a square bracket is used to close a round bracket
# block
if bracket == ']':
correct_closer = '['
elif bracket == ')':
correct_closer = '('
else:
correct_closer = '{'
if lst != []:
popped = lst.pop()
popped_char = popped['character']
popped_pos = popped['line_number']
popped_offset = popped['bracket_pos']
        if popped_char != correct_closer:
message = "Bracket `%s' does not match `%s' at (%d, %d)"
message = message % (bracket, popped_char, popped_pos, popped_offset)
warning_info = {
'msg': message,
'line': line,
'column': real_pos
}
msg_stack.append(warning_info)
else:
# If the list is empty and a closing bracket is found, it means we have
# excess brackets. That warning is issued here. The coordinates used
# will be slightly or largely off target depending on how much your
# code was modified when used with compact mode
message = "Unmatched closing bracket `%s'" % bracket
warning_info = {
'msg': message,
'line': line,
'column': offset + 1
}
msg_stack.append(warning_info)
return lst
def _push_to_list(lst, func_name, char, line, offset,
first_arg_pos, first_item, in_list_literal,
lead_spaces, args=None):
""" _push_to_list(lst : [str], func_name : str, char : str, line : int, offset : int,
first_arg_pos :int , first_item : int, in_list_literal : bool,
lead_spaces : int, args : str)
Called when an opening bracket is encountered. A hash containing the
necessary data to pin point errors and the indentation level is stored in
the list and the list returned.
"""
args = parse_args(args)
keywords = add_keywords(args)
pos_hash = {'character': char,
'line_number': line,
'bracket_pos': offset,
'indent_level': offset + first_arg_pos, # the default value, e.g in normal function
'func_name': func_name,
'spaces': 0}
is_macro = is_macro_name(func_name, args.dialect)
two_spacer = is_macro or keywords[func_name] in [KEYWORD1, KEYWORD4]
if in_list_literal or char == '{' or (char == '[' and args.dialect == 'clojure'):
# found quoted list or clojure hashmap/vector
pos_hash['indent_level'] = first_item
elif keywords[func_name] == KEYWORD2:
# We only make the if-clause stand out if not in uniform mode
pos_hash['indent_level'] = lead_spaces + ((offset + args.indent_size * 2)
if not args.uniform
else (offset + args.indent_size))
elif func_name != '':
if two_spacer:
pos_hash['indent_level'] = lead_spaces + offset + args.indent_size
elif keywords[func_name] == KEYWORD3:
pos_hash['indent_level'] = lead_spaces + offset + (2 * args.indent_size)
lst.append(pos_hash)
try:
# A hack to make flets and labels in Lisp not indent like
# functions. The 'labels' indentation may not be exactly
# perfect.
parent_func = lst[-3]['func_name']
# Make 'special' indentation occur only in a Clojure binding block([]) for
# letfns
non_bind_block = args.dialect == 'clojure' and lst[-2]['character'] != '['
if keywords[parent_func] == KEYWORD4 and not non_bind_block:
lst[-1]['indent_level'] = offset + args.indent_size
except IndexError:
pass
return lst
def indent_code(original_code, args=None):
""" indented_code(string : str, fname : str) -> [...]
Arguments:
fpath: Simply used in formatting the warning messages
>>> indent_code("(print\n'Hello)")
{'bracket_locations': [],
'comment_locations': [],
'in_comment': 0,
'in_newlisp_tag_string': False,
'in_string': False,
'in_symbol_with_space': False,
'indented_code': ['(print\n', " 'Hello)"],
'last_quote_location': (),
'last_symbol_location': (),
'message_stack': [],
'newlisp_brace_locations': [],
'original_code': ['(print\n', "'Hello)"],
'first_tag_string': ()}
    The 'indented_code' entry holds the indented lines.
"""
args = parse_args(args)
keywords = add_keywords(args)
# Safeguards against processing brackets inside strings
in_string = False
# newLISP use curly brackets as a syntax for multiline strings
# this variable here tries to keep track of that
in_newlisp_string = 0
in_newlisp_tag_string = False
newlisp_brace_locations = []
first_tag_string = ()
# zero_level helps us get the same results as Sitaram's indenter when in
# --no-compact mode.
zero_level = 0
# The two variables prevent formatting comment regions or symbols with whitespace
in_comment = 0
in_symbol_with_space = False
comment_locations = []
last_symbol_location = ()
# A in_symbol_region is the region between pipes(| |) or in strings. This
# includes the comment region. This region is not to be messed with.
in_symbol_region = in_string or in_comment or in_symbol_with_space or \
in_newlisp_string or in_newlisp_tag_string
# we need to know the line number in order to issue almost accurate messages about
# unclosed brackets and string
line_number = 1
# Stores the last position a quote was encountered so that in case there are
# any unclosed strings, we can pinpoint them
last_quote_location = ()
line_ending = find_line_ending(original_code)
code_lines = split_preserve(original_code, line_ending)
indented_code = []
bracket_locations = []
# List of warnings from errors in the code
message_stack = []
for line in code_lines:
escaped = False
curr_line = line
# Get the indent level and the indented line
zero_level, curr_line, indent_level = indent_line(zero_level,
bracket_locations,
line, in_comment,
in_symbol_region, args)
# Build up the indented string.
indented_code.append(curr_line)
regex = '^[ \t]*'
lead_spaces = re.findall(regex, curr_line)
if lead_spaces:
curr_line = re.sub(regex, detabify(lead_spaces[0], args), curr_line)
offset = 0
for curr_char in curr_line:
next_char = curr_line[offset + 1:offset + 2]
prev_char = curr_line[offset - 1:offset]
substr = curr_line[offset + 1:] # slice to the end
if escaped:
# Move to the next character if the current one has been escaped
escaped = False
offset += 1
continue
if curr_char == '\\' and not in_newlisp_string and not in_newlisp_tag_string:
# the next character has been escaped
escaped = True
if (curr_char == ';' or (curr_char == '#' and args.dialect == 'newlisp'))\
and not in_symbol_region and not \
(prev_char == '#' and args.dialect == 'scheme'):
# a comment has been found, go to the next line
# A sharp sign(#) before a semi-colon in Scheme is used to
# comment out sections of code. We don't treat it as a comment
break
# ----------------------------------------------------------
# Comments are dealt with here. Clojure and newLISP don't have Lisp
# style multiline comments so don't include them.
if args.dialect not in ['clojure', 'newlisp'] and curr_char == '|' \
and not in_string:
if prev_char == '#' and not in_symbol_with_space:
comment_locations.append((line_number, offset))
in_comment += 1
elif in_comment and next_char == '#':
in_comment -= 1
comment_locations.pop()
elif not in_comment:
if in_symbol_with_space:
last_symbol_location = ()
in_symbol_with_space = False
else:
last_symbol_location = (line_number, offset)
in_symbol_with_space = True
# ----------------------------------------------------------
# Strings are dealt with here only if we are not in a comment
if not (in_symbol_with_space or in_comment or in_newlisp_tag_string):
if curr_char == '"':
last_quote_location = (line_number, offset)
in_string = not bool(in_string)
if args.dialect == 'newlisp' and not in_string:
# We handle newLISP's multiline(brace) string here. Brace
# strings can nest
if curr_char == '{':
newlisp_brace_locations.append((line_number, offset))
in_newlisp_string += 1
elif curr_char == '}':
if newlisp_brace_locations:
newlisp_brace_locations.pop()
else:
message = "Attempt to close a non-existent newLISP string"
warning_info = {
'msg': message,
'line': line_number,
'column': offset
}
message_stack.append(warning_info)
in_newlisp_string -= 1
if curr_char == '[' and args.dialect == 'newlisp' and not \
(in_newlisp_string or in_string):
# We have to handle tag strings in newLISP here.
if re.match('\[text\]', curr_line[offset:offset + 7]):
in_newlisp_tag_string = True
if first_tag_string == ():
first_tag_string = (line_number, offset)
elif re.match('\[/text\]', curr_line[offset:offset + 7]):
in_newlisp_tag_string = False
first_tag_string = ()
in_symbol_region = in_string or in_comment or in_symbol_with_space \
or in_newlisp_string or in_newlisp_tag_string
if in_symbol_region:
# move on if we are in a string, a symbol with a space or a comment
# altogether known as the symbol region
offset += 1
continue
# Finds the real position of a bracket to be used in pinpointing where
# the unclosed bracket is. The real position is different from the offset
# because current offset is the position of the bracket in the
# trimmed string not the original.
real_position = (offset - zero_level) + \
len(re.findall('^[ \t]*', line)[0]) - indent_level
if curr_char in ['(', '[', '{']:
if curr_char in ['[', '{'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets should not contribute to
# the indentation in CL and newLISP
offset += 1
continue
first_arg_pos, spaces_before_func = \
find_first_arg_pos(offset, curr_line, args)
func_name = substr[0:first_arg_pos - 1].strip(')]\t\n\r ').lower()
in_list_literal = False
if re.search("[^#]('|`|#)([ \t]*\(|\[)($|\r)", curr_line[0:offset + 1]):
in_list_literal = True
if re.search('^[^ \t]+[ \t]*($|\r)', substr):
# The function is the last symbol/form in the line
func_name = substr.strip(')]\t\n\r ').lower()
if in_list_literal:
                    # Clear the function name inside a list literal so that a
                    # keyword that merely appears in quoted data does not
                    # affect the indentation.
func_name = ''
if func_name in ['define-macro', 'defmacro']:
# Macro names are part of two space indenters.
# This part tries to find the name so that it is not indented
# like a function the next time it's used.
end_of_space = re.search('^[ \t]*', substr).end()
substr = substr[end_of_space:]
substr = substr[re.search('[ \t]*', substr).start():].strip()
                    macro_name = substr[:substr.find(' ')]  # macro name is delimited by whitespace
if macro_name != '':
keywords[macro_name] = KEYWORD1
# first_item stores the position of the first item in the literal list
# it's necessary so that we don't assume that the first item is always
# after the opening bracket.
first_item = re.search('[ \t]*', curr_line[offset + 1:]).end() + offset + 1
bracket_locations = _push_to_list(bracket_locations[:], func_name,
curr_char, line_number, offset,
first_arg_pos, first_item,
in_list_literal,
spaces_before_func, args)
elif curr_char in [']', ')', '}']:
if curr_char in [']', '}'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets should not contribute to
# the indentation in CL and newLISP
offset += 1
continue
bracket_locations = _pop_from_list(curr_char, bracket_locations[:],
line_number, real_position,
offset, message_stack)
if bracket_locations and curr_char in [' ', '\t'] and \
keywords[bracket_locations[-1]['func_name']] == KEYWORD2:
# This part changes the indentation level of a then clause so that
# we can achieve something like:
# (if (= this that)
# 'then-form
# 'else-form)
# This is done by keeping track of the number of spaces found. If
                # two spaces are found it means, for example, that we have just
                # passed the then-form and hence should decrease the indentation
                # level by 2. (I shamelessly copied this algorithm from Dorai's
# indenter)
if prev_char not in [' ', '\t', ''] or not \
re.search('^[ \t]*(;|#\||$|\r)', curr_line):
# The level shouldn't be decreased if the line is a comment
# line. The regex above takes care of that.
bracket_locations[-1]['spaces'] += 1
if bracket_locations[-1]['spaces'] == 2:
bracket_locations[-1]['indent_level'] -= \
0 if args.uniform else args.indent_size
# some dummy value to prevent control from reaching here again
bracket_locations[-1]['spaces'] = 999
offset += 1
line_number += 1
res = {
'message_stack': message_stack,
'first_tag_string': first_tag_string,
'in_newlisp_tag_string': in_newlisp_tag_string,
'last_symbol_location': last_symbol_location,
'comment_locations': comment_locations,
'newlisp_brace_locations': newlisp_brace_locations,
'in_string': in_string,
'in_comment': in_comment,
'in_symbol_with_space': in_symbol_with_space,
'bracket_locations': bracket_locations,
'last_quote_location': last_quote_location,
'original_code': code_lines,
'indented_code': indented_code
}
return res
def colour_diff(diff_lines):
""" colour_diff(diff_lines : lst)
Print diff text to terminal in color
"""
try:
import colorama
except ImportError:
# colorama is not available, print plain diff
print(''.join(list(diff_lines)))
return
colorama.init()
def p_green(text):
""" Print added line in green """
print(colorama.Fore.GREEN + text + colorama.Fore.WHITE, end='')
def p_yellow(text):
""" Print diff section header in yellow """
print(colorama.Fore.YELLOW + text + colorama.Fore.WHITE, end='')
def p_red(text):
""" Print removed line in red """
print(colorama.Fore.RED + text + colorama.Fore.WHITE, end='')
section = re.compile('@@\s+-\d\d,\d\d\s\+\d\d,\d\d\s+@@')
for line in diff_lines:
if line.startswith('-'):
p_red(line)
elif line.startswith('+'):
p_green(line)
elif section.search(line):
p_yellow(line)
else:
print(line, end='')
def _post_indentation(res, args=None, fpath=''):
""" _post_indentation(res : dict):
Called after the string has been indented appropriately.
It takes care of writing the file and checking for unclosed strings
or comments.
"""
fname = os.path.basename(fpath)
args = parse_args(args)
for msg in res['message_stack']:
if args.warning:
if args.files:
msg['fname'] = fname
sys.stderr.write('\n{fname}:{line}:{column}: {msg}'.format(**msg))
else:
# Input was passed through stdin
sys.stderr.write('\n:{line}:{column}: {msg}'.format(**msg))
if res['bracket_locations']:
# If the bracket_locations list is not empty it means that there are some
# brackets(opening) that haven't been closed.
for bracket in res['bracket_locations']:
line = bracket['line_number']
column = bracket['bracket_pos']
character = bracket['character']
# The bracket_locations are not very accurate. The warning might be
# misleading because it considers round and square brackets to be
# the same.
message = "\n%s:%d:%d: Unmatched `%s'"
if args.warning:
sys.stderr.write(message % (fname, line, column, character))
if res['newlisp_brace_locations']:
for brace in res['newlisp_brace_locations']:
message = "\n%s:%d:%d: Unclosed newLISP brace string"
if args.warning:
sys.stderr.write(message % (fname, brace[0], brace[1]))
if res['comment_locations']:
for comment in res['comment_locations']:
message = "\n%s:%d:%d: Unclosed multiline comment"
tpl = (fname,) + comment
if args.warning:
sys.stderr.write(message % tpl)
if res['last_symbol_location']:
message = "\n%s:%d:%d: Unclosed symbol"
tpl = (fname,) + res['last_symbol_location']
if args.warning:
sys.stderr.write(message % tpl)
if res['in_string']:
message = "\n%s:%d:%d: String extends to end-of-file"
tpl = (fname,) + res['last_quote_location']
if args.warning:
sys.stderr.write(message % tpl)
if res['in_newlisp_tag_string']:
message = "\n%s:%d:%d: Tag string extends to end-of-file"
tpl = (fname,) + res['first_tag_string']
if args.warning:
sys.stderr.write(message % tpl)
output_file = args.output_file
if not output_file:
output_file = fpath
indented_code = res['indented_code']
indent_result = ''.join(indented_code)
if indented_code == res['original_code'] and args.files:
message = "File '%s' has already been formatted. Leaving it unchanged...\n"
sys.stderr.write(message % fname)
if output_file != fpath:
with open(output_file, 'wb') as indented_file:
indented_file.write(indent_result.encode('utf8'))
else:
if args.output_diff:
diff = difflib.unified_diff(res['original_code'], indented_code, n=5)
if args.colour_diff:
colour_diff(diff)
else:
print(''.join(list(diff)))
elif args.output:
print(indent_result, end='')
if args.modify:
# write in binary mode to preserve the original line ending
with open(output_file, 'wb') as indented_file:
indented_file.write(indent_result.encode('utf8'))
def indent_files(arguments):
""" indent_files(arguments)
Note: if the parallel option is provided, the files will be read and processed
in parallel
"""
args = parse_args(arguments)
if not args.files:
# Indent from stdin
code = sys.stdin.read()
indent_result = indent_code(code, args)
_post_indentation(indent_result)
if args.parallel:
import multiprocessing
pool = multiprocessing.Pool(multiprocessing.cpu_count())
pool.starmap(indent_file, [(fname, args) for fname in args.files])
else:
for fname in args.files:
indent_file(fname, args)
def indent_file(fname, args):
"""
indent_file(fname: string, args)
1. Create a backup of the source file(backup_source_file())
2. Read the file contents(read_file())
3. Indent the code(indent_code())
4. Write to the file or print the indented code(_post_indentation())
"""
args = parse_args(args)
fname = os.path.expanduser(fname)
code = read_file(fname)
if not args.dialect:
# Guess dialect from the file extensions if none is specified in the command
# line
if fname.endswith('.lisp'):
args.dialect = 'lisp'
elif fname.endswith('.lsp'):
args.dialect = 'newlisp'
elif re.search(".clj[sc]{0,1}$", fname):
args.dialect = 'clojure'
elif fname.endswith('.ss') or fname.endswith('.scm'):
args.dialect = 'scheme'
else:
args.dialect = 'all'
indent_result = indent_code(code, args)
if args.backup:
# Create a backup file in the specified directory
backup_source_file(fname, args)
_post_indentation(indent_result, fpath=fname)
def main():
""" Entry point """
indent_files(sys.argv[1:])
if __name__ == '__main__':
main()
|
[
"re.compile",
"multiprocessing.cpu_count",
"difflib.unified_diff",
"sys.stdin.read",
"colorama.init",
"re.search",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.split",
"time.localtime",
"os.path.expanduser",
"json.loads",
"hashlib.md5",
"re.match",
"sys.stderr.write",
"shutil.copyfile",
"backports.functools_lru_cache.lru_cache",
"re.sub",
"re.findall",
"os.getcwd",
"collections.defaultdict",
"os.path.basename",
"os.path.abspath"
] |
[((528, 551), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (537, 551), False, 'from backports.functools_lru_cache import lru_cache\n'), ((7924, 7947), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (7933, 7947), False, 'from backports.functools_lru_cache import lru_cache\n'), ((11670, 11693), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (11679, 11693), False, 'from backports.functools_lru_cache import lru_cache\n'), ((12515, 12538), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (12524, 12538), False, 'from backports.functools_lru_cache import lru_cache\n'), ((13844, 13867), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (13853, 13867), False, 'from backports.functools_lru_cache import lru_cache\n'), ((20756, 20779), 'backports.functools_lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (20765, 20779), False, 'from backports.functools_lru_cache import lru_cache\n'), ((630, 721), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Dialect-aware s-expression indenter"""', 'prog': '"""yasi"""'}), "(description='Dialect-aware s-expression indenter',\n prog='yasi')\n", (653, 721), False, 'import argparse\n'), ((4917, 4952), 'os.path.expanduser', 'os.path.expanduser', (['args.backup_dir'], {}), '(args.backup_dir)\n', (4935, 4952), False, 'import os\n'), ((5977, 5998), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (5991, 5998), False, 'import os\n'), ((6629, 6650), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (6643, 6650), False, 'import os\n'), ((8155, 8184), 're.sub', 're.sub', (['"""[ \t]*$"""', '""""""', 'string'], {}), "('[ \\t]*$', '', string)\n", (8161, 8184), False, 'import re\n'), ((8247, 8315), 're.sub', 're.sub', (['"""([^\\\\\\\\(\\\\[, {@~`\'#^])(\\\\(|\\\\[|{)"""', '"""\\\\1 \\\\2"""', 'string', 're.X'], {}), '("([^\\\\\\\\(\\\\[, {@~`\'#^])(\\\\(|\\\\[|{)", \'\\\\1 \\\\2\', string, re.X)\n', (8253, 8315), False, 'import re\n'), ((8354, 8405), 're.sub', 're.sub', (['"""(\\\\)|\\\\]|})(\\\\[|\\\\(|{)"""', '"""\\\\1 \\\\2"""', 'string'], {}), "('(\\\\)|\\\\]|})(\\\\[|\\\\(|{)', '\\\\1 \\\\2', string)\n", (8360, 8405), False, 'import re\n'), ((8495, 8537), 're.sub', 're.sub', (['"""[ \t]*(\\\\)|\\\\]|})"""', '"""\\\\1"""', 'string'], {}), "('[ \\t]*(\\\\)|\\\\]|})', '\\\\1', string)\n", (8501, 8537), False, 'import re\n'), ((8619, 8651), 're.sub', 're.sub', (['"""[ \t]{2,}"""', '""" """', 'string'], {}), "('[ \\t]{2,}', ' ', string)\n", (8625, 8651), False, 'import re\n'), ((8697, 8743), 're.sub', 're.sub', (['"""(\\\\))[ \\\\t]*(?=(\\\\)))"""', '"""\\\\1"""', 'string'], {}), "('(\\\\))[ \\\\t]*(?=(\\\\)))', '\\\\1', string)\n", (8703, 8743), False, 'import re\n'), ((8755, 8801), 're.sub', 're.sub', (['"""(\\\\])[ \\\\t]*(?=(\\\\]))"""', '"""\\\\1"""', 'string'], {}), "('(\\\\])[ \\\\t]*(?=(\\\\]))', '\\\\1', string)\n", (8761, 8801), False, 'import re\n'), ((8813, 8855), 're.sub', 're.sub', (['"""(})[ \\\\t]*(?=(}))"""', '"""\\\\1"""', 'string'], {}), "('(})[ \\\\t]*(?=(}))', '\\\\1', string)\n", (8819, 8855), False, 'import re\n'), ((8901, 8947), 're.sub', 're.sub', (['"""(\\\\()[ \\\\t]*(?=(\\\\())"""', '"""\\\\1"""', 'string'], {}), "('(\\\\()[ \\\\t]*(?=(\\\\())', '\\\\1', string)\n", (8907, 8947), 
False, 'import re\n'), ((8959, 9005), 're.sub', 're.sub', (['"""(\\\\[)[ \\\\t]*(?=(\\\\[))"""', '"""\\\\1"""', 'string'], {}), "('(\\\\[)[ \\\\t]*(?=(\\\\[))', '\\\\1', string)\n", (8965, 9005), False, 'import re\n'), ((9017, 9059), 're.sub', 're.sub', (['"""({)[ \\\\t]*(?=({))"""', '"""\\\\1"""', 'string'], {}), "('({)[ \\\\t]*(?=({))', '\\\\1', string)\n", (9023, 9059), False, 'import re\n'), ((9128, 9157), 're.sub', 're.sub', (['"""^[ \t]*"""', '""""""', 'string'], {}), "('^[ \\t]*', '', string)\n", (9134, 9157), False, 'import re\n'), ((9252, 9302), 're.sub', 're.sub', (['"""(\'|`)[ \t]+(\\\\(|\\\\[|{)"""', '"""\\\\1\\\\2"""', 'string'], {}), '("(\'|`)[ \\t]+(\\\\(|\\\\[|{)", \'\\\\1\\\\2\', string)\n', (9258, 9302), False, 'import re\n'), ((10022, 10058), 're.search', 're.search', (['"""([^\\\\\\\\];)|(^;)"""', 'string'], {}), "('([^\\\\\\\\];)|(^;)', string)\n", (10031, 10058), False, 'import re\n'), ((10133, 10169), 're.search', 're.search', (['"""([^\\\\\\\\]")|(^")"""', 'string'], {}), '(\'([^\\\\\\\\]")|(^")\', string)\n', (10142, 10169), False, 'import re\n'), ((14054, 14091), 're.search', 're.search', (["'^[ \\t]*(\\r|\\n|$)'", 'string'], {}), "('^[ \\t]*(\\r|\\n|$)', string)\n", (14063, 14091), False, 'import re\n'), ((16389, 16422), 're.search', 're.search', (['"""^[ \t]*;"""', 'line', 're.M'], {}), "('^[ \\t]*;', line, re.M)\n", (16398, 16422), False, 'import re\n'), ((20945, 20977), 'os.path.expanduser', 'os.path.expanduser', (["('~/' + fname)"], {}), "('~/' + fname)\n", (20963, 20977), False, 'import os\n'), ((20985, 21006), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (20999, 21006), False, 'import os\n'), ((21264, 21298), 'collections.defaultdict', 'collections.defaultdict', (['dict', 'ret'], {}), '(dict, ret)\n', (21287, 21298), False, 'import collections\n'), ((21750, 21778), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (21773, 21778), False, 'import collections\n'), ((23881, 23916), 're.search', 're.search', (["'^[ \\t]*($|\\r)'", 'subline'], {}), "('^[ \\t]*($|\\r)', subline)\n", (23890, 23916), False, 'import re\n'), ((43349, 43364), 'colorama.init', 'colorama.init', ([], {}), '()\n', (43362, 43364), False, 'import colorama\n'), ((43801, 43860), 're.compile', 're.compile', (['"""@@\\\\s+-\\\\d\\\\d,\\\\d\\\\d\\\\s\\\\+\\\\d\\\\d,\\\\d\\\\d\\\\s+@@"""'], {}), "('@@\\\\s+-\\\\d\\\\d,\\\\d\\\\d\\\\s\\\\+\\\\d\\\\d,\\\\d\\\\d\\\\s+@@')\n", (43811, 43860), False, 'import re\n'), ((44364, 44387), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (44380, 44387), False, 'import os\n'), ((48668, 48693), 'os.path.expanduser', 'os.path.expanduser', (['fname'], {}), '(fname)\n', (48686, 48693), False, 'import os\n'), ((4964, 4995), 'os.path.exists', 'os.path.exists', (['args.backup_dir'], {}), '(args.backup_dir)\n', (4978, 4995), False, 'import os\n'), ((6344, 6360), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6358, 6360), False, 'import time\n'), ((6764, 6791), 'os.path.abspath', 'os.path.abspath', (['backup_dir'], {}), '(backup_dir)\n', (6779, 6791), False, 'import os\n'), ((6987, 7022), 'shutil.copyfile', 'shutil.copyfile', (['fname', 'backup_name'], {}), '(fname, backup_name)\n', (7002, 7022), False, 'import shutil\n'), ((10673, 10695), 're.search', 're.search', (['"""{"""', 'string'], {}), "('{', string)\n", (10682, 10695), False, 'import re\n'), ((10723, 10754), 're.search', 're.search', (['"""\\\\[text\\\\]"""', 'string'], {}), "('\\\\[text\\\\]', string)\n", (10732, 10754), False, 
'import re\n'), ((12196, 12247), 're.search', 're.search', (['"""^(macro|def|do|with-)"""', 'func_name', 're.I'], {}), "('^(macro|def|do|with-)', func_name, re.I)\n", (12205, 12247), False, 'import re\n'), ((12291, 12333), 're.search', 're.search', (['"""^(call-|def|with-)"""', 'func_name'], {}), "('^(call-|def|with-)', func_name)\n", (12300, 12333), False, 'import re\n'), ((12378, 12413), 're.search', 're.search', (['"""^(def|with)"""', 'func_name'], {}), "('^(def|with)', func_name)\n", (12387, 12413), False, 'import re\n'), ((12458, 12494), 're.search', 're.search', (['"""^(macro|def)"""', 'func_name'], {}), "('^(macro|def)', func_name)\n", (12467, 12494), False, 'import re\n'), ((15338, 15373), 're.search', 're.search', (['"""^[ \t]*;"""', 'string', 're.M'], {}), "('^[ \\t]*;', string, re.M)\n", (15347, 15373), False, 'import re\n'), ((15698, 15736), 're.sub', 're.sub', (['"""^[ \t]+"""', '""""""', 'string'], {'count': '(0)'}), "('^[ \\t]+', '', string, count=0)\n", (15704, 15736), False, 'import re\n'), ((16849, 16886), 're.search', 're.search', (["'^[ \\t]+[^; )\\n\\r]'", '_line'], {}), "('^[ \\t]+[^; )\\n\\r]', _line)\n", (16858, 16886), False, 'import re\n'), ((21023, 21045), 'os.path.abspath', 'os.path.abspath', (['fname'], {}), '(fname)\n', (21038, 21045), False, 'import os\n'), ((21233, 21252), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (21243, 21252), False, 'import json\n'), ((32891, 32919), 're.findall', 're.findall', (['regex', 'curr_line'], {}), '(regex, curr_line)\n', (32901, 32919), False, 'import re\n'), ((46942, 46975), 'sys.stderr.write', 'sys.stderr.write', (['(message % fname)'], {}), '(message % fname)\n', (46958, 46975), False, 'import sys\n'), ((47943, 47959), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (47957, 47959), False, 'import sys\n'), ((3298, 3309), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3307, 3309), False, 'import os\n'), ((7217, 7248), 'sys.stderr.write', 'sys.stderr.write', (['(message % tpl)'], {}), '(message % tpl)\n', (7233, 7248), False, 'import sys\n'), ((7563, 7583), 'hashlib.md5', 'hashlib.md5', (['content'], {}), '(content)\n', (7574, 7583), False, 'import hashlib\n'), ((21059, 21079), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (21073, 21079), False, 'import os\n'), ((24405, 24441), 're.search', 're.search', (['""" +[^)\\\\]]| \\\\)"""', 'subline'], {}), "(' +[^)\\\\]]| \\\\)', subline)\n", (24414, 24441), False, 'import re\n'), ((24757, 24807), 're.search', 're.search', (['""" +([^)])|( *(\\\\(|\\\\[))"""', 'subline[end:]'], {}), "(' +([^)])|( *(\\\\(|\\\\[))', subline[end:])\n", (24766, 24807), False, 'import re\n'), ((25414, 25465), 're.search', 're.search', (["' +([^)}\\n\\r])|( *(\\\\(|\\\\[|{))'", 'subline'], {}), "(' +([^)}\\n\\r])|( *(\\\\(|\\\\[|{))', subline)\n", (25423, 25465), False, 'import re\n'), ((46153, 46184), 'sys.stderr.write', 'sys.stderr.write', (['(message % tpl)'], {}), '(message % tpl)\n', (46169, 46184), False, 'import sys\n'), ((46362, 46393), 'sys.stderr.write', 'sys.stderr.write', (['(message % tpl)'], {}), '(message % tpl)\n', (46378, 46393), False, 'import sys\n'), ((46584, 46615), 'sys.stderr.write', 'sys.stderr.write', (['(message % tpl)'], {}), '(message % tpl)\n', (46600, 46615), False, 'import sys\n'), ((47192, 47254), 'difflib.unified_diff', 'difflib.unified_diff', (["res['original_code']", 'indented_code'], {'n': '(5)'}), "(res['original_code'], indented_code, n=5)\n", (47212, 47254), False, 'import difflib\n'), ((48139, 48166), 
'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (48164, 48166), False, 'import multiprocessing\n'), ((6925, 6945), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (6938, 6945), False, 'import os\n'), ((10493, 10521), 're.search', 're.search', (['"""[ \t]*;"""', 'string'], {}), "('[ \\t]*;', string)\n", (10502, 10521), False, 'import re\n'), ((36593, 36645), 're.match', 're.match', (['"""\\\\[text\\\\]"""', 'curr_line[offset:offset + 7]'], {}), "('\\\\[text\\\\]', curr_line[offset:offset + 7])\n", (36601, 36645), False, 'import re\n'), ((38341, 38411), 're.search', 're.search', (['"[^#](\'|`|#)([ \\t]*\\\\(|\\\\[)($|\\r)"', 'curr_line[0:offset + 1]'], {}), '("[^#](\'|`|#)([ \\t]*\\\\(|\\\\[)($|\\r)", curr_line[0:offset + 1])\n', (38350, 38411), False, 'import re\n'), ((38474, 38515), 're.search', 're.search', (["'^[^ \\t]+[ \\t]*($|\\r)'", 'substr'], {}), "('^[^ \\t]+[ \\t]*($|\\r)', substr)\n", (38483, 38515), False, 'import re\n'), ((45397, 45457), 'sys.stderr.write', 'sys.stderr.write', (['(message % (fname, line, column, character))'], {}), '(message % (fname, line, column, character))\n', (45413, 45457), False, 'import sys\n'), ((45662, 45717), 'sys.stderr.write', 'sys.stderr.write', (['(message % (fname, brace[0], brace[1]))'], {}), '(message % (fname, brace[0], brace[1]))\n', (45678, 45717), False, 'import sys\n'), ((45946, 45977), 'sys.stderr.write', 'sys.stderr.write', (['(message % tpl)'], {}), '(message % tpl)\n', (45962, 45977), False, 'import sys\n'), ((49004, 49038), 're.search', 're.search', (['""".clj[sc]{0,1}$"""', 'fname'], {}), "('.clj[sc]{0,1}$', fname)\n", (49013, 49038), False, 'import re\n'), ((36827, 36880), 're.match', 're.match', (['"""\\\\[/text\\\\]"""', 'curr_line[offset:offset + 7]'], {}), "('\\\\[/text\\\\]', curr_line[offset:offset + 7])\n", (36835, 36880), False, 'import re\n'), ((41803, 41847), 're.search', 're.search', (["'^[ \\t]*(;|#\\\\||$|\\r)'", 'curr_line'], {}), "('^[ \\t]*(;|#\\\\||$|\\r)', curr_line)\n", (41812, 41847), False, 'import re\n'), ((37709, 37736), 're.findall', 're.findall', (['"""^[ \t]*"""', 'line'], {}), "('^[ \\t]*', line)\n", (37719, 37736), False, 'import re\n'), ((39334, 39362), 're.search', 're.search', (['"""^[ \t]*"""', 'substr'], {}), "('^[ \\t]*', substr)\n", (39343, 39362), False, 'import re\n'), ((39948, 39991), 're.search', 're.search', (['"""[ \t]*"""', 'curr_line[offset + 1:]'], {}), "('[ \\t]*', curr_line[offset + 1:])\n", (39957, 39991), False, 'import re\n'), ((39456, 39483), 're.search', 're.search', (['"""[ \t]*"""', 'substr'], {}), "('[ \\t]*', substr)\n", (39465, 39483), False, 'import re\n')]
|
import re
SMS_CHAR_COUNT_LIMIT = 612 # 153 * 4
# regexes for use in recipients.validate_email_address.
# Valid characters taken from https://en.wikipedia.org/wiki/Email_address#Local-part
# Note: Normal apostrophe eg `Firstname-o'surname@domain.com` is allowed.
hostname_part = re.compile(r"^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$", re.IGNORECASE)
tld_part = re.compile(r"^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE)
VALID_LOCAL_CHARS = r"a-zA-ZÀ-ÿ0-9.!#$%&'*+/=?^_`{|}~\-"
EMAIL_REGEX_PATTERN = r"^[{}]+@([^.@][^@\s]+)$".format(VALID_LOCAL_CHARS)
email_with_smart_quotes_regex = re.compile(
# matches wider than an email - everything between an at sign and the nearest whitespace
r"(^|\s)\S+@\S+(\s|$)",
flags=re.MULTILINE,
)
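# Illustrative behaviour of the patterns above (not part of the original module):
#   re.match(EMAIL_REGEX_PATTERN, "first.last@example.com")   -> matches
#   re.match(EMAIL_REGEX_PATTERN, "no-at-sign.example.com")   -> None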
|
[
"re.compile"
] |
[((281, 340), 're.compile', 're.compile', (['"""^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$"""', 're.IGNORECASE'], {}), "('^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$', re.IGNORECASE)\n", (291, 340), False, 'import re\n'), ((353, 424), 're.compile', 're.compile', (['"""^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$"""', 're.IGNORECASE'], {}), "('^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$', re.IGNORECASE)\n", (363, 424), False, 'import re\n'), ((589, 646), 're.compile', 're.compile', (['"""(^|\\\\s)\\\\S+@\\\\S+(\\\\s|$)"""'], {'flags': 're.MULTILINE'}), "('(^|\\\\s)\\\\S+@\\\\S+(\\\\s|$)', flags=re.MULTILINE)\n", (599, 646), False, 'import re\n')]
|
import time
class Cache(object):
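    """A tiny time-based cache: globalCache holds values shared by every caller,
    localCace holds values keyed per domain, and an entry is considered fresh
    while Cache.now - Cache.timeout <= its timestamp."""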
globalCache = {}
localCace = {}
timeout = 0
now = time.time()
@staticmethod
def setTimeout(timeout):
Cache.timeout = timeout
@staticmethod
def updateTime():
Cache.now = time.time()
@staticmethod
def _isValid(timestamp):
return True if Cache.now - Cache.timeout <= timestamp else False
@staticmethod
def getGlobal(kw, fx, args = ()):
if kw not in Cache.globalCache:
Cache.globalCache[kw] = {
'timestamp': Cache.now,
'value' : fx(args),
'args' : args
}
elif not Cache._isValid(Cache.globalCache[kw]['timestamp']):
Cache.globalCache[kw]['value'] = fx(args)
return Cache.globalCache[kw]['value']
@staticmethod
def getGlobalB(kw, fx, args = ()):
return fx()
@staticmethod
def getLocal(kw, dom, fx, args = ()):
if dom not in Cache.localCace:
Cache.localCace[dom] = {}
if kw not in Cache.localCace[dom]:
Cache.localCace[dom][kw] = {
'timestamp': Cache.now,
'value' : fx(args),
'args' : args
}
elif not Cache._isValid(Cache.localCace[dom][kw]['timestamp']):
Cache.localCace[dom][kw]['value'] = fx(args)
return Cache.localCace[dom][kw]['value']
@staticmethod
def getLocalB(kw, dom, fx, args = ()):
return fx()
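# Illustrative usage (not part of the original module); `load_config` is a
# hypothetical zero-argument loader:
#   Cache.setTimeout(60)
#   Cache.updateTime()
#   config = Cache.getGlobal('config', lambda args: load_config())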
|
[
"time.time"
] |
[((88, 99), 'time.time', 'time.time', ([], {}), '()\n', (97, 99), False, 'import time\n'), ((217, 228), 'time.time', 'time.time', ([], {}), '()\n', (226, 228), False, 'import time\n')]
|
import zeep
import asyncio, sys
from onvif import ONVIFCamera
import cv2
import numpy as np
import urllib
from urllib.request import urlopen
IP="192.168.2.22" # Camera IP address
PORT=80 # Port
USER="admin" # Username
PASS="<PASSWORD>" # Password
XMAX = 1
XMIN = -1
YMAX = 1
YMIN = -1
moverequest = None
ptz = None
active = False
def zeep_pythonvalue(self, xmlvalue):
return xmlvalue
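# Work around a zeep deserialisation quirk with ONVIF responses: returning the
# raw xmlvalue keeps AnySimpleType values from raising during parsing.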
zeep.xsd.simple.AnySimpleType.pythonvalue = zeep_pythonvalue
def setup_move():
mycam = ONVIFCamera(IP, PORT, USER, PASS)
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
global ptz
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
presets = ptz.GetPresets(gp)
for preset in presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
# GetStatus
print("GetStatus")
status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
status.Position.Zoom.x,
status.MoveStatus.PanTilt))
# abMove = ptz.create_type('AbsoluteMove')
# abMove.ProfileToken = profileToken
# print('status {} {} {} {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
# status.Velocity.PanTilt.x, status.Velocity.PanTilt.y))
return
# Get PTZ configuration options for getting continuous move range
request = ptz.create_type('GetConfigurationOptions')
request.ConfigurationToken = media_profile.PTZConfiguration.token
ptz_configuration_options = ptz.GetConfigurationOptions(request)
global moverequest
moverequest = ptz.create_type('ContinuousMove')
moverequest.ProfileToken = media_profile.token
if moverequest.Velocity is None:
moverequest.Velocity = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
# Get range of pan and tilt
# NOTE: X and Y are velocity vector
# global XMAX, XMIN, YMAX, YMIN
# XMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max
# XMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min
# YMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max
# YMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min
def url_to_image(url):
# password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# password_mgr.add_password(None, url, USER, PASS)
# handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# opener = urllib.request.build_opener(handler)
# urllib.request.install_opener(opener)
# resp = urlopen(url)
import requests
from requests.auth import HTTPDigestAuth
resp = requests.get(url, auth=HTTPDigestAuth(USER, PASS))
if resp.status_code == 200:
image = np.asarray(bytearray(resp.content), dtype="uint8")
image2 = cv2.imdecode(image, cv2.IMREAD_COLOR)
cv2.imshow('image', image2)
        return image2  # return the decoded frame rather than the raw byte buffer
else:
return None
class CameraController:
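    """Thin helper around ONVIFCamera: caches the PTZ presets and resolves the
    preset closest to the camera's current pan/tilt position."""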
presets = []
status = None
def get_current_preset(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# GetStatus
print("GetStatus")
self.status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(self.status.Position.PanTilt.x, self.status.Position.PanTilt.y,
self.status.Position.Zoom.x,
self.status.MoveStatus.PanTilt))
min_dist = 100
current_prest = None
for preset in self.presets:
position = preset['PTZPosition']
dist = pow((self.status.Position.PanTilt.x - position.PanTilt.x), 2) + pow((self.status.Position.PanTilt.y - position.PanTilt.y), 2)
if dist < min_dist:
min_dist = dist
current_prest = preset
snapshot = media.GetSnapshotUri({'ProfileToken': profileToken})
print('snapshot uri {}'.format(snapshot))
# image = io.imread(snapshot)
# n_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# cv2.imwrite('./image1.jpg', n_image)
image = url_to_image(snapshot.Uri)
cv2.imwrite('./image2.jpg', image)
return current_prest, self.status.MoveStatus.PanTilt, snapshot
def get_presets(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
self.presets = ptz.GetPresets(gp)
for preset in self.presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
return self.presets
if __name__ == '__main__':
# url_to_image('http://192.168.1.108/onvifsnapshot/media_service/snapshot?channel=1&subtype=0')
# setup_move()
camera = CameraController()
camera.get_presets()
camera.get_current_preset()
|
[
"cv2.imwrite",
"onvif.ONVIFCamera",
"cv2.imshow",
"requests.auth.HTTPDigestAuth",
"cv2.imdecode"
] |
[((510, 543), 'onvif.ONVIFCamera', 'ONVIFCamera', (['IP', 'PORT', 'USER', 'PASS'], {}), '(IP, PORT, USER, PASS)\n', (521, 543), False, 'from onvif import ONVIFCamera\n'), ((3575, 3612), 'cv2.imdecode', 'cv2.imdecode', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (3587, 3612), False, 'import cv2\n'), ((3621, 3648), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image2'], {}), "('image', image2)\n", (3631, 3648), False, 'import cv2\n'), ((3812, 3857), 'onvif.ONVIFCamera', 'ONVIFCamera', (['IP', 'PORT', 'USER', 'PASS', '"""../wsdl/"""'], {}), "(IP, PORT, USER, PASS, '../wsdl/')\n", (3823, 3857), False, 'from onvif import ONVIFCamera\n'), ((5275, 5309), 'cv2.imwrite', 'cv2.imwrite', (['"""./image2.jpg"""', 'image'], {}), "('./image2.jpg', image)\n", (5286, 5309), False, 'import cv2\n'), ((5426, 5471), 'onvif.ONVIFCamera', 'ONVIFCamera', (['IP', 'PORT', 'USER', 'PASS', '"""../wsdl/"""'], {}), "(IP, PORT, USER, PASS, '../wsdl/')\n", (5437, 5471), False, 'from onvif import ONVIFCamera\n'), ((3431, 3457), 'requests.auth.HTTPDigestAuth', 'HTTPDigestAuth', (['USER', 'PASS'], {}), '(USER, PASS)\n', (3445, 3457), False, 'from requests.auth import HTTPDigestAuth\n')]
|
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('id', 'content_object', 'text', 'comment_time', 'user', 'root', 'parent', 'reply_to')
|
[
"django.contrib.admin.register"
] |
[((64, 87), 'django.contrib.admin.register', 'admin.register', (['Comment'], {}), '(Comment)\n', (78, 87), False, 'from django.contrib import admin\n')]
|
from datetime import date, timedelta
from allocation.domain import events
from allocation.domain.model import Product, OrderLine, Batch
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_prefers_warehouse_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
product = Product(sku="RETRO-CLOCK", batches=[in_stock_batch, shipment_batch])
line = OrderLine("ofer", "RETRO-CLOCK", 10)
product.allocate(line)
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches():
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
product = Product(sku="MINIMALIST-SPOON", batches=[medium, earliest, latest])
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
product.allocate(line)
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref():
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
product = Product(sku="HIGHBROW-POSTER", batches=[in_stock_batch, shipment_batch])
allocation = product.allocate(line)
assert allocation == in_stock_batch.reference
def test_records_out_of_stock_event_if_cannot_allocate():
batch = Batch("batch1", "SMALL-FORK", 10, eta=today)
product = Product(sku="SMALL-FORK", batches=[batch])
product.allocate(OrderLine("order1", "SMALL-FORK", 10))
allocation = product.allocate(OrderLine("order2", "SMALL-FORK", 1))
assert product.events[-1] == events.OutOfStock(sku="SMALL-FORK")
assert allocation is None
def test_increments_version_number():
line = OrderLine("oref", "SCANDI-PEN", 10)
product = Product(
sku="SCANDI-PEN", batches=[Batch("b1", "SCANDI-PEN", 100, eta=None)])
product.version_number = 7
product.allocate(line)
assert product.version_number == 8
|
[
"allocation.domain.model.Product",
"datetime.timedelta",
"allocation.domain.model.OrderLine",
"allocation.domain.events.OutOfStock",
"datetime.date.today",
"allocation.domain.model.Batch"
] |
[((145, 157), 'datetime.date.today', 'date.today', ([], {}), '()\n', (155, 157), False, 'from datetime import date, timedelta\n'), ((177, 194), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (186, 194), False, 'from datetime import date, timedelta\n'), ((214, 232), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (223, 232), False, 'from datetime import date, timedelta\n'), ((306, 359), 'allocation.domain.model.Batch', 'Batch', (['"""in-stock-batch"""', '"""RETRO-CLOCK"""', '(100)'], {'eta': 'None'}), "('in-stock-batch', 'RETRO-CLOCK', 100, eta=None)\n", (311, 359), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((381, 438), 'allocation.domain.model.Batch', 'Batch', (['"""shipment-batch"""', '"""RETRO-CLOCK"""', '(100)'], {'eta': 'tomorrow'}), "('shipment-batch', 'RETRO-CLOCK', 100, eta=tomorrow)\n", (386, 438), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((453, 521), 'allocation.domain.model.Product', 'Product', ([], {'sku': '"""RETRO-CLOCK"""', 'batches': '[in_stock_batch, shipment_batch]'}), "(sku='RETRO-CLOCK', batches=[in_stock_batch, shipment_batch])\n", (460, 521), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((533, 569), 'allocation.domain.model.OrderLine', 'OrderLine', (['"""ofer"""', '"""RETRO-CLOCK"""', '(10)'], {}), "('ofer', 'RETRO-CLOCK', 10)\n", (542, 569), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((753, 810), 'allocation.domain.model.Batch', 'Batch', (['"""speedy-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'today'}), "('speedy-batch', 'MINIMALIST-SPOON', 100, eta=today)\n", (758, 810), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((824, 884), 'allocation.domain.model.Batch', 'Batch', (['"""normal-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'tomorrow'}), "('normal-batch', 'MINIMALIST-SPOON', 100, eta=tomorrow)\n", (829, 884), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((898, 953), 'allocation.domain.model.Batch', 'Batch', (['"""slow-batch"""', '"""MINIMALIST-SPOON"""', '(100)'], {'eta': 'later'}), "('slow-batch', 'MINIMALIST-SPOON', 100, eta=later)\n", (903, 953), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((968, 1035), 'allocation.domain.model.Product', 'Product', ([], {'sku': '"""MINIMALIST-SPOON"""', 'batches': '[medium, earliest, latest]'}), "(sku='MINIMALIST-SPOON', batches=[medium, earliest, latest])\n", (975, 1035), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1047, 1090), 'allocation.domain.model.OrderLine', 'OrderLine', (['"""order1"""', '"""MINIMALIST-SPOON"""', '(10)'], {}), "('order1', 'MINIMALIST-SPOON', 10)\n", (1056, 1090), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1315, 1376), 'allocation.domain.model.Batch', 'Batch', (['"""in-stock-batch-ref"""', '"""HIGHBROW-POSTER"""', '(100)'], {'eta': 'None'}), "('in-stock-batch-ref', 'HIGHBROW-POSTER', 100, eta=None)\n", (1320, 1376), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1398, 1463), 'allocation.domain.model.Batch', 'Batch', (['"""shipment-batch-ref"""', '"""HIGHBROW-POSTER"""', '(100)'], {'eta': 'tomorrow'}), "('shipment-batch-ref', 'HIGHBROW-POSTER', 100, eta=tomorrow)\n", (1403, 1463), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1475, 1515), 'allocation.domain.model.OrderLine', 'OrderLine', 
(['"""oref"""', '"""HIGHBROW-POSTER"""', '(10)'], {}), "('oref', 'HIGHBROW-POSTER', 10)\n", (1484, 1515), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1530, 1602), 'allocation.domain.model.Product', 'Product', ([], {'sku': '"""HIGHBROW-POSTER"""', 'batches': '[in_stock_batch, shipment_batch]'}), "(sku='HIGHBROW-POSTER', batches=[in_stock_batch, shipment_batch])\n", (1537, 1602), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1766, 1810), 'allocation.domain.model.Batch', 'Batch', (['"""batch1"""', '"""SMALL-FORK"""', '(10)'], {'eta': 'today'}), "('batch1', 'SMALL-FORK', 10, eta=today)\n", (1771, 1810), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1825, 1867), 'allocation.domain.model.Product', 'Product', ([], {'sku': '"""SMALL-FORK"""', 'batches': '[batch]'}), "(sku='SMALL-FORK', batches=[batch])\n", (1832, 1867), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((2151, 2186), 'allocation.domain.model.OrderLine', 'OrderLine', (['"""oref"""', '"""SCANDI-PEN"""', '(10)'], {}), "('oref', 'SCANDI-PEN', 10)\n", (2160, 2186), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1889, 1926), 'allocation.domain.model.OrderLine', 'OrderLine', (['"""order1"""', '"""SMALL-FORK"""', '(10)'], {}), "('order1', 'SMALL-FORK', 10)\n", (1898, 1926), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((1963, 1999), 'allocation.domain.model.OrderLine', 'OrderLine', (['"""order2"""', '"""SMALL-FORK"""', '(1)'], {}), "('order2', 'SMALL-FORK', 1)\n", (1972, 1999), False, 'from allocation.domain.model import Product, OrderLine, Batch\n'), ((2035, 2070), 'allocation.domain.events.OutOfStock', 'events.OutOfStock', ([], {'sku': '"""SMALL-FORK"""'}), "(sku='SMALL-FORK')\n", (2052, 2070), False, 'from allocation.domain import events\n'), ((2245, 2285), 'allocation.domain.model.Batch', 'Batch', (['"""b1"""', '"""SCANDI-PEN"""', '(100)'], {'eta': 'None'}), "('b1', 'SCANDI-PEN', 100, eta=None)\n", (2250, 2285), False, 'from allocation.domain.model import Product, OrderLine, Batch\n')]
|
# -*- coding: utf-8 -*-
'''
:codeauthor: <NAME> <<EMAIL>>
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
# Import Salt Libs
import salt.utils.json
import salt.states.grafana as grafana
from salt.exceptions import SaltInvocationError
class GrafanaTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.grafana
'''
def setup_loader_modules(self):
return {grafana: {}}
# 'dashboard_present' function tests: 1
def test_dashboard_present(self):
'''
Test to ensure the grafana dashboard exists and is managed.
'''
name = 'myservice'
rows = ['systemhealth', 'requests', 'title']
row = [{'panels': [{'id': 'a'}], 'title': 'systemhealth'}]
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
comt1 = ('Dashboard myservice is set to be updated. The following rows '
'set to be updated: {0}'.format(['systemhealth']))
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
profile=False)
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
True, True)
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[False, False, True, True, True, True])
mock_t = MagicMock(return_value='')
mock_i = MagicMock(return_value=False)
source = {'dashboard': '["rows", {"rows":["baz", null, 1.0, 2]}]'}
mock_dict = MagicMock(return_value={'_source': source})
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f,
'pillar.get': mock_t,
'elasticsearch.get': mock_dict,
'elasticsearch.index': mock_i}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
with patch.dict(grafana.__opts__, {'test': True}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
comt = ('Dashboard {0} is set to be created.'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows':
[{'panels': 'b',
'title': 'systemhealth'}]})
with patch.object(salt.utils.json, 'loads', mock):
ret.update({'comment': comt1, 'result': None})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
with patch.object(salt.utils.json, 'loads',
MagicMock(return_value={'rows': {}})):
self.assertRaises(SaltInvocationError,
grafana.dashboard_present, name,
rows_from_pillar=rows)
comt = ('Dashboard myservice is up to date')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows': [{'panels': 'b',
'title': 'systemhealth'}]})
with patch.dict(grafana.__opts__, {'test': False}):
with patch.object(salt.utils.json, 'loads', mock):
comt = ('Failed to update dashboard myservice.')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
# 'dashboard_absent' function tests: 1
def test_dashboard_absent(self):
'''
Test to ensure the named grafana dashboard is deleted.
'''
name = 'myservice'
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[True, False])
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f}):
self.assertRaises(SaltInvocationError, grafana.dashboard_absent,
name)
with patch.dict(grafana.__opts__, {'test': True}):
comt = ('Dashboard myservice is set to be removed.')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
comt = ('Dashboard myservice does not exist.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
|
[
"tests.support.mock.MagicMock",
"tests.support.mock.patch.object",
"tests.support.mock.patch.dict",
"salt.states.grafana.dashboard_absent",
"salt.states.grafana.dashboard_present"
] |
[((1500, 1760), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'side_effect': "[{'hosts': True, 'index': False}, {'hosts': True, 'index': True}, {'hosts':\n True, 'index': True}, {'hosts': True, 'index': True}, {'hosts': True,\n 'index': True}, {'hosts': True, 'index': True}, {'hosts': True, 'index':\n True}]"}), "(side_effect=[{'hosts': True, 'index': False}, {'hosts': True,\n 'index': True}, {'hosts': True, 'index': True}, {'hosts': True, 'index':\n True}, {'hosts': True, 'index': True}, {'hosts': True, 'index': True},\n {'hosts': True, 'index': True}])\n", (1509, 1760), False, 'from tests.support.mock import MagicMock, patch\n'), ((1994, 2055), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'side_effect': '[False, False, True, True, True, True]'}), '(side_effect=[False, False, True, True, True, True])\n', (2003, 2055), False, 'from tests.support.mock import MagicMock, patch\n'), ((2073, 2099), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': '""""""'}), "(return_value='')\n", (2082, 2099), False, 'from tests.support.mock import MagicMock, patch\n'), ((2117, 2146), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (2126, 2146), False, 'from tests.support.mock import MagicMock, patch\n'), ((2242, 2285), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'_source': source}"}), "(return_value={'_source': source})\n", (2251, 2285), False, 'from tests.support.mock import MagicMock, patch\n'), ((5144, 5268), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'side_effect': "[{'hosts': True, 'index': False}, {'hosts': True, 'index': True}, {'hosts':\n True, 'index': True}]"}), "(side_effect=[{'hosts': True, 'index': False}, {'hosts': True,\n 'index': True}, {'hosts': True, 'index': True}])\n", (5153, 5268), False, 'from tests.support.mock import MagicMock, patch\n'), ((5358, 5394), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'side_effect': '[True, False]'}), '(side_effect=[True, False])\n', (5367, 5394), False, 'from tests.support.mock import MagicMock, patch\n'), ((2299, 2477), 'tests.support.mock.patch.dict', 'patch.dict', (['grafana.__salt__', "{'config.option': mock, 'elasticsearch.exists': mock_f, 'pillar.get':\n mock_t, 'elasticsearch.get': mock_dict, 'elasticsearch.index': mock_i}"], {}), "(grafana.__salt__, {'config.option': mock, 'elasticsearch.exists':\n mock_f, 'pillar.get': mock_t, 'elasticsearch.get': mock_dict,\n 'elasticsearch.index': mock_i})\n", (2309, 2477), False, 'from tests.support.mock import MagicMock, patch\n'), ((4211, 4287), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'rows': [{'panels': 'b', 'title': 'systemhealth'}]}"}), "(return_value={'rows': [{'panels': 'b', 'title': 'systemhealth'}]})\n", (4220, 4287), False, 'from tests.support.mock import MagicMock, patch\n'), ((5408, 5497), 'tests.support.mock.patch.dict', 'patch.dict', (['grafana.__salt__', "{'config.option': mock, 'elasticsearch.exists': mock_f}"], {}), "(grafana.__salt__, {'config.option': mock, 'elasticsearch.exists':\n mock_f})\n", (5418, 5497), False, 'from tests.support.mock import MagicMock, patch\n'), ((2775, 2819), 'tests.support.mock.patch.dict', 'patch.dict', (['grafana.__opts__', "{'test': True}"], {}), "(grafana.__opts__, {'test': True})\n", (2785, 2819), False, 'from tests.support.mock import MagicMock, patch\n'), ((3171, 3247), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'rows': [{'panels': 'b', 'title': 'systemhealth'}]}"}), 
"(return_value={'rows': [{'panels': 'b', 'title': 'systemhealth'}]})\n", (3180, 3247), False, 'from tests.support.mock import MagicMock, patch\n'), ((4358, 4403), 'tests.support.mock.patch.dict', 'patch.dict', (['grafana.__opts__', "{'test': False}"], {}), "(grafana.__opts__, {'test': False})\n", (4368, 4403), False, 'from tests.support.mock import MagicMock, patch\n'), ((5669, 5713), 'tests.support.mock.patch.dict', 'patch.dict', (['grafana.__opts__', "{'test': True}"], {}), "(grafana.__opts__, {'test': True})\n", (5679, 5713), False, 'from tests.support.mock import MagicMock, patch\n'), ((6071, 6101), 'salt.states.grafana.dashboard_absent', 'grafana.dashboard_absent', (['name'], {}), '(name)\n', (6095, 6101), True, 'import salt.states.grafana as grafana\n'), ((3103, 3140), 'salt.states.grafana.dashboard_present', 'grafana.dashboard_present', (['name', '(True)'], {}), '(name, True)\n', (3128, 3140), True, 'import salt.states.grafana as grafana\n'), ((3365, 3409), 'tests.support.mock.patch.object', 'patch.object', (['salt.utils.json', '"""loads"""', 'mock'], {}), "(salt.utils.json, 'loads', mock)\n", (3377, 3409), False, 'from tests.support.mock import MagicMock, patch\n'), ((3768, 3804), 'tests.support.mock.MagicMock', 'MagicMock', ([], {'return_value': "{'rows': {}}"}), "(return_value={'rows': {}})\n", (3777, 3804), False, 'from tests.support.mock import MagicMock, patch\n'), ((4147, 4184), 'salt.states.grafana.dashboard_present', 'grafana.dashboard_present', (['name', '(True)'], {}), '(name, True)\n', (4172, 4184), True, 'import salt.states.grafana as grafana\n'), ((4426, 4470), 'tests.support.mock.patch.object', 'patch.object', (['salt.utils.json', '"""loads"""', 'mock'], {}), "(salt.utils.json, 'loads', mock)\n", (4438, 4470), False, 'from tests.support.mock import MagicMock, patch\n'), ((5883, 5913), 'salt.states.grafana.dashboard_absent', 'grafana.dashboard_absent', (['name'], {}), '(name)\n', (5907, 5913), True, 'import salt.states.grafana as grafana\n'), ((3519, 3566), 'salt.states.grafana.dashboard_present', 'grafana.dashboard_present', (['name', '(True)'], {'rows': 'row'}), '(name, True, rows=row)\n', (3544, 3566), True, 'import salt.states.grafana as grafana\n'), ((4649, 4696), 'salt.states.grafana.dashboard_present', 'grafana.dashboard_present', (['name', '(True)'], {'rows': 'row'}), '(name, True, rows=row)\n', (4674, 4696), True, 'import salt.states.grafana as grafana\n')]
|
def pprint(arr):
for line in arr:
print(line)
# 5 7
# 0 1 1
# 0 2 3
# 1 2 3
# 1 3 6
# 2 3 4
# 2 4 2
# 3 4 5
import sys
import heapq as hq
N, M = map(int, sys.stdin.readline().split(" "))
W = [[float('inf')] * N for _ in range(N)]
h = []
for _ in range(M):
i, j, w = map(int, sys.stdin.readline().split(" "))
hq.heappush(h, (w, i, j))
print(h)
def Kruskal(heap, source):
    # Kruskal's MST: pop edges in increasing weight order and keep each edge
    # that joins two previously disconnected components (union-find).
    answer = []
    parent = list(range(N))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x
    while heap:
        w, i, j = hq.heappop(heap)
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[ri] = rj
            answer.append((i, j, w))
    return answer
print(Kruskal(h, 0))
|
[
"sys.stdin.readline",
"heapq.heappush",
"heapq.heappop"
] |
[((330, 355), 'heapq.heappush', 'hq.heappush', (['h', '(w, i, j)'], {}), '(h, (w, i, j))\n', (341, 355), True, 'import heapq as hq\n'), ((460, 476), 'heapq.heappop', 'hq.heappop', (['heap'], {}), '(heap)\n', (470, 476), True, 'import heapq as hq\n'), ((167, 187), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (185, 187), False, 'import sys\n'), ((293, 313), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (311, 313), False, 'import sys\n')]
|
"""common parser argument
"""
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
import argparse
from enum import Enum
import logging
from sys import exit as sys_exit
from . import archivist
from .logger import set_logger
from .proof_mechanism import ProofMechanism
LOGGER = logging.getLogger(__name__)
# from https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
class EnumAction(argparse.Action):
"""
Argparse action for handling Enums
"""
def __init__(self, **kwargs):
# Pop off the type value
enum_type = kwargs.pop("type", None)
# Ensure an Enum subclass is provided
if enum_type is None:
raise ValueError("type must be assigned an Enum when using EnumAction")
if not issubclass(enum_type, Enum):
raise TypeError("type must be an Enum when using EnumAction")
# Generate choices from the Enum
kwargs.setdefault("choices", tuple(e.name for e in enum_type))
super().__init__(**kwargs)
self._enum = enum_type
def __call__(self, parser, namespace, values, option_string=None):
# Convert value back into an Enum
value = self._enum[values]
setattr(namespace, self.dest, value)
def common_parser(description):
"""Construct parser with security option for token/auth authentication"""
parser = argparse.ArgumentParser(
description=description,
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="print verbose debugging",
)
parser.add_argument(
"-u",
"--url",
type=str,
dest="url",
action="store",
default="https://rkvst.poc.jitsuin.io",
help="location of Archivist service",
)
parser.add_argument(
"-p",
"--proof-mechanism",
type=ProofMechanism,
action=EnumAction,
dest="proof_mechanism",
default=ProofMechanism.SIMPLE_HASH,
help="mechanism for proving the evidence for events on the Asset",
)
security = parser.add_mutually_exclusive_group(required=True)
security.add_argument(
"-t",
"--auth-token",
type=str,
dest="auth_token_file",
action="store",
default=".auth_token",
help="FILE containing API authentication token",
)
return parser, security
def endpoint(args):
if args.verbose:
set_logger("DEBUG")
else:
set_logger("INFO")
arch = None
LOGGER.info("Initialising connection to Jitsuin Archivist...")
fixtures = {
"assets": {
"proof_mechanism": args.proof_mechanism.name,
},
}
if args.auth_token_file:
with open(args.auth_token_file, mode="r", encoding="utf-8") as tokenfile:
authtoken = tokenfile.read().strip()
arch = archivist.Archivist(args.url, authtoken, verify=False, fixtures=fixtures)
if arch is None:
LOGGER.error("Critical error. Aborting.")
sys_exit(1)
return arch
|
[
"logging.getLogger",
"argparse.ArgumentParser",
"sys.exit"
] |
[((308, 335), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (325, 335), False, 'import logging\n'), ((1412, 1460), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (1435, 1460), False, 'import argparse\n'), ((3147, 3158), 'sys.exit', 'sys_exit', (['(1)'], {}), '(1)\n', (3155, 3158), True, 'from sys import exit as sys_exit\n')]
|
import argparse, pdb
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
kwargs = dict(
display=opt.display,
state_image=opt.state_image,
store=opt.store,
fps=opt.fps,
nb_lanes=opt.lanes,
traffic_rate=opt.traffic_rate,
data_dir=opt.data_dir,
delta_t=opt.delta_t,
)
register(
id='Traffic-v0',
entry_point='traffic_gym:Simulator',
kwargs=kwargs
)
register(
id='I-80-v0',
entry_point='map_i80:I80',
kwargs=kwargs
)
gym.envs.registration.register(
id='US-101-v0',
entry_point='map_us101:US101',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Lankershim-v0',
entry_point='map_lanker:Lankershim',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Peachtree-v0',
entry_point='map_peach:Peachtree',
kwargs=kwargs,
)
env_names = {
'ai': 'Traffic-v0',
'i80': 'I-80-v0',
'us101': 'US-101-v0',
'lanker': 'Lankershim-v0',
'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
done = False
while not done:
observation, reward, done, info = env.step()
env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
|
[
"torch.manual_seed",
"argparse.ArgumentParser",
"random.seed",
"gym.envs.registration.register",
"numpy.random.seed",
"os.system",
"gym.make"
] |
[((173, 198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (196, 198), False, 'import argparse, pdb\n'), ((1027, 1048), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1038, 1048), False, 'import random\n'), ((1049, 1073), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1063, 1073), True, 'import numpy as np\n'), ((1074, 1101), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (1091, 1101), False, 'import torch\n'), ((1103, 1140), 'os.system', 'os.system', (["('mkdir -p ' + opt.data_dir)"], {}), "('mkdir -p ' + opt.data_dir)\n", (1112, 1140), False, 'import os\n'), ((1367, 1444), 'gym.envs.registration.register', 'register', ([], {'id': '"""Traffic-v0"""', 'entry_point': '"""traffic_gym:Simulator"""', 'kwargs': 'kwargs'}), "(id='Traffic-v0', entry_point='traffic_gym:Simulator', kwargs=kwargs)\n", (1375, 1444), False, 'from gym.envs.registration import register\n'), ((1460, 1524), 'gym.envs.registration.register', 'register', ([], {'id': '"""I-80-v0"""', 'entry_point': '"""map_i80:I80"""', 'kwargs': 'kwargs'}), "(id='I-80-v0', entry_point='map_i80:I80', kwargs=kwargs)\n", (1468, 1524), False, 'from gym.envs.registration import register\n'), ((1540, 1637), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""US-101-v0"""', 'entry_point': '"""map_us101:US101"""', 'kwargs': 'kwargs'}), "(id='US-101-v0', entry_point=\n 'map_us101:US101', kwargs=kwargs)\n", (1570, 1637), False, 'import gym\n'), ((1649, 1756), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""Lankershim-v0"""', 'entry_point': '"""map_lanker:Lankershim"""', 'kwargs': 'kwargs'}), "(id='Lankershim-v0', entry_point=\n 'map_lanker:Lankershim', kwargs=kwargs)\n", (1679, 1756), False, 'import gym\n'), ((1768, 1872), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""Peachtree-v0"""', 'entry_point': '"""map_peach:Peachtree"""', 'kwargs': 'kwargs'}), "(id='Peachtree-v0', entry_point=\n 'map_peach:Peachtree', kwargs=kwargs)\n", (1798, 1872), False, 'import gym\n'), ((2096, 2124), 'gym.make', 'gym.make', (['env_names[opt.map]'], {}), '(env_names[opt.map])\n', (2104, 2124), False, 'import gym\n')]
|
import matplotlib.pylab as plt
import numpy as np
import random
from scipy.ndimage import gaussian_filter
mu =9
N = 50
k = 10
eta =10
sigma = 2
p0 = 0.5
inverse_random = False
L = range(N*N)
Q = np.zeros((N*mu,N*mu))
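# Assemble a mu-by-mu mosaic Q of N-by-N patches. Each patch is grown by
# sampling cells in proportion to their current weight, jittering them by up
# to eta pixels, adding mass F, and Gaussian-smoothing after every step.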
for o in range(mu*mu):
print(o)
F = 1000*k
a = np.ones((N,N))
for k_ in range(1000):
linear_idx = random.choices(L, weights=a.ravel()/float(a.sum()), k = k)
x, y = np.unravel_index(linear_idx, a.shape)
x += np.random.randint(-eta,eta,k)
y += np.random.randint(-eta,eta,k)
cond = (x<0) | (x>=N) | (y<0) | (y>=N)
x_ = np.delete(x, np.where(cond))
y_ = np.delete(y, np.where(cond))
a[x_,y_]+=F
a = gaussian_filter(a,sigma =sigma)
if np.random.random()>p0 and inverse_random:
a = a.max()-a
Mx,My = np.unravel_index(o,(mu,mu))
Q[Mx*N:(Mx+1)*N,My*N:(My+1)*N] = a
fig,ax = plt.subplots(1,1,figsize = (20,20))
plt.imshow(Q, interpolation='nearest')
plt.axis('off')
|
[
"matplotlib.pylab.axis",
"matplotlib.pylab.subplots",
"numpy.ones",
"numpy.where",
"numpy.random.random",
"numpy.zeros",
"matplotlib.pylab.imshow",
"numpy.random.randint",
"numpy.unravel_index",
"scipy.ndimage.gaussian_filter"
] |
[((197, 223), 'numpy.zeros', 'np.zeros', (['(N * mu, N * mu)'], {}), '((N * mu, N * mu))\n', (205, 223), True, 'import numpy as np\n'), ((908, 944), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (920, 944), True, 'import matplotlib.pylab as plt\n'), ((944, 982), 'matplotlib.pylab.imshow', 'plt.imshow', (['Q'], {'interpolation': '"""nearest"""'}), "(Q, interpolation='nearest')\n", (954, 982), True, 'import matplotlib.pylab as plt\n'), ((983, 998), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (991, 998), True, 'import matplotlib.pylab as plt\n'), ((278, 293), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (285, 293), True, 'import numpy as np\n'), ((831, 860), 'numpy.unravel_index', 'np.unravel_index', (['o', '(mu, mu)'], {}), '(o, (mu, mu))\n', (847, 860), True, 'import numpy as np\n'), ((420, 457), 'numpy.unravel_index', 'np.unravel_index', (['linear_idx', 'a.shape'], {}), '(linear_idx, a.shape)\n', (436, 457), True, 'import numpy as np\n'), ((472, 503), 'numpy.random.randint', 'np.random.randint', (['(-eta)', 'eta', 'k'], {}), '(-eta, eta, k)\n', (489, 503), True, 'import numpy as np\n'), ((515, 546), 'numpy.random.randint', 'np.random.randint', (['(-eta)', 'eta', 'k'], {}), '(-eta, eta, k)\n', (532, 546), True, 'import numpy as np\n'), ((708, 739), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['a'], {'sigma': 'sigma'}), '(a, sigma=sigma)\n', (723, 739), False, 'from scipy.ndimage import gaussian_filter\n'), ((618, 632), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (626, 632), True, 'import numpy as np\n'), ((660, 674), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (668, 674), True, 'import numpy as np\n'), ((751, 769), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (767, 769), True, 'import numpy as np\n')]
|
from django.db import models
# from cms.models.fields import PlaceholderField
from cms.models import CMSPlugin
# from filer.fields.image import FilerImageField
from arkestra_utilities.output_libraries.dates import nice_date
# from arkestra_utilities.models import ArkestraGenericModel
from arkestra_utilities.generic_models import ArkestraGenericPluginOptions, ArkestraGenericModel
from arkestra_utilities.mixins import URLModelMixin
from arkestra_utilities.settings import PLUGIN_HEADING_LEVELS, PLUGIN_HEADING_LEVEL_DEFAULT
from contacts_and_people.models import Entity, Person #, default_entity_id
# from links.models import ExternalLink
from managers import VacancyManager, StudentshipManager
class CommonVacancyAndStudentshipInformation(ArkestraGenericModel, URLModelMixin):
class Meta:
abstract = True
ordering = ['-closing_date']
closing_date = models.DateField()
description = models.TextField(null=True, blank=True,
help_text="No longer used")
def link_to_more(self):
return self.get_hosted_by.get_related_info_page_url("vacancies-and-studentships")
@property
def get_when(self):
"""
get_when provides a human-readable attribute under which items can be grouped.
Usually, this is an easily-readble rendering of the date (e.g. "April 2010") but it can also be "Top news", for items to be given special prominence.
"""
try:
# The render function of CMSNewsAndEventsPlugin can set a temporary sticky attribute for Top news items
if self.sticky:
return "Top news"
except AttributeError:
pass
date_format = "F Y"
get_when = nice_date(self.closing_date, date_format)
return get_when
@property
def date(self):
return self.closing_date
class Vacancy(CommonVacancyAndStudentshipInformation):
url_path = "vacancy"
job_number = models.CharField(max_length=9)
salary = models.CharField(blank=True, max_length=255, null=True,
help_text=u"Please include currency symbol")
objects = VacancyManager()
class Meta:
verbose_name_plural = "Vacancies"
class Studentship(CommonVacancyAndStudentshipInformation):
url_path = "studentship"
supervisors = models.ManyToManyField(Person, null=True, blank=True,
related_name="%(class)s_people")
objects = StudentshipManager()
class VacanciesPlugin(CMSPlugin, ArkestraGenericPluginOptions):
DISPLAY = (
(u"vacancies & studentships", u"Vacancies and studentships"),
(u"vacancies", u"Vacancies only"),
(u"studentships", u"Studentships only"),
)
display = models.CharField(max_length=25,choices=DISPLAY, default="vacancies & studentships")
# entity = models.ForeignKey(Entity, null=True, blank=True,
# help_text="Leave blank for autoselect", related_name="%(class)s_plugin")
vacancies_heading_text = models.CharField(max_length=25, default="Vacancies")
studentships_heading_text = models.CharField(max_length=25, default="Studentships")
|
[
"managers.StudentshipManager",
"django.db.models.DateField",
"django.db.models.TextField",
"arkestra_utilities.output_libraries.dates.nice_date",
"django.db.models.ManyToManyField",
"managers.VacancyManager",
"django.db.models.CharField"
] |
[((886, 904), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (902, 904), False, 'from django.db import models\n'), ((928, 995), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)', 'help_text': '"""No longer used"""'}), "(null=True, blank=True, help_text='No longer used')\n", (944, 995), False, 'from django.db import models\n'), ((1972, 2002), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(9)'}), '(max_length=9)\n', (1988, 2002), False, 'from django.db import models\n'), ((2016, 2121), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)', 'help_text': 'u"""Please include currency symbol"""'}), "(blank=True, max_length=255, null=True, help_text=\n u'Please include currency symbol')\n", (2032, 2121), False, 'from django.db import models\n'), ((2144, 2160), 'managers.VacancyManager', 'VacancyManager', ([], {}), '()\n', (2158, 2160), False, 'from managers import VacancyManager, StudentshipManager\n'), ((2341, 2432), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Person'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""%(class)s_people"""'}), "(Person, null=True, blank=True, related_name=\n '%(class)s_people')\n", (2363, 2432), False, 'from django.db import models\n'), ((2451, 2471), 'managers.StudentshipManager', 'StudentshipManager', ([], {}), '()\n', (2469, 2471), False, 'from managers import VacancyManager, StudentshipManager\n'), ((2736, 2825), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'choices': 'DISPLAY', 'default': '"""vacancies & studentships"""'}), "(max_length=25, choices=DISPLAY, default=\n 'vacancies & studentships')\n", (2752, 2825), False, 'from django.db import models\n'), ((2997, 3049), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'default': '"""Vacancies"""'}), "(max_length=25, default='Vacancies')\n", (3013, 3049), False, 'from django.db import models\n'), ((3082, 3137), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'default': '"""Studentships"""'}), "(max_length=25, default='Studentships')\n", (3098, 3137), False, 'from django.db import models\n'), ((1734, 1775), 'arkestra_utilities.output_libraries.dates.nice_date', 'nice_date', (['self.closing_date', 'date_format'], {}), '(self.closing_date, date_format)\n', (1743, 1775), False, 'from arkestra_utilities.output_libraries.dates import nice_date\n')]
|
import json
from django.utils.http import urlencode
import mock
import requests
from olympia.amo.tests import AMOPaths, TestCase
from olympia.amo.urlresolvers import reverse
from olympia.files.models import FileUpload
from olympia.github.tests.test_github import (
GithubBaseTestCase, example_pull_request)
class TestGithubView(AMOPaths, GithubBaseTestCase, TestCase):
def setUp(self):
super(TestGithubView, self).setUp()
self.url = reverse('github.validate')
def post(self, data, header=None, data_type=None):
data_type = data_type or 'application/json'
if (data_type == 'application/json'):
data = json.dumps(data)
elif (data_type == 'application/x-www-form-urlencoded'):
data = urlencode({'payload': json.dumps(data)})
return self.client.post(
self.url, data=data,
content_type=data_type,
HTTP_X_GITHUB_EVENT=header or 'pull_request'
)
def complete(self):
pending, success = self.requests.post.call_args_list
self.check_status(
'pending',
call=pending,
url='https://api.github.com/repos/org/repo/statuses/abc'
)
self.check_status(
'success',
call=success,
url='https://api.github.com/repos/org/repo/statuses/abc',
target_url=mock.ANY
)
assert FileUpload.objects.get()
def test_not_pull_request(self):
assert self.post({}, header='meh').status_code == 200
def test_bad_pull_request(self):
assert self.post({'pull_request': {}}).status_code == 422
def setup_xpi(self):
self.response = mock.Mock()
self.response.content = open(self.xpi_path('github-repo')).read()
self.requests.get.return_value = self.response
def test_pending_fails(self):
self.setup_xpi()
post = mock.Mock()
# GitHub returns a 404 when the addons-robot account does not
# have write access.
post.status_code = 404
post.raise_for_status.side_effect = requests.HTTPError(response=post)
self.requests.post.return_value = post
res = self.post(example_pull_request)
assert 'write access' in json.loads(res.content)['details']
def test_good_not_json(self):
self.setup_xpi()
assert self.post(
example_pull_request,
data_type='application/x-www-form-urlencoded').status_code == 201
self.complete()
def test_good(self):
self.setup_xpi()
assert self.post(example_pull_request).status_code == 201
self.complete()
|
[
"json.loads",
"olympia.files.models.FileUpload.objects.get",
"mock.Mock",
"json.dumps",
"requests.HTTPError",
"olympia.amo.urlresolvers.reverse"
] |
[((463, 489), 'olympia.amo.urlresolvers.reverse', 'reverse', (['"""github.validate"""'], {}), "('github.validate')\n", (470, 489), False, 'from olympia.amo.urlresolvers import reverse\n'), ((1419, 1443), 'olympia.files.models.FileUpload.objects.get', 'FileUpload.objects.get', ([], {}), '()\n', (1441, 1443), False, 'from olympia.files.models import FileUpload\n'), ((1698, 1709), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1707, 1709), False, 'import mock\n'), ((1915, 1926), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1924, 1926), False, 'import mock\n'), ((2101, 2134), 'requests.HTTPError', 'requests.HTTPError', ([], {'response': 'post'}), '(response=post)\n', (2119, 2134), False, 'import requests\n'), ((663, 679), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (673, 679), False, 'import json\n'), ((2262, 2285), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (2272, 2285), False, 'import json\n'), ((786, 802), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (796, 802), False, 'import json\n')]
|
import requests
import logging
from .auth import Auth
domain = "https://ptx.transportdata.tw/MOTC/v2/Rail/THSR/"
default_limit_count = 20
logger = logging.getLogger('flask.app')
auth = Auth()
def get_station():
"""GET /v2/Rail/THSR/Station 取得車站基本資料
Returns:
[dict] -- 車站基本資料
"""
action = "Station"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_station_id(station_names):
"""取得高鐵車站對應id
Arguments:
station_names {[list]} -- 想查詢的車站名稱
Returns:
[dictionary] -- key: station name, value: station id
"""
all_stations = get_station()
matchs = {}
for station_name in station_names:
match = None
try:
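            # first station whose Chinese name contains the queried name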
match = next(filter(lambda x:
station_name.strip() in x['StationName']['Zh_tw'].strip(), all_stations))
except StopIteration:
pass
if match:
matchs[station_name.strip()] = match['StationID']
return matchs
def get_fare(departure, destination):
"""GET /v2/Rail/THSR/ODFare/{OriginStationID}/to/{DestinationStationID}
    Get fare data between the specified origin and destination stations
    Arguments:
        departure {str} -- origin station id
        destination {str} -- destination station id
"""
if not departure:
return {}
if not destination:
return {}
action = "ODFare/{}/to/{}".format(departure, destination)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_timetable(no=''):
"""GET /v2/Rail/THSR/GeneralTimetable
    Get the regular timetable data for all train services
    Arguments:
        no {str} -- specific train number (optional)
"""
action = "GeneralTimetable"
if no:
action += "/TrainNo/{}".format(no)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_seat(id):
"""GET /v2/Rail/THSR/AvailableSeatStatusList/{StationID}
    Get the real-time remaining reserved-seat information board for the specified station
"""
if not id:
return {}
action = "AvailableSeatStatusList/{}".format(id)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
else:
logger.info(r)
return {}
def get_news():
"""GET /v2/Rail/THSR/News
    Get the latest THSR news
"""
action = "News"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_alert():
"""GET /v2/Rail/THSR/AlertInfo
    Get real-time service disruption (alert) events
"""
action = "AlertInfo"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def __get_odata_parameter(top=0, skip=0, format="", orderby="", filter=""):
"""統一整理odata的固定參數指定回傳
Keyword Arguments:
top {int} -- 回傳幾筆 (default: {0})
skip {int} -- 跳過前面幾筆 (default: {0})
format {str} -- 回傳格式 json or xml (default: {""})
orderby {str} -- 排列順序, 傳入response欄位名稱 (default: {""})
filter {str} -- 篩選條件 (default: {""})
Returns:
[type] -- odata parameter的querystring
"""
param = {'top': top, 'skip': skip, 'orderby': orderby,
'format': format, 'filter': filter}
result = ""
if top > 0:
result += "&$top={top}"
if skip > 0:
result += "&$skip={skip}"
if orderby:
result += "&$orderby={orderby}"
if format:
result += "&$format={format}"
if filter:
result += "&$filter={filter}"
return result.format(**param)
if __name__ == '__main__':
pass
|
[
"logging.getLogger",
"requests.get"
] |
[((149, 179), 'logging.getLogger', 'logging.getLogger', (['"""flask.app"""'], {}), "('flask.app')\n", (166, 179), False, 'import logging\n'), ((459, 493), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (471, 493), False, 'import requests\n'), ((1704, 1738), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1716, 1738), False, 'import requests\n'), ((2178, 2212), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2190, 2212), False, 'import requests\n'), ((2633, 2667), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2645, 2667), False, 'import requests\n'), ((3007, 3041), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3019, 3041), False, 'import requests\n'), ((3359, 3393), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3371, 3393), False, 'import requests\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bayes_layers import VariationalLinearCertainActivations, VariationalLinearReLU
from .variables import GaussianVar
class MLP(nn.Module):
def __init__(self, x_dim, y_dim, hidden_size=None):
super(MLP, self).__init__()
self.sizes = [x_dim]
if hidden_size is not None:
self.sizes += hidden_size
self.sizes += [y_dim]
self.make_layers()
def make_layers(self):
# layers = [VariationalLinearCertainActivations(self.sizes[0], self.sizes[1])]
# for in_dim, out_dim in zip(self.sizes[1:-1], self.sizes[2:]):
# print('in_dim:{}, out_dim:{}'.format(in_dim, out_dim))
# layers.append(VariationalLinearReLU(in_dim, out_dim))
# self.layers = nn.Sequential(*layers)
layers = [VariationalLinearCertainActivations(self.sizes[0], self.sizes[1])]
for in_dim, out_dim in zip(self.sizes[1:-1], self.sizes[2:]):
# print('in_dim:{}, out_dim:{}'.format(in_dim, out_dim))
layers.append(VariationalLinearReLU(in_dim, out_dim))
self.layers = nn.Sequential(*layers)
# self.layers = nn.Sequential(
# VariationalLinearCertainActivations(1, 128),
# VariationalLinearReLU(128, 128),
# VariationalLinearReLU(128, 2)
# )
#
# self.layers = nn.Sequential(VariationalLinearCertainActivations(self.sizes[0], self.sizes[1]))
# for in_dim, out_dim in zip(self.sizes[1:-1], self.sizes[2:]):
# print('in_dim:{}, out_dim:{}'.format(in_dim, out_dim))
# self.layers.add_module('{}-{}'.format(in_dim, out_dim), VariationalLinearReLU(in_dim, out_dim))
def forward(self, input):
return self.layers(input)
def surprise(self):
all_surprise = 0
for layer in self.layers:
all_surprise += layer.surprise()
return all_surprise
def forward_mcmc(self, input, n_samples=None, average=False):
h = self.layers[0].forward_mcmc(input)
for layer in self.layers[1:]:
h = layer.forward_mcmc(F.relu(h), n_samples)
return h
class AdaptedMLP(object):
def __init__(self, mlp, adapter, device=torch.device('cpu')):
self.mlp = mlp.to(device)
self.__dict__.update(mlp.__dict__)
self.device = device
self.make_adapters(adapter)
def make_adapters(self, adapter):
self.adapter = {}
for ad in ['in', 'out']:
self.adapter[ad] = {
'scale': torch.tensor(adapter[ad]['scale']).to(self.device),
'shift': torch.tensor(adapter[ad]['shift']).to(self.device)
}
def __call__(self, input):
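        # Rescale/shift the input into the network's units, run the variational
        # MLP, then map the predicted mean and covariance back to output units.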
x_ad = self.adapter['in']['scale'] * input + self.adapter['in']['shift']
self.pre_adapt = self.mlp(x_ad)
mean = self.adapter['out']['scale'] * self.pre_adapt.mean + self.adapter['out']['shift']
cov = self.adapter['out']['scale'].reshape(-1, 1) * self.adapter['out']['scale'].reshape(1, -1) * self.pre_adapt.var
return GaussianVar(mean, cov)
def __repr__(self):
return "AdaptedMLP(\n" + self.mlp.__repr__() + ")"
def surprise(self):
return self.mlp.surprise()
def parameters(self):
return self.mlp.parameters()
def mcmc(self, input, n_samples=None):
x_ad = self.adapter['in']['scale'] * input + self.adapter['in']['shift']
self.pre_adapt = self.mlp.forward_mcmc(x_ad, n_samples)
mean = self.adapter['out']['scale'] * self.pre_adapt + self.adapter['out']['shift']
return mean
|
[
"torch.nn.Sequential",
"torch.tensor",
"torch.nn.functional.relu",
"torch.device"
] |
[((1149, 1171), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1162, 1171), True, 'import torch.nn as nn\n'), ((2273, 2292), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2285, 2292), False, 'import torch\n'), ((2149, 2158), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2155, 2158), True, 'import torch.nn.functional as F\n'), ((2594, 2628), 'torch.tensor', 'torch.tensor', (["adapter[ad]['scale']"], {}), "(adapter[ad]['scale'])\n", (2606, 2628), False, 'import torch\n'), ((2671, 2705), 'torch.tensor', 'torch.tensor', (["adapter[ad]['shift']"], {}), "(adapter[ad]['shift'])\n", (2683, 2705), False, 'import torch\n')]
|
from demos.setup import np, plt
from compecon import BasisChebyshev, BasisSpline
from compecon.tools import nodeunif
__author__ = 'Randall'
# DEMAPP06 Chebychev and cubic spline derivative approximation errors
# Function to be approximated
def f(x):
g = np.zeros((3, x.size))
g[0], g[1], g[2] = np.exp(-x), -np.exp(-x), np.exp(-x)
return g
# Set degree of approximation and endpoints of approximation interval
a = -1 # left endpoint
b = 1 # right endpoint
n = 10 # order of interpolatioin
# Construct refined uniform grid for error ploting
x = nodeunif(1001, a, b)
# Compute actual and fitted values on grid
y, d, s = f(x) # actual
# Construct and evaluate Chebychev interpolant
C = BasisChebyshev(n, a, b, f=f) # chose basis functions
yc = C(x) # values
dc = C(x, 1) # first derivative
sc = C(x, 2) # second derivative
# Construct and evaluate cubic spline interpolant
S = BasisSpline(n, a, b, f=f) # chose basis functions
ys = S(x) # values
ds = S(x, 1) # first derivative
ss = S(x, 2) # second derivative
# Plot function approximation error
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(x, y - yc[0])
plt.ylabel('Chebychev')
plt.title('Function Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, y - ys[0])
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot first derivative approximation error
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(x, d - dc[0])
plt.ylabel('Chebychev')
plt.title('First Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, d - ds[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot second derivative approximation error
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(x, s - sc[0])
plt.ylabel('Chebychev')
plt.title('Second Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, s - ss[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
plt.show()
|
[
"demos.setup.plt.plot",
"compecon.BasisChebyshev",
"demos.setup.np.zeros",
"demos.setup.plt.xlabel",
"demos.setup.plt.title",
"compecon.BasisSpline",
"demos.setup.plt.subplot",
"demos.setup.plt.ylabel",
"demos.setup.plt.figure",
"demos.setup.plt.show",
"compecon.tools.nodeunif",
"demos.setup.np.exp"
] |
[((649, 669), 'compecon.tools.nodeunif', 'nodeunif', (['(1001)', 'a', 'b'], {}), '(1001, a, b)\n', (657, 669), False, 'from compecon.tools import nodeunif\n'), ((810, 838), 'compecon.BasisChebyshev', 'BasisChebyshev', (['n', 'a', 'b'], {'f': 'f'}), '(n, a, b, f=f)\n', (824, 838), False, 'from compecon import BasisChebyshev, BasisSpline\n'), ((1078, 1103), 'compecon.BasisSpline', 'BasisSpline', (['n', 'a', 'b'], {'f': 'f'}), '(n, a, b, f=f)\n', (1089, 1103), False, 'from compecon import BasisChebyshev, BasisSpline\n'), ((1327, 1339), 'demos.setup.plt.figure', 'plt.figure', ([], {}), '()\n', (1337, 1339), False, 'from demos.setup import np, plt\n'), ((1362, 1384), 'demos.setup.plt.plot', 'plt.plot', (['x', '(y - yc[0])'], {}), '(x, y - yc[0])\n', (1370, 1384), False, 'from demos.setup import np, plt\n'), ((1385, 1408), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Chebychev"""'], {}), "('Chebychev')\n", (1395, 1408), False, 'from demos.setup import np, plt\n'), ((1409, 1450), 'demos.setup.plt.title', 'plt.title', (['"""Function Approximation Error"""'], {}), "('Function Approximation Error')\n", (1418, 1450), False, 'from demos.setup import np, plt\n'), ((1452, 1472), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1463, 1472), False, 'from demos.setup import np, plt\n'), ((1473, 1495), 'demos.setup.plt.plot', 'plt.plot', (['x', '(y - ys[0])'], {}), '(x, y - ys[0])\n', (1481, 1495), False, 'from demos.setup import np, plt\n'), ((1496, 1522), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Cubic Spline"""'], {}), "('Cubic Spline')\n", (1506, 1522), False, 'from demos.setup import np, plt\n'), ((1523, 1538), 'demos.setup.plt.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1533, 1538), False, 'from demos.setup import np, plt\n'), ((1585, 1597), 'demos.setup.plt.figure', 'plt.figure', ([], {}), '()\n', (1595, 1597), False, 'from demos.setup import np, plt\n'), ((1620, 1642), 'demos.setup.plt.plot', 'plt.plot', (['x', '(d - dc[0])'], {}), '(x, d - dc[0])\n', (1628, 1642), False, 'from demos.setup import np, plt\n'), ((1643, 1666), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Chebychev"""'], {}), "('Chebychev')\n", (1653, 1666), False, 'from demos.setup import np, plt\n'), ((1667, 1716), 'demos.setup.plt.title', 'plt.title', (['"""First Derivative Approximation Error"""'], {}), "('First Derivative Approximation Error')\n", (1676, 1716), False, 'from demos.setup import np, plt\n'), ((1718, 1738), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1729, 1738), False, 'from demos.setup import np, plt\n'), ((1739, 1766), 'demos.setup.plt.plot', 'plt.plot', (['x', '(d - ds[0])', '"""m"""'], {}), "(x, d - ds[0], 'm')\n", (1747, 1766), False, 'from demos.setup import np, plt\n'), ((1767, 1793), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Cubic Spline"""'], {}), "('Cubic Spline')\n", (1777, 1793), False, 'from demos.setup import np, plt\n'), ((1794, 1809), 'demos.setup.plt.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1804, 1809), False, 'from demos.setup import np, plt\n'), ((1856, 1868), 'demos.setup.plt.figure', 'plt.figure', ([], {}), '()\n', (1866, 1868), False, 'from demos.setup import np, plt\n'), ((1891, 1913), 'demos.setup.plt.plot', 'plt.plot', (['x', '(s - sc[0])'], {}), '(x, s - sc[0])\n', (1899, 1913), False, 'from demos.setup import np, plt\n'), ((1914, 1937), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Chebychev"""'], {}), "('Chebychev')\n", (1924, 1937), False, 'from demos.setup import np, plt\n'), 
((1938, 1988), 'demos.setup.plt.title', 'plt.title', (['"""Second Derivative Approximation Error"""'], {}), "('Second Derivative Approximation Error')\n", (1947, 1988), False, 'from demos.setup import np, plt\n'), ((1990, 2010), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2001, 2010), False, 'from demos.setup import np, plt\n'), ((2011, 2038), 'demos.setup.plt.plot', 'plt.plot', (['x', '(s - ss[0])', '"""m"""'], {}), "(x, s - ss[0], 'm')\n", (2019, 2038), False, 'from demos.setup import np, plt\n'), ((2039, 2065), 'demos.setup.plt.ylabel', 'plt.ylabel', (['"""Cubic Spline"""'], {}), "('Cubic Spline')\n", (2049, 2065), False, 'from demos.setup import np, plt\n'), ((2066, 2081), 'demos.setup.plt.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2076, 2081), False, 'from demos.setup import np, plt\n'), ((2083, 2093), 'demos.setup.plt.show', 'plt.show', ([], {}), '()\n', (2091, 2093), False, 'from demos.setup import np, plt\n'), ((264, 285), 'demos.setup.np.zeros', 'np.zeros', (['(3, x.size)'], {}), '((3, x.size))\n', (272, 285), False, 'from demos.setup import np, plt\n'), ((1340, 1360), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1351, 1360), False, 'from demos.setup import np, plt\n'), ((1598, 1618), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1609, 1618), False, 'from demos.setup import np, plt\n'), ((1869, 1889), 'demos.setup.plt.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1880, 1889), False, 'from demos.setup import np, plt\n'), ((309, 319), 'demos.setup.np.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (315, 319), False, 'from demos.setup import np, plt\n'), ((334, 344), 'demos.setup.np.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (340, 344), False, 'from demos.setup import np, plt\n'), ((322, 332), 'demos.setup.np.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (328, 332), False, 'from demos.setup import np, plt\n')]
|
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
import tensorflow as tf
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').disabled = True
class SimpleDqnNpcV3:
"Klasa implementująca agenta DQN opartego o prostą sieć neuronową"
def __init__(self, num_of_inputs, num_of_outputs):
"""
        num_of_inputs - length of the vector fed to the neural network as input
        num_of_outputs - number of outputs of the neural network
"""
self._num_of_inputs = num_of_inputs
self._num_of_outputs = num_of_outputs
self._exploration_rate = 1.0
self._exploration_rate_min = 0.1
self._exploration_rate_decay = 0.997
self._discout_rate = 0.95
self.memory = deque(maxlen=4096)
self._init_model()
def _init_model(self):
"""
        Initializes the neural network model.
        We chose what we consider the simplest parameters and shape.
"""
self._model = Sequential()
self._model.add(Dense(5 * self._num_of_inputs, input_dim=self._num_of_inputs, activation='relu'))
self._model.add(Dropout(0.15))
self._model.add(Dense(4 * self._num_of_inputs, activation='sigmoid'))
self._model.add(Dropout(0.15))
self._model.add(Dense(self._num_of_outputs, activation='linear'))
self._model.compile(optimizer=Adam(), loss='mean_squared_error')
def act(self, state):
"""Przewiduje i zwraca akcję, którą należy wykonać"""
if np.random.rand() <= self._exploration_rate:
return random.randrange(self._num_of_outputs)
act_values = self._model.predict(state)
return np.argmax(act_values[0])
def retain(self, current_state, taken_action, gained_reward, next_state, is_done):
"""Zapisuje dyn przypadku w pamięci agenta"""
self.memory.append((current_state, taken_action, gained_reward, next_state, is_done))
def replay(self, batch_size):
"""
        Retrains the neural network on a random sample drawn from its memory
        batch_size - size of the memory sample
"""
batch = random.sample(self.memory, batch_size)
for current_state, taken_action, gained_reward, next_state, is_done in batch:
next_act_best_profit = gained_reward
if not is_done:
future_act_profits = self._model.predict(next_state)
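                # Bellman target: immediate reward plus discounted best predicted future value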
next_act_best_profit = gained_reward + self._discout_rate * np.amax(future_act_profits[0])
current_act_profits = self._model.predict(current_state)
            current_act_profits[0][taken_action] = next_act_best_profit
with tf.device('/device:GPU:0'):
self._model.fit(x=current_state, y=current_act_profits, epochs=1, verbose=0)
if self._exploration_rate > self._exploration_rate_min:
self._exploration_rate *= self._exploration_rate_decay
def load(self, model_path):
"""Wczytuje model z pamięci"""
self._model.load_weights(model_path)
def save(self, model_path):
"""Zapisuje modele do pamięci"""
self._model.save_weights(model_path)
NUM_OF_AGENTS = 4
NUM_OF_EPISODES = 75
FRAMES_PER_EPISODE = 1000
BATCH_SIZE = 16
GAME_ID = "LunarLander-v2"
if __name__ == "__main__":
with tf.device('/device:CPU:0'):
game = gym.make(GAME_ID)
num_of_actions = game.action_space.n
observation_size = game.observation_space.shape[0]
npc = SimpleDqnNpcV3(observation_size, num_of_actions)
is_done = False
avgs = []
for model in range(NUM_OF_AGENTS):
scores = []
for episode in range(NUM_OF_EPISODES):
score = 0
current_state = np.reshape(game.reset(), [1, observation_size])
for frame in range(FRAMES_PER_EPISODE):
# game.render()
action = npc.act(current_state)
new_state, gained_reward, is_done, info = game.step(action)
new_state = np.reshape(new_state, [1, observation_size])
npc.retain(current_state, action, gained_reward, new_state, is_done)
score += gained_reward
current_state = new_state
if len(npc.memory) > BATCH_SIZE:
npc.replay(BATCH_SIZE)
if is_done:
print("episode: {0}/{1}; result: {2}; e: {3} used memory: {4}/{5}; time: {5}"
.format(episode, NUM_OF_EPISODES, score, npc._exploration_rate, len(npc.memory), npc.memory.maxlen, frame))
break
scores.append(score)
if not is_done:
print("episode: {0}/{1}; result: {2}; used memory: {3}/{4}; time: {5}"
.format(episode, NUM_OF_EPISODES, score, len(npc.memory), npc.memory.maxlen, frame))
npc.save("evo_dqn_" + str(model) + ".h5")
avgs.append(sum(scores) / len(scores))
for i, avg in enumerate(avgs):
print("Model {} has avarage: {}".format(i, avg))
print("Overall avg: {}".format(sum(avgs) / len(avgs)))
|
[
"logging.getLogger",
"tensorflow.device",
"random.sample",
"keras.optimizers.Adam",
"collections.deque",
"numpy.random.rand",
"numpy.reshape",
"random.randrange",
"numpy.amax",
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.Dense",
"keras.layers.Dropout",
"gym.make"
] |
[((275, 306), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (292, 306), False, 'import logging\n'), ((903, 921), 'collections.deque', 'deque', ([], {'maxlen': '(4096)'}), '(maxlen=4096)\n', (908, 921), False, 'from collections import deque\n'), ((1141, 1153), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1151, 1153), False, 'from keras.models import Sequential\n'), ((1829, 1853), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (1838, 1853), True, 'import numpy as np\n'), ((2283, 2321), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2296, 2321), False, 'import random\n'), ((3493, 3519), 'tensorflow.device', 'tf.device', (['"""/device:CPU:0"""'], {}), "('/device:CPU:0')\n", (3502, 3519), True, 'import tensorflow as tf\n'), ((3536, 3553), 'gym.make', 'gym.make', (['GAME_ID'], {}), '(GAME_ID)\n', (3544, 3553), False, 'import gym\n'), ((1178, 1263), 'keras.layers.Dense', 'Dense', (['(5 * self._num_of_inputs)'], {'input_dim': 'self._num_of_inputs', 'activation': '"""relu"""'}), "(5 * self._num_of_inputs, input_dim=self._num_of_inputs, activation='relu'\n )\n", (1183, 1263), False, 'from keras.layers import Dense, Dropout\n'), ((1284, 1297), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (1291, 1297), False, 'from keras.layers import Dense, Dropout\n'), ((1323, 1375), 'keras.layers.Dense', 'Dense', (['(4 * self._num_of_inputs)'], {'activation': '"""sigmoid"""'}), "(4 * self._num_of_inputs, activation='sigmoid')\n", (1328, 1375), False, 'from keras.layers import Dense, Dropout\n'), ((1401, 1414), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (1408, 1414), False, 'from keras.layers import Dense, Dropout\n'), ((1440, 1488), 'keras.layers.Dense', 'Dense', (['self._num_of_outputs'], {'activation': '"""linear"""'}), "(self._num_of_outputs, activation='linear')\n", (1445, 1488), False, 'from keras.layers import Dense, Dropout\n'), ((1664, 1680), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1678, 1680), True, 'import numpy as np\n'), ((1727, 1765), 'random.randrange', 'random.randrange', (['self._num_of_outputs'], {}), '(self._num_of_outputs)\n', (1743, 1765), False, 'import random\n'), ((1528, 1534), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (1532, 1534), False, 'from keras.optimizers import Adam\n'), ((2856, 2882), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (2865, 2882), True, 'import tensorflow as tf\n'), ((4243, 4287), 'numpy.reshape', 'np.reshape', (['new_state', '[1, observation_size]'], {}), '(new_state, [1, observation_size])\n', (4253, 4287), True, 'import numpy as np\n'), ((2630, 2660), 'numpy.amax', 'np.amax', (['future_act_profits[0]'], {}), '(future_act_profits[0])\n', (2637, 2660), True, 'import numpy as np\n')]
|
from .core import ACSDataset
import collections
__all__ = ["PerCapitaIncome"]
class PerCapitaIncome(ACSDataset):
"""
PER CAPITA INCOME IN THE PAST 12 MONTHS (IN 2018 INFLATION-ADJUSTED DOLLARS)
"""
AGGREGATION = None
UNIVERSE = "total population"
TABLE_NAME = "B19301"
RAW_FIELDS = collections.OrderedDict({"001": "per_capita_income"})
|
[
"collections.OrderedDict"
] |
[((316, 369), 'collections.OrderedDict', 'collections.OrderedDict', (["{'001': 'per_capita_income'}"], {}), "({'001': 'per_capita_income'})\n", (339, 369), False, 'import collections\n')]
|
"""
Path to CNS-related files.
Most paths are defined by dictionaries that gather several related
paths. Here, instead of defining the dictionaries with static paths, we
have functions that create those dict-containing paths dynamically. The
default values are defined by:
- axis
- tensors
- translation_vectors
- water_box
But you can re-use the functions to create new dictionaries with updated
paths. This is useful for those cases when the `cns/` folder is moved
to a different folder.
"""
from pathlib import Path
from haddock import toppar_path
# exact file names as present in the cns/ scripts folder
PARAMETERS_FILE = "haddock.param"
TOPOLOGY_FILE = "haddock.top"
LINK_FILE = "protein-allhdg5-4-noter.link"
SCATTER_LIB = "scatter.lib"
INITIAL_POSITIONS_DIR = "initial_positions"
# default prepared paths
parameters_file = Path(toppar_path, PARAMETERS_FILE)
topology_file = Path(toppar_path, TOPOLOGY_FILE)
link_file = Path(toppar_path, LINK_FILE)
scatter_lib = Path(toppar_path, SCATTER_LIB)
def get_translation_vectors(path):
"""
Generate paths for translation vectors.
Parameters
----------
path : pathlib.Path
If absolute, paths will be absolute, if relative paths will be
relative. Adds the INITIAL_POSITIONS_DIR path before the file
name.
"""
translation_vectors = {}
for i in range(51):
_s = f'trans_vector_{i}'
_p = Path(path, INITIAL_POSITIONS_DIR, _s)
translation_vectors[_s] = _p
return translation_vectors
def get_tensors(path):
"""Generate paths for tensors."""
tensors = {
"tensor_psf": Path(path, "tensor.psf"),
"tensor_pdb": Path(path, "tensor.pdb"),
"tensor_para_psf": Path(path, "tensor_para.psf"),
"tensor_para_pdb": Path(path, "tensor_para.pdb"),
"tensor_dani_psf": Path(path, "tensor_dani.psf"),
"tensor_dani_pdb": Path(path, "tensor_dani.pdb"),
}
return tensors
def get_axis(path):
"""Generate paths for axis."""
axis = {
"top_axis": Path(path, "top_axis.pro"),
"par_axis": Path(path, "par_axis.pro"),
"top_axis_dani": Path(path, "top_axis_dani.pro"),
}
return axis
def get_water_box(path):
"""Generate paths for water box."""
water_box = {
"boxtyp20": Path(path, "boxtyp20.pdb"),
}
return water_box
axis = get_axis(toppar_path)
tensors = get_tensors(toppar_path)
translation_vectors = get_translation_vectors(toppar_path)
water_box = get_water_box(toppar_path)
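# A minimal usage sketch (the path below is hypothetical, purely for illustration):
# if the cns/ folder is relocated, the path dictionaries can simply be rebuilt
# against the new location with the factory functions above, e.g.
#     new_cns = Path("/opt/haddock/cns")
#     axis = get_axis(new_cns)
#     tensors = get_tensors(new_cns)
#     translation_vectors = get_translation_vectors(new_cns)
#     water_box = get_water_box(new_cns)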
|
[
"pathlib.Path"
] |
[((837, 871), 'pathlib.Path', 'Path', (['toppar_path', 'PARAMETERS_FILE'], {}), '(toppar_path, PARAMETERS_FILE)\n', (841, 871), False, 'from pathlib import Path\n'), ((888, 920), 'pathlib.Path', 'Path', (['toppar_path', 'TOPOLOGY_FILE'], {}), '(toppar_path, TOPOLOGY_FILE)\n', (892, 920), False, 'from pathlib import Path\n'), ((933, 961), 'pathlib.Path', 'Path', (['toppar_path', 'LINK_FILE'], {}), '(toppar_path, LINK_FILE)\n', (937, 961), False, 'from pathlib import Path\n'), ((976, 1006), 'pathlib.Path', 'Path', (['toppar_path', 'SCATTER_LIB'], {}), '(toppar_path, SCATTER_LIB)\n', (980, 1006), False, 'from pathlib import Path\n'), ((1413, 1450), 'pathlib.Path', 'Path', (['path', 'INITIAL_POSITIONS_DIR', '_s'], {}), '(path, INITIAL_POSITIONS_DIR, _s)\n', (1417, 1450), False, 'from pathlib import Path\n'), ((1621, 1645), 'pathlib.Path', 'Path', (['path', '"""tensor.psf"""'], {}), "(path, 'tensor.psf')\n", (1625, 1645), False, 'from pathlib import Path\n'), ((1669, 1693), 'pathlib.Path', 'Path', (['path', '"""tensor.pdb"""'], {}), "(path, 'tensor.pdb')\n", (1673, 1693), False, 'from pathlib import Path\n'), ((1722, 1751), 'pathlib.Path', 'Path', (['path', '"""tensor_para.psf"""'], {}), "(path, 'tensor_para.psf')\n", (1726, 1751), False, 'from pathlib import Path\n'), ((1780, 1809), 'pathlib.Path', 'Path', (['path', '"""tensor_para.pdb"""'], {}), "(path, 'tensor_para.pdb')\n", (1784, 1809), False, 'from pathlib import Path\n'), ((1838, 1867), 'pathlib.Path', 'Path', (['path', '"""tensor_dani.psf"""'], {}), "(path, 'tensor_dani.psf')\n", (1842, 1867), False, 'from pathlib import Path\n'), ((1896, 1925), 'pathlib.Path', 'Path', (['path', '"""tensor_dani.pdb"""'], {}), "(path, 'tensor_dani.pdb')\n", (1900, 1925), False, 'from pathlib import Path\n'), ((2046, 2072), 'pathlib.Path', 'Path', (['path', '"""top_axis.pro"""'], {}), "(path, 'top_axis.pro')\n", (2050, 2072), False, 'from pathlib import Path\n'), ((2094, 2120), 'pathlib.Path', 'Path', (['path', '"""par_axis.pro"""'], {}), "(path, 'par_axis.pro')\n", (2098, 2120), False, 'from pathlib import Path\n'), ((2147, 2178), 'pathlib.Path', 'Path', (['path', '"""top_axis_dani.pro"""'], {}), "(path, 'top_axis_dani.pro')\n", (2151, 2178), False, 'from pathlib import Path\n'), ((2311, 2337), 'pathlib.Path', 'Path', (['path', '"""boxtyp20.pdb"""'], {}), "(path, 'boxtyp20.pdb')\n", (2315, 2337), False, 'from pathlib import Path\n')]
|
#!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for simple JSON templates.
A JSON template is a dictionary of JSON data in which string values
may be simple templates in string.Template format (i.e.,
$dollarSignEscaping). By default, the template is expanded against
its own data, optionally updated with additional context.
"""
import json
from string import Template
import sys
__author__ = '<EMAIL> (<NAME>)'
def ExpandJsonTemplate(json_data, extra_context=None, use_self=True):
"""Recursively template-expand a json dict against itself or other context.
The context for string expansion is the json dict itself by default, updated
by extra_context, if supplied.
Args:
json_data: (dict) A JSON object where string values may be templates.
extra_context: (dict) Additional context for template expansion.
use_self: (bool) Whether to expand the template against itself, or only use
extra_context.
Returns:
A dict where string template values have been expanded against
the context.
"""
if use_self:
context = dict(json_data)
else:
context = {}
if extra_context:
context.update(extra_context)
def RecursiveExpand(obj):
if isinstance(obj, list):
return [RecursiveExpand(x) for x in obj]
elif isinstance(obj, dict):
return dict((k, RecursiveExpand(v)) for k, v in obj.iteritems())
elif isinstance(obj, (str, unicode)):
return Template(obj).safe_substitute(context)
else:
return obj
return RecursiveExpand(json_data)
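# Minimal illustration of the expansion (made-up data, not part of the original module):
#   ExpandJsonTemplate({'name': 'world', 'greeting': 'Hello, $name!'})
#   -> {'name': 'world', 'greeting': 'Hello, world!'}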
if __name__ == '__main__':
if len(sys.argv) > 1:
json_in = open(sys.argv[1])
else:
json_in = sys.stdin
data = json.load(json_in)
expanded = ExpandJsonTemplate(data)
json.dump(expanded, sys.stdout, indent=2)
|
[
"json.load",
"string.Template",
"json.dump"
] |
[((2243, 2261), 'json.load', 'json.load', (['json_in'], {}), '(json_in)\n', (2252, 2261), False, 'import json\n'), ((2302, 2343), 'json.dump', 'json.dump', (['expanded', 'sys.stdout'], {'indent': '(2)'}), '(expanded, sys.stdout, indent=2)\n', (2311, 2343), False, 'import json\n'), ((2014, 2027), 'string.Template', 'Template', (['obj'], {}), '(obj)\n', (2022, 2027), False, 'from string import Template\n')]
|
# Generated by Django 4.0 on 2021-12-15 09:04
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('book', '0005_alter_book_rented_count'),
]
operations = [
migrations.AlterField(
model_name='book',
name='cover_img',
field=models.ImageField(default=django.utils.timezone.now, upload_to='static/media/book/covers'),
preserve_default=False,
),
]
|
[
"django.db.models.ImageField"
] |
[((366, 461), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': 'django.utils.timezone.now', 'upload_to': '"""static/media/book/covers"""'}), "(default=django.utils.timezone.now, upload_to=\n 'static/media/book/covers')\n", (383, 461), False, 'from django.db import migrations, models\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _thread
import sys
import time
from math import exp
from random import random
from typing import List, Tuple, Set
from scipy import spatial
import numpy as np
import torch
from torch import nn
from torch.optim import optimizer
from torch.utils import tensorboard
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataloader import BidirectionalOneShotIterator
from dataloader import TrainDataset
from dataloader import TestDataset
import tensorflow as tf
import tensorboard as tb
import logging
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
torch.random.manual_seed(123456)
# region model
class KGEModel(nn.Module):
def __init__(self, train_seeds, nentity, nrelation, nvalue, hidden_dim, gamma, double_entity_embedding=False,
double_relation_embedding=False):
super(KGEModel, self).__init__()
# self.model_name = model_name
self.nentity = nentity
self.nrelation = nrelation
self.nvalue = nvalue
self.hidden_dim = hidden_dim
self.epsilon = 2.0
self.gamma = nn.Parameter(
torch.Tensor([gamma]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
self.entity_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
self.relation_dim = hidden_dim * 2 if double_relation_embedding else hidden_dim
self.value_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
entity_weight = torch.zeros(nentity, self.entity_dim)
nn.init.uniform_(
tensor=entity_weight,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
for left_entity, right_entity in train_seeds:
entity_weight[left_entity] = entity_weight[right_entity]
self.entity_embedding = nn.Parameter(entity_weight)
# nn.init.normal_(self.entity_embedding)
self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
# nn.init.normal_(self.relation_embedding)
nn.init.uniform_(
tensor=self.relation_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
self.value_embedding = nn.Parameter(torch.zeros(nvalue, self.value_dim))
# nn.init.normal_(self.value_embedding)
nn.init.uniform_(
tensor=self.value_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
def forward(self, sample, mode='single'):
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
else:
raise ValueError('mode %s not supported' % mode)
score = self.TransE(head, relation, tail, mode)
return score
def TransE(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head + (relation - tail)
else:
score = (head + relation) - tail
score = self.gamma.item() - torch.norm(score, p=1, dim=2)
return score
def RotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
@staticmethod
def train_step(model, optimizer, positive_sample, negative_sample, subsampling_weight, mode, device="cuda"):
model.train()
optimizer.zero_grad()
positive_sample = positive_sample.to(device)
negative_sample = negative_sample.to(device)
subsampling_weight = subsampling_weight.to(device)
negative_score = model((positive_sample, negative_sample), mode=mode)
negative_score = F.logsigmoid(-negative_score).mean(dim=1)
positive_score = model(positive_sample)
positive_score = F.logsigmoid(positive_score).squeeze(dim=1)
positive_sample_loss = - (subsampling_weight * positive_score).sum() / subsampling_weight.sum()
negative_sample_loss = - (subsampling_weight * negative_score).sum() / subsampling_weight.sum()
loss = (positive_sample_loss + negative_sample_loss) / 2
loss.backward()
optimizer.step()
return loss.item()
# endregion
# region logging
def get_logger(filename):
"""
Return instance of logger
    Unified logging format
"""
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(message)s', level=logging.INFO)
handler = logging.FileHandler(filename)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
logger = get_logger("./train.log")
# endregion
# region progress bar
class Progbar(object):
"""
Progbar class inspired by keras
    Progress bar
```
progbar = Progbar(max_step=100)
for i in range(100):
progbar.update(i, [("step", i), ("next", i+1)])
```
"""
def __init__(self, max_step, width=30):
self.max_step = max_step
self.width = width
self.last_width = 0
self.sum_values = {}
self.start = time.time()
self.last_step = 0
self.info = ""
self.bar = ""
def _update_values(self, curr_step, values):
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (curr_step - self.last_step), curr_step - self.last_step]
else:
self.sum_values[k][0] += v * (curr_step - self.last_step)
self.sum_values[k][1] += (curr_step - self.last_step)
def _write_bar(self, curr_step):
last_width = self.last_width
sys.stdout.write("\b" * last_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.max_step))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (curr_step, self.max_step)
prog = float(curr_step) / self.max_step
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if curr_step < self.max_step:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
return bar
def _get_eta(self, curr_step):
now = time.time()
if curr_step:
time_per_unit = (now - self.start) / curr_step
else:
time_per_unit = 0
eta = time_per_unit * (self.max_step - curr_step)
if curr_step < self.max_step:
info = ' - ETA: %ds' % eta
else:
info = ' - %ds' % (now - self.start)
return info
def _get_values_sum(self):
info = ""
for name, value in self.sum_values.items():
info += ' - %s: %.6f' % (name, value[0] / max(1, value[1]))
return info
def _write_info(self, curr_step):
info = ""
info += self._get_eta(curr_step)
info += self._get_values_sum()
sys.stdout.write(info)
return info
def _update_width(self, curr_step):
curr_width = len(self.bar) + len(self.info)
if curr_width < self.last_width:
sys.stdout.write(" " * (self.last_width - curr_width))
if curr_step >= self.max_step:
sys.stdout.write("\n")
sys.stdout.flush()
self.last_width = curr_width
def update(self, curr_step, values):
"""Updates the progress bar.
Args:
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
"""
self._update_values(curr_step, values)
self.bar = self._write_bar(curr_step)
self.info = self._write_info(curr_step)
self._update_width(curr_step)
self.last_step = curr_step
# endregion
# region evaluation of entity alignment
class Tester:
    left_ids: List[int] = []  # ids of the left entities of the aligned pairs in test_seeds
    right_ids: List[int] = []  # ids of the right entities of the aligned pairs in test_seeds
    seeds: List[Tuple[int, int]] = []  # (m, 2) aligned entity pairs (a, b); a is called the left entity, b the right entity
train_seeds: List[Tuple[int, int]] = [] # (0.8m, 2)
test_seeds: List[Tuple[int, int]] = [] # (0.2m, 2)
linkEmbedding = []
kg1E = []
kg2E = []
EA_results = {}
def read_entity_align_list(self, entity_align_file_path):
ret = []
with open(entity_align_file_path, encoding='utf-8') as f:
for line in f:
th = line[:-1].split('\t')
ret.append((int(th[0]), int(th[1])))
self.seeds = ret
        # train/test split (note: as written below, both train_seeds and test_seeds contain all seeds)
train_percent = 0.3
train_max_idx = int(train_percent * len(self.seeds))
self.train_seeds = self.seeds[:]
self.test_seeds = self.seeds[:]
self.left_ids = []
self.right_ids = []
for left_entity, right_entity in self.test_seeds:
            self.left_ids.append(left_entity)  # left entity of the aligned pair
            self.right_ids.append(right_entity)  # right entity of the aligned pair
def XRA(self, entity_embedding_file_path):
self.linkEmbedding = []
with open(entity_embedding_file_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
for i in range(len(lines)):
aline = lines[i].strip()
aline_list = aline.split()
self.linkEmbedding.append(aline_list)
@staticmethod
def get_vec(entities_embedding, id_list: List[int], device="cuda"):
tensor = torch.LongTensor(id_list).view(-1, 1).to(device)
return entities_embedding(tensor).view(-1, 200).cpu().detach().numpy()
@staticmethod
def get_vec2(entities_embedding, id_list: List[int], device="cuda"):
all_entity_ids = torch.LongTensor(id_list).view(-1).to(device)
all_entity_vec = torch.index_select(
entities_embedding,
dim=0,
index=all_entity_ids
).view(-1, 200).cpu().detach().numpy()
return all_entity_vec
def calculate(self, top_k=(1, 10, 50, 100)):
Lvec = np.array([self.linkEmbedding[e1] for e1, e2 in self.test_seeds])
Rvec = np.array([self.linkEmbedding[e2] for e1, e2 in self.test_seeds])
        return self.get_hits2(Lvec, Rvec, top_k)  # get_hits2 builds the distance matrix before calling get_hits
def get_hits2(self, Lvec, Rvec, top_k=(1, 10, 50, 100)):
sim = spatial.distance.cdist(Lvec, Rvec, metric='cityblock')
return self.get_hits(Lvec, Rvec, sim, top_k)
def get_hits(self, Lvec, Rvec, sim, top_k=(1, 10, 50, 100)):
# Lvec (m, d), Rvec (m, d)
        # Lvec and Rvec are the lists of embeddings of the aligned left/right entities; d is the embedding dimension, m the number of entities
        # sim = distance(Lvec, Rvec), shape (m, m)
        # sim[i, j] is the distance from entity i of Lvec to entity j of Rvec
        top_lr = [0] * len(top_k)
        for i in range(Lvec.shape[0]):  # for every KG1 entity
            rank = sim[i, :].argsort()
            # sim[i, :] is a row vector holding the distances from entity i of Lvec to every entity of Rvec
            # argsort sorts the distances and returns the sorted indices,
            # e.g. [6, 3, 5] with indices [0, 1, 2] sorts to [3, 5, 6], so [1, 2, 0] is returned
            rank_index = np.where(rank == i)[0][0]
            # for a 1-d array, np.where(rank == i) is equivalent to list(rank).index(i), i.e. the position of element i in rank
            # here i does not refer to the index of entity i in Lvec, but to the index of the entity in Rvec that is aligned with i
for j in range(len(top_k)):
if rank_index < top_k[j]: # index 从 0 开始,因此用 '<' 号
top_lr[j] += 1
top_rl = [0] * len(top_k)
for i in range(Rvec.shape[0]):
rank = sim[:, i].argsort()
rank_index = np.where(rank == i)[0][0]
for j in range(len(top_k)):
if rank_index < top_k[j]:
top_rl[j] += 1
logger.info('For each left:')
left = []
for i in range(len(top_lr)):
hits = top_k[i]
hits_value = top_lr[i] / len(self.test_seeds) * 100
left.append((hits, hits_value))
logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
logger.info('For each right:')
right = []
for i in range(len(top_rl)):
hits = top_k[i]
hits_value = top_rl[i] / len(self.test_seeds) * 100
right.append((hits, hits_value))
logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
return {
"left": left,
"right": right,
}
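    # Illustrative reading of the returned dict (made-up numbers, not real results): a value such as
    # {"left": [(1, 30.0), (10, 60.0), ...], "right": [...]} means 30% of the left test entities had
    # their true counterpart ranked first, and 60% had it within the top 10.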
# endregion
# region save/load the model and restore the training state
_MODEL_STATE_DICT = "model_state_dict"
_OPTIMIZER_STATE_DICT = "optimizer_state_dict"
_EPOCH = "epoch"
_STEP = "step"
_BEST_SCORE = "best_score"
_LOSS = "loss"
def load_checkpoint(model: nn.Module, optim: optimizer.Optimizer,
checkpoint_path="./result/fr_en/checkpoint.tar") -> Tuple[int, int, float, float]:
"""Loads training checkpoint.
:param checkpoint_path: path to checkpoint
:param model: model to update state
:param optim: optimizer to update state
:return tuple of starting epoch id, starting step id, best checkpoint score
"""
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint[_MODEL_STATE_DICT])
optim.load_state_dict(checkpoint[_OPTIMIZER_STATE_DICT])
start_epoch_id = checkpoint[_EPOCH] + 1
step = checkpoint[_STEP] + 1
best_score = checkpoint[_BEST_SCORE]
loss = checkpoint[_LOSS]
return start_epoch_id, step, best_score, loss
def save_checkpoint(model: nn.Module, optim: optimizer.Optimizer,
epoch_id: int, step: int, best_score: float, loss: float,
save_path="./result/fr_en/checkpoint.tar"):
torch.save({
_MODEL_STATE_DICT: model.state_dict(),
_OPTIMIZER_STATE_DICT: optim.state_dict(),
_EPOCH: epoch_id,
_STEP: step,
_BEST_SCORE: best_score,
_LOSS: loss,
}, save_path)
def save_entity_embedding_list(model, embedding_path="./result/fr_en/ATentsembed.txt"):
with open(embedding_path, 'w') as f:
d = model.entity_embedding.data.detach().cpu().numpy()
for i in range(len(d)):
f.write(" ".join([str(j) for j in d[i].tolist()]))
f.write("\n")
# endregion
# region dataset
def read_ids_and_names(dir_path, sp="\t"):
ids = []
names = []
with open(dir_path, encoding="utf-8") as file:
lines = file.readlines()
for line in lines:
id_to_name = line.strip().split(sp)
ids.append(int(id_to_name[0]))
names.append(id_to_name[1])
return ids, names
def read_triple(triple_path):
with open(triple_path, 'r') as fr:
triple = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[1])
rel = int(line_split[2])
triple.add((head, rel, tail))
return list(triple)
def append_align_triple(triple: List[Tuple[int, int, int]], entity_align_list: List[Tuple[int, int]]):
    # Replace the head entity with its aligned counterpart to build additional attribute triples, so the alignment seed data is also exploited
align_set = {}
for i in entity_align_list:
align_set[i[0]] = i[1]
align_set[i[1]] = i[0]
triple_replace_with_align = []
bar = Progbar(max_step=len(triple))
count = 0
for entity, attr, value in triple:
if entity in align_set:
triple_replace_with_align.append((align_set[entity], attr, value))
count += 1
bar.update(count, [("step", count)])
return triple + triple_replace_with_align
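# Illustrative example of what append_align_triple produces (made-up ids): with an aligned seed
# pair (1, 2) and an existing attribute triple (1, attr, value), the extra triple (2, attr, value)
# is appended, so both aligned entities share the attribute information.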
# endregion
class TransE:
def __init__(self,
# input paths
entity_align_file="data/fr_en/ref_ent_ids",
all_entity_file="data/fr_en/ent_ids_all",
all_attr_file="data/fr_en/att2id_all",
all_value_file="data/fr_en/att_value2id_all",
all_triple_file="data/fr_en/att_triple_all",
# output paths
checkpoint_path="./result/TransE/fr_en/checkpoint.tar",
embedding_path="./result/TransE/fr_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE/fr_en/log/",
device="cuda",
learning_rate=0.001,
visualize=False
):
self.entity_align_file = entity_align_file
self.all_entity_file = all_entity_file
self.all_attr_file = all_attr_file
self.all_value_file = all_value_file
self.all_triple_file = all_triple_file
self.device = device
self.visualize = visualize
self.tensorboard_log_dir = tensorboard_log_dir
self.checkpoint_path = checkpoint_path
self.embedding_path = embedding_path
self.learning_rate = learning_rate
def init_data(self):
self.t = Tester()
self.t.read_entity_align_list(self.entity_align_file) # 得到已知对齐实体
self.entity_list, self.entity_name_list = read_ids_and_names(self.all_entity_file)
self.attr_list, _ = read_ids_and_names(self.all_attr_file)
self.value_list, _ = read_ids_and_names(self.all_value_file)
self.train_triples = read_triple(self.all_triple_file)
self.entity_count = len(self.entity_list)
self.attr_count = len(self.attr_list)
self.value_count = len(self.value_list)
logger.info("entity: " + str(self.entity_count)
+ " attr: " + str(self.attr_count)
+ " value: " + str(self.value_count))
def append_align_triple(self):
self.train_triples = append_align_triple(self.train_triples, self.t.train_seeds)
def init_dataset(self):
train_dataloader_head = DataLoader(
TrainDataset(self.train_triples, self.entity_count, self.attr_count, self.value_count, 512, 'head-batch'),
batch_size=1024,
shuffle=False,
num_workers=4,
collate_fn=TrainDataset.collate_fn
)
train_dataloader_tail = DataLoader(
TrainDataset(self.train_triples, self.entity_count, self.attr_count, self.value_count, 512, 'tail-batch'),
batch_size=1024,
shuffle=False,
num_workers=4,
collate_fn=TrainDataset.collate_fn
)
self.train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
def init_model(self):
self.model = KGEModel(
            self.t.seeds,  # all seeds
nentity=self.entity_count,
nrelation=self.attr_count,
nvalue=self.value_count,
hidden_dim=200,
gamma=24.0,
).to(self.device)
def init_optimizer(self):
self.optim = torch.optim.Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.learning_rate
)
def init_soft_align(self):
        self.combination_threshold = 3  # pairs closer than this distance are treated as aligned by the model
        self.combination_restriction = 5000  # maximum number of entity pairs the model may treat as aligned
self.distance2entitiesPair: List[Tuple[int, Tuple[int, int]]] = []
self.combinationProbability: List[float] = [0] * self.entity_count # [0, 1)
self.correspondingEntity = {}
self.model_think_align_entities = []
self.model_is_able_to_predict_align_entities = False
def soft_align(self, positive_sample, mode='single'):
batch_size = positive_sample.size()[0]
# positive_sample (batch_size, 3)
        # batch_size triples of (entity, attr, value)
# negative_sample (batch_size, negative_sample_size)
        # batch_size rows of candidate replacement ids (neg_id1, neg_id2, ...), each of length negative_sample_size
        # Let e be the positive entity, e' a negative entity, and e* the entity the model believes e is aligned with
# 1. head-batch
# (e, a, v) + (e'1, e'2, ..., e'n) ->
# ((e, a, v), (e'1, a, v))
# ((e, a, v), (e'2, a, v))
# ...
# ((e, a, v), (e'n, a, v))
# 2. tail-batch
# (e, a, v) + (v'1, v'2, ..., v'n) ->
# ((e, a, v), (e, a, v'1))
# ((e, a, v), (e, a, v'2))
# ...
# ((e, a, v), (e, a, v'n))
soft_positive_sample = positive_sample.clone()
if mode == "head-batch":
            # negative samples replace the head entity at random
            # (neg_id1, neg_id2, ...) are entity ids
# ((e, a, v), (e'1, a, v))
            # given (e, a, v) + (e'1, e'2, ..., e'n)
for i in range(batch_size):
                # 1. the model believes the head entity is aligned
                h1 = soft_positive_sample[i][0].item()
                if self.combinationProbability[h1] >= 0.5 and h1 in self.correspondingEntity:  # if trusted
                    # want (e, a, v) (e', a, v) -> (e*, a, v) (e', a, v)
                    h1_cor = self.correspondingEntity[h1]  # the entity the model believes is aligned with h1
                    soft_positive_sample[i][0] = h1_cor  # replace with the model's aligned entity
elif mode == "tail-batch":
            # negative samples replace the tail (attribute value) at random
            # (neg_id1, neg_id2, ...) are attribute-value ids
            # ((e, a, v), (e, a, v'2))
            # given (e, a, v) + (v'1, v'2, ..., v'n)
            for i in range(batch_size):
                # 1. the model believes the head entity is aligned
                h1 = soft_positive_sample[i][0].item()
                if self.combinationProbability[h1] >= 0.5 and h1 in self.correspondingEntity:  # if trusted
                    # want (e, a, v) (e', a, v) -> (e*, a, v) (e', a, v)
                    h1_cor = self.correspondingEntity[h1]  # the entity the model believes is aligned with h1
                    soft_positive_sample[i][0] = h1_cor  # replace with the model's aligned entity
return soft_positive_sample
def do_combine(self, thread_name, sim):
        # sim[i, j] is the distance from entity i of Lvec to entity j of Rvec
        logger.info(thread_name + " " + "model alignment in progress")
        computing_time = time.time()
        # 1. sort by distance
self.distance2entitiesPair: List[Tuple[int, Tuple[int, int]]] = []
filtered = np.where(sim <= self.combination_threshold)
for i, j in zip(filtered[0], filtered[1]):
self.distance2entitiesPair.append((sim[i, j], (self.t.left_ids[i], self.t.right_ids[j])))
filter_time = time.time()
logger.info(thread_name + " " + "距离小于 "
+ str(self.combination_threshold) + " 的实体对有 "
+ str(len(self.distance2entitiesPair)) + " 个")
logger.info(thread_name + " " + "扁平化,用时 " + str(int(filter_time - computing_time)) + " 秒")
# 2.初始化"模型认为两实体是对齐的"这件事的可信概率
combinationProbability: List[float] = [0] * self.entity_count # [0, 1)
        # 3. entity pairs the model believes are aligned
correspondingEntity = {}
self.model_think_align_entities = []
occupied: Set[int] = set()
combination_counter = 0
sigmoid = lambda x: 1.0 / (1.0 + exp(-x))
for dis, (ent1, ent2) in self.distance2entitiesPair:
if dis > self.combination_threshold:
                # beyond the trusted range, not reliable
continue
            # the distance is within the trusted range
if ent1 in occupied or ent2 in occupied:
continue
if combination_counter >= self.combination_restriction:
break
combination_counter += 1
            # record the pair in the local mapping; it is swapped into self.correspondingEntity below
            correspondingEntity[ent1] = ent2
            correspondingEntity[ent2] = ent1
self.model_think_align_entities.append((ent1, ent2))
occupied.add(ent1)
occupied.add(ent2)
            combinationProbability[ent1] = sigmoid(self.combination_threshold - dis)  # guaranteed p > 0.5
combinationProbability[ent2] = sigmoid(self.combination_threshold - dis)
logger.info(thread_name + " " + "对齐了 " + str(len(self.model_think_align_entities)) + " 个实体")
self.combination_restriction += 1000
        self.model_is_able_to_predict_align_entities = False  # lock
self.combinationProbability = combinationProbability
self.correspondingEntity = correspondingEntity
        self.model_is_able_to_predict_align_entities = True  # unlock
align_time = time.time()
logger.info(thread_name + " " + "模型对齐完成,用时 " + str(int(align_time - filter_time)) + " 秒")
def run_train(self, need_to_load_checkpoint=True):
logger.info("start training")
init_step = 1
total_steps = 20001
test_steps = 5000
last_loss = 100
score = 0
last_score = score
if need_to_load_checkpoint:
_, init_step, score, last_loss = load_checkpoint(self.model, self.optim, self.checkpoint_path)
last_score = score
summary_writer = tensorboard.SummaryWriter(log_dir=self.tensorboard_log_dir)
progbar = Progbar(max_step=total_steps - init_step)
start_time = time.time()
for step in range(init_step, total_steps):
positive_sample, negative_sample, subsampling_weight, mode = next(self.train_iterator)
loss = self.model.train_step(self.model, self.optim,
positive_sample, negative_sample,
subsampling_weight, mode, self.device)
            # soft alignment
            # rewrite positive_sample according to the entities the model believes are aligned, then train one more step
if self.model_is_able_to_predict_align_entities:
soft_positive_sample = self.soft_align(positive_sample, mode)
loss2 = self.model.train_step(self.model, self.optim,
soft_positive_sample, negative_sample,
subsampling_weight, mode, self.device)
loss = (loss + loss2) / 2
progbar.update(step - init_step + 1, [
("loss", loss),
("cost", round((time.time() - start_time))),
("aligned", len(self.model_think_align_entities))
])
if self.visualize:
summary_writer.add_scalar(tag='Loss/train', scalar_value=loss, global_step=step)
if step == 12000 or step == 13000 or step == 14000:
logger.info("\n计算距离中")
computing_time = time.time()
left_vec = self.t.get_vec2(self.model.entity_embedding, self.t.left_ids)
right_vec = self.t.get_vec2(self.model.entity_embedding, self.t.right_ids)
sim = spatial.distance.cdist(left_vec, right_vec, metric='euclidean')
logger.info("计算距离完成,用时 " + str(int(time.time() - computing_time)) + " 秒")
# self.do_combine("Thread-" + str(step), sim)
# try:
                #     logger.info("start a thread to compute the entities the model believes are aligned")
# _thread.start_new_thread(self.do_combine, ("Thread of step-" + str(step), sim,))
# except SystemExit:
                #     logger.error("Error: unable to start the thread")
logger.info("属性消融实验")
hits = self.t.get_hits(left_vec, right_vec, sim)
hits_left = hits["left"]
hits_right = hits["right"]
left_hits_10 = hits_left[2][1]
right_hits_10 = hits_right[2][1]
score = (left_hits_10 + right_hits_10) / 2
logger.info("score = " + str(score))
if self.visualize:
summary_writer.add_embedding(tag='Embedding',
mat=self.model.entity_embedding,
metadata=self.entity_name_list,
global_step=step)
summary_writer.add_scalar(tag='Hits@1/left', scalar_value=hits_left[0][1], global_step=step)
summary_writer.add_scalar(tag='Hits@10/left', scalar_value=hits_left[1][1], global_step=step)
summary_writer.add_scalar(tag='Hits@50/left', scalar_value=hits_left[2][1], global_step=step)
summary_writer.add_scalar(tag='Hits@100/left', scalar_value=hits_left[3][1], global_step=step)
summary_writer.add_scalar(tag='Hits@1/right', scalar_value=hits_right[0][1], global_step=step)
summary_writer.add_scalar(tag='Hits@10/right', scalar_value=hits_right[1][1], global_step=step)
summary_writer.add_scalar(tag='Hits@50/right', scalar_value=hits_right[2][1], global_step=step)
summary_writer.add_scalar(tag='Hits@100/right', scalar_value=hits_right[3][1], global_step=step)
if score > last_score:
last_score = score
save_checkpoint(self.model, self.optim, 1, step, score, loss, self.checkpoint_path)
save_entity_embedding_list(self.model, self.embedding_path)
def run_test(self):
load_checkpoint(self.model, self.optim, self.checkpoint_path)
logger.info("\n属性消融实验")
left_vec = self.t.get_vec2(self.model.entity_embedding, self.t.left_ids)
right_vec = self.t.get_vec2(self.model.entity_embedding, self.t.right_ids)
        hits = self.t.get_hits2(left_vec, right_vec)  # get_hits2 builds the distance matrix before calling get_hits
hits_left = hits["left"]
hits_right = hits["right"]
left_hits_10 = hits_left[2][1]
right_hits_10 = hits_right[2][1]
score = (left_hits_10 + right_hits_10) / 2
logger.info("score = " + str(score))
def train_model_for_fr_en():
m = TransE(
checkpoint_path="./result/TransE2/fr_en/checkpoint.tar",
embedding_path="./result/TransE2/fr_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/fr_en/log/"
)
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def train_model_for_ja_en():
m = TransE(entity_align_file="data/ja_en/ref_ent_ids",
all_entity_file="data/ja_en/ent_ids_all",
all_attr_file="data/ja_en/att2id_all",
all_value_file="data/ja_en/att_value2id_all",
all_triple_file="data/ja_en/att_triple_all",
checkpoint_path="./result/TransE2/ja_en/checkpoint.tar",
embedding_path="./result/TransE2/ja_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/ja_en/log/")
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def train_model_for_zh_en():
m = TransE(entity_align_file="data/zh_en/ref_ent_ids",
all_entity_file="data/zh_en/ent_ids_all",
all_attr_file="data/zh_en/att2id_all",
all_value_file="data/zh_en/att_value2id_all",
all_triple_file="data/zh_en/att_triple_all",
checkpoint_path="./result/TransE2/zh_en/checkpoint.tar",
embedding_path="./result/TransE2/zh_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/zh_en/log/")
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def test_model():
m = TransE()
m.init_data()
m.init_model()
m.init_optimizer()
m.run_test()
# train_model_for_fr_en()
# train_model_for_ja_en()
train_model_for_zh_en()
|
[
"logging.getLogger",
"numpy.log10",
"torch.LongTensor",
"torch.optim.optimizer.step",
"torch.sin",
"numpy.array",
"torch.cos",
"math.exp",
"dataloader.TrainDataset",
"torch.random.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"numpy.where",
"torch.optim.optimizer.zero_grad",
"logging.FileHandler",
"sys.stdout.flush",
"torch.Tensor",
"torch.norm",
"torch.nn.functional.logsigmoid",
"time.time",
"logging.basicConfig",
"torch.index_select",
"scipy.spatial.distance.cdist",
"logging.Formatter",
"torch.load",
"torch.stack",
"dataloader.BidirectionalOneShotIterator",
"torch.nn.Parameter",
"torch.chunk",
"torch.zeros",
"sys.stdout.write"
] |
[((691, 723), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(123456)'], {}), '(123456)\n', (715, 723), False, 'import torch\n'), ((7415, 7442), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (7432, 7442), False, 'import logging\n'), ((7481, 7542), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (7500, 7542), False, 'import logging\n'), ((7558, 7587), 'logging.FileHandler', 'logging.FileHandler', (['filename'], {}), '(filename)\n', (7577, 7587), False, 'import logging\n'), ((16004, 16031), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (16014, 16031), False, 'import torch\n'), ((1730, 1767), 'torch.zeros', 'torch.zeros', (['nentity', 'self.entity_dim'], {}), '(nentity, self.entity_dim)\n', (1741, 1767), False, 'import torch\n'), ((2079, 2106), 'torch.nn.Parameter', 'nn.Parameter', (['entity_weight'], {}), '(entity_weight)\n', (2091, 2106), False, 'from torch import nn\n'), ((5344, 5371), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (5355, 5371), False, 'import torch\n'), ((5399, 5426), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (5410, 5426), False, 'import torch\n'), ((5592, 5617), 'torch.cos', 'torch.cos', (['phase_relation'], {}), '(phase_relation)\n', (5601, 5617), False, 'import torch\n'), ((5640, 5665), 'torch.sin', 'torch.sin', (['phase_relation'], {}), '(phase_relation)\n', (5649, 5665), False, 'import torch\n'), ((6175, 6215), 'torch.stack', 'torch.stack', (['[re_score, im_score]'], {'dim': '(0)'}), '([re_score, im_score], dim=0)\n', (6186, 6215), False, 'import torch\n'), ((6488, 6509), 'torch.optim.optimizer.zero_grad', 'optimizer.zero_grad', ([], {}), '()\n', (6507, 6509), False, 'from torch.optim import optimizer\n'), ((7247, 7263), 'torch.optim.optimizer.step', 'optimizer.step', ([], {}), '()\n', (7261, 7263), False, 'from torch.optim import optimizer\n'), ((7648, 7707), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s: %(message)s"""'], {}), "('%(asctime)s:%(levelname)s: %(message)s')\n", (7665, 7707), False, 'import logging\n'), ((8241, 8252), 'time.time', 'time.time', ([], {}), '()\n', (8250, 8252), False, 'import time\n'), ((8790, 8827), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * last_width)"], {}), "('\\x08' * last_width)\n", (8806, 8827), False, 'import sys\n'), ((8834, 8856), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (8850, 8856), False, 'import sys\n'), ((9382, 9403), 'sys.stdout.write', 'sys.stdout.write', (['bar'], {}), '(bar)\n', (9398, 9403), False, 'import sys\n'), ((9474, 9485), 'time.time', 'time.time', ([], {}), '()\n', (9483, 9485), False, 'import time\n'), ((10171, 10193), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (10187, 10193), False, 'import sys\n'), ((10500, 10518), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10516, 10518), False, 'import sys\n'), ((13171, 13235), 'numpy.array', 'np.array', (['[self.linkEmbedding[e1] for e1, e2 in self.test_seeds]'], {}), '([self.linkEmbedding[e1] for e1, e2 in self.test_seeds])\n', (13179, 13235), True, 'import numpy as np\n'), ((13251, 13315), 'numpy.array', 'np.array', (['[self.linkEmbedding[e2] for e1, e2 in self.test_seeds]'], {}), '([self.linkEmbedding[e2] for e1, e2 in self.test_seeds])\n', (13259, 13315), True, 'import numpy as np\n'), 
((13440, 13494), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['Lvec', 'Rvec'], {'metric': '"""cityblock"""'}), "(Lvec, Rvec, metric='cityblock')\n", (13462, 13494), False, 'from scipy import spatial\n'), ((21177, 21251), 'dataloader.BidirectionalOneShotIterator', 'BidirectionalOneShotIterator', (['train_dataloader_head', 'train_dataloader_tail'], {}), '(train_dataloader_head, train_dataloader_tail)\n', (21205, 21251), False, 'from dataloader import BidirectionalOneShotIterator\n'), ((24503, 24514), 'time.time', 'time.time', ([], {}), '()\n', (24512, 24514), False, 'import time\n'), ((24628, 24671), 'numpy.where', 'np.where', (['(sim <= self.combination_threshold)'], {}), '(sim <= self.combination_threshold)\n', (24636, 24671), True, 'import numpy as np\n'), ((24847, 24858), 'time.time', 'time.time', ([], {}), '()\n', (24856, 24858), False, 'import time\n'), ((26694, 26705), 'time.time', 'time.time', ([], {}), '()\n', (26703, 26705), False, 'import time\n'), ((27244, 27303), 'torch.utils.tensorboard.SummaryWriter', 'tensorboard.SummaryWriter', ([], {'log_dir': 'self.tensorboard_log_dir'}), '(log_dir=self.tensorboard_log_dir)\n', (27269, 27303), False, 'from torch.utils import tensorboard\n'), ((27385, 27396), 'time.time', 'time.time', ([], {}), '()\n', (27394, 27396), False, 'import time\n'), ((1220, 1241), 'torch.Tensor', 'torch.Tensor', (['[gamma]'], {}), '([gamma])\n', (1232, 1241), False, 'import torch\n'), ((2204, 2245), 'torch.zeros', 'torch.zeros', (['nrelation', 'self.relation_dim'], {}), '(nrelation, self.relation_dim)\n', (2215, 2245), False, 'import torch\n'), ((2509, 2544), 'torch.zeros', 'torch.zeros', (['nvalue', 'self.value_dim'], {}), '(nvalue, self.value_dim)\n', (2520, 2544), False, 'import torch\n'), ((5177, 5206), 'torch.norm', 'torch.norm', (['score'], {'p': '(1)', 'dim': '(2)'}), '(score, p=1, dim=2)\n', (5187, 5206), False, 'import torch\n'), ((7714, 7733), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7731, 7733), False, 'import logging\n'), ((10361, 10415), 'sys.stdout.write', 'sys.stdout.write', (["(' ' * (self.last_width - curr_width))"], {}), "(' ' * (self.last_width - curr_width))\n", (10377, 10415), False, 'import sys\n'), ((10468, 10490), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (10484, 10490), False, 'import sys\n'), ((20597, 20707), 'dataloader.TrainDataset', 'TrainDataset', (['self.train_triples', 'self.entity_count', 'self.attr_count', 'self.value_count', '(512)', '"""head-batch"""'], {}), "(self.train_triples, self.entity_count, self.attr_count, self.\n value_count, 512, 'head-batch')\n", (20609, 20707), False, 'from dataloader import TrainDataset\n'), ((20900, 21010), 'dataloader.TrainDataset', 'TrainDataset', (['self.train_triples', 'self.entity_count', 'self.attr_count', 'self.value_count', '(512)', '"""tail-batch"""'], {}), "(self.train_triples, self.entity_count, self.attr_count, self.\n value_count, 512, 'tail-batch')\n", (20912, 21010), False, 'from dataloader import TrainDataset\n'), ((6780, 6809), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['(-negative_score)'], {}), '(-negative_score)\n', (6792, 6809), True, 'import torch.nn.functional as F\n'), ((6896, 6924), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['positive_score'], {}), '(positive_score)\n', (6908, 6924), True, 'import torch.nn.functional as F\n'), ((28764, 28775), 'time.time', 'time.time', ([], {}), '()\n', (28773, 28775), False, 'import time\n'), ((28978, 29041), 'scipy.spatial.distance.cdist', 
'spatial.distance.cdist', (['left_vec', 'right_vec'], {'metric': '"""euclidean"""'}), "(left_vec, right_vec, metric='euclidean')\n", (29000, 29041), False, 'from scipy import spatial\n'), ((2918, 2986), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'sample[:, 0]'}), '(self.entity_embedding, dim=0, index=sample[:, 0])\n', (2936, 2986), False, 'import torch\n'), ((3086, 3156), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'sample[:, 1]'}), '(self.relation_embedding, dim=0, index=sample[:, 1])\n', (3104, 3156), False, 'import torch\n'), ((3252, 3319), 'torch.index_select', 'torch.index_select', (['self.value_embedding'], {'dim': '(0)', 'index': 'sample[:, 2]'}), '(self.value_embedding, dim=0, index=sample[:, 2])\n', (3270, 3319), False, 'import torch\n'), ((8891, 8914), 'numpy.log10', 'np.log10', (['self.max_step'], {}), '(self.max_step)\n', (8899, 8914), True, 'import numpy as np\n'), ((14091, 14110), 'numpy.where', 'np.where', (['(rank == i)'], {}), '(rank == i)\n', (14099, 14110), True, 'import numpy as np\n'), ((14548, 14567), 'numpy.where', 'np.where', (['(rank == i)'], {}), '(rank == i)\n', (14556, 14567), True, 'import numpy as np\n'), ((25465, 25472), 'math.exp', 'exp', (['(-x)'], {}), '(-x)\n', (25468, 25472), False, 'from math import exp\n'), ((3781, 3854), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'tail_part[:, 1]'}), '(self.relation_embedding, dim=0, index=tail_part[:, 1])\n', (3799, 3854), False, 'import torch\n'), ((3950, 4020), 'torch.index_select', 'torch.index_select', (['self.value_embedding'], {'dim': '(0)', 'index': 'tail_part[:, 2]'}), '(self.value_embedding, dim=0, index=tail_part[:, 2])\n', (3968, 4020), False, 'import torch\n'), ((12609, 12634), 'torch.LongTensor', 'torch.LongTensor', (['id_list'], {}), '(id_list)\n', (12625, 12634), False, 'import torch\n'), ((12854, 12879), 'torch.LongTensor', 'torch.LongTensor', (['id_list'], {}), '(id_list)\n', (12870, 12879), False, 'import torch\n'), ((4278, 4349), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'head_part[:, 0]'}), '(self.entity_embedding, dim=0, index=head_part[:, 0])\n', (4296, 4349), False, 'import torch\n'), ((4449, 4522), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'head_part[:, 1]'}), '(self.relation_embedding, dim=0, index=head_part[:, 1])\n', (4467, 4522), False, 'import torch\n'), ((28389, 28400), 'time.time', 'time.time', ([], {}), '()\n', (28398, 28400), False, 'import time\n'), ((12925, 12992), 'torch.index_select', 'torch.index_select', (['entities_embedding'], {'dim': '(0)', 'index': 'all_entity_ids'}), '(entities_embedding, dim=0, index=all_entity_ids)\n', (12943, 12992), False, 'import torch\n'), ((29111, 29122), 'time.time', 'time.time', ([], {}), '()\n', (29120, 29122), False, 'import time\n')]
|
import discord
import time
import random
import datetime
import asyncio
import json
import config
from discord.ext import commands
from data.data_handler import data_handler
from itertools import chain
from collections import OrderedDict
def gainedRP(player, gained_rp):
if player['Level']['timeOfNextEarn'] > time.time():
return True, False, player['Level']['rank']
rank = get_rank_from(player['Level']['rp'] + gained_rp)
if rank > player['Level']['rank']:
return False, True, rank
return False, False, rank
# Function to get a user's rank and remaining rp to next rank.
# Takes current rp as parameter
def get_rank_from(rp):
# Sets the starting value to be our remaining rp
rem_rp = int(rp)
# Starts the rank at 0
rank = 0
# Loops throught the ranks and checks if the user had enough rp to rank up
# If so, take that rp away from rem_rp and add one to their rank
while rem_rp >= config.rp_ranks[rank]:
rem_rp -= config.rp_ranks[rank]
rank += 1
# Returns the final values for rank and rem_rp.
return rank
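# Illustrative walk-through (hypothetical thresholds, not the real config.rp_ranks values):
# with config.rp_ranks starting [100, 200, 400, ...], get_rank_from(350) removes 100 (rank 1)
# and 200 (rank 2), leaving 50 rp, which is below the next threshold, so rank 2 is returned.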
# Function to get profile pages (1 - 3)
async def get_page(self, ctx, number, userid):
clans = data_handler.load("clans")
profiles = data_handler.load("profiles")
user = await self.bot.fetch_user(userid)
player = profiles[str(userid)]
rank = player['Level']['rank']
title = config.rp_ranktitles[rank]
page = discord.Embed(title = f"{user.display_name}'s profile",
colour = int(player['Settings']['colours'][player['Settings']['colour']], 16),
description = f"{user.name}#{user.discriminator}")
page.set_thumbnail(url = user.avatar_url_as(static_format = 'png'))
page.set_footer(text = f"Requested by {ctx.author.display_name}",
icon_url = ctx.author.avatar_url_as(static_format='png'))
if number == 1:
# Page 1
try:
clan = clans[player['Base']['clanID']]['Base']['name']
except:
clan = "None"
page.add_field(name = "Base Info",
value = f"Account Name: {player['Base']['username']} \nClan: {clan} \nCountry: {player['Base']['country']}",
inline = False)
page.add_field(name = "Level Info",
value = f"Level: {player['Level']['rank']} \nTotal Experience: {player['Level']['rp']} \nTitle: {title}",
inline = False)
if number == 2:
# Page 2
page.add_field(name = "Achievements",
value = f"Amount of Lord titles: {player['Achievements']['lords']} \nAmount of Squire titles: {player['Achievements']['squires']} \nBest :trophy: rating: {player['Achievements']['rating']}",
inline = False)
page.add_field(name = "Fun Favourites",
value = f"Favourite unit: {player['Favourites']['unit']} \nFavourite Tactic: {player['Favourites']['tactic']} \nFavourite Tome: {player['Favourites']['tome']} \nFavourite Skin: {player['Favourites']['skin']}",
inline = False)
if number == 3 and userid is not None:
# Page 3
member = discord.utils.find(lambda g: g.get_member(userid), self.bot.guilds)
if member is not None:
member = member.get_member(userid)
days = int(int(time.time() - (member.created_at - datetime.datetime.utcfromtimestamp(0)).total_seconds())/ 86400)
discord_date = f"{member.created_at.ctime()} ({days} days ago)"
page.add_field(name = "Discord Info",
value = f"Joined Discord on: {discord_date} \nStatus: {member.status} \nid: `{member.id}` \nAvatar Link: {member.avatar_url_as(format='png')}")
return page
# get reaction with number + vice versa
def get_reaction(number, reaction = None):
reactions = {
1: "1\u20e3",
2: "2\u20e3",
3: "3\u20e3",
4: "4\u20e3",
5: "5\u20e3",
6: "6\u20e3",
7: "7\u20e3",
8: "8\u20e3",
9: "9\u20e3",
10: "10\u20e3"
}
if reaction is None:
return reactions.get(number, 0)
else:
return list(reactions.keys())[list(reactions.values()).index(reaction)]
# async handling of user reactions
async def handle_reactions(self, ctx, userid, pages, page1, message):
profiles = data_handler.load("profiles")
page = 0
while True:
def check(reaction, user):
if user.bot == True:
return False
if reaction.message.id != message.id:
return False
reactions = ['⏪', '◀', '⏺️', '▶', '⏩']
return user.id == ctx.author.id and str(reaction) in reactions
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
break
reaction = str(reaction)
if reaction == '⏺️':
playerid = int(random.choice(list(profiles)))
while playerid == userid:
playerid = int(random.choice(list(profiles)))
page1 = await get_page(self, ctx, 1, playerid)
page2 = await get_page(self, ctx, 2, playerid)
page3 = await get_page(self, ctx, 3, playerid)
pages = [page1, page2, page3]
await message.edit(embed=pages[0])
await handle_reactions(self, ctx, playerid, pages, page1, message)
return
elif reaction == '⏪':
page = 0
elif reaction == '◀':
page -= 1
if page < 0:
page = 0
elif reaction == '▶':
page += 1
if page >= 3:
page = 2
elif reaction == '⏩':
page = 2
await message.edit(embed=pages[page])
class Profiles(commands.Cog):
# Initialises the variables and sets the bot.
def __init__(self, bot):
self.bot = bot
# Our base level command. Due to invoke_without_command=True it means that this command is only run when no
# sub-command is run. Makes it a command group with a name.
@commands.group(name='profile', invoke_without_command = True, aliases = ['p', 'P', 'Profile'])
# Defines it as a function.
async def profile(self, ctx, *, userName:str = None):
"""
Check your profile or that of another member.
You no longer need to mention the user to check their profile!
"""
profiles = data_handler.load("profiles")
userids = list()
if userName is None:
# if user wants to display his own profile, display only his own.
userid = ctx.message.author.id
else:
for user in list(filter(lambda u: userName in u.name, self.bot.users)):
userids.append(user.id)
for guild in self.bot.guilds:
for member in list(filter(lambda m: userName in m.display_name, guild.members)):
userids.append(member.id)
for profile in profiles:
if userName.casefold() in profiles[profile]['Base']['username'].casefold():
userids.append(int(profile))
# distinct result list
userids = list(OrderedDict.fromkeys(userids))
# filter out userids without existing user profile
tempUserids = list()
for userid in userids:
try:
player = profiles[str(userid)]
                if config.rp_showHistoricProfiles == False:
                    # require the user to still be visible to the bot before listing the profile
                    member = discord.utils.find(lambda g: g.get_member(userid), self.bot.guilds).get_member(userid)
                tempUserids.append(userid)
except:
continue
userids = tempUserids
if len(userids) <= 0:
await ctx.send("I don't know that Discord User/profile")
return
if len(userids) > 10:
await ctx.send("I found more than 10 matching profiles. Please be more specific.")
return
if len(userids) > 1:
# more then 1 possilbe profile found, let the user decide which should be shown
selectionpage = discord.Embed(title = "I found more than one matching profile. Please select the correct one:", description = "")
selectionpage.set_footer(text = f"Requested by {ctx.author.display_name}", icon_url = ctx.author.avatar_url_as(static_format='png'))
selection = await ctx.send(embed=selectionpage)
foundUser = list()
i = 1
for userid in userids:
player = profiles[str(userid)]
user = await self.bot.fetch_user(userid)
reactionString = str(get_reaction(i))
selectionpage.add_field(name = f"{reactionString}", value = f"{user.name}#{user.discriminator} - Account Name: {player['Base']['username']}", inline = False)
foundUser.append(userid)
await selection.add_reaction(reactionString)
i += 1
await selection.edit(embed=selectionpage)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=lambda r, u: u.id == ctx.author.id and u.bot == False)
except asyncio.TimeoutError:
return
# show the profile of this id:
userid = foundUser[int(get_reaction(0, str(reaction))) - 1]
else:
userid = userids[0]
# display profile of found user
page1 = await get_page(self, ctx, 1, userid)
page2 = await get_page(self, ctx, 2, userid)
page3 = await get_page(self, ctx, 3, userid)
pages = [page1, page2, page3]
message = await ctx.send(embed=page1)
await message.add_reaction("⏪")
await message.add_reaction("◀")
await message.add_reaction("⏺️")
await message.add_reaction("▶")
await message.add_reaction("⏩")
await handle_reactions(self, ctx, userid, pages, page1, message)
@profile.command(name="set")
async def setProfile(self, ctx, attribute, *, value):
try:
"""
Change values on your profile. You can change:
`username`, `clan`, `country`, `lords`, `squires`, `rating`, `unit`, `tactic`, `tome`, `skin`, `colour`.
"""
profiles = data_handler.load("profiles")
player = profiles[str(ctx.author.id)]
attribute = attribute.lower()
if attribute in ['colour', 'color', 'colours', 'colors']:
await self.changeProfileColour(ctx, int(value))
return
if attribute in ["lords", "lord"]:
if str(value)[0] == "+":
player['Achievements']['lords'] += int(value)
else:
player['Achievements']['lords'] = int(value)
elif attribute in ["clans" "clan"]:
player["Base"]["clan"] = value
elif attribute in ["squires", "squire"]:
if str(value)[0] == "+":
player['Achievements']['squires'] += int(value)
else:
player['Achievements']['squires'] = int(value)
elif attribute in ["rating"]:
player['Achievements']['rating'] = int(value)
elif attribute in ["unit", "units", "troop"]:
player['Favourites']['unit'] = value
elif attribute in ["tactic", "strategy", "layout"]:
player['Favourites']['tactic'] = value
elif attribute in ["tome", "masteryskill", "book"]:
player['Favourites']['tome'] = value
elif attribute in ["skin", "look"]:
player['Favourites']['skin'] = value
elif attribute in ["country", "location"]:
player['Base']['country'] = value
elif attribute in ["name", "accountname", "account", "username"]:
player['Base']['username'] = value
else:
await ctx.send("This is not a valid setting. You can change: " +
"`username`, `clan`, `country`, `lords`, `squires`, `rating`, `unit`, `tactic`, `tome`, `skin`, `colour`.")
return
except ValueError:
await ctx.send("Invalid Value. Please choose a number.")
else:
await ctx.send("Profile updated.")
data_handler.dump(profiles, "profiles")
@profile.command(name='colour', aliases = ['color', 'colours', 'colors'])
async def changeProfileColour(self, ctx, colour:int = None):
"""
Allows you to change the colour of all your profile based information!
"""
profiles = data_handler.load("profiles")
try:
player = profiles[str(ctx.author.id)]
except:
await ctx.send("An error occured. Please try again.")
return
colourList = list(player['Settings']['colours'])
if colour is None or colour >= len(colourList) or colour < 0:
description = "Unlocked Colours:"
for colourIndex in range(len(colourList)):
description = description + f"\n{colourIndex}. {colourList[colourIndex]} - `#{player['Settings']['colours'][colourList[colourIndex]]}`"
embed = discord.Embed(title = "Please select a valid colour.",
colour = int(player['Settings']['colours'][player['Settings']['colour']], 16),
description = description)
Color = str(colourList.index(player['Settings']['colour'])) + ". " + player['Settings']['colour'] + " - `#" + player['Settings']['colours'][f"{player['Settings']['colour']}"] + "`"
embed.add_field(name = "Current Colour:",
value = Color)
embed.set_footer(text = f"Requested by {ctx.author.display_name}",
icon_url = ctx.author.avatar_url_as(static_format='png'))
await ctx.send(embed=embed)
return
player['Settings']['colour'] = colourList[colour]
profiles[ctx.author.id] = player
data_handler.dump(profiles, "profiles")
await ctx.send("Updated your colour.")
@commands.Cog.listener()
async def on_message(self, ctx):
"""
Gives you rank points per message on a one minute cooldown.
"""
if ctx.author.bot:
return
profiles = data_handler.load("profiles")
try:
player = profiles[str(ctx.author.id)]
except KeyError:
profiles[f"{ctx.author.id}"] = {
"Base": {
"username": f"{ctx.author.display_name}", "clanID": "None", "country": "Earth"},
"Level": {
"rp": 0, "rank": 0, "timeOfNextEarn": 0},
"Achievements": {
"lords": 0, "squires": 0, "rating": 1000},
"Favourites": {
"unit": "None", "tactic": "None", "tome": "None", "skin": "None"},
"Settings": {
"rankUpMessage": "chat", "colour": "Default", "colours": {"Default": "000000"},"permissions": []}}
player = profiles[str(ctx.author.id)]
gained_rp = int(random.randint(config.rp_min, config.rp_max) * config.rp_mult)
cooldown, rankedUp, rank = gainedRP(player, gained_rp)
if cooldown:
return
player['Level']['rank'] = rank
player['Level']['timeOfNextEarn'] = time.time() + config.rp_cooldown
player['Level']['rp'] += gained_rp
pRUM = player['Settings']['rankUpMessage']
if rankedUp and pRUM in ['any','dm','chat']:
servers = data_handler.load("servers")
try:
sRUM = servers[str(ctx.guild.id)]['Messages']['rankUpMessages']
except KeyError:
sRUM = "any"
if sRUM == "channel":
destination = ctx.guild.get_channel(servers[str(ctx.guild.id)]['Messages']['rankUpChannel'])
elif sRUM == "any" and pRUM in ["chat", "any"]:
destination = ctx.channel
elif pRUM in ["dm", "any"]:
destination = ctx.author
try:
await destination.send(f"Congrats {ctx.author.mention}! You've earned enough rank points to rank up to Rank {rank}!")
except discord.Forbidden:
if pRUM == "any":
destination = ctx.author
await destination.send(f"Congrats {ctx.author.mention}! You've earned enough rank points to rank up to Rank {rank}!")
if rank == 1:
await destination.send("You've also unlocked a new colour: Rank 1!")
player['Settings']['colours']['Rank 1'] = "fefefe"
elif rank == 5:
await destination.send("You've also unlocked a new colour: Rank 5!")
player['Settings']['colours']['Rank 5'] = "7af8d3"
elif rank == 10:
await destination.send("You've also unlocked a new colour: Level 10!")
player['Settings']['colours']['Rank 10'] = "327c31"
data_handler.dump(profiles, "profiles")
@profile.group(name="leaderboard", aliases=["lb"], invoke_without_command = True)
async def levelLB(self, ctx, page: int = 1):
"""
Check where people are relative to each other! Not specifying a page will select the first page.
"""
if page < 1:
await ctx.send("That isn't a valid page.")
return
# Sort the dictionary into a list.
profiles = data_handler.load("profiles")
rankings = []
description = ""
for player in profiles:
try:
rankings.append({'id': player, 'rp': profiles[player]['Level']['rp']})
except KeyError:
pass
if page > ((len(rankings) // 10) + 1):
await ctx.send("That page is too large.")
return
def getKey(item):
return item['rp']
rankings = sorted(rankings, reverse = True, key = getKey)
# Add the top 10
end = 10 * page
if len(rankings) < (10 * page):
end = len(rankings)
for i in range((page * 10) - 10, end):
user = await ctx.bot.fetch_user(rankings[i]['id'])
description += f"**{i + 1}.** {user.name}#{user.discriminator} - {rankings[i]['rp']} rank points.\n"
# Add member
index = -1
print(rankings)
for i in range(len(rankings)):
if int(rankings[i]['id']) == ctx.author.id:
index = i
if index <= (end) and index >= (end - 10):
embed = discord.Embed(title="Global rank point leaderboard",
colour=discord.Colour(0xa72693),
description=description,
inline=True)
embed.set_footer(text=f"Requested by {ctx.author.display_name}",
icon_url=ctx.author.avatar_url_as(static_format="png"))
await ctx.send(content="Here you go!", embed=embed)
return
description += "--==ME==--"
for i in [index - 2, index - 1, index, index + 1, index + 2]:
if i != len(rankings):
user = await ctx.bot.fetch_user(rankings[i]['id'])
description += f"\n**{i + 1}.** {user.name}#{user.discriminator} - {rankings[i]['rp']} rank points."
embed = discord.Embed(title="Rank leaderboard",
colour=discord.Colour(0xa72693),
description=description,
inline=True)
embed.set_footer(text=f"Requested by {ctx.author.display_name}",
icon_url=ctx.author.avatar_url_as(static_format="png"))
# Send embed
await ctx.send(content="Here you go!", embed=embed)
@profile.group(name = 'options', aliases = ['option', 'o', 'O'])
async def pOptions(self, ctx, option:str = None, value:str = None):
"""
        Checks or changes profile options.
To check options, don't specify an option or values.
        To change an option, specify the option and its new value.
Leave the value blank to see possible settings.
"""
profiles = data_handler.load("profiles")
try:
player = profiles[str(ctx.author.id)]
except KeyError:
await ctx.send("An error occured. Please try again.")
if option is None:
embed = discord.Embed(title = "Personal Settings",
description = "To change an option, specify the option and it's new value.\nLeave the value blank to see possible settings.",
colour = int(player["Settings"]["colours"][player["Settings"]["colour"]], 16))
# rankUpMessage setting
if player["Settings"]["rankUpMessage"] == "any":
embed.add_field(name = "`RankUpMessage` **:** `any`",
value = "This means the bot will try to tell you in chat when you level up, or in the server's level up channel. If it can't do either, it will DM you.")
elif player["Settings"]["rankUpMessage"] == "chat":
embed.add_field(name = "`RankUpMessage` **:** `chat`",
value = "This means the bot will try to tell you in chat when you level up, or in the server's level up channel. If it can't do either, it will **not** DM you.")
elif player["Settings"]["rankUpMessage"] == "dm":
embed.add_field(name = "`RankUpMessage` **:** `dm`",
value = "This means the bot shall try to DM you with the rank up message. If that's not possible, you won't be informed.")
elif player["Settings"]["rankUpMessage"] == "none":
embed.add_field(name = "`RankUpMessage` **:** `none`",
value = "This means you will not be told when you rank up.")
# Not sure if I want to use this feature...
# permissions = "None"
# if "*" in player["Settings"]["permissions"]:
# permissions = "*"
# embed.add_field(name = "Permissions",
# value = permissions)
embed.set_footer(text = f"Requested by {ctx.author.display_name}",
icon_url = ctx.author.avatar_url_as(static_format='png'))
embed.set_thumbnail(url = ctx.author.avatar_url_as(static_format = 'png'))
await ctx.send(content = "", embed=embed)
elif option.lower() in ["rum", "rankupmessage", "rankup"]:
if value is None:
embed = discord.Embed(title = "Rank Up Message",
description = "Specify where rank up messages should be allowed.",
colour = int(player["Settings"]["colours"][player["Settings"]["colour"]], 16))
embed.add_field(name = "`any`",
value = "This means the bot will try to tell you in chat when you level up, or in the server's level up channel. If it can't do either, it will DM you.")
embed.add_field(name = "`chat`",
value = "This means the bot will try to tell you in chat when you level up, or in the server's level up channel. If it can't do either, it will **not** DM you.")
embed.add_field(name = "`dm`",
value = "This means the bot shall try to DM you with the rank up message. If that's not possible, you won't be informed.")
embed.add_field(name = "`none`",
value = "This means you will not be told when you rank up.")
embed.set_footer(text = f"Requested by {ctx.author.display_name}",
icon_url = ctx.author.avatar_url_as(static_format='png'))
await ctx.send(content = "", embed=embed)
elif value.lower() == "any":
player["Settings"]["rankUpMessage"] = "any"
await ctx.send(f"{option} updated.")
elif value.lower() == "chat":
player["Settings"]["rankUpMessage"] = "chat"
await ctx.send(f"{option} updated.")
elif value.lower() == "dm":
player["Settings"]["rankUpMessage"] = "dm"
await ctx.send(f"{option} updated.")
elif value.lower() == "none":
player["Settings"]["rankUpMessage"] = "none"
await ctx.send(f"{option} updated.")
profiles[str(ctx.author.id)] = player
data_handler.dump(profiles, "profiles")
@commands.is_owner()
@profile.command(name = 'reset', hidden = True)
async def resetLevel(self, ctx):
"""
        Resets all rank points. Used when testing the rate of earning.
"""
profiles = {}
data_handler.dump(profiles, "profiles")
await ctx.send("Reset all profiles.")
def setup(bot):
bot.add_cog(Profiles(bot))
|
[
"discord.ext.commands.Cog.listener",
"datetime.datetime.utcfromtimestamp",
"collections.OrderedDict.fromkeys",
"discord.Colour",
"data.data_handler.data_handler.load",
"discord.Embed",
"discord.ext.commands.is_owner",
"discord.ext.commands.group",
"data.data_handler.data_handler.dump",
"time.time",
"random.randint"
] |
[((1201, 1227), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""clans"""'], {}), "('clans')\n", (1218, 1227), False, 'from data.data_handler import data_handler\n'), ((1243, 1272), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (1260, 1272), False, 'from data.data_handler import data_handler\n'), ((4429, 4458), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (4446, 4458), False, 'from data.data_handler import data_handler\n'), ((6182, 6276), 'discord.ext.commands.group', 'commands.group', ([], {'name': '"""profile"""', 'invoke_without_command': '(True)', 'aliases': "['p', 'P', 'Profile']"}), "(name='profile', invoke_without_command=True, aliases=['p',\n 'P', 'Profile'])\n", (6196, 6276), False, 'from discord.ext import commands\n'), ((14560, 14583), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (14581, 14583), False, 'from discord.ext import commands\n'), ((25044, 25063), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (25061, 25063), False, 'from discord.ext import commands\n'), ((316, 327), 'time.time', 'time.time', ([], {}), '()\n', (325, 327), False, 'import time\n'), ((6535, 6564), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (6552, 6564), False, 'from data.data_handler import data_handler\n'), ((13016, 13045), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (13033, 13045), False, 'from data.data_handler import data_handler\n'), ((14467, 14506), 'data.data_handler.data_handler.dump', 'data_handler.dump', (['profiles', '"""profiles"""'], {}), "(profiles, 'profiles')\n", (14484, 14506), False, 'from data.data_handler import data_handler\n'), ((14779, 14808), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (14796, 14808), False, 'from data.data_handler import data_handler\n'), ((17525, 17564), 'data.data_handler.data_handler.dump', 'data_handler.dump', (['profiles', '"""profiles"""'], {}), "(profiles, 'profiles')\n", (17542, 17564), False, 'from data.data_handler import data_handler\n'), ((17988, 18017), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (18005, 18017), False, 'from data.data_handler import data_handler\n'), ((20760, 20789), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (20777, 20789), False, 'from data.data_handler import data_handler\n'), ((24985, 25024), 'data.data_handler.data_handler.dump', 'data_handler.dump', (['profiles', '"""profiles"""'], {}), "(profiles, 'profiles')\n", (25002, 25024), False, 'from data.data_handler import data_handler\n'), ((25261, 25300), 'data.data_handler.data_handler.dump', 'data_handler.dump', (['profiles', '"""profiles"""'], {}), "(profiles, 'profiles')\n", (25278, 25300), False, 'from data.data_handler import data_handler\n'), ((10622, 10651), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""profiles"""'], {}), "('profiles')\n", (10639, 10651), False, 'from data.data_handler import data_handler\n'), ((12709, 12748), 'data.data_handler.data_handler.dump', 'data_handler.dump', (['profiles', '"""profiles"""'], {}), "(profiles, 'profiles')\n", (12726, 12748), False, 'from data.data_handler import data_handler\n'), ((15851, 15862), 'time.time', 'time.time', ([], 
{}), '()\n', (15860, 15862), False, 'import time\n'), ((16055, 16083), 'data.data_handler.data_handler.load', 'data_handler.load', (['"""servers"""'], {}), "('servers')\n", (16072, 16083), False, 'from data.data_handler import data_handler\n'), ((7307, 7336), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['userids'], {}), '(userids)\n', (7327, 7336), False, 'from collections import OrderedDict\n'), ((8311, 8429), 'discord.Embed', 'discord.Embed', ([], {'title': '"""I found more than one matching profile. Please select the correct one:"""', 'description': '""""""'}), "(title=\n 'I found more than one matching profile. Please select the correct one:',\n description='')\n", (8324, 8429), False, 'import discord\n'), ((15600, 15644), 'random.randint', 'random.randint', (['config.rp_min', 'config.rp_max'], {}), '(config.rp_min, config.rp_max)\n', (15614, 15644), False, 'import random\n'), ((19988, 20012), 'discord.Colour', 'discord.Colour', (['(10954387)'], {}), '(10954387)\n', (20002, 20012), False, 'import discord\n'), ((19192, 19216), 'discord.Colour', 'discord.Colour', (['(10954387)'], {}), '(10954387)\n', (19206, 19216), False, 'import discord\n'), ((3405, 3416), 'time.time', 'time.time', ([], {}), '()\n', (3414, 3416), False, 'import time\n'), ((3440, 3477), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (3474, 3477), False, 'import datetime\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
from src.options.general import opts
from src.models.ADNet import adnet
from mindspore import Tensor, export, context
parser = argparse.ArgumentParser(
description='ADNet test')
parser.add_argument('--weight_file', default='', type=str, help='The pretrained weight file')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--target_device', type=int, default=0)
args = parser.parse_args()
context.set_context(device_target=args.device_target, mode=context.PYNATIVE_MODE, device_id=args.target_device)
opts['num_videos'] = 1
net, domain_specific_nets = adnet(opts, trained_file=args.weight_file)
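# Random dummy input in ADNet's expected NCHW shape, used to trace the network for MINDIR export.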
input_ = np.random.uniform(0.0, 1.0, size=[128, 3, 112, 112]).astype(np.float32)
export(net, Tensor(input_), file_name='ADNet', file_format='MINDIR')
print('export finished')
|
[
"argparse.ArgumentParser",
"mindspore.context.set_context",
"mindspore.Tensor",
"numpy.random.uniform",
"src.models.ADNet.adnet"
] |
[((832, 881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ADNet test"""'}), "(description='ADNet test')\n", (855, 881), False, 'import argparse\n'), ((1169, 1285), 'mindspore.context.set_context', 'context.set_context', ([], {'device_target': 'args.device_target', 'mode': 'context.PYNATIVE_MODE', 'device_id': 'args.target_device'}), '(device_target=args.device_target, mode=context.\n PYNATIVE_MODE, device_id=args.target_device)\n', (1188, 1285), False, 'from mindspore import Tensor, export, context\n'), ((1332, 1374), 'src.models.ADNet.adnet', 'adnet', (['opts'], {'trained_file': 'args.weight_file'}), '(opts, trained_file=args.weight_file)\n', (1337, 1374), False, 'from src.models.ADNet import adnet\n'), ((1469, 1483), 'mindspore.Tensor', 'Tensor', (['input_'], {}), '(input_)\n', (1475, 1483), False, 'from mindspore import Tensor, export, context\n'), ((1385, 1437), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '[128, 3, 112, 112]'}), '(0.0, 1.0, size=[128, 3, 112, 112])\n', (1402, 1437), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
paths_test = [os.path.join('data', '*.out')]
return {'pyradex.tests': paths_test}
|
[
"os.path.join"
] |
[((117, 146), 'os.path.join', 'os.path.join', (['"""data"""', '"""*.out"""'], {}), "('data', '*.out')\n", (129, 146), False, 'import os\n')]
|
"""Finds Guids that do not have referents or that point to referents that no longer exist.
E.g. a node was created and given a guid but an error caused the node to
get deleted, leaving behind a guid that points to nothing.
"""
import sys
from modularodm import Q
from framework.guid.model import Guid
from website.app import init_app
from scripts import utils as scripts_utils
import logging
logger = logging.getLogger(__name__)
def main():
if 'dry' not in sys.argv:
scripts_utils.add_file_logger(logger, __file__)
# Set up storage backends
init_app(routes=False)
logger.info('{n} invalid GUID objects found'.format(n=len(get_targets())))
logger.info('Finished.')
def get_targets():
"""Find GUIDs with no referents and GUIDs with referents that no longer exist."""
# Use a loop because querying MODM with Guid.find(Q('referent', 'eq', None))
# only catches the first case.
ret = []
    # NodeFiles were once a GuidStored object and are no longer used.
    # However, they still exist in the production database. We just skip over them
    # for now, but they will probably need to be removed in the future.
# There were also 10 osfguidfile objects that lived in a corrupt repo that
# were not migrated to OSF storage, so we skip those as well. /sloria /jmcarp
for each in Guid.find(Q('referent.1', 'nin', ['nodefile', 'osfguidfile'])):
if each.referent is None:
logger.info('GUID {} has no referent.'.format(each._id))
ret.append(each)
return ret
if __name__ == '__main__':
main()
|
[
"logging.getLogger",
"modularodm.Q",
"scripts.utils.add_file_logger",
"website.app.init_app"
] |
[((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((566, 588), 'website.app.init_app', 'init_app', ([], {'routes': '(False)'}), '(routes=False)\n', (574, 588), False, 'from website.app import init_app\n'), ((484, 531), 'scripts.utils.add_file_logger', 'scripts_utils.add_file_logger', (['logger', '__file__'], {}), '(logger, __file__)\n', (513, 531), True, 'from scripts import utils as scripts_utils\n'), ((1353, 1404), 'modularodm.Q', 'Q', (['"""referent.1"""', '"""nin"""', "['nodefile', 'osfguidfile']"], {}), "('referent.1', 'nin', ['nodefile', 'osfguidfile'])\n", (1354, 1404), False, 'from modularodm import Q\n')]
|
"""
This script tests and calls, in several different ways,
functions from the QRColorChecker modules.
@author: <NAME>
@mail: <EMAIL>
"""
import unittest
import hashlib
import dateutil
from chalicelib.server import Server
import sys
import json
from datetime import datetime
sys.path.append('../chalicelib')
class AppTest(unittest.TestCase):
def setUp(self):
self.sns_client = TestSNS()
self.log = TestLog()
self.dynamodb_device_data = TestDynamoDB()
self.dynamodb_device = TestDynamoDB()
self.str_data = '{"DevEUI_uplink": {"Time": "2017-03-11T11:52:50.412+01:00","DevEUI": "0004A30B001C3306",' \
'"FPort": "7","FCntUp": "1","MType": "2","FCntDn": "2","payload_hex": "10bb17f18198100734",' \
'"mic_hex": "c00c1cfa","Lrcid": "00000127","LrrRSSI": "-64.000000","LrrSNR": "9.000000",' \
'"SpFact": "11","SubBand": "G1","Channel": "LC2","DevLrrCnt": "1","Lrrid": "08060412","Late":' \
' "0","LrrLAT": "41.550377","LrrLON": "2.241691","Lrrs": {"Lrr": {"Lrrid": "08060412",' \
'"Chain": "0","LrrRSSI": "-64.000000","LrrSNR": "9.000000","LrrESP": "-64.514969"}},' \
'"CustomerID": "100001774",' \
'"CustomerData": {"alr":{"pro":"LORA/Generic","ver":"1"}},' \
'"ModelCfg": "0","DevAddr": "260113E2","AckRequested": "0",' \
'"rawMacCommands": "0703070307030703"}}'
def test_parse_lora_json(self):
jsonbody = json.loads(self.str_data)
parsed_json = Server.parse_lora_json(self.str_data)
time = jsonbody["DevEUI_uplink"]["Time"]
payload = jsonbody["DevEUI_uplink"]["payload_hex"]
device_id = jsonbody["DevEUI_uplink"]["DevAddr"]
virtual_tx = device_id + "-" + time
hash_object = hashlib.sha256(virtual_tx.encode())
hex_dig = hash_object.hexdigest()
dt = dateutil.parser.parse(time)
strftime = dt.strftime("%s")
time_millis = int(strftime) * 1000
self.assertEqual(parsed_json["time_json"], time)
self.assertEqual(parsed_json["timeStamp"], int(time_millis))
self.assertEqual(parsed_json["payload"], payload)
self.assertEqual(parsed_json["DevEUI"], device_id)
self.assertEqual(parsed_json["type"], "LORA")
self.assertEqual(parsed_json["extra"], json.dumps(jsonbody))
self.assertEqual(parsed_json["virtual_tx"], hex_dig)
# http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1515360218&id=IDTest&data=02180AE4"
def test_parse_sigfox(self):
data_dic = {
"context": {
"httpMethod": "GET",
"identity": {
"sourceIp": "127.0.0.1"
},
"resourcePath": "/sigfox"
},
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate",
"connection": "keep-alive",
"host": "localhost:8000",
"user-agent": "HTTPie/0.9.8"
},
"method": "GET",
"query_params": {
"data": "10bb17f18198100734",
"id": "260113E2",
"time": "1515360218"
},
"stage_vars": {},
"uri_params": {}
}
parsed_dic = Server.parse_sigfox_dic(data_dic)
d = datetime.utcfromtimestamp(int("1515360218") * 1000 / 1e3)
json_date = str(d.isoformat()) + "Z"
virtual_tx = "260113E2" + "-" + json_date
hash_object = hashlib.sha256(virtual_tx.encode())
hex_dig = hash_object.hexdigest()
self.assertEqual(parsed_dic["time_json"], json_date)
self.assertEqual(parsed_dic["timeStamp"], int("1515360218"))
self.assertEqual(parsed_dic["payload"], "10bb17f18198100734")
self.assertEqual(parsed_dic["DevEUI"], "260113E2")
self.assertEqual(parsed_dic["type"], "SIGFOX")
self.assertEqual(parsed_dic["virtual_tx"], hex_dig)
# http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1515360218&id=IDTest&data=02180AE4&test=test"
def test_parse_sigfox_with_test_data(self):
data_dic = {
"method": "GET",
"query_params": {
"data": "10bb17f18198100734",
"id": "260113E2",
"time": "1515360218",
"test": "test"
},
"stage_vars": {},
"uri_params": {}
}
parsed_dic = Server.parse_sigfox_dic(data_dic)
self.assertEqual(parsed_dic["timeStamp"], int("1515360218"))
self.assertEqual(parsed_dic["payload"], "10bb17f18198100734")
self.assertEqual(parsed_dic["DevEUI"], "260113E2")
self.assertEqual(parsed_dic["type"], "SIGFOX")
self.assertEqual(parsed_dic["test"], "test")
def test_publishing_data_to_SNS(self):
data_to_publish = {
"DevEUI": "260113E3",
"extra": {
"DevEUI_uplink": {
"CustomerID": "100001774",
"DevAddr": "260113E3"
}
},
"payload": "010000beef",
"timeStamp": 1499366509000,
"time_json": "2017-07-06T18:41:49.51+02:00",
"type": "LORA",
"virtual_tx": "2dd66154468fa5d433420f5bad5d3f580f3dab46fa33e127ef69c511f641ae2f"
}
server = Server(None, None, self.sns_client, self.log)
expected_message = json.dumps(data_to_publish)
server.publish_data_store_device(data_to_publish)
self.assertEqual(1, self.sns_client.return_published_times())
self.assertEqual(expected_message, self.sns_client.return_message())
self.assertEqual("arn:aws:sns:eu-west-1:488643450383:StoreDeviceData", self.sns_client.return_topicarn())
def test_persist_data_to_DynamoDB(self):
server = Server(self.dynamodb_device_data, None, None, self.log)
expected_item = {
'title': "The Big New Movie",
'year': 2015,
'info': {
'plot': "Nothing happens at all.",
'rating': "0"
}
}
server.persist_data(expected_item)
self.assertEqual(1, self.dynamodb_device_data.return_persisted_times())
self.assertEqual(expected_item, self.dynamodb_device_data.return_persisted_item())
def test_parsing_none_known_payload(self):
expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
"payload": "A1bb17f18198100734",
"DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
geolocation = Server.parse_payload(expected_item)
self.assertIsNone(geolocation)
def test_parsing_geolocation_payload(self):
expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
"payload": "10bb17f18198100734",
"DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
geolocation = Server.parse_payload(expected_item)
self.assertIsNotNone(geolocation)
payload = expected_item["payload"]
lat_hex = payload[2:8]
lat_str = int(lat_hex, 16)
lat = (lat_str * 180 / 16777215) - 90
lng_hex = payload[8:14]
lng_str = int(lng_hex, 16)
lng = (lng_str * 360 / 16777215) - 180
self.assertEqual(1499366509000, geolocation["timeStamp"])
self.assertIsNotNone(geolocation["GEO"])
# AppTest.printGeoLocation(lat, lat_hex, lat_str, lng_hex, lng_str, payload, lng)
self.assertEqual(str(lat), geolocation["GEO"]["lat"])
self.assertEqual(str(lng), geolocation["GEO"]["lng"])
# Example query:
# http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1510098998&id=260113E3&data=02180AE4"
def test_parsing_keep_alive_payload(self):
expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
"payload": "02180AE4",
"DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
keep_alive = Server.parse_payload(expected_item)
self.assertIsNotNone(keep_alive)
payload = expected_item["payload"]
interval = payload[2:4]
interval_int = int(interval, 16)
        voltage_hex = payload[4:8]
        voltage_hex_dec = int(voltage_hex, 16) / 1000
self.assertEqual(1499366509000, keep_alive["timeStamp"])
self.assertIsNotNone(keep_alive["KA"])
self.assertEqual(str(interval_int), keep_alive["KA"]["interval"])
self.assertEqual(str(voltatge_hex_dec), keep_alive["KA"]["voltage"])
def test_dispatch_alarm_Keep_Alive_low_value(self):
server = Server(None, None, self.sns_client, self.log)
virtual_tx = "AE1234567"
data = {"timeStamp": "1499366509000",
"DevEUI": "260113E3",
"KA":
{"interval": "24",
"voltage": "2.456"}}
server.dispatch_alarm(virtual_tx, data)
data.update({"virtual_tx": virtual_tx})
expected_message = json.dumps(data)
self.assertEqual(1, self.sns_client.return_published_times())
self.assertEqual("arn:aws:sns:eu-west-1:488643450383:NotifySNS", self.sns_client.return_topicarn())
self.assertEqual(expected_message, self.sns_client.return_message())
self.assertEqual("Triggered Alarm 260113E3", self.sns_client.return_subject())
def test_no_dispatch_alarm_for_Keep_Alive_high_value(self):
server = Server(None, None, self.sns_client, self.log)
data = {"timeStamp": "1499366509000",
"DevEUI": "260113E3",
"KA":
{"interval": "24",
"voltage": "2.856"}}
server.dispatch_alarm("AE1234567", data)
self.assertEqual(0, self.sns_client.return_published_times())
def test_not_update_data_in_DynamoDB_if_None(self):
server = Server(self.dynamodb_device_data, None, None, self.log)
expected_item = None
server.update_data(expected_item)
self.assertEqual(0, self.dynamodb_device_data.return_updated_times())
def test_update_data_in_DynamoDB(self):
server = Server(self.dynamodb_device_data, None, None, self.log)
expected_item = {
"timeStamp": 1499366509000,
"DevEUI": "260113E3",
"GEO": {"lat": "12.5", "lng": "1.4"}
}
server.update_data(expected_item)
self.assertEqual(1, self.dynamodb_device_data.return_updated_times())
self.assertEqual(
{"timeStamp": 1499366509000, "DevEUI": "260113E3"},
self.dynamodb_device_data.return_updated_item()["Key"])
self.assertEqual(
'SET geo = :val',
self.dynamodb_device_data.return_updated_item()["UpdateExpression"])
self.assertEqual(
{':val': {"lat": "12.5", "lng": "1.4"}},
self.dynamodb_device_data.return_updated_item()["ExpressionAttributeValues"])
@staticmethod
def printGeoLocation(lat, lat_hex, lat_str, lng_hex, lng_str, payload, lng):
        str_packet_id = payload[:2]
print("payload:\t" + payload)
print("packed_id:\t" + str_packet_id)
print("lat_hex:\t" + lat_hex)
print("lat_str\t" + str(lat_str))
print("lat\t" + str(lat))
print("lng_hex:\t" + lng_hex)
print("lng_str:\t" + str(lng_str))
print("lat: " + str(lat) + ", lng: " + str(lng))
class TestLog:
def __init__(self):
self.message = ''
self.logged = 0
def debug(self, message):
self.message = message
self.logged += 1
return message
def return_message(self):
return self.message
def return_logging_times(self):
return self.logged
class TestSNS:
def __init__(self):
self.Message = ''
self.TopicArn = ''
self.Subject = ''
self.published = 0
def publish(self, TopicArn, Subject, Message):
self.Message = Message
self.TopicArn = TopicArn
self.Subject = Subject
self.published += 1
def return_topicarn(self):
return self.TopicArn
def return_message(self):
return self.Message
def return_published_times(self):
return self.published
def return_subject(self):
return self.Subject
class TestDynamoDB:
def __init__(self):
self.Item = ''
self.persisted = 0
self.updated = 0
self.Key = ''
self.UpdateExpression = ''
self.ExpressionAttributeValues = ''
self.ReturnValues = ''
def put_item(self, Item):
self.Item = Item
self.persisted += 1
def update_item(self, Key, UpdateExpression, ExpressionAttributeValues, ReturnValues):
self.Key = Key
self.UpdateExpression = UpdateExpression
self.ExpressionAttributeValues = ExpressionAttributeValues
self.ReturnValues = ReturnValues
self.updated += 1
def return_persisted_item(self):
return self.Item
def return_persisted_times(self):
return self.persisted
def return_updated_item(self):
return {"Key": self.Key,
"UpdateExpression": self.UpdateExpression,
"ExpressionAttributeValues": self.ExpressionAttributeValues,
"ReturnValues": self.ReturnValues}
def return_updated_times(self):
return self.updated
|
[
"dateutil.parser.parse",
"json.loads",
"chalicelib.server.Server.parse_payload",
"chalicelib.server.Server.parse_lora_json",
"json.dumps",
"chalicelib.server.Server.parse_sigfox_dic",
"chalicelib.server.Server",
"sys.path.append"
] |
[((281, 313), 'sys.path.append', 'sys.path.append', (['"""../chalicelib"""'], {}), "('../chalicelib')\n", (296, 313), False, 'import sys\n'), ((1581, 1606), 'json.loads', 'json.loads', (['self.str_data'], {}), '(self.str_data)\n', (1591, 1606), False, 'import json\n'), ((1629, 1666), 'chalicelib.server.Server.parse_lora_json', 'Server.parse_lora_json', (['self.str_data'], {}), '(self.str_data)\n', (1651, 1666), False, 'from chalicelib.server import Server\n'), ((1992, 2019), 'dateutil.parser.parse', 'dateutil.parser.parse', (['time'], {}), '(time)\n', (2013, 2019), False, 'import dateutil\n'), ((3452, 3485), 'chalicelib.server.Server.parse_sigfox_dic', 'Server.parse_sigfox_dic', (['data_dic'], {}), '(data_dic)\n', (3475, 3485), False, 'from chalicelib.server import Server\n'), ((4641, 4674), 'chalicelib.server.Server.parse_sigfox_dic', 'Server.parse_sigfox_dic', (['data_dic'], {}), '(data_dic)\n', (4664, 4674), False, 'from chalicelib.server import Server\n'), ((5551, 5596), 'chalicelib.server.Server', 'Server', (['None', 'None', 'self.sns_client', 'self.log'], {}), '(None, None, self.sns_client, self.log)\n', (5557, 5596), False, 'from chalicelib.server import Server\n'), ((5624, 5651), 'json.dumps', 'json.dumps', (['data_to_publish'], {}), '(data_to_publish)\n', (5634, 5651), False, 'import json\n'), ((6034, 6089), 'chalicelib.server.Server', 'Server', (['self.dynamodb_device_data', 'None', 'None', 'self.log'], {}), '(self.dynamodb_device_data, None, None, self.log)\n', (6040, 6089), False, 'from chalicelib.server import Server\n'), ((6847, 6882), 'chalicelib.server.Server.parse_payload', 'Server.parse_payload', (['expected_item'], {}), '(expected_item)\n', (6867, 6882), False, 'from chalicelib.server import Server\n'), ((7246, 7281), 'chalicelib.server.Server.parse_payload', 'Server.parse_payload', (['expected_item'], {}), '(expected_item)\n', (7266, 7281), False, 'from chalicelib.server import Server\n'), ((8379, 8414), 'chalicelib.server.Server.parse_payload', 'Server.parse_payload', (['expected_item'], {}), '(expected_item)\n', (8399, 8414), False, 'from chalicelib.server import Server\n'), ((9005, 9050), 'chalicelib.server.Server', 'Server', (['None', 'None', 'self.sns_client', 'self.log'], {}), '(None, None, self.sns_client, self.log)\n', (9011, 9050), False, 'from chalicelib.server import Server\n'), ((9396, 9412), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (9406, 9412), False, 'import json\n'), ((9838, 9883), 'chalicelib.server.Server', 'Server', (['None', 'None', 'self.sns_client', 'self.log'], {}), '(None, None, self.sns_client, self.log)\n', (9844, 9883), False, 'from chalicelib.server import Server\n'), ((10266, 10321), 'chalicelib.server.Server', 'Server', (['self.dynamodb_device_data', 'None', 'None', 'self.log'], {}), '(self.dynamodb_device_data, None, None, self.log)\n', (10272, 10321), False, 'from chalicelib.server import Server\n'), ((10534, 10589), 'chalicelib.server.Server', 'Server', (['self.dynamodb_device_data', 'None', 'None', 'self.log'], {}), '(self.dynamodb_device_data, None, None, self.log)\n', (10540, 10589), False, 'from chalicelib.server import Server\n'), ((2445, 2465), 'json.dumps', 'json.dumps', (['jsonbody'], {}), '(jsonbody)\n', (2455, 2465), False, 'import json\n')]
|
#!/usr/bin/env python
import os
import json
import unittest
from collections import OrderedDict
from spdown.config import Config
TEST_CONFIG_PATHS = OrderedDict([
('local', 'config.json'),
('home', os.path.join(
os.path.expanduser('~'), '.config',
'spdown', 'config'
))
])
TEST_CONFIG = {
'download_directory': '~/TestMusic'
}
class TestConfig(unittest.TestCase):
@staticmethod
def get_backup_path(config_location):
return '{}.bak'.format(
TEST_CONFIG_PATHS[config_location]
)
@staticmethod
def backup_configuration(config_location):
backup_path = TestConfig.get_backup_path(config_location)
if os.path.exists(TEST_CONFIG_PATHS[config_location]):
os.rename(
TEST_CONFIG_PATHS[config_location],
backup_path
)
@staticmethod
def restore_configuration(config_location):
backup_path = TestConfig.get_backup_path(config_location)
if os.path.exists(backup_path):
os.rename(
backup_path,
TEST_CONFIG_PATHS[config_location]
)
@staticmethod
def create_test_config(config_location):
TestConfig.backup_configuration(config_location)
with open(TEST_CONFIG_PATHS[config_location], 'w') as f:
json.dump(TEST_CONFIG, f)
def test_find_configuration_file(self):
config = Config()
for config_path in TEST_CONFIG_PATHS.keys():
TestConfig.backup_configuration(config_path)
for config_path in TEST_CONFIG_PATHS.keys():
config.set_config_path(None)
TestConfig.create_test_config(config_path)
config._load(exit_on_error=False)
TestConfig.restore_configuration(config_path)
self.assertEqual(TEST_CONFIG, config._configuration)
def test_get(self):
config = Config()
for config_path in TEST_CONFIG_PATHS.keys():
TestConfig.backup_configuration(config_path)
for config_path in TEST_CONFIG_PATHS.keys():
config.set_config_path(None)
TestConfig.create_test_config(config_path)
download_directory = config.get('download_directory')
TestConfig.restore_configuration(config_path)
self.assertEqual(download_directory, TEST_CONFIG['download_directory'])
def test_set(self):
config = Config()
for config_path in TEST_CONFIG_PATHS.keys():
TestConfig.backup_configuration(config_path)
for config_path in TEST_CONFIG_PATHS.keys():
config.set_config_path(None)
TestConfig.create_test_config(config_path)
config.set('download_directory', 'test')
TestConfig.restore_configuration(config_path)
self.assertEqual(config.get('download_directory'), 'test')
def test_fix_path_errors(self):
config = Config()
for config_path in TEST_CONFIG_PATHS.keys():
TestConfig.backup_configuration(config_path)
for config_path in TEST_CONFIG_PATHS.keys():
config.set_config_path(None)
TestConfig.create_test_config(config_path)
config.set('download_directory', '~/Music/')
config._configuration = None
self.assertEqual(config.get('download_directory'), '~/Music')
TestConfig.restore_configuration(config_path)
if __name__ == "__main__":
unittest.main()
|
[
"os.path.expanduser",
"os.path.exists",
"spdown.config.Config",
"os.rename",
"unittest.main",
"json.dump"
] |
[((3481, 3496), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3494, 3496), False, 'import unittest\n'), ((694, 744), 'os.path.exists', 'os.path.exists', (['TEST_CONFIG_PATHS[config_location]'], {}), '(TEST_CONFIG_PATHS[config_location])\n', (708, 744), False, 'import os\n'), ((1008, 1035), 'os.path.exists', 'os.path.exists', (['backup_path'], {}), '(backup_path)\n', (1022, 1035), False, 'import os\n'), ((1441, 1449), 'spdown.config.Config', 'Config', ([], {}), '()\n', (1447, 1449), False, 'from spdown.config import Config\n'), ((1922, 1930), 'spdown.config.Config', 'Config', ([], {}), '()\n', (1928, 1930), False, 'from spdown.config import Config\n'), ((2442, 2450), 'spdown.config.Config', 'Config', ([], {}), '()\n', (2448, 2450), False, 'from spdown.config import Config\n'), ((2948, 2956), 'spdown.config.Config', 'Config', ([], {}), '()\n', (2954, 2956), False, 'from spdown.config import Config\n'), ((758, 816), 'os.rename', 'os.rename', (['TEST_CONFIG_PATHS[config_location]', 'backup_path'], {}), '(TEST_CONFIG_PATHS[config_location], backup_path)\n', (767, 816), False, 'import os\n'), ((1049, 1107), 'os.rename', 'os.rename', (['backup_path', 'TEST_CONFIG_PATHS[config_location]'], {}), '(backup_path, TEST_CONFIG_PATHS[config_location])\n', (1058, 1107), False, 'import os\n'), ((1353, 1378), 'json.dump', 'json.dump', (['TEST_CONFIG', 'f'], {}), '(TEST_CONFIG, f)\n', (1362, 1378), False, 'import json\n'), ((230, 253), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (248, 253), False, 'import os\n')]
|
from emulation._emulate.microphysics import TimeMask
from emulation.config import (
EmulationConfig,
ModelConfig,
StorageConfig,
_load_nml,
_get_timestep,
_get_storage_hook,
get_hooks,
)
import emulation.zhao_carr
import datetime
def test_EmulationConfig_from_dict():
seconds = 60
month = 2
config = EmulationConfig.from_dict(
{
"model": {
"path": "some-path",
"online_schedule": {
"period": seconds,
"initial_time": datetime.datetime(2000, month, 1),
},
}
}
)
assert config.model.online_schedule.period == datetime.timedelta(seconds=seconds)
assert config.model.online_schedule.initial_time.month == month
def test_ModelConfig_no_interval():
config = ModelConfig(path="")
assert len(list(config._build_masks())) == 0
def test_ModelConfig_with_interval():
def schedule(time):
return 1.0
config = ModelConfig(path="", online_schedule=schedule)
time_schedule = [
mask for mask in config._build_masks() if isinstance(mask, TimeMask)
][0]
assert time_schedule.schedule == schedule
def test__get_timestep(dummy_rundir):
namelist = _load_nml()
timestep = _get_timestep(namelist)
assert timestep == 900
def test__load_nml(dummy_rundir):
namelist = _load_nml()
assert namelist["coupler_nml"]["hours"] == 1
def test__get_storage_hook(dummy_rundir):
config = StorageConfig()
hook = _get_storage_hook(config)
assert hook
def test_get_hooks(dummy_rundir):
gscond, model, storage = get_hooks()
assert storage
assert model
assert gscond
def test_ModelConfig_mask_where_fortran_cloud_identical():
config = ModelConfig(path="", mask_gscond_identical_cloud=True)
(a,) = config._build_masks()
assert a == emulation.zhao_carr.mask_where_fortran_cloud_identical
def test_ModelConfig_mask_gscond_zero_cloud():
config = ModelConfig(path="", mask_gscond_zero_cloud=True)
(a,) = config._build_masks()
assert a == emulation.zhao_carr.mask_where_fortran_cloud_vanishes_gscond
|
[
"datetime.datetime",
"emulation.config.get_hooks",
"emulation.config.ModelConfig",
"emulation.config._get_timestep",
"emulation.config._load_nml",
"datetime.timedelta",
"emulation.config.StorageConfig",
"emulation.config._get_storage_hook"
] |
[((840, 860), 'emulation.config.ModelConfig', 'ModelConfig', ([], {'path': '""""""'}), "(path='')\n", (851, 860), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1007, 1053), 'emulation.config.ModelConfig', 'ModelConfig', ([], {'path': '""""""', 'online_schedule': 'schedule'}), "(path='', online_schedule=schedule)\n", (1018, 1053), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1263, 1274), 'emulation.config._load_nml', '_load_nml', ([], {}), '()\n', (1272, 1274), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1290, 1313), 'emulation.config._get_timestep', '_get_timestep', (['namelist'], {}), '(namelist)\n', (1303, 1313), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1394, 1405), 'emulation.config._load_nml', '_load_nml', ([], {}), '()\n', (1403, 1405), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1512, 1527), 'emulation.config.StorageConfig', 'StorageConfig', ([], {}), '()\n', (1525, 1527), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1539, 1564), 'emulation.config._get_storage_hook', '_get_storage_hook', (['config'], {}), '(config)\n', (1556, 1564), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1646, 1657), 'emulation.config.get_hooks', 'get_hooks', ([], {}), '()\n', (1655, 1657), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((1786, 1840), 'emulation.config.ModelConfig', 'ModelConfig', ([], {'path': '""""""', 'mask_gscond_identical_cloud': '(True)'}), "(path='', mask_gscond_identical_cloud=True)\n", (1797, 1840), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((2007, 2056), 'emulation.config.ModelConfig', 'ModelConfig', ([], {'path': '""""""', 'mask_gscond_zero_cloud': '(True)'}), "(path='', mask_gscond_zero_cloud=True)\n", (2018, 2056), False, 'from emulation.config import EmulationConfig, ModelConfig, StorageConfig, _load_nml, _get_timestep, _get_storage_hook, get_hooks\n'), ((685, 720), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (703, 720), False, 'import datetime\n'), ((551, 584), 'datetime.datetime', 'datetime.datetime', (['(2000)', 'month', '(1)'], {}), '(2000, month, 1)\n', (568, 584), False, 'import datetime\n')]
|
from os import path
import numpy as np
import pytest
import autofit as af
import autolens as al
from autolens.mock import mock
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
class TestMakeAnalysis:
def test__positions__settings_inputs_are_used_in_positions(
self, positions_x2, positions_x2_noise_map
):
phase_positions_x2 = al.PhasePointSource(
settings=al.SettingsPhasePositions(),
search=mock.MockSearch(),
positions_solver=mock.MockPositionsSolver(model_positions=positions_x2),
)
assert isinstance(phase_positions_x2.settings, al.SettingsPhasePositions)
analysis = phase_positions_x2.make_analysis(
positions=positions_x2, positions_noise_map=positions_x2_noise_map
)
assert analysis.positions.in_grouped_list == positions_x2.in_grouped_list
assert (
analysis.noise_map.in_grouped_list == positions_x2_noise_map.in_grouped_list
)
def test___phase_info_is_made(
self, phase_positions_x2, positions_x2, positions_x2_noise_map
):
phase_positions_x2.make_analysis(
positions=positions_x2,
positions_noise_map=positions_x2_noise_map,
results=mock.MockResults(),
)
file_phase_info = path.join(
phase_positions_x2.search.paths.output_path, "phase.info"
)
phase_info = open(file_phase_info, "r")
search = phase_info.readline()
cosmology = phase_info.readline()
phase_info.close()
assert search == "Optimizer = MockSearch \n"
assert (
cosmology
== 'Cosmology = FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307, Tcmb0=2.725 K, '
"Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.0486) \n"
)
|
[
"autolens.mock.mock.MockResults",
"pytest.mark.filterwarnings",
"autolens.mock.mock.MockPositionsSolver",
"autolens.SettingsPhasePositions",
"os.path.join",
"os.path.realpath",
"autolens.mock.mock.MockSearch"
] |
[((152, 457), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result."""'], {}), "(\n 'ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result.'\n )\n", (178, 457), False, 'import pytest\n'), ((500, 523), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (513, 523), False, 'from os import path\n'), ((1691, 1759), 'os.path.join', 'path.join', (['phase_positions_x2.search.paths.output_path', '"""phase.info"""'], {}), "(phase_positions_x2.search.paths.output_path, 'phase.info')\n", (1700, 1759), False, 'from os import path\n'), ((752, 779), 'autolens.SettingsPhasePositions', 'al.SettingsPhasePositions', ([], {}), '()\n', (777, 779), True, 'import autolens as al\n'), ((801, 818), 'autolens.mock.mock.MockSearch', 'mock.MockSearch', ([], {}), '()\n', (816, 818), False, 'from autolens.mock import mock\n'), ((850, 904), 'autolens.mock.mock.MockPositionsSolver', 'mock.MockPositionsSolver', ([], {'model_positions': 'positions_x2'}), '(model_positions=positions_x2)\n', (874, 904), False, 'from autolens.mock import mock\n'), ((1631, 1649), 'autolens.mock.mock.MockResults', 'mock.MockResults', ([], {}), '()\n', (1647, 1649), False, 'from autolens.mock import mock\n')]
|
from .pressureprofile import PressureProfile
import numpy as np
class ArrayPressureProfile(PressureProfile):
def __init__(self, array, reverse=False):
super().__init__(self.__class__.__name__, array.shape[-1])
if reverse:
self.pressure_profile = array[::-1]
else:
self.pressure_profile = array
def compute_pressure_profile(self):
"""
Sets up the pressure profile for the atmosphere model
"""
logp = np.log10(self.pressure_profile)
gradp = np.gradient(logp)
self.pressure_profile_levels = \
10**np.append(logp-gradp/2, logp[-1]+gradp[-1]/2)
@property
def profile(self):
return self.pressure_profile
def write(self, output):
pressure = super().write(output)
return pressure
@classmethod
    def input_keywords(cls):
        return ['array', 'fromarray']
|
[
"numpy.append",
"numpy.log10",
"numpy.gradient"
] |
[((503, 534), 'numpy.log10', 'np.log10', (['self.pressure_profile'], {}), '(self.pressure_profile)\n', (511, 534), True, 'import numpy as np\n'), ((551, 568), 'numpy.gradient', 'np.gradient', (['logp'], {}), '(logp)\n', (562, 568), True, 'import numpy as np\n'), ((627, 680), 'numpy.append', 'np.append', (['(logp - gradp / 2)', '(logp[-1] + gradp[-1] / 2)'], {}), '(logp - gradp / 2, logp[-1] + gradp[-1] / 2)\n', (636, 680), True, 'import numpy as np\n')]
|
from datadog import initialize, api
options = {
'api_key': '16ff05c7af6ed4652a20f5a8d0c609ce',
'app_key': 'e6a169b9b337355eef90002878fbf9a565e9ee77'
}
initialize(**options)
title = "Mymetric timeboard"
description = "Mymetric Timeboard"
graphs = [
{
"definition": {
"events": [],
"requests": [
{"q": "avg:mymetric{host:ubuntu-xenial}"}
],
"viz": "timeseries"
},
"title": "mymetric in timeseries"
},
{
"definition": {
"events": [],
"requests": [
{"q": "anomalies(avg:postgres.connections.current{host:ubuntu-xenial}, 'basic', 2)"}
],
"viz": "timeseries"
},
"title": "PostgreSQL connections"
},
{
"definition": {
"events": [],
"requests": [
{"q": "avg:mymetric{host:ubuntu-xenial}.rollup(sum, 3600)"}
],
"viz": "timeseries"
},
"title": "Rollup function mymetric"
},
]
template_variables = [{
"name": "ubuntu_xenial",
"prefix": "host",
"default": "host:my-host"
}]
read_only = True
api.Timeboard.create(title=title,description=description,graphs=graphs,template_variables=template_variables)
|
[
"datadog.api.Timeboard.create",
"datadog.initialize"
] |
[((161, 182), 'datadog.initialize', 'initialize', ([], {}), '(**options)\n', (171, 182), False, 'from datadog import initialize, api\n'), ((1191, 1307), 'datadog.api.Timeboard.create', 'api.Timeboard.create', ([], {'title': 'title', 'description': 'description', 'graphs': 'graphs', 'template_variables': 'template_variables'}), '(title=title, description=description, graphs=graphs,\n template_variables=template_variables)\n', (1211, 1307), False, 'from datadog import initialize, api\n')]
|
#! /usr/bin/env python3
from subprocess import call
r = call(["python3", "-m", "korali.rlview", "--help"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--maxObservations", "10000", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--maxReward", "20.0", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--minReward", "-1.0", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--showCI", "0.2", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--averageDepth", "30", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "abf2d_vracer2", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--output", "test.png", "--test"])
if r!=0:
exit(r)
exit(0)
|
[
"subprocess.call"
] |
[((57, 107), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--help']"], {}), "(['python3', '-m', 'korali.rlview', '--help'])\n", (61, 107), False, 'from subprocess import call\n'), ((132, 208), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--test'])\n", (136, 208), False, 'from subprocess import call\n'), ((233, 343), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--maxObservations', '10000', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--maxObservations', '10000', '--test'])\n", (237, 343), False, 'from subprocess import call\n'), ((364, 467), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--maxReward',\n '20.0', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--maxReward', '20.0', '--test'])\n", (368, 467), False, 'from subprocess import call\n'), ((488, 591), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--minReward',\n '-1.0', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--minReward', '-1.0', '--test'])\n", (492, 591), False, 'from subprocess import call\n'), ((612, 711), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--showCI',\n '0.2', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--showCI', '0.2', '--test'])\n", (616, 711), False, 'from subprocess import call\n'), ((732, 836), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--averageDepth', '30', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--averageDepth', '30', '--test'])\n", (736, 836), False, 'from subprocess import call\n'), ((857, 954), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n 'abf2d_vracer2', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n 'abf2d_vracer2', '--test'])\n", (861, 954), False, 'from subprocess import call\n'), ((975, 1079), 'subprocess.call', 'call', (["['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1', '--output',\n 'test.png', '--test']"], {}), "(['python3', '-m', 'korali.rlview', '--dir', 'abf2d_vracer1',\n '--output', 'test.png', '--test'])\n", (979, 1079), False, 'from subprocess import call\n')]
|
from assets.lambdas.transform_findings.index import TransformFindings
import boto3
from moto import mock_s3
def __make_bucket(bucket_name: str):
bucket = boto3.resource('s3').Bucket(bucket_name)
bucket.create()
return bucket
@mock_s3
def test_fix_dictionary():
bucket = __make_bucket('tester')
transform_findings = TransformFindings(bucket.name)
finding = {
'first/level/test': 'test',
'ProductArn': 'arn:aws:securityhub:us-east-1::product/aws/securityhub',
'Types': ['Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark'],
'Description': 'Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to port 22.',
'SchemaVersion': '2018-10-08',
'Compliance': {'Status': 'PASSED'},
'GeneratorId': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/4.1',
'FirstObservedAt': '2021-01-31T04:52:30.123Z',
'CreatedAt': '2021-01-31T04:52:30.123Z',
'RecordState': 'ACTIVE',
'Title': '4.1 Ensure no security groups allow ingress from 0.0.0.0/0 to port 22',
'Workflow': {'Status': 'RESOLVED'},
'LastObservedAt': '2021-05-07T11:05:27.353Z',
'Severity': {'Normalized': 0, 'Label': 'INFORMATIONAL', 'Product': 0, 'Original': 'INFORMATIONAL'},
'UpdatedAt': '2021-05-07T11:05:25.775Z',
'FindingProviderFields': {
'Types': [
'Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark'],
'Severity': {'Normalized': 0, 'Label': 'INFORMATIONAL', 'Product': 0, 'Original': 'INFORMATIONAL'}
},
'WorkflowState': 'NEW',
'ProductFields': {
'StandardsGuideArn': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0',
'StandardsGuideSubscriptionArn': 'arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0',
'RuleId': '4.1',
'RecommendationUrl': 'https://docs.aws.amazon.com/console/securityhub/standards-cis-4.1/remediation',
'RelatedAWSResources:0/name': 'securityhub-restricted-ssh-38a80c22',
'RelatedAWSResources:0/type': 'AWS::Config::ConfigRule',
'StandardsControlArn': 'arn:aws:securityhub:us-east-1:0123456789:control/cis-aws-foundations-benchmark/v/1.2.0/4.1',
'aws/securityhub/ProductName': 'Security Hub',
'aws/securityhub/CompanyName': 'AWS',
'aws/securityhub/FindingId': 'arn:aws:securityhub:us-east-1::product/aws/securityhub/arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0/4.1/finding/2a55570b-74e9-4aa3-9f4e-66f515c7ff03'
},
'AwsAccountId': '0123456789',
'Id': 'arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0/4.1/finding/2a55570b-74e9-4aa3-9f4e-66f515c7ff03',
'Remediation': {
'Recommendation': {
'Text': 'For directions on how to fix this issue, please consult the AWS Security Hub CIS documentation.',
'Url': 'https://docs.aws.amazon.com/console/securityhub/standards-cis-4.1/remediation'}
},
'Resources': [{
'Partition': 'aws',
'Type': 'AwsEc2SecurityGroup',
'Details': {
'AwsEc2SecurityGroup': {
'GroupName': 'default',
'OwnerId': '0123456789',
'VpcId': 'vpc-0123456789',
'IpPermissions': [{'IpProtocol': '-1', 'UserIdGroupPairs': [
{'UserId': '0123456789', 'GroupId': 'sg-0123456789'}]}],
'IpPermissionsEgress': [{'IpProtocol': '-1', 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}],
'GroupId': 'sg-0123456789'}
},
'Region': 'us-east-1', 'Id': 'arn:aws:ec2:us-east-1:0123456789:security-group/sg-0123456789'
}]
}
result = transform_findings.fix_dictionary(finding)
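    # fix_dictionary is expected to replace '/' and ':' in keys with '_' at every nesting level.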
assert isinstance(result, dict)
assert 'first/level/test' not in result
assert 'first_level_test' in result
assert 'ProductFields' in result
assert 'aws/securityhub/ProductName' not in result['ProductFields']
assert 'aws_securityhub_ProductName' in result['ProductFields']
assert 'aws/securityhub/CompanyName' not in result['ProductFields']
assert 'aws_securityhub_CompanyName' in result['ProductFields']
assert 'aws/securityhub/FindingId' not in result['ProductFields']
assert 'aws_securityhub_FindingId' in result['ProductFields']
assert 'RelatedAWSResources:0/name' not in result['ProductFields']
assert 'RelatedAWSResources_0_name' in result['ProductFields']
|
[
"boto3.resource",
"assets.lambdas.transform_findings.index.TransformFindings"
] |
[((339, 369), 'assets.lambdas.transform_findings.index.TransformFindings', 'TransformFindings', (['bucket.name'], {}), '(bucket.name)\n', (356, 369), False, 'from assets.lambdas.transform_findings.index import TransformFindings\n'), ((160, 180), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (174, 180), False, 'import boto3\n')]
|
# -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
import datetime
import json
import os
import re
import sys
import urllib
import urlparse
import xbmc
from resources.lib.modules import control
from resources.lib.modules import cleantitle
class lib_tools:
@staticmethod
def create_folder(folder):
try:
folder = xbmc.makeLegalFilename(folder)
control.makeFile(folder)
try:
if not 'ftp://' in folder: raise Exception()
from ftplib import FTP
ftparg = re.compile('ftp://(.+?):(.+?)@(.+?):?(\d+)?/(.+/?)').findall(folder)
ftp = FTP(ftparg[0][2], ftparg[0][0], ftparg[0][1])
try:
ftp.cwd(ftparg[0][4])
except:
ftp.mkd(ftparg[0][4])
ftp.quit()
except:
pass
except:
pass
@staticmethod
def write_file(path, content):
try:
path = xbmc.makeLegalFilename(path)
if not isinstance(content, basestring):
content = str(content)
file = control.openFile(path, 'w')
file.write(str(content))
file.close()
except Exception as e:
pass
@staticmethod
def nfo_url(media_string, ids):
tvdb_url = 'http://thetvdb.com/?tab=series&id=%s'
tmdb_url = 'https://www.themoviedb.org/%s/%s'
imdb_url = 'http://www.imdb.com/title/%s/'
if 'tvdb' in ids:
return tvdb_url % (str(ids['tvdb']))
elif 'tmdb' in ids:
return tmdb_url % (media_string, str(ids['tmdb']))
elif 'imdb' in ids:
return imdb_url % (str(ids['imdb']))
else:
return ''
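    # Illustrative example (not in the original source): with ids={'imdb': 'tt0133093'}
    # nfo_url('movie', ids) returns 'http://www.imdb.com/title/tt0133093/';
    # a 'tvdb' id takes precedence over 'tmdb' and 'imdb' because it is checked first.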
@staticmethod
def check_sources(title, year, imdb, tvdb=None, season=None, episode=None, tvshowtitle=None, premiered=None):
try:
from resources.lib.modules import sources
src = sources.sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
return src and len(src) > 5
except:
return False
@staticmethod
def legal_filename(filename):
try:
filename = filename.strip()
filename = re.sub(r'(?!%s)[^\w\-_\.]', '.', filename)
filename = re.sub('\.+', '.', filename)
filename = re.sub(re.compile('(CON|PRN|AUX|NUL|COM\d|LPT\d)\.', re.I), '\\1_', filename)
xbmc.makeLegalFilename(filename)
return filename
except:
return filename
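    # Illustrative example (not in the original source): legal_filename('movie: part 1')
    # yields 'movie.part.1'; disallowed characters are replaced with '.', runs of dots
    # are collapsed, and a reserved Windows device name followed by a dot is rewritten
    # with an underscore (e.g. 'CON.txt' becomes 'CON_txt').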
@staticmethod
def make_path(base_path, title, year='', season=''):
show_folder = re.sub(r'[^\w\-_\. ]', '_', title)
show_folder = '%s (%s)' % (show_folder, year) if year else show_folder
path = os.path.join(base_path, show_folder)
if season:
path = os.path.join(path, 'Season %s' % season)
return path
class libmovies:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.movie')), '')
self.check_setting = control.setting('library.check_movie') or 'false'
self.library_setting = control.setting('library.update') or 'true'
self.dupe_setting = control.setting('library.check') or 'true'
self.infoDialog = False
def add(self, name, title, year, imdb, tmdb, range=False):
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
try:
if not self.dupe_setting == 'true': raise Exception()
id = [imdb, tmdb] if not tmdb == '0' else [imdb]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["imdbnumber", "originaltitle", "year"]}, "id": 1}' % (year, str(int(year)+1), str(int(year)-1)))
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['movies']
lib = [i for i in lib if str(i['imdbnumber']) in id or (i['originaltitle'].encode('utf-8') == title and str(i['year']) == year)][0]
except:
lib = []
files_added = 0
try:
if not lib == []: raise Exception()
if self.check_setting == 'true':
src = lib_tools.check_sources(title, year, imdb, None, None, None, None, None)
if not src: raise Exception()
self.strmFile({'name': name, 'title': title, 'year': year, 'imdb': imdb, 'tmdb': tmdb})
files_added += 1
except:
pass
if range == True: return
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def range(self, url):
control.idle()
yes = control.yesnoDialog(control.lang(32555).encode('utf-8'), '', '')
if not yes: return
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import movies
items = movies.movies().get(url, idx=False)
if items == None: items = []
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
self.add('%s (%s)' % (i['title'], i['year']), i['title'], i['year'], i['imdb'], i['tmdb'], range=True)
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo'):
control.execute('UpdateLibrary(video)')
def strmFile(self, i):
try:
name, title, year, imdb, tmdb = i['name'], i['title'], i['year'], i['imdb'], i['tmdb']
sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(title)
transtitle = cleantitle.normalize(title.translate(None, '\/:*?"<>|'))
content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s' % (sys.argv[0], sysname, systitle, year, imdb, tmdb)
folder = lib_tools.make_path(self.library_folder, transtitle, year)
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename(transtitle) + '.strm'), content)
lib_tools.write_file(os.path.join(folder, 'movie.nfo'), lib_tools.nfo_url('movie', i))
except:
pass
class libtvshows:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.tv')),'')
self.version = control.version()
self.check_setting = control.setting('library.check_episode') or 'false'
self.include_unknown = control.setting('library.include_unknown') or 'true'
self.library_setting = control.setting('library.update') or 'true'
self.dupe_setting = control.setting('library.check') or 'true'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.date = (self.datetime - datetime.timedelta(hours = 24)).strftime('%Y%m%d')
self.infoDialog = False
self.block = False
def add(self, tvshowtitle, year, imdb, tvdb, range=False):
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import episodes
items = episodes.episodes().get(tvshowtitle, year, imdb, tvdb, idx=False)
try: items = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'], 'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'], 'premiered': i['premiered']} for i in items]
except: items = []
try:
if not self.dupe_setting == 'true': raise Exception()
if items == []: raise Exception()
id = [items[0]['imdb'], items[0]['tvdb']]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['tvshows']
lib = [i['title'].encode('utf-8') for i in lib if str(i['imdbnumber']) in id or (i['title'].encode('utf-8') == items[0]['tvshowtitle'] and str(i['year']) == items[0]['year'])][0]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % lib)
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['episodes']
lib = ['S%02dE%02d' % (int(i['season']), int(i['episode'])) for i in lib]
items = [i for i in items if not 'S%02dE%02d' % (int(i['season']), int(i['episode'])) in lib]
except:
pass
files_added = 0
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
if self.check_setting == 'true':
if i['episode'] == '1':
self.block = True
src = lib_tools.check_sources(i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered'])
if src: self.block = False
if self.block == True: raise Exception()
premiered = i.get('premiered', '0')
if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
continue
self.strmFile(i)
files_added += 1
except:
pass
if range == True: return
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def range(self, url):
control.idle()
yes = control.yesnoDialog(control.lang(32555).encode('utf-8'), '', '')
if not yes: return
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import tvshows
items = tvshows.tvshows().get(url, idx=False)
if items == None: items = []
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
self.add(i['title'], i['year'], i['imdb'], i['tvdb'], range=True)
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo'):
control.execute('UpdateLibrary(video)')
def strmFile(self, i):
try:
title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered']
episodetitle = urllib.quote_plus(title)
systitle, syspremiered = urllib.quote_plus(tvshowtitle), urllib.quote_plus(premiered)
transtitle = cleantitle.normalize(tvshowtitle.translate(None, '\/:*?"<>|'))
content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (sys.argv[0], episodetitle, year, imdb, tvdb, season, episode, systitle, syspremiered)
folder = lib_tools.make_path(self.library_folder, transtitle, year)
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'), lib_tools.nfo_url('tv', i))
folder = lib_tools.make_path(self.library_folder, transtitle, year, season)
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename('%s S%02dE%02d' % (transtitle, int(season), int(episode))) + '.strm'), content)
except:
pass
class libepisodes:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.tv')),'')
self.library_setting = control.setting('library.update') or 'true'
self.include_unknown = control.setting('library.include_unknown') or 'true'
self.property = '%s_service_property' % control.addonInfo('name').lower()
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.date = (self.datetime - datetime.timedelta(hours = 24)).strftime('%Y%m%d')
self.infoDialog = False
def update(self, query=None, info='true'):
if not query == None: control.idle()
try:
items = []
season, episode = [], []
show = [os.path.join(self.library_folder, i) for i in control.listDir(self.library_folder)[0]]
for s in show:
try: season += [os.path.join(s, i) for i in control.listDir(s)[0]]
except: pass
for s in season:
try: episode.append([os.path.join(s, i) for i in control.listDir(s)[1] if i.endswith('.strm')][-1])
except: pass
for file in episode:
try:
file = control.openFile(file)
read = file.read()
read = read.encode('utf-8')
file.close()
if not read.startswith(sys.argv[0]): raise Exception()
params = dict(urlparse.parse_qsl(read.replace('?','')))
try: tvshowtitle = params['tvshowtitle']
except: tvshowtitle = None
try: tvshowtitle = params['show']
except: pass
if tvshowtitle == None or tvshowtitle == '': raise Exception()
year, imdb, tvdb = params['year'], params['imdb'], params['tvdb']
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
try: tmdb = params['tmdb']
except: tmdb = '0'
items.append({'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb})
except:
pass
items = [i for x, i in enumerate(items) if i not in items[x + 1:]]
if len(items) == 0: raise Exception()
except:
return
try:
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['tvshows']
except:
return
if info == 'true' and not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32553).encode('utf-8'), time=10000000)
self.infoDialog = True
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS tvshows (""id TEXT, ""items TEXT, ""UNIQUE(id)"");")
except:
return
try:
from resources.lib.indexers import episodes
except:
return
files_added = 0
for item in items:
it = None
if xbmc.abortRequested == True: return sys.exit()
try:
dbcur.execute("SELECT * FROM tvshows WHERE id = '%s'" % item['tvdb'])
fetch = dbcur.fetchone()
it = eval(fetch[1].encode('utf-8'))
except:
pass
try:
if not it == None: raise Exception()
it = episodes.episodes().get(item['tvshowtitle'], item['year'], item['imdb'], item['tvdb'], idx=False)
status = it[0]['status'].lower()
it = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'], 'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'], 'premiered': i['premiered']} for i in it]
if status == 'continuing': raise Exception()
dbcur.execute("INSERT INTO tvshows Values (?, ?)", (item['tvdb'], repr(it)))
dbcon.commit()
except:
pass
try:
id = [item['imdb'], item['tvdb']]
if not item['tmdb'] == '0': id += [item['tmdb']]
ep = [x['title'].encode('utf-8') for x in lib if str(x['imdbnumber']) in id or (x['title'].encode('utf-8') == item['tvshowtitle'] and str(x['year']) == item['year'])][0]
ep = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % ep)
ep = unicode(ep, 'utf-8', errors='ignore')
ep = json.loads(ep).get('result', {}).get('episodes', {})
ep = [{'season': int(i['season']), 'episode': int(i['episode'])} for i in ep]
ep = sorted(ep, key=lambda x: (x['season'], x['episode']))[-1]
num = [x for x,y in enumerate(it) if str(y['season']) == str(ep['season']) and str(y['episode']) == str(ep['episode'])][-1]
it = [y for x,y in enumerate(it) if x > num]
if len(it) == 0: continue
except:
continue
for i in it:
try:
if xbmc.abortRequested == True: return sys.exit()
premiered = i.get('premiered', '0')
if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
continue
libtvshows().strmFile(i)
files_added += 1
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def service(self):
try:
lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.movie')), ''))
lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.tv')), ''))
except:
pass
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS service (""setting TEXT, ""value TEXT, ""UNIQUE(setting)"");")
dbcur.execute("SELECT * FROM service WHERE setting = 'last_run'")
fetch = dbcur.fetchone()
if fetch == None:
serviceProperty = "1970-01-01 23:59:00.000000"
dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
dbcon.commit()
else:
serviceProperty = str(fetch[1])
dbcon.close()
except:
try: return dbcon.close()
except: return
try: control.window.setProperty(self.property, serviceProperty)
except: return
while not xbmc.abortRequested:
try:
serviceProperty = control.window.getProperty(self.property)
t1 = datetime.timedelta(hours=6)
t2 = datetime.datetime.strptime(serviceProperty, '%Y-%m-%d %H:%M:%S.%f')
t3 = datetime.datetime.now()
check = abs(t3 - t2) > t1
if check == False: raise Exception()
if (control.player.isPlaying() or control.condVisibility('Library.IsScanningVideo')): raise Exception()
serviceProperty = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
control.window.setProperty(self.property, serviceProperty)
try:
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS service (""setting TEXT, ""value TEXT, ""UNIQUE(setting)"");")
dbcur.execute("DELETE FROM service WHERE setting = 'last_run'")
dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
dbcon.commit()
dbcon.close()
except:
try: dbcon.close()
except: pass
if not control.setting('library.service.update') == 'true': raise Exception()
info = control.setting('library.service.notification') or 'true'
self.update(info=info)
except:
pass
control.sleep(10000)
|
[
"re.compile",
"resources.lib.modules.control.makeFile",
"resources.lib.modules.sources.sources",
"sys.exit",
"datetime.timedelta",
"ftplib.FTP",
"urllib.quote_plus",
"resources.lib.modules.control.version",
"resources.lib.modules.control.lang",
"resources.lib.modules.control.idle",
"resources.lib.modules.control.player.isPlaying",
"resources.lib.indexers.movies.movies",
"resources.lib.indexers.tvshows.tvshows",
"resources.lib.modules.control.jsonrpc",
"json.loads",
"resources.lib.modules.control.openFile",
"resources.lib.indexers.episodes.episodes",
"resources.lib.modules.control.setting",
"resources.lib.modules.control.execute",
"resources.lib.modules.control.sleep",
"re.sub",
"xbmc.makeLegalFilename",
"resources.lib.modules.control.listDir",
"datetime.datetime.utcnow",
"pysqlite2.dbapi2.connect",
"resources.lib.modules.control.window.getProperty",
"datetime.datetime.strptime",
"os.path.join",
"datetime.datetime.now",
"resources.lib.modules.control.condVisibility",
"resources.lib.modules.control.window.setProperty",
"resources.lib.modules.control.addonInfo"
] |
[((5709, 5745), 're.sub', 're.sub', (['"""[^\\\\w\\\\-_\\\\. ]"""', '"""_"""', 'title'], {}), "('[^\\\\w\\\\-_\\\\. ]', '_', title)\n", (5715, 5745), False, 'import re\n'), ((5838, 5874), 'os.path.join', 'os.path.join', (['base_path', 'show_folder'], {}), '(base_path, show_folder)\n', (5850, 5874), False, 'import os\n'), ((8296, 8310), 'resources.lib.modules.control.idle', 'control.idle', ([], {}), '()\n', (8308, 8310), False, 'from resources.lib.modules import control\n'), ((10308, 10325), 'resources.lib.modules.control.version', 'control.version', ([], {}), '()\n', (10323, 10325), False, 'from resources.lib.modules import control\n'), ((14026, 14040), 'resources.lib.modules.control.idle', 'control.idle', ([], {}), '()\n', (14038, 14040), False, 'from resources.lib.modules import control\n'), ((3335, 3365), 'xbmc.makeLegalFilename', 'xbmc.makeLegalFilename', (['folder'], {}), '(folder)\n', (3357, 3365), False, 'import xbmc\n'), ((3378, 3402), 'resources.lib.modules.control.makeFile', 'control.makeFile', (['folder'], {}), '(folder)\n', (3394, 3402), False, 'from resources.lib.modules import control\n'), ((3999, 4027), 'xbmc.makeLegalFilename', 'xbmc.makeLegalFilename', (['path'], {}), '(path)\n', (4021, 4027), False, 'import xbmc\n'), ((4139, 4166), 'resources.lib.modules.control.openFile', 'control.openFile', (['path', '"""w"""'], {}), "(path, 'w')\n", (4155, 4166), False, 'from resources.lib.modules import control\n'), ((5298, 5342), 're.sub', 're.sub', (['"""(?!%s)[^\\\\w\\\\-_\\\\.]"""', '"""."""', 'filename'], {}), "('(?!%s)[^\\\\w\\\\-_\\\\.]', '.', filename)\n", (5304, 5342), False, 'import re\n'), ((5364, 5393), 're.sub', 're.sub', (['"""\\\\.+"""', '"""."""', 'filename'], {}), "('\\\\.+', '.', filename)\n", (5370, 5393), False, 'import re\n'), ((5506, 5538), 'xbmc.makeLegalFilename', 'xbmc.makeLegalFilename', (['filename'], {}), '(filename)\n', (5528, 5538), False, 'import xbmc\n'), ((5913, 5953), 'os.path.join', 'os.path.join', (['path', "('Season %s' % season)"], {}), "(path, 'Season %s' % season)\n", (5925, 5953), False, 'import os\n'), ((6146, 6184), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.check_movie"""'], {}), "('library.check_movie')\n", (6161, 6184), False, 'from resources.lib.modules import control\n'), ((6227, 6260), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.update"""'], {}), "('library.update')\n", (6242, 6260), False, 'from resources.lib.modules import control\n'), ((6299, 6331), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.check"""'], {}), "('library.check')\n", (6314, 6331), False, 'from resources.lib.modules import control\n'), ((8220, 8259), 'resources.lib.modules.control.execute', 'control.execute', (['"""UpdateLibrary(video)"""'], {}), "('UpdateLibrary(video)')\n", (8235, 8259), False, 'from resources.lib.modules import control\n'), ((9293, 9332), 'resources.lib.modules.control.execute', 'control.execute', (['"""UpdateLibrary(video)"""'], {}), "('UpdateLibrary(video)')\n", (9308, 9332), False, 'from resources.lib.modules import control\n'), ((10356, 10396), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.check_episode"""'], {}), "('library.check_episode')\n", (10371, 10396), False, 'from resources.lib.modules import control\n'), ((10439, 10481), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.include_unknown"""'], {}), "('library.include_unknown')\n", (10454, 10481), False, 'from resources.lib.modules import 
control\n'), ((10523, 10556), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.update"""'], {}), "('library.update')\n", (10538, 10556), False, 'from resources.lib.modules import control\n'), ((10595, 10627), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.check"""'], {}), "('library.check')\n", (10610, 10627), False, 'from resources.lib.modules import control\n'), ((10664, 10690), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10688, 10690), False, 'import datetime\n'), ((10693, 10720), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(5)'}), '(hours=5)\n', (10711, 10720), False, 'import datetime\n'), ((11766, 11919), 'resources.lib.modules.control.jsonrpc', 'control.jsonrpc', (['"""{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}"""'], {}), '(\n \'{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}\'\n )\n', (11781, 11919), False, 'from resources.lib.modules import control\n'), ((12232, 12455), 'resources.lib.modules.control.jsonrpc', 'control.jsonrpc', (['(\'{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}\'\n % lib)'], {}), '(\n \'{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}\'\n % lib)\n', (12247, 12455), False, 'from resources.lib.modules import control\n'), ((13950, 13989), 'resources.lib.modules.control.execute', 'control.execute', (['"""UpdateLibrary(video)"""'], {}), "('UpdateLibrary(video)')\n", (13965, 13989), False, 'from resources.lib.modules import control\n'), ((14989, 15028), 'resources.lib.modules.control.execute', 'control.execute', (['"""UpdateLibrary(video)"""'], {}), "('UpdateLibrary(video)')\n", (15004, 15028), False, 'from resources.lib.modules import control\n'), ((15283, 15307), 'urllib.quote_plus', 'urllib.quote_plus', (['title'], {}), '(title)\n', (15300, 15307), False, 'import urllib\n'), ((16421, 16454), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.update"""'], {}), "('library.update')\n", (16436, 16454), False, 'from resources.lib.modules import control\n'), ((16496, 16538), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.include_unknown"""'], {}), "('library.include_unknown')\n", (16511, 16538), False, 'from resources.lib.modules import control\n'), ((16657, 16683), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (16681, 16683), False, 'import datetime\n'), ((16686, 16713), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(5)'}), '(hours=5)\n', (16704, 16713), False, 'import datetime\n'), ((16917, 16931), 'resources.lib.modules.control.idle', 'control.idle', ([], {}), '()\n', (16929, 16931), False, 'from resources.lib.modules import control\n'), ((18691, 18844), 'resources.lib.modules.control.jsonrpc', 'control.jsonrpc', (['"""{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}"""'], {}), '(\n \'{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}\'\n )\n', (18706, 18844), False, 'from resources.lib.modules import 
control\n'), ((19267, 19301), 'resources.lib.modules.control.makeFile', 'control.makeFile', (['control.dataPath'], {}), '(control.dataPath)\n', (19283, 19301), False, 'from resources.lib.modules import control\n'), ((19322, 19360), 'pysqlite2.dbapi2.connect', 'database.connect', (['control.libcacheFile'], {}), '(control.libcacheFile)\n', (19338, 19360), True, 'from pysqlite2 import dbapi2 as database\n'), ((22604, 22643), 'resources.lib.modules.control.execute', 'control.execute', (['"""UpdateLibrary(video)"""'], {}), "('UpdateLibrary(video)')\n", (22619, 22643), False, 'from resources.lib.modules import control\n'), ((22960, 22994), 'resources.lib.modules.control.makeFile', 'control.makeFile', (['control.dataPath'], {}), '(control.dataPath)\n', (22976, 22994), False, 'from resources.lib.modules import control\n'), ((23015, 23053), 'pysqlite2.dbapi2.connect', 'database.connect', (['control.libcacheFile'], {}), '(control.libcacheFile)\n', (23031, 23053), True, 'from pysqlite2 import dbapi2 as database\n'), ((23730, 23788), 'resources.lib.modules.control.window.setProperty', 'control.window.setProperty', (['self.property', 'serviceProperty'], {}), '(self.property, serviceProperty)\n', (23756, 23788), False, 'from resources.lib.modules import control\n'), ((25391, 25411), 'resources.lib.modules.control.sleep', 'control.sleep', (['(10000)'], {}), '(10000)\n', (25404, 25411), False, 'from resources.lib.modules import control\n'), ((3637, 3682), 'ftplib.FTP', 'FTP', (['ftparg[0][2]', 'ftparg[0][0]', 'ftparg[0][1]'], {}), '(ftparg[0][2], ftparg[0][0], ftparg[0][1])\n', (3640, 3682), False, 'from ftplib import FTP\n'), ((5423, 5477), 're.compile', 're.compile', (['"""(CON|PRN|AUX|NUL|COM\\\\d|LPT\\\\d)\\\\."""', 're.I'], {}), "('(CON|PRN|AUX|NUL|COM\\\\d|LPT\\\\d)\\\\.', re.I)\n", (5433, 5477), False, 'import re\n'), ((6077, 6109), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.movie"""'], {}), "('library.movie')\n", (6092, 6109), False, 'from resources.lib.modules import control\n'), ((6455, 6509), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Window.IsVisible(infodialog)"""'], {}), "('Window.IsVisible(infodialog)')\n", (6477, 6509), False, 'from resources.lib.modules import control\n'), ((6518, 6559), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Player.HasVideo"""'], {}), "('Player.HasVideo')\n", (6540, 6559), False, 'from resources.lib.modules import control\n'), ((8137, 8186), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (8159, 8186), False, 'from resources.lib.modules import control\n'), ((8434, 8488), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Window.IsVisible(infodialog)"""'], {}), "('Window.IsVisible(infodialog)')\n", (8456, 8488), False, 'from resources.lib.modules import control\n'), ((8497, 8538), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Player.HasVideo"""'], {}), "('Player.HasVideo')\n", (8519, 8538), False, 'from resources.lib.modules import control\n'), ((8725, 8740), 'resources.lib.indexers.movies.movies', 'movies.movies', ([], {}), '()\n', (8738, 8740), False, 'from resources.lib.indexers import movies\n'), ((9230, 9279), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (9252, 9279), False, 'from 
resources.lib.modules import control\n'), ((9507, 9530), 'urllib.quote_plus', 'urllib.quote_plus', (['name'], {}), '(name)\n', (9524, 9530), False, 'import urllib\n'), ((9532, 9556), 'urllib.quote_plus', 'urllib.quote_plus', (['title'], {}), '(title)\n', (9549, 9556), False, 'import urllib\n'), ((10045, 10078), 'os.path.join', 'os.path.join', (['folder', '"""movie.nfo"""'], {}), "(folder, 'movie.nfo')\n", (10057, 10078), False, 'import os\n'), ((10249, 10278), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.tv"""'], {}), "('library.tv')\n", (10264, 10278), False, 'from resources.lib.modules import control\n'), ((10952, 11006), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Window.IsVisible(infodialog)"""'], {}), "('Window.IsVisible(infodialog)')\n", (10974, 11006), False, 'from resources.lib.modules import control\n'), ((11015, 11056), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Player.HasVideo"""'], {}), "('Player.HasVideo')\n", (11037, 11056), False, 'from resources.lib.modules import control\n'), ((11245, 11264), 'resources.lib.indexers.episodes.episodes', 'episodes.episodes', ([], {}), '()\n', (11262, 11264), False, 'from resources.lib.indexers import episodes\n'), ((13867, 13916), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (13889, 13916), False, 'from resources.lib.modules import control\n'), ((14164, 14218), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Window.IsVisible(infodialog)"""'], {}), "('Window.IsVisible(infodialog)')\n", (14186, 14218), False, 'from resources.lib.modules import control\n'), ((14227, 14268), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Player.HasVideo"""'], {}), "('Player.HasVideo')\n", (14249, 14268), False, 'from resources.lib.modules import control\n'), ((14456, 14473), 'resources.lib.indexers.tvshows.tvshows', 'tvshows.tvshows', ([], {}), '()\n', (14471, 14473), False, 'from resources.lib.indexers import tvshows\n'), ((14926, 14975), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (14948, 14975), False, 'from resources.lib.modules import control\n'), ((15345, 15375), 'urllib.quote_plus', 'urllib.quote_plus', (['tvshowtitle'], {}), '(tvshowtitle)\n', (15362, 15375), False, 'import urllib\n'), ((15377, 15405), 'urllib.quote_plus', 'urllib.quote_plus', (['premiered'], {}), '(premiered)\n', (15394, 15405), False, 'import urllib\n'), ((15859, 15893), 'os.path.join', 'os.path.join', (['folder', '"""tvshow.nfo"""'], {}), "(folder, 'tvshow.nfo')\n", (15871, 15893), False, 'import os\n'), ((16354, 16383), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.tv"""'], {}), "('library.tv')\n", (16369, 16383), False, 'from resources.lib.modules import control\n'), ((17027, 17063), 'os.path.join', 'os.path.join', (['self.library_folder', 'i'], {}), '(self.library_folder, i)\n', (17039, 17063), False, 'import os\n'), ((19017, 19071), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Window.IsVisible(infodialog)"""'], {}), "('Window.IsVisible(infodialog)')\n", (19039, 19071), False, 'from resources.lib.modules import control\n'), ((19080, 19121), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Player.HasVideo"""'], {}), 
"('Player.HasVideo')\n", (19102, 19121), False, 'from resources.lib.modules import control\n'), ((19770, 19780), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19778, 19780), False, 'import sys\n'), ((21053, 21275), 'resources.lib.modules.control.jsonrpc', 'control.jsonrpc', (['(\'{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}\'\n % ep)'], {}), '(\n \'{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}\'\n % ep)\n', (21068, 21275), False, 'from resources.lib.modules import control\n'), ((22521, 22570), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (22543, 22570), False, 'from resources.lib.modules import control\n'), ((23903, 23944), 'resources.lib.modules.control.window.getProperty', 'control.window.getProperty', (['self.property'], {}), '(self.property)\n', (23929, 23944), False, 'from resources.lib.modules import control\n'), ((23967, 23994), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(6)'}), '(hours=6)\n', (23985, 23994), False, 'import datetime\n'), ((24016, 24083), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['serviceProperty', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(serviceProperty, '%Y-%m-%d %H:%M:%S.%f')\n", (24042, 24083), False, 'import datetime\n'), ((24105, 24128), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24126, 24128), False, 'import datetime\n'), ((24455, 24513), 'resources.lib.modules.control.window.setProperty', 'control.window.setProperty', (['self.property', 'serviceProperty'], {}), '(self.property, serviceProperty)\n', (24481, 24513), False, 'from resources.lib.modules import control\n'), ((4993, 5010), 'resources.lib.modules.sources.sources', 'sources.sources', ([], {}), '()\n', (5008, 5010), False, 'from resources.lib.modules import sources\n'), ((7285, 7300), 'json.loads', 'json.loads', (['lib'], {}), '(lib)\n', (7295, 7300), False, 'import json\n'), ((8346, 8365), 'resources.lib.modules.control.lang', 'control.lang', (['(32555)'], {}), '(32555)\n', (8358, 8365), False, 'from resources.lib.modules import control\n'), ((8895, 8905), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8903, 8905), False, 'import sys\n'), ((10761, 10789), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (10779, 10789), False, 'import datetime\n'), ((11985, 12000), 'json.loads', 'json.loads', (['lib'], {}), '(lib)\n', (11995, 12000), False, 'import json\n'), ((12521, 12536), 'json.loads', 'json.loads', (['lib'], {}), '(lib)\n', (12531, 12536), False, 'import json\n'), ((12907, 12917), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12915, 12917), False, 'import sys\n'), ((14076, 14095), 'resources.lib.modules.control.lang', 'control.lang', (['(32555)'], {}), '(32555)\n', (14088, 14095), False, 'from resources.lib.modules import control\n'), ((14628, 14638), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14636, 14638), False, 'import sys\n'), ((16597, 16622), 'resources.lib.modules.control.addonInfo', 'control.addonInfo', (['"""name"""'], {}), "('name')\n", (16614, 16622), False, 'from resources.lib.modules import control\n'), ((16754, 16782), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (16772, 16782), 
False, 'import datetime\n'), ((17509, 17531), 'resources.lib.modules.control.openFile', 'control.openFile', (['file'], {}), '(file)\n', (17525, 17531), False, 'from resources.lib.modules import control\n'), ((18910, 18925), 'json.loads', 'json.loads', (['lib'], {}), '(lib)\n', (18920, 18925), False, 'import json\n'), ((24246, 24272), 'resources.lib.modules.control.player.isPlaying', 'control.player.isPlaying', ([], {}), '()\n', (24270, 24272), False, 'from resources.lib.modules import control\n'), ((24276, 24325), 'resources.lib.modules.control.condVisibility', 'control.condVisibility', (['"""Library.IsScanningVideo"""'], {}), "('Library.IsScanningVideo')\n", (24298, 24325), False, 'from resources.lib.modules import control\n'), ((24564, 24602), 'pysqlite2.dbapi2.connect', 'database.connect', (['control.libcacheFile'], {}), '(control.libcacheFile)\n', (24580, 24602), True, 'from pysqlite2 import dbapi2 as database\n'), ((25240, 25287), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.service.notification"""'], {}), "('library.service.notification')\n", (25255, 25287), False, 'from resources.lib.modules import control\n'), ((3546, 3599), 're.compile', 're.compile', (['"""ftp://(.+?):(.+?)@(.+?):?(\\\\d+)?/(.+/?)"""'], {}), "('ftp://(.+?):(.+?)@(.+?):?(\\\\d+)?/(.+/?)')\n", (3556, 3599), False, 'import re\n'), ((6592, 6611), 'resources.lib.modules.control.lang', 'control.lang', (['(32552)'], {}), '(32552)\n', (6604, 6611), False, 'from resources.lib.modules import control\n'), ((8041, 8060), 'resources.lib.modules.control.lang', 'control.lang', (['(32554)'], {}), '(32554)\n', (8053, 8060), False, 'from resources.lib.modules import control\n'), ((8571, 8590), 'resources.lib.modules.control.lang', 'control.lang', (['(32552)'], {}), '(32552)\n', (8583, 8590), False, 'from resources.lib.modules import control\n'), ((9134, 9153), 'resources.lib.modules.control.lang', 'control.lang', (['(32554)'], {}), '(32554)\n', (9146, 9153), False, 'from resources.lib.modules import control\n'), ((11089, 11108), 'resources.lib.modules.control.lang', 'control.lang', (['(32552)'], {}), '(32552)\n', (11101, 11108), False, 'from resources.lib.modules import control\n'), ((13771, 13790), 'resources.lib.modules.control.lang', 'control.lang', (['(32554)'], {}), '(32554)\n', (13783, 13790), False, 'from resources.lib.modules import control\n'), ((14301, 14320), 'resources.lib.modules.control.lang', 'control.lang', (['(32552)'], {}), '(32552)\n', (14313, 14320), False, 'from resources.lib.modules import control\n'), ((14830, 14849), 'resources.lib.modules.control.lang', 'control.lang', (['(32554)'], {}), '(32554)\n', (14842, 14849), False, 'from resources.lib.modules import control\n'), ((17073, 17109), 'resources.lib.modules.control.listDir', 'control.listDir', (['self.library_folder'], {}), '(self.library_folder)\n', (17088, 17109), False, 'from resources.lib.modules import control\n'), ((17173, 17191), 'os.path.join', 'os.path.join', (['s', 'i'], {}), '(s, i)\n', (17185, 17191), False, 'import os\n'), ((19154, 19173), 'resources.lib.modules.control.lang', 'control.lang', (['(32553)'], {}), '(32553)\n', (19166, 19173), False, 'from resources.lib.modules import control\n'), ((20112, 20131), 'resources.lib.indexers.episodes.episodes', 'episodes.episodes', ([], {}), '()\n', (20129, 20131), False, 'from resources.lib.indexers import episodes\n'), ((21967, 21977), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21975, 21977), False, 'import sys\n'), ((22425, 22444), 
'resources.lib.modules.control.lang', 'control.lang', (['(32554)'], {}), '(32554)\n', (22437, 22444), False, 'from resources.lib.modules import control\n'), ((22749, 22781), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.movie"""'], {}), "('library.movie')\n", (22764, 22781), False, 'from resources.lib.modules import control\n'), ((22856, 22885), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.tv"""'], {}), "('library.tv')\n", (22871, 22885), False, 'from resources.lib.modules import control\n'), ((24381, 24404), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24402, 24404), False, 'import datetime\n'), ((25146, 25187), 'resources.lib.modules.control.setting', 'control.setting', (['"""library.service.update"""'], {}), "('library.service.update')\n", (25161, 25187), False, 'from resources.lib.modules import control\n'), ((17201, 17219), 'resources.lib.modules.control.listDir', 'control.listDir', (['s'], {}), '(s)\n', (17216, 17219), False, 'from resources.lib.modules import control\n'), ((17319, 17337), 'os.path.join', 'os.path.join', (['s', 'i'], {}), '(s, i)\n', (17331, 17337), False, 'import os\n'), ((21346, 21360), 'json.loads', 'json.loads', (['ep'], {}), '(ep)\n', (21356, 21360), False, 'import json\n'), ((17347, 17365), 'resources.lib.modules.control.listDir', 'control.listDir', (['s'], {}), '(s)\n', (17362, 17365), False, 'from resources.lib.modules import control\n')]
|
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
import sys
from oslo_config import cfg
from oslo_log import log as logging
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
from mistral import exceptions as exc
from mistral.services import security
from mistral import utils
# Definition objects.
LOG = logging.getLogger(__name__)
def _get_hash_function_by(column_name):
def calc_hash(context):
val = context.current_parameters[column_name] or {}
if isinstance(val, dict):
            # If the value is a dictionary we need to make sure its keys
            # appear in a deterministic order in the string representation.
hash_base = json.dumps(sorted(val.items()))
else:
hash_base = str(val)
return hashlib.sha256(hash_base.encode('utf-8')).hexdigest()
return calc_hash
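# Illustrative note (not in the original source): sorting the items makes the
# hash independent of dict insertion order, e.g. both of the following produce
# the same hash_base string '[["a", 1], ["b", 2]]':
#     json.dumps(sorted({'a': 1, 'b': 2}.items()))
#     json.dumps(sorted({'b': 2, 'a': 1}.items()))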
def validate_long_type_length(cls, field_name, value):
"""Makes sure the value does not exceeds the maximum size."""
if value:
# Get the configured limit.
size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb
# If the size is unlimited.
if size_limit_kb < 0:
return
size_kb = int(sys.getsizeof(str(value)) / 1024)
if size_kb > size_limit_kb:
LOG.error(
"Size limit %dKB exceed for class [%s], "
"field %s of size %dKB.",
size_limit_kb, str(cls), field_name, size_kb
)
raise exc.SizeLimitExceededException(
field_name,
size_kb,
size_limit_kb
)
def register_length_validator(attr_name):
"""Register an event listener on the attribute.
This event listener will validate the size every
time a 'set' occurs.
"""
for cls in utils.iter_subclasses(Execution):
if hasattr(cls, attr_name):
event.listen(
getattr(cls, attr_name),
'set',
lambda t, v, o, i: validate_long_type_length(cls, attr_name, v)
)
class Definition(mb.MistralSecureModelBase):
__abstract__ = True
id = mb.id_column()
name = sa.Column(sa.String(255))
definition = sa.Column(st.MediumText(), nullable=True)
spec = sa.Column(st.JsonMediumDictType())
tags = sa.Column(st.JsonListType())
is_system = sa.Column(sa.Boolean())
# There's no WorkbookExecution so we safely omit "Definition" in the name.
class Workbook(Definition):
"""Contains info about workbook (including definition in Mistral DSL)."""
__tablename__ = 'workbooks_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
class WorkflowDefinition(Definition):
"""Contains info about workflow (including definition in Mistral DSL)."""
__tablename__ = 'workflow_definitions_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
class ActionDefinition(Definition):
"""Contains info about registered Actions."""
__tablename__ = 'action_definitions_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_action_class' % __tablename__, 'action_class'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
description = sa.Column(sa.Text())
input = sa.Column(sa.Text())
# Service properties.
action_class = sa.Column(sa.String(200))
attributes = sa.Column(st.JsonDictType())
# Execution objects.
class Execution(mb.MistralSecureModelBase):
__abstract__ = True
# Common properties.
id = mb.id_column()
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255), nullable=True)
workflow_name = sa.Column(sa.String(255))
workflow_id = sa.Column(sa.String(80))
spec = sa.Column(st.JsonMediumDictType())
state = sa.Column(sa.String(20))
state_info = sa.Column(sa.Text(), nullable=True)
tags = sa.Column(st.JsonListType())
# Internal properties which can be used by engine.
runtime_context = sa.Column(st.JsonLongDictType())
class ActionExecution(Execution):
"""Contains action execution information."""
__tablename__ = 'action_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at')
)
# Main properties.
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.Column(st.JsonLongDictType(), nullable=True)
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
class WorkflowExecution(Execution):
"""Contains workflow execution information."""
__tablename__ = 'workflow_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
)
# Main properties.
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.Column(st.JsonLongDictType(), nullable=True)
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
params = sa.Column(st.JsonLongDictType())
# Initial workflow context containing workflow variables, environment,
# openstack security context etc.
# NOTES:
# * Data stored in this structure should not be copied into inbound
# contexts of tasks. No need to duplicate it.
# * This structure does not contain workflow input.
context = sa.Column(st.JsonLongDictType())
class TaskExecution(Execution):
"""Contains task runtime information."""
__tablename__ = 'task_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
sa.UniqueConstraint('unique_key')
)
# Main properties.
action_spec = sa.Column(st.JsonLongDictType())
unique_key = sa.Column(sa.String(250), nullable=True)
type = sa.Column(sa.String(10))
    # Whether the task is fully processed (publishing and calculating commands
    # after it). This allows workflow controller implementations to be
    # simplified significantly.
processed = sa.Column(sa.BOOLEAN, default=False)
# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonLongDictType())
@property
def executions(self):
return (
self.action_executions
if not self.spec.get('workflow')
else self.workflow_executions
)
for cls in utils.iter_subclasses(Execution):
event.listen(
# Catch and trim Execution.state_info to always fit allocated size.
# Note that the limit is 65500 which is less than 65535 (2^16 -1).
# The reason is that utils.cut() is not exactly accurate in case if
# the value is not a string, but, for example, a dictionary. If we
# limit it exactly to 65535 then once in a while it may go slightly
# beyond the allowed maximum size. It may depend on the order of
# keys in a string representation and other things that are hidden
# inside utils.cut_dict() method.
cls.state_info,
'set',
lambda t, v, o, i: utils.cut(v, 65500),
retval=True
)
# Many-to-one for 'ActionExecution' and 'TaskExecution'.
ActionExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.action_executions = relationship(
ActionExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=ActionExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % ActionExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'WorkflowExecution' and 'TaskExecution'.
WorkflowExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.workflow_executions = relationship(
WorkflowExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=WorkflowExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % WorkflowExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'TaskExecution' and 'WorkflowExecution'.
TaskExecution.workflow_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowExecution.id, ondelete='CASCADE')
)
WorkflowExecution.task_executions = relationship(
TaskExecution,
backref=backref('workflow_execution', remote_side=[WorkflowExecution.id]),
cascade='all, delete-orphan',
foreign_keys=TaskExecution.workflow_execution_id,
lazy='select'
)
sa.Index(
'%s_workflow_execution_id' % TaskExecution.__tablename__,
TaskExecution.workflow_execution_id
)
# Other objects.
class DelayedCall(mb.MistralModelBase):
"""Contains info about delayed calls."""
__tablename__ = 'delayed_calls_v2'
id = mb.id_column()
factory_method_path = sa.Column(sa.String(200), nullable=True)
target_method_name = sa.Column(sa.String(80), nullable=False)
method_arguments = sa.Column(st.JsonDictType())
serializers = sa.Column(st.JsonDictType())
key = sa.Column(sa.String(250), nullable=True)
auth_context = sa.Column(st.JsonDictType())
execution_time = sa.Column(sa.DateTime, nullable=False)
processing = sa.Column(sa.Boolean, default=False, nullable=False)
sa.Index(
'%s_execution_time' % DelayedCall.__tablename__,
DelayedCall.execution_time
)
class Environment(mb.MistralSecureModelBase):
"""Contains environment variables for workflow execution."""
__tablename__ = 'environments_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_name' % __tablename__, 'name'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
id = mb.id_column()
name = sa.Column(sa.String(200))
description = sa.Column(sa.Text())
variables = sa.Column(st.JsonLongDictType())
class CronTrigger(mb.MistralSecureModelBase):
"""Contains info about cron triggers."""
__tablename__ = 'cron_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.UniqueConstraint(
'workflow_input_hash', 'workflow_name', 'pattern', 'project_id',
'workflow_params_hash', 'remaining_executions',
'first_execution_time'
),
sa.Index(
'%s_next_execution_time' % __tablename__,
'next_execution_time'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_workflow_name' % __tablename__, 'workflow_name'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
pattern = sa.Column(
sa.String(100),
nullable=True,
default='0 0 30 2 0' # Set default to 'never'.
)
first_execution_time = sa.Column(sa.DateTime, nullable=True)
next_execution_time = sa.Column(sa.DateTime, nullable=False)
workflow_name = sa.Column(sa.String(255))
remaining_executions = sa.Column(sa.Integer)
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow = relationship('WorkflowDefinition', lazy='joined')
workflow_params = sa.Column(st.JsonDictType())
workflow_params_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_params')
)
workflow_input = sa.Column(st.JsonDictType())
workflow_input_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_input')
)
trust_id = sa.Column(sa.String(80))
def to_dict(self):
d = super(CronTrigger, self).to_dict()
utils.datetime_to_str_in_dict(d, 'first_execution_time')
utils.datetime_to_str_in_dict(d, 'next_execution_time')
return d
# Register all hooks related to secure models.
mb.register_secure_model_hooks()
# TODO(rakhmerov): This is a bad solution. It's hard to find in the code,
# configure flexibly etc. Fix it.
# Register an event listener to verify that the size of all the long columns
# affected by the user do not exceed the limit configuration.
for attr_name in ['input', 'output', 'params', 'published']:
register_length_validator(attr_name)
class ResourceMember(mb.MistralModelBase):
"""Contains info about resource members."""
__tablename__ = 'resource_members_v2'
__table_args__ = (
sa.UniqueConstraint(
'resource_id',
'resource_type',
'member_id'
),
)
id = mb.id_column()
resource_id = sa.Column(sa.String(80), nullable=False)
resource_type = sa.Column(
sa.String(50),
nullable=False,
default='workflow'
)
project_id = sa.Column(sa.String(80), default=security.get_project_id)
member_id = sa.Column(sa.String(80), nullable=False)
status = sa.Column(sa.String(20), nullable=False, default="pending")
class EventTrigger(mb.MistralSecureModelBase):
"""Contains info about event triggers."""
__tablename__ = 'event_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('exchange', 'topic', 'event', 'workflow_id',
'project_id'),
sa.Index('%s_project_id_workflow_id' % __tablename__, 'project_id',
'workflow_id'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow_params = sa.Column(st.JsonDictType())
workflow_input = sa.Column(st.JsonDictType())
exchange = sa.Column(sa.String(80), nullable=False)
topic = sa.Column(sa.String(80), nullable=False)
event = sa.Column(sa.String(80), nullable=False)
trust_id = sa.Column(sa.String(80))
class NamedLock(mb.MistralModelBase):
"""Contains info about named locks.
Usage of named locks is based on properties of READ COMMITTED
transactions of the most generally used SQL databases such as
Postgres, MySQL, Oracle etc.
The locking scenario is as follows:
1. Transaction A (TX-A) inserts a row with unique 'id' and
some value that identifies a locked object stored in 'name'.
    2. Transaction B (TX-B) and any subsequent transactions try
       to insert a row with a unique 'id' and the same value of the
       'name' field, and they wait until TX-A completes due to the
       transactional properties of READ COMMITTED.
    3. If TX-A then immediately deletes the record and commits,
       TX-B or one of the subsequent transactions is released
       and its 'insert' completes.
    4. The scenario then repeats from step #2, with the transaction
       that just performed the insert playing the role of TX-A.
Practically, this table should never contain any committed rows.
All its usage is around the play with transactional storages.
"""
__tablename__ = 'named_locks'
sa.UniqueConstraint('name')
id = mb.id_column()
name = sa.Column(sa.String(250))
sa.UniqueConstraint(NamedLock.name)
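# A minimal sketch (not part of the original module) of how the locking
# scenario from the NamedLock docstring could be exercised, assuming a plain
# SQLAlchemy session from some get_session() factory, a hypothetical
# do_critical_section() callable and the standard 'uuid' module; Mistral's
# real lock helpers live elsewhere and are not reproduced here.
#
#     session = get_session()
#     with session.begin():
#         # TX-A: inserting the row acquires the lock; concurrent inserts with
#         # the same 'name' block on the unique constraint until this
#         # transaction commits (steps #1 and #2 above).
#         lock = NamedLock(id=str(uuid.uuid4()), name='my-lock')
#         session.add(lock)
#         session.flush()
#         do_critical_section()
#         # Deleting the row before committing releases one of the waiting
#         # transactions (step #3), so no committed rows remain.
#         session.delete(lock)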
|
[
"sqlalchemy.Text",
"mistral.db.sqlalchemy.types.MediumText",
"sqlalchemy.String",
"sqlalchemy.Column",
"oslo_log.log.getLogger",
"mistral.db.sqlalchemy.types.JsonMediumDictType",
"sqlalchemy.orm.backref",
"sqlalchemy.CHAR",
"mistral.utils.datetime_to_str_in_dict",
"mistral.db.sqlalchemy.model_base.register_secure_model_hooks",
"sqlalchemy.Index",
"mistral.db.sqlalchemy.types.JsonDictType",
"mistral.db.sqlalchemy.model_base.id_column",
"sqlalchemy.orm.relationship",
"mistral.exceptions.SizeLimitExceededException",
"sqlalchemy.ForeignKey",
"sqlalchemy.UniqueConstraint",
"mistral.utils.iter_subclasses",
"mistral.utils.cut",
"mistral.db.sqlalchemy.types.JsonListType",
"sqlalchemy.Boolean",
"mistral.db.sqlalchemy.types.JsonLongDictType"
] |
[((1106, 1133), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1123, 1133), True, 'from oslo_log import log as logging\n'), ((8116, 8148), 'mistral.utils.iter_subclasses', 'utils.iter_subclasses', (['Execution'], {}), '(Execution)\n', (8137, 8148), False, 'from mistral import utils\n'), ((9300, 9389), 'sqlalchemy.Index', 'sa.Index', (["('%s_task_execution_id' % ActionExecution.__tablename__)", '"""task_execution_id"""'], {}), "('%s_task_execution_id' % ActionExecution.__tablename__,\n 'task_execution_id')\n", (9308, 9389), True, 'import sqlalchemy as sa\n'), ((9856, 9947), 'sqlalchemy.Index', 'sa.Index', (["('%s_task_execution_id' % WorkflowExecution.__tablename__)", '"""task_execution_id"""'], {}), "('%s_task_execution_id' % WorkflowExecution.__tablename__,\n 'task_execution_id')\n", (9864, 9947), True, 'import sqlalchemy as sa\n'), ((10403, 10510), 'sqlalchemy.Index', 'sa.Index', (["('%s_workflow_execution_id' % TaskExecution.__tablename__)", 'TaskExecution.workflow_execution_id'], {}), "('%s_workflow_execution_id' % TaskExecution.__tablename__,\n TaskExecution.workflow_execution_id)\n", (10411, 10510), True, 'import sqlalchemy as sa\n'), ((11151, 11241), 'sqlalchemy.Index', 'sa.Index', (["('%s_execution_time' % DelayedCall.__tablename__)", 'DelayedCall.execution_time'], {}), "('%s_execution_time' % DelayedCall.__tablename__, DelayedCall.\n execution_time)\n", (11159, 11241), True, 'import sqlalchemy as sa\n'), ((13803, 13835), 'mistral.db.sqlalchemy.model_base.register_secure_model_hooks', 'mb.register_secure_model_hooks', ([], {}), '()\n', (13833, 13835), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((16977, 17012), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['NamedLock.name'], {}), '(NamedLock.name)\n', (16996, 17012), True, 'import sqlalchemy as sa\n'), ((2598, 2630), 'mistral.utils.iter_subclasses', 'utils.iter_subclasses', (['Execution'], {}), '(Execution)\n', (2619, 2630), False, 'from mistral import utils\n'), ((2933, 2947), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (2945, 2947), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((4826, 4840), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (4838, 4840), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((7748, 7784), 'sqlalchemy.Column', 'sa.Column', (['sa.BOOLEAN'], {'default': '(False)'}), '(sa.BOOLEAN, default=False)\n', (7757, 7784), True, 'import sqlalchemy as sa\n'), ((8960, 8973), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (8969, 8973), True, 'import sqlalchemy as sa\n'), ((8979, 9030), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['TaskExecution.id'], {'ondelete': '"""CASCADE"""'}), "(TaskExecution.id, ondelete='CASCADE')\n", (8992, 9030), True, 'import sqlalchemy as sa\n'), ((9510, 9523), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (9519, 9523), True, 'import sqlalchemy as sa\n'), ((9529, 9580), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['TaskExecution.id'], {'ondelete': '"""CASCADE"""'}), "(TaskExecution.id, ondelete='CASCADE')\n", (9542, 9580), True, 'import sqlalchemy as sa\n'), ((10068, 10081), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (10077, 10081), True, 'import sqlalchemy as sa\n'), ((10087, 10142), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['WorkflowExecution.id'], {'ondelete': '"""CASCADE"""'}), "(WorkflowExecution.id, ondelete='CASCADE')\n", (10100, 10142), True, 'import sqlalchemy as sa\n'), 
((10673, 10687), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (10685, 10687), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((11040, 11078), 'sqlalchemy.Column', 'sa.Column', (['sa.DateTime'], {'nullable': '(False)'}), '(sa.DateTime, nullable=False)\n', (11049, 11078), True, 'import sqlalchemy as sa\n'), ((11096, 11148), 'sqlalchemy.Column', 'sa.Column', (['sa.Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(sa.Boolean, default=False, nullable=False)\n', (11105, 11148), True, 'import sqlalchemy as sa\n'), ((11686, 11700), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (11698, 11700), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((12570, 12584), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (12582, 12584), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((12783, 12820), 'sqlalchemy.Column', 'sa.Column', (['sa.DateTime'], {'nullable': '(True)'}), '(sa.DateTime, nullable=True)\n', (12792, 12820), True, 'import sqlalchemy as sa\n'), ((12847, 12885), 'sqlalchemy.Column', 'sa.Column', (['sa.DateTime'], {'nullable': '(False)'}), '(sa.DateTime, nullable=False)\n', (12856, 12885), True, 'import sqlalchemy as sa\n'), ((12959, 12980), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {}), '(sa.Integer)\n', (12968, 12980), True, 'import sqlalchemy as sa\n'), ((13100, 13149), 'sqlalchemy.orm.relationship', 'relationship', (['"""WorkflowDefinition"""'], {'lazy': '"""joined"""'}), "('WorkflowDefinition', lazy='joined')\n", (13112, 13149), False, 'from sqlalchemy.orm import relationship\n'), ((14481, 14495), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (14493, 14495), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((15272, 15286), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (15284, 15286), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((16885, 16912), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (16904, 16912), True, 'import sqlalchemy as sa\n'), ((16923, 16937), 'mistral.db.sqlalchemy.model_base.id_column', 'mb.id_column', ([], {}), '()\n', (16935, 16937), True, 'from mistral.db.sqlalchemy import model_base as mb\n'), ((2969, 2983), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (2978, 2983), True, 'import sqlalchemy as sa\n'), ((3012, 3027), 'mistral.db.sqlalchemy.types.MediumText', 'st.MediumText', ([], {}), '()\n', (3025, 3027), True, 'from mistral.db.sqlalchemy import types as st\n'), ((3065, 3088), 'mistral.db.sqlalchemy.types.JsonMediumDictType', 'st.JsonMediumDictType', ([], {}), '()\n', (3086, 3088), True, 'from mistral.db.sqlalchemy import types as st\n'), ((3111, 3128), 'mistral.db.sqlalchemy.types.JsonListType', 'st.JsonListType', ([], {}), '()\n', (3126, 3128), True, 'from mistral.db.sqlalchemy import types as st\n'), ((3156, 3168), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (3166, 3168), True, 'import sqlalchemy as sa\n'), ((3421, 3462), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""', '"""project_id"""'], {}), "('name', 'project_id')\n", (3440, 3462), True, 'import sqlalchemy as sa\n'), ((3472, 3527), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (3480, 3527), True, 'import sqlalchemy as sa\n'), ((3537, 3582), 'sqlalchemy.Index', 
'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (3545, 3582), True, 'import sqlalchemy as sa\n'), ((3787, 3828), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""', '"""project_id"""'], {}), "('name', 'project_id')\n", (3806, 3828), True, 'import sqlalchemy as sa\n'), ((3838, 3891), 'sqlalchemy.Index', 'sa.Index', (["('%s_is_system' % __tablename__)", '"""is_system"""'], {}), "('%s_is_system' % __tablename__, 'is_system')\n", (3846, 3891), True, 'import sqlalchemy as sa\n'), ((3901, 3956), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (3909, 3956), True, 'import sqlalchemy as sa\n'), ((3966, 4011), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (3974, 4011), True, 'import sqlalchemy as sa\n'), ((4184, 4225), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""', '"""project_id"""'], {}), "('name', 'project_id')\n", (4203, 4225), True, 'import sqlalchemy as sa\n'), ((4235, 4288), 'sqlalchemy.Index', 'sa.Index', (["('%s_is_system' % __tablename__)", '"""is_system"""'], {}), "('%s_is_system' % __tablename__, 'is_system')\n", (4243, 4288), True, 'import sqlalchemy as sa\n'), ((4298, 4357), 'sqlalchemy.Index', 'sa.Index', (["('%s_action_class' % __tablename__)", '"""action_class"""'], {}), "('%s_action_class' % __tablename__, 'action_class')\n", (4306, 4357), True, 'import sqlalchemy as sa\n'), ((4367, 4422), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (4375, 4422), True, 'import sqlalchemy as sa\n'), ((4432, 4477), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (4440, 4477), True, 'import sqlalchemy as sa\n'), ((4537, 4546), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (4544, 4546), True, 'import sqlalchemy as sa\n'), ((4570, 4579), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (4577, 4579), True, 'import sqlalchemy as sa\n'), ((4637, 4651), 'sqlalchemy.String', 'sa.String', (['(200)'], {}), '(200)\n', (4646, 4651), True, 'import sqlalchemy as sa\n'), ((4680, 4697), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (4695, 4697), True, 'from mistral.db.sqlalchemy import types as st\n'), ((4862, 4876), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (4871, 4876), True, 'import sqlalchemy as sa\n'), ((4906, 4920), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (4915, 4920), True, 'import sqlalchemy as sa\n'), ((4967, 4981), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (4976, 4981), True, 'import sqlalchemy as sa\n'), ((5011, 5024), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (5020, 5024), True, 'import sqlalchemy as sa\n'), ((5047, 5070), 'mistral.db.sqlalchemy.types.JsonMediumDictType', 'st.JsonMediumDictType', ([], {}), '()\n', (5068, 5070), True, 'from mistral.db.sqlalchemy import types as st\n'), ((5094, 5107), 'sqlalchemy.String', 'sa.String', (['(20)'], {}), '(20)\n', (5103, 5107), True, 'import sqlalchemy as sa\n'), ((5136, 5145), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (5143, 5145), True, 'import sqlalchemy as sa\n'), ((5183, 5200), 'mistral.db.sqlalchemy.types.JsonListType', 'st.JsonListType', ([], {}), '()\n', (5198, 
5200), True, 'from mistral.db.sqlalchemy import types as st\n'), ((5290, 5311), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (5309, 5311), True, 'from mistral.db.sqlalchemy import types as st\n'), ((5474, 5529), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (5482, 5529), True, 'import sqlalchemy as sa\n'), ((5539, 5584), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (5547, 5584), True, 'import sqlalchemy as sa\n'), ((5594, 5639), 'sqlalchemy.Index', 'sa.Index', (["('%s_state' % __tablename__)", '"""state"""'], {}), "('%s_state' % __tablename__, 'state')\n", (5602, 5639), True, 'import sqlalchemy as sa\n'), ((5649, 5704), 'sqlalchemy.Index', 'sa.Index', (["('%s_updated_at' % __tablename__)", '"""updated_at"""'], {}), "('%s_updated_at' % __tablename__, 'updated_at')\n", (5657, 5704), True, 'import sqlalchemy as sa\n'), ((5760, 5772), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (5770, 5772), True, 'import sqlalchemy as sa\n'), ((5811, 5832), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (5830, 5832), True, 'from mistral.db.sqlalchemy import types as st\n'), ((6094, 6149), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (6102, 6149), True, 'import sqlalchemy as sa\n'), ((6159, 6204), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (6167, 6204), True, 'import sqlalchemy as sa\n'), ((6214, 6259), 'sqlalchemy.Index', 'sa.Index', (["('%s_state' % __tablename__)", '"""state"""'], {}), "('%s_state' % __tablename__, 'state')\n", (6222, 6259), True, 'import sqlalchemy as sa\n'), ((6269, 6324), 'sqlalchemy.Index', 'sa.Index', (["('%s_updated_at' % __tablename__)", '"""updated_at"""'], {}), "('%s_updated_at' % __tablename__, 'updated_at')\n", (6277, 6324), True, 'import sqlalchemy as sa\n'), ((6381, 6393), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (6391, 6393), True, 'import sqlalchemy as sa\n'), ((6432, 6453), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (6451, 6453), True, 'from mistral.db.sqlalchemy import types as st\n'), ((6571, 6592), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (6590, 6592), True, 'from mistral.db.sqlalchemy import types as st\n'), ((6931, 6952), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (6950, 6952), True, 'from mistral.db.sqlalchemy import types as st\n'), ((7107, 7162), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (7115, 7162), True, 'import sqlalchemy as sa\n'), ((7172, 7217), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (7180, 7217), True, 'import sqlalchemy as sa\n'), ((7227, 7272), 'sqlalchemy.Index', 'sa.Index', (["('%s_state' % __tablename__)", '"""state"""'], {}), "('%s_state' % __tablename__, 'state')\n", (7235, 7272), True, 'import sqlalchemy as sa\n'), ((7282, 7337), 'sqlalchemy.Index', 'sa.Index', (["('%s_updated_at' % __tablename__)", '"""updated_at"""'], {}), 
"('%s_updated_at' % __tablename__, 'updated_at')\n", (7290, 7337), True, 'import sqlalchemy as sa\n'), ((7347, 7380), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""unique_key"""'], {}), "('unique_key')\n", (7366, 7380), True, 'import sqlalchemy as sa\n'), ((7439, 7460), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (7458, 7460), True, 'from mistral.db.sqlalchemy import types as st\n'), ((7489, 7503), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (7498, 7503), True, 'import sqlalchemy as sa\n'), ((7541, 7554), 'sqlalchemy.String', 'sa.String', (['(10)'], {}), '(10)\n', (7550, 7554), True, 'import sqlalchemy as sa\n'), ((7841, 7862), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (7860, 7862), True, 'from mistral.db.sqlalchemy import types as st\n'), ((7890, 7911), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (7909, 7911), True, 'from mistral.db.sqlalchemy import types as st\n'), ((9134, 9191), 'sqlalchemy.orm.backref', 'backref', (['"""task_execution"""'], {'remote_side': '[TaskExecution.id]'}), "('task_execution', remote_side=[TaskExecution.id])\n", (9141, 9191), False, 'from sqlalchemy.orm import backref\n'), ((9688, 9745), 'sqlalchemy.orm.backref', 'backref', (['"""task_execution"""'], {'remote_side': '[TaskExecution.id]'}), "('task_execution', remote_side=[TaskExecution.id])\n", (9695, 9745), False, 'from sqlalchemy.orm import backref\n'), ((10227, 10292), 'sqlalchemy.orm.backref', 'backref', (['"""workflow_execution"""'], {'remote_side': '[WorkflowExecution.id]'}), "('workflow_execution', remote_side=[WorkflowExecution.id])\n", (10234, 10292), False, 'from sqlalchemy.orm import backref\n'), ((10724, 10738), 'sqlalchemy.String', 'sa.String', (['(200)'], {}), '(200)\n', (10733, 10738), True, 'import sqlalchemy as sa\n'), ((10790, 10803), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (10799, 10803), True, 'import sqlalchemy as sa\n'), ((10854, 10871), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (10869, 10871), True, 'from mistral.db.sqlalchemy import types as st\n'), ((10901, 10918), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (10916, 10918), True, 'from mistral.db.sqlalchemy import types as st\n'), ((10940, 10954), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (10949, 10954), True, 'import sqlalchemy as sa\n'), ((11000, 11017), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (11015, 11017), True, 'from mistral.db.sqlalchemy import types as st\n'), ((11431, 11472), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""', '"""project_id"""'], {}), "('name', 'project_id')\n", (11450, 11472), True, 'import sqlalchemy as sa\n'), ((11482, 11525), 'sqlalchemy.Index', 'sa.Index', (["('%s_name' % __tablename__)", '"""name"""'], {}), "('%s_name' % __tablename__, 'name')\n", (11490, 11525), True, 'import sqlalchemy as sa\n'), ((11535, 11590), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (11543, 11590), True, 'import sqlalchemy as sa\n'), ((11600, 11645), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (11608, 11645), True, 'import sqlalchemy as sa\n'), ((11722, 11736), 'sqlalchemy.String', 'sa.String', 
(['(200)'], {}), '(200)\n', (11731, 11736), True, 'import sqlalchemy as sa\n'), ((11766, 11775), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (11773, 11775), True, 'import sqlalchemy as sa\n'), ((11803, 11824), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (11822, 11824), True, 'from mistral.db.sqlalchemy import types as st\n'), ((11991, 12032), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""', '"""project_id"""'], {}), "('name', 'project_id')\n", (12010, 12032), True, 'import sqlalchemy as sa\n'), ((12042, 12206), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""workflow_input_hash"""', '"""workflow_name"""', '"""pattern"""', '"""project_id"""', '"""workflow_params_hash"""', '"""remaining_executions"""', '"""first_execution_time"""'], {}), "('workflow_input_hash', 'workflow_name', 'pattern',\n 'project_id', 'workflow_params_hash', 'remaining_executions',\n 'first_execution_time')\n", (12061, 12206), True, 'import sqlalchemy as sa\n'), ((12254, 12327), 'sqlalchemy.Index', 'sa.Index', (["('%s_next_execution_time' % __tablename__)", '"""next_execution_time"""'], {}), "('%s_next_execution_time' % __tablename__, 'next_execution_time')\n", (12262, 12327), True, 'import sqlalchemy as sa\n'), ((12371, 12426), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id' % __tablename__)", '"""project_id"""'], {}), "('%s_project_id' % __tablename__, 'project_id')\n", (12379, 12426), True, 'import sqlalchemy as sa\n'), ((12436, 12481), 'sqlalchemy.Index', 'sa.Index', (["('%s_scope' % __tablename__)", '"""scope"""'], {}), "('%s_scope' % __tablename__, 'scope')\n", (12444, 12481), True, 'import sqlalchemy as sa\n'), ((12491, 12552), 'sqlalchemy.Index', 'sa.Index', (["('%s_workflow_name' % __tablename__)", '"""workflow_name"""'], {}), "('%s_workflow_name' % __tablename__, 'workflow_name')\n", (12499, 12552), True, 'import sqlalchemy as sa\n'), ((12606, 12620), 'sqlalchemy.String', 'sa.String', (['(200)'], {}), '(200)\n', (12615, 12620), True, 'import sqlalchemy as sa\n'), ((12655, 12669), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (12664, 12669), True, 'import sqlalchemy as sa\n'), ((12916, 12930), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (12925, 12930), True, 'import sqlalchemy as sa\n'), ((13019, 13032), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (13028, 13032), True, 'import sqlalchemy as sa\n'), ((13042, 13078), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['WorkflowDefinition.id'], {}), '(WorkflowDefinition.id)\n', (13055, 13078), True, 'import sqlalchemy as sa\n'), ((13183, 13200), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (13198, 13200), True, 'from mistral.db.sqlalchemy import types as st\n'), ((13248, 13259), 'sqlalchemy.CHAR', 'sa.CHAR', (['(64)'], {}), '(64)\n', (13255, 13259), True, 'import sqlalchemy as sa\n'), ((13355, 13372), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (13370, 13372), True, 'from mistral.db.sqlalchemy import types as st\n'), ((13419, 13430), 'sqlalchemy.CHAR', 'sa.CHAR', (['(64)'], {}), '(64)\n', (13426, 13430), True, 'import sqlalchemy as sa\n'), ((13520, 13533), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (13529, 13533), True, 'import sqlalchemy as sa\n'), ((13615, 13671), 'mistral.utils.datetime_to_str_in_dict', 'utils.datetime_to_str_in_dict', (['d', '"""first_execution_time"""'], {}), "(d, 'first_execution_time')\n", (13644, 13671), False, 'from mistral 
import utils\n'), ((13680, 13735), 'mistral.utils.datetime_to_str_in_dict', 'utils.datetime_to_str_in_dict', (['d', '"""next_execution_time"""'], {}), "(d, 'next_execution_time')\n", (13709, 13735), False, 'from mistral import utils\n'), ((14353, 14417), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""resource_id"""', '"""resource_type"""', '"""member_id"""'], {}), "('resource_id', 'resource_type', 'member_id')\n", (14372, 14417), True, 'import sqlalchemy as sa\n'), ((14524, 14537), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (14533, 14537), True, 'import sqlalchemy as sa\n'), ((14594, 14607), 'sqlalchemy.String', 'sa.String', (['(50)'], {}), '(50)\n', (14603, 14607), True, 'import sqlalchemy as sa\n'), ((14693, 14706), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (14702, 14706), True, 'import sqlalchemy as sa\n'), ((14767, 14780), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (14776, 14780), True, 'import sqlalchemy as sa\n'), ((14821, 14834), 'sqlalchemy.String', 'sa.String', (['(20)'], {}), '(20)\n', (14830, 14834), True, 'import sqlalchemy as sa\n'), ((15039, 15117), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""exchange"""', '"""topic"""', '"""event"""', '"""workflow_id"""', '"""project_id"""'], {}), "('exchange', 'topic', 'event', 'workflow_id', 'project_id')\n", (15058, 15117), True, 'import sqlalchemy as sa\n'), ((15155, 15241), 'sqlalchemy.Index', 'sa.Index', (["('%s_project_id_workflow_id' % __tablename__)", '"""project_id"""', '"""workflow_id"""'], {}), "('%s_project_id_workflow_id' % __tablename__, 'project_id',\n 'workflow_id')\n", (15163, 15241), True, 'import sqlalchemy as sa\n'), ((15308, 15322), 'sqlalchemy.String', 'sa.String', (['(200)'], {}), '(200)\n', (15317, 15322), True, 'import sqlalchemy as sa\n'), ((15362, 15375), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (15371, 15375), True, 'import sqlalchemy as sa\n'), ((15385, 15421), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['WorkflowDefinition.id'], {}), '(WorkflowDefinition.id)\n', (15398, 15421), True, 'import sqlalchemy as sa\n'), ((15460, 15477), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (15475, 15477), True, 'from mistral.db.sqlalchemy import types as st\n'), ((15510, 15527), 'mistral.db.sqlalchemy.types.JsonDictType', 'st.JsonDictType', ([], {}), '()\n', (15525, 15527), True, 'from mistral.db.sqlalchemy import types as st\n'), ((15555, 15568), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (15564, 15568), True, 'import sqlalchemy as sa\n'), ((15608, 15621), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (15617, 15621), True, 'import sqlalchemy as sa\n'), ((15661, 15674), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (15670, 15674), True, 'import sqlalchemy as sa\n'), ((15718, 15731), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (15727, 15731), True, 'import sqlalchemy as sa\n'), ((16959, 16973), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (16968, 16973), True, 'import sqlalchemy as sa\n'), ((2271, 2337), 'mistral.exceptions.SizeLimitExceededException', 'exc.SizeLimitExceededException', (['field_name', 'size_kb', 'size_limit_kb'], {}), '(field_name, size_kb, size_limit_kb)\n', (2301, 2337), True, 'from mistral import exceptions as exc\n'), ((5888, 5909), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (5907, 5909), True, 'from mistral.db.sqlalchemy import types as st\n'), 
((6509, 6530), 'mistral.db.sqlalchemy.types.JsonLongDictType', 'st.JsonLongDictType', ([], {}), '()\n', (6528, 6530), True, 'from mistral.db.sqlalchemy import types as st\n'), ((8802, 8821), 'mistral.utils.cut', 'utils.cut', (['v', '(65500)'], {}), '(v, 65500)\n', (8811, 8821), False, 'from mistral import utils\n')]
|
"""EESG.py
Created by <NAME>, <NAME>.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
from openmdao.drivers import *
import numpy as np
from numpy import array,float,min,sign
from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan
import pandas
class EESG(Component):
""" Estimates overall mass dimensions and Efficiency of Electrically Excited Synchronous generator. """
def __init__(self):
super(EESG, self).__init__()
# EESG generator design inputs
self.add_param('r_s', val=0.0, units ='m', desc='airgap radius r_s')
self.add_param('l_s', val=0.0, units ='m', desc='Stator core length l_s')
self.add_param('h_s', val=0.0, units ='m', desc='Yoke height h_s')
self.add_param('tau_p',val=0.0, units ='m', desc='Pole pitch self.tau_p')
self.add_param('machine_rating',val=0.0, units ='W', desc='Machine rating')
self.add_param('n_nom',val=0.0, units ='rpm', desc='rated speed')
self.add_param('Torque',val=0.0, units ='Nm', desc='Rated torque ')
self.add_param('I_f',val=0.0000,units='A',desc='Excitation current')
self.add_param('N_f',val=0.0,desc='field turns')
self.add_param('h_ys',val=0.0, units ='m', desc='Yoke height')
self.add_param('h_yr',val=0.0, units ='m', desc='rotor yoke height')
# structural design variables
self.add_param('n_s' ,val=0.0, desc='number of stator arms n_s')
self.add_param('b_st' , val=0.0, units ='m', desc='arm width b_st')
self.add_param('d_s',val=0.0,units ='m', desc='arm depth d_s')
self.add_param('t_ws' ,val=0.0,units ='m', desc='stator arm thickness self.t_ws')
self.add_param('n_r' ,val=0.0, desc='number of arms n')
self.add_param('b_r' ,val=0.0,units ='m', desc='arm width b_r')
self.add_param('d_r' ,val=0.0, units ='m', desc='arm depth d_r')
self.add_param('t_wr' ,val=0.0, units ='m', desc='arm depth thickness self.t_wr')
self.add_param('R_o',val=0.0, units ='m',desc='Shaft radius')
# EESG generator design outputs
# Magnetic loading
self.add_output('B_symax' ,val=0.0, desc='Peak Stator Yoke flux density B_ymax')
self.add_output('B_tmax',val=0.0, desc='Peak Teeth flux density')
self.add_output('B_rymax',val=0.0, desc='Peak Rotor yoke flux density')
self.add_output('B_gfm',val=0.0, desc='Average air gap flux density B_g')
self.add_output('B_g' ,val=0.0, desc='Peak air gap flux density B_g')
self.add_output('B_pc',val=0.0, desc='Pole core flux density')
# Stator design
self.add_output('N_s' ,val=0.0, desc='Number of turns in the stator winding')
self.add_output('b_s',val=0.0, desc='slot width')
self.add_output('b_t',val=0.0, desc='tooth width')
self.add_output('A_Cuscalc',val=0.0, desc='Conductor cross-section mm^2')
self.add_output('S',val=0.0, desc='Stator slots')
# # Output parameters : Rotor design
self.add_output('h_p',val=0.0, desc='Pole height')
self.add_output('b_p',val=0.0, desc='Pole width')
self.add_output('p',val=0.0, desc='No of pole pairs')
self.add_output('n_brushes',val=0.0, desc='number of brushes')
self.add_output('A_Curcalc',val=0.0, desc='Rotor Conductor cross-section')
# Output parameters : Electrical performance
self.add_output('E_s',val=0.0, desc='Stator phase voltage')
self.add_output('f',val=0.0, desc='Generator output frequency')
self.add_output('I_s',val=0.0, desc='Generator output phase current')
self.add_output('R_s',val=0.0, desc='Stator resistance')
self.add_output('R_r',val=0.0, desc='Rotor resistance')
self.add_output('L_m',val=0.0, desc='Stator synchronising inductance')
self.add_output('J_s',val=0.0, desc='Stator Current density')
self.add_output('J_f',val=0.0, desc='rotor Current density')
self.add_output('A_1',val=0.0, desc='Specific current loading')
self.add_output('Load_mmf_ratio',val=0.0, desc='mmf_ratio')
# Objective functions and output
self.add_output('Mass',val=0.0, desc='Actual mass')
self.add_output('K_rad',val=0.0, desc='K_rad')
self.add_output('Losses',val=0.0, desc='Total loss')
self.add_output('gen_eff',val=0.0, desc='Generator efficiency')
# Structural performance
self.add_output('u_Ar',val=0.0, desc='Rotor radial deflection')
self.add_output('y_Ar',val=0.0, desc='Rotor axial deflection')
self.add_output('z_A_r',val=0.0, desc='Rotor circumferential deflection')
self.add_output('u_As',val=0.0, desc='Stator radial deflection')
self.add_output('y_As',val=0.0, desc='Stator axial deflection')
self.add_output('z_A_s',val=0.0, desc='Stator circumferential deflection')
self.add_output('u_all_r',val=0.0, desc='Allowable radial rotor')
self.add_output('u_all_s',val=0.0, desc='Allowable radial stator')
self.add_output('y_all',val=0.0, desc='Allowable axial')
self.add_output('z_all_s',val=0.0, desc='Allowable circum stator')
self.add_output('z_all_r',val=0.0, desc='Allowable circum rotor')
self.add_output('b_all_s',val=0.0, desc='Allowable arm')
self.add_output('b_all_r',val=0.0, desc='Allowable arm dimensions')
self.add_output('TC1',val=0.0, desc='Torque constraint')
self.add_output('TC2',val=0.0, desc='Torque constraint-rotor')
self.add_output('TC3',val=0.0, desc='Torque constraint-stator')
#Material properties
self.add_param('rho_Fes',val=0.0,units='kg*m**-3', desc='Structural Steel density ')
self.add_param('rho_Fe',val=0.0,units='kg*m**-3', desc='Magnetic Steel density ')
self.add_param('rho_Copper',val=0.0,units='kg*m**-3', desc='Copper density ')
# Mass Outputs
self.add_output('Copper',val=0.0, desc='Copper Mass')
self.add_output('Iron',val=0.0, desc='Electrical Steel Mass')
self.add_output('Structural_mass' ,val=0.0, desc='Structural Mass')
# Other parameters
self.add_output('Power_ratio',val=0.0, desc='Power_ratio')
self.add_output('Slot_aspect_ratio',val=0.0,desc='Stator slot aspect ratio')
self.add_output('R_out',val=0.0, desc='Outer radius')
#inputs/outputs for interface with drivese
self.add_param('main_shaft_cm',val= np.array([0.0, 0.0, 0.0]),desc='Main Shaft CM')
self.add_param('main_shaft_length',val=0.0, desc='main shaft length')
self.add_output('I',val=np.array([0.0, 0.0, 0.0]),desc='Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('cm', val=np.array([0.0, 0.0, 0.0]),desc='COM [x,y,z]')
self.gen_sizing = generator_sizing()
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['B_symax'], outputs['B_tmax'], outputs['B_rymax'], outputs['B_gfm'], outputs['B_g'],outputs['B_pc'], outputs['N_s'], outputs['b_s'], \
outputs['b_t'], outputs['A_Cuscalc'],outputs['A_Curcalc'], outputs['b_p'], outputs['h_p'], outputs['p'], outputs['E_s'], outputs['f'], \
outputs['I_s'], outputs['R_s'], outputs['L_m'], outputs['A_1'], outputs['J_s'], outputs['R_r'],outputs['Losses'], \
outputs['Load_mmf_ratio'],outputs['Power_ratio'],outputs['n_brushes'],outputs['J_f'],outputs['K_rad'], outputs['gen_eff'], outputs['S'],
outputs['Slot_aspect_ratio'], outputs['Copper'],outputs['Iron'],outputs['u_Ar'], outputs['y_Ar'], \
outputs['z_A_r'], outputs['u_As'], outputs['y_As'], outputs['z_A_s'], outputs['u_all_r'], outputs['u_all_s'], \
outputs['y_all'], outputs['z_all_s'], outputs['z_all_r'], outputs['b_all_s'], outputs['b_all_r'], outputs['TC1'], \
outputs['TC2'], outputs['TC3'], outputs['R_out'], outputs['Structural_mass'],outputs['Mass'],outputs['cm'], outputs['I']) \
= self.gen_sizing.compute(inputs['r_s'], inputs['l_s'], inputs['h_s'], inputs['tau_p'], inputs['machine_rating'],
inputs['n_nom'], inputs['Torque'], inputs['I_f'],inputs['N_f'],inputs['h_ys'], inputs['h_yr'],inputs['rho_Fe'], inputs['rho_Copper'],inputs['b_st'], inputs['d_s'], \
inputs['t_ws'], inputs['n_r'],inputs['n_s'], inputs['b_r'],inputs['d_r'], inputs['t_wr'], \
inputs['R_o'], inputs['rho_Fes'],inputs['main_shaft_cm'],inputs['main_shaft_length'])
return outputs
class generator_sizing(object):
def __init__(self):
pass
def compute(self,r_s, l_s,h_s,tau_p,machine_rating,n_nom,Torque,I_f,N_f,h_ys,h_yr, \
rho_Fe,rho_Copper,b_st, d_s,t_ws, n_r,n_s, b_r,d_r, t_wr, \
R_o, rho_Fes,main_shaft_cm,main_shaft_length):
self.r_s=r_s
self.l_s=l_s
self.h_s=h_s
self.tau_p=tau_p
self.N_f=N_f
self.I_f=I_f
self.h_ys=h_ys
self.h_yr=h_yr
self.machine_rating=machine_rating
self.n_nom=n_nom
self.Torque=Torque
self.b_st=b_st
self.d_s=d_s
self.t_ws=t_ws
self.n_r=n_r
self.n_s=n_s
self.b_r=b_r
self.d_r=d_r
self.t_wr=t_wr
self.R_o=R_o
self.rho_Fe=rho_Fe
self.rho_Copper=rho_Copper
self.rho_Fes=rho_Fes
self.main_shaft_cm=main_shaft_cm
self.main_shaft_length=main_shaft_length
#Assign values to universal constants
g1 =9.81 # m/s^2 acceleration due to gravity
E =2e11 # N/m^2 young's modulus
sigma =48.373e3 # shear stress
mu_0 =pi*4e-7 # permeability of free space
phi =90*2*pi/360
#Assign values to design constants
h_w =0.005
b_so = 0.004 # Stator slot opening
m =3 # number of phases
q1 =2 # no of stator slots per pole per phase
b_s_tau_s=0.45 # ratio of slot width to slot pitch
k_sfil =0.65 # Slot fill factor
P_Fe0h =4 #specific hysteresis losses W/kg @ 1.5 T @50 Hz
P_Fe0e =1 #specific eddy losses W/kg @ 1.5 T @50 Hz
rho_Cu=1.8*10**(-8)*1.4 # resistivity of copper
k_fes =0.9 # iron fill factor
y_tau_p=1 # coil span to pole pitch ratio (full pitch)
k_fillr = 0.7 # rotor slot fill factor
k_s=0.2 #magnetic saturation factor for iron
T = self.Torque
cos_phi=0.85 #power factor
# back iron thickness for rotor and stator
self.t_s =self.h_ys
self.t =self.h_yr
# Aspect ratio
self.K_rad=self.l_s/(2*self.r_s)
###################################################### Electromagnetic design#############################################
alpha_p=pi/2*.7
dia=2*self.r_s # air gap diameter
# air gap length and minimum values
g=0.001*dia
if(g<0.005):
g=0.005
r_r=self.r_s-g #rotor radius
d_se=dia+2*self.h_s+2*self.h_ys # stator outer diameter
self.p=round(pi*dia/(2*self.tau_p)) # number of pole pairs
self.S=2*self.p*q1*m # number of slots of stator phase winding
N_conductors=self.S*2
self.N_s=N_conductors/2/3 # Stator turns per phase
alpha =180/self.S/self.p #electrical angle
tau_s=pi*dia/self.S # slot pitch
h_ps=0.1*self.tau_p # height of pole shoe
b_pc=0.4*self.tau_p # width of pole core
h_pc=0.6*self.tau_p # height of pole core
self.h_p=0.7*tau_p # pole height
self.b_p=self.h_p
self.b_s=tau_s * b_s_tau_s #slot width
self.Slot_aspect_ratio=self.h_s/self.b_s
self.b_t=tau_s-self.b_s #tooth width
# Calculating carter factor and effective air gap
g_a=g
K_C1=(tau_s+10*g_a)/(tau_s-self.b_s+10*g_a) # salient pole rotor
g_1=K_C1*g
# calculating angular frequency
om_m=2*pi*self.n_nom/60
om_e=60
self.f = self.n_nom*self.p/60
# Slot fill factor according to air gap radius
if (2*self.r_s>2):
K_fills=0.65
else:
K_fills=0.4
# Calculating Stator winding factor
k_y1=sin(y_tau_p*pi/2) # chording factor
k_q1=sin(pi/6)/q1/sin(pi/6/q1) # winding zone factor
k_wd=k_y1*k_q1
# Calculating stator winding conductor length, cross-section and resistance
shortpitch=0
l_Cus = 2*self.N_s*(2*(self.tau_p-shortpitch/m/q1)+self.l_s) #length of winding
A_s = self.b_s*(self.h_s-h_w)
A_scalc=self.b_s*1000*(self.h_s*1000-h_w*1000) # cross section in mm^2
A_Cus = A_s*q1*self.p*K_fills/self.N_s
self.A_Cuscalc = A_scalc*q1*self.p*K_fills/self.N_s
self.R_s=l_Cus*rho_Cu/A_Cus
#field winding design, conductor lenght, cross-section and resistance
self.N_f=round(self.N_f) # rounding the field winding turns to the nearest integer
I_srated=self.machine_rating/(sqrt(3)*5000*cos_phi)
l_pole=self.l_s-0.05+0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack
K_fe=0.95
l_pfe=l_pole*K_fe
l_Cur=4*self.p*self.N_f*(l_pfe+b_pc+pi/4*(pi*(r_r-h_pc-h_ps)/self.p-b_pc))
A_Cur=k_fillr*h_pc*0.5/self.N_f*(pi*(r_r-h_pc-h_ps)/self.p-b_pc)
self.A_Curcalc=k_fillr*h_pc*1000*0.5/self.N_f*(pi*(r_r-h_pc-h_ps)*1000/self.p-b_pc*1000)
Slot_Area=A_Cur*2*self.N_f/k_fillr
self.R_r=rho_Cu*l_Cur/A_Cur
#field winding current density
self.J_f=self.I_f/self.A_Curcalc
# calculating air flux density
self.B_gfm=mu_0*self.N_f*self.I_f/(g_1*(1+k_s)) #No load air gap flux density
self.B_g=self.B_gfm*4*sin(0.5*self.b_p*pi/self.tau_p)/pi # fundamental component
self.B_symax=self.tau_p*self.B_g/pi/self.h_ys #stator yoke flux density
L_fg=2*mu_0*self.p*self.l_s*4*self.N_f**2*((h_ps/(self.tau_p-self.b_p))+(h_pc/(3*pi*(r_r-h_pc-h_ps)/self.p-b_pc)))
# calculating no load voltage and stator current
self.E_s=2*self.N_s*self.l_s*self.r_s*k_wd*om_m*self.B_g/sqrt(2) #no load voltage
self.I_s=(self.E_s-(self.E_s**2-4*self.R_s*self.machine_rating/m)**0.5)/(2*self.R_s)
# Calculating stator winding current density and specific current loading
self.A_1 = 6*self.N_s*self.I_s/(pi*dia)
self.J_s=self.I_s/self.A_Cuscalc
# Calculating magnetic loading in other parts of the machine
delta_m=0 # Initialising load angle
# peak flux density in pole core, rotor yoke and stator teeth
self.B_pc=(1/b_pc)*((2*self.tau_p/pi)*self.B_g*cos(delta_m)+(2*mu_0*self.I_f*self.N_f*((2*h_ps/(self.tau_p-self.b_p))+(h_pc/(self.tau_p-b_pc)))))
self.B_rymax= 0.5*b_pc*self.B_pc/self.h_yr
self.B_tmax=(self.B_gfm+self.B_g)*tau_s*0.5/self.b_t
# Calculating leakage inductances in the stator
L_ssigmas=2*mu_0*self.l_s*self.N_s**2/self.p/q1*((self.h_s-h_w)/(3*self.b_s)+h_w/b_so) #slot leakage inductance
L_ssigmaew=mu_0*1.2*self.N_s**2/self.p*1.2*(2/3*self.tau_p+0.01) #end winding leakage inductance
L_ssigmag=2*mu_0*self.l_s*self.N_s**2/self.p/q1*(5*(g/b_so)/(5+4*(g/b_so))) # tooth tip leakage inductance
L_ssigma=(L_ssigmas+L_ssigmaew+L_ssigmag) # stator leakage inductance
# Calculating effective air gap
At_g=g_1*self.B_gfm/mu_0
At_t=self.h_s*(400*self.B_tmax+7*(self.B_tmax)**13)
At_sy=self.tau_p*0.5*(400*self.B_symax+7*(self.B_symax)**13)
At_pc=(h_pc+h_ps)*(400*self.B_pc+7*(self.B_pc)**13)
At_ry=self.tau_p*0.5*(400*self.B_rymax+7*(self.B_rymax)**13)
g_eff = (At_g+At_t+At_sy+At_pc+At_ry)*g_1/At_g
self.L_m = 6*k_wd**2*self.N_s**2*mu_0*self.r_s*self.l_s/pi/g_eff/self.p**2
B_r1=(mu_0*self.I_f*self.N_f*4*sin(0.5*(self.b_p/self.tau_p)*pi))/g_eff/pi
# Calculating direct axis and quadrature axes inductances
L_dm= (self.b_p/self.tau_p +(1/pi)*sin(pi*self.b_p/self.tau_p))*self.L_m
L_qm=(self.b_p/self.tau_p -(1/pi)*sin(pi*self.b_p/self.tau_p)+2/(3*pi)*cos(self.b_p*pi/2*self.tau_p))*self.L_m
# Calculating actual load angle
delta_m=(atan(om_e*L_qm*self.I_s/self.E_s))
L_d=L_dm+L_ssigma
L_q=L_qm+L_ssigma
I_sd=self.I_s*sin(delta_m)
I_sq=self.I_s*cos(delta_m)
# induced voltage
E_p=om_e*L_dm*I_sd+sqrt(self.E_s**2-(om_e*L_qm*I_sq)**2)
#M_sf =mu_0*8*self.r_s*self.l_s*k_wd*self.N_s*self.N_f*sin(0.5*self.b_p/self.tau_p*pi)/(self.p*g_eff*pi)
#I_f1=sqrt(2)*(E_p)/(om_e*M_sf)
#I_f2=(E_p/self.E_s)*self.B_g*g_eff*pi/(4*self.N_f*mu_0*sin(pi*self.b_p/2/self.tau_p))
#phi_max_stator=k_wd*self.N_s*pi*self.r_s*self.l_s*2*mu_0*self.N_f*self.I_f*4*sin(0.5*self.b_p/self.tau_p/pi)/(self.p*pi*g_eff*pi)
#M_sf=mu_0*8*self.r_s*self.l_s*k_wd*self.N_s*self.N_f*sin(0.5*b_p/self.tau_p/pi)/(self.p*g_eff*pi)
L_tot=self.l_s+2*self.tau_p
# Excitation power
V_fn=500
Power_excitation=V_fn*2*self.I_f #total rated power in excitation winding
self.Power_ratio =Power_excitation*100/self.machine_rating
# Calculating Electromagnetically Active mass
L_tot=self.l_s+2*self.tau_p
V_Cuss=m*l_Cus*A_Cus # volume of copper in stator
V_Cusr=l_Cur*A_Cur # volume of copper in rotor
V_Fest=(self.l_s*pi*((self.r_s+self.h_s)**2-self.r_s**2)-2*m*q1*self.p*self.b_s*self.h_s*self.l_s) # volume of iron in stator tooth
V_Fesy=self.l_s*pi*((self.r_s+self.h_s+self.h_ys)**2-(self.r_s+self.h_s)**2) # volume of iron in stator yoke
V_Fert=2*self.p*l_pfe*(h_pc*b_pc+self.b_p*h_ps) # volume of iron in rotor pole
V_Fery=l_pfe*pi*((r_r-h_ps-h_pc)**2-(r_r-h_ps-h_pc-self.h_yr)**2) # # volume of iron in rotor yoke
self.Copper=(V_Cuss+V_Cusr)*self.rho_Copper
M_Fest=V_Fest*self.rho_Fe
M_Fesy=V_Fesy*self.rho_Fe
M_Fert=V_Fert*self.rho_Fe
M_Fery=V_Fery*self.rho_Fe
self.Iron=M_Fest+M_Fesy+M_Fert+M_Fery
I_snom=self.machine_rating/(3*self.E_s*cos_phi)
## Optional## Calculating mmf ratio
F_1no_load=3*2**0.5*self.N_s*k_wd*self.I_s/(pi*self.p)
Nf_If_no_load=self.N_f*self.I_f
F_1_rated=(3*2**0.5*self.N_s*k_wd*I_srated)/(pi*self.p)
Nf_If_rated=2*Nf_If_no_load
self.Load_mmf_ratio=Nf_If_rated/F_1_rated
## Calculating losses
#1. Copper losses
K_R=1.2
P_Cuss=m*I_snom**2*self.R_s*K_R
P_Cusr=self.I_f**2*self.R_r
P_Cusnom_total=P_Cuss+P_Cusr
#2. Iron losses ( Hysteresis and Eddy currents)
P_Hyys=M_Fesy*(self.B_symax/1.5)**2*(P_Fe0h*om_e/(2*pi*60)) # Hysteresis losses in stator yoke
P_Ftys=M_Fesy*(self.B_symax/1.5)**2*(P_Fe0e*(om_e/(2*pi*60))**2) # Eddy losses in stator yoke
P_Fesynom=P_Hyys+P_Ftys
P_Hyd=M_Fest*(self.B_tmax/1.5)**2*(P_Fe0h*om_e/(2*pi*60)) # Hysteresis losses in stator teeth
P_Ftd=M_Fest*(self.B_tmax/1.5)**2*(P_Fe0e*(om_e/(2*pi*60))**2) # Eddy losses in stator teeth
P_Festnom=P_Hyd+P_Ftd
# brushes
delta_v=1
self.n_brushes=(self.I_f*2/120)
if (self.n_brushes<0.5):
self.n_brushes=1
else:
self.n_brushes=round(self.n_brushes)
#3. brush losses
p_b=2*delta_v*(self.I_f)
self.Losses=P_Cusnom_total+P_Festnom+P_Fesynom+p_b
self.gen_eff=self.machine_rating*100/(self.Losses+self.machine_rating)
################################################## Structural Design ########################################################
## Structural deflection calculations
#rotor structure
q3 = self.B_g**2/2/mu_0 # normal component of Maxwell's stress
l = self.l_s #l-stator core length
l_b = 2*self.tau_p #end winding length
l_e =self.l_s+2*0.001*self.r_s # equivalent core length
a_r = (self.b_r*self.d_r)-((self.b_r-2*self.t_wr)*(self.d_r-2*self.t_wr)) # cross-sectional area of rotor arms
A_r = l*self.t # cross-sectional area of rotor cylinder
N_r = round(self.n_r)
theta_r =pi/N_r # half angle between spokes
I_r =l*self.t**3/12 # second moment of area of rotor cylinder
I_arm_axi_r =((self.b_r*self.d_r**3)-((self.b_r-2*self.t_wr)*(self.d_r-2*self.t_wr)**3))/12 # second moment of area of rotor arm
I_arm_tor_r = ((self.d_r*self.b_r**3)-((self.d_r-2*self.t_wr)*(self.b_r-2*self.t_wr)**3))/12 # second moment of area of rotor arm w.r.t torsion
R = r_r-h_ps-h_pc-0.5*self.h_yr
R_1 = R-self.h_yr*0.5 # inner radius of rotor cylinder
k_1 = sqrt(I_r/A_r) # radius of gyration
m1 =(k_1/R)**2
c =R/500
self.u_all_r =R/10000 # allowable radial deflection
self.b_all_r =2*pi*self.R_o/N_r # allowable circumferential arm dimension
# Calculating radial deflection of rotor structure according to McDonald's
Numer=R**3*((0.25*(sin(theta_r)-(theta_r*cos(theta_r)))/(sin(theta_r))**2)-(0.5/sin(theta_r))+(0.5/theta_r))
Pov=((theta_r/(sin(theta_r))**2)+1/tan(theta_r))*((0.25*R/A_r)+(0.25*R**3/I_r))
Qov=R**3/(2*I_r*theta_r*(m1+1))
Lov=(R_1-R_o)/a_r
Denom=I_r*(Pov-Qov+Lov) # radial deflection % rotor
self.u_Ar =(q3*R**2/E/self.h_yr)*(1+Numer/Denom)
# Calculating axial deflection of rotor structure
w_r =self.rho_Fes*g1*sin(phi)*a_r*N_r
mass_st_lam=self.rho_Fe*2*pi*(R+0.5*self.h_yr)*l*self.h_yr # mass of rotor yoke steel
W =g1*sin(phi)*(mass_st_lam+(V_Cusr*self.rho_Copper)+M_Fert)/N_r # weight of rotor cylinder
l_ir =R # length of rotor arm beam at which rotor cylinder acts
l_iir =R_1
self.y_Ar =(W*l_ir**3/12/E/I_arm_axi_r)+(w_r*l_iir**4/24/E/I_arm_axi_r) # axial deflection
#Calculating torsional deflection of rotor structure
self.z_all_r =0.05*2*pi*R/360 # allowable torsional deflection
self.z_A_r =(2*pi*(R-0.5*self.h_yr)*l/N_r)*sigma*(l_ir-0.5*self.h_yr)**3/3/E/I_arm_tor_r # circumferential deflection
#STATOR structure
A_st =l*self.t_s
a_s = (self.b_st*self.d_s)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws))
N_st = round(self.n_s)
theta_s =pi/N_st
I_st =l*self.t_s**3/12
I_arm_axi_s =((self.b_st*self.d_s**3)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws)**3))/12 # second moment of area of stator arm
I_arm_tor_s = ((self.d_s*self.b_st**3)-((self.d_s-2*self.t_ws)*(self.b_st-2*self.t_ws)**3))/12 # second moment of area of stator arm w.r.t torsion
R_st =(self.r_s+self.h_s+self.h_ys*0.5)
R_1s = R_st-self.h_ys*0.5
k_2 = sqrt(I_st/A_st)
m2 =(k_2/R_st)**2
# allowable deflections
self.b_all_s =2*pi*self.R_o/N_st
self.u_all_s = R_st/10000
self.y_all =2*l/100 # allowable axial deflection
self.z_all_s =0.05*2*pi*R_st/360 # allowable torsional deflection
# Calculating radial deflection according to McDonald's
Numers=R_st**3*((0.25*(sin(theta_s)-(theta_s*cos(theta_s)))/(sin(theta_s))**2)-(0.5/sin(theta_s))+(0.5/theta_s))
Povs=((theta_s/(sin(theta_s))**2)+1/tan(theta_s))*((0.25*R_st/A_st)+(0.25*R_st**3/I_st))
Qovs=R_st**3/(2*I_st*theta_s*(m2+1))
Lovs=(R_1s-R_o)*0.5/a_s
Denoms=I_st*(Povs-Qovs+Lovs)
self.R_out=(R/0.995+self.h_s+self.h_ys)
self.u_As =(q3*R_st**2/E/self.t_s)*(1+Numers/Denoms)
# Calculating axial deflection according to McDonald
l_is =R_st-self.R_o
l_iis =l_is
l_iiis =l_is
mass_st_lam_s= M_Fest+pi*l*self.rho_Fe*((R_st+0.5*self.h_ys)**2-(R_st-0.5*self.h_ys)**2)
W_is =g1*sin(phi)*(self.rho_Fes*l*self.d_s**2*0.5) # self-weight of a stator arm, acting at l_is
W_iis =g1*sin(phi)*(V_Cuss*self.rho_Copper+mass_st_lam_s)/2/N_st
w_s =self.rho_Fes*g1*sin(phi)*a_s*N_st
X_comp1 = (W_is*l_is**3/12/E/I_arm_axi_s)
X_comp2 =(W_iis*l_iis**4/24/E/I_arm_axi_s)
X_comp3 =w_s*l_iiis**4/24/E/I_arm_axi_s
self.y_As =X_comp1+X_comp2+X_comp3 # axial deflection
# Calculating torsional deflection
self.z_A_s =2*pi*(R_st+0.5*self.t_s)*l/(2*N_st)*sigma*(l_is+0.5*self.t_s)**3/3/E/I_arm_tor_s
# tangential stress constraints
self.TC1=T/(2*pi*sigma)
self.TC2=R**2*l
self.TC3=R_st**2*l
mass_stru_steel =2*(N_st*(R_1s-self.R_o)*a_s*self.rho_Fes)
# Calculating inactive mass and total mass
self.Structural_mass=mass_stru_steel+(N_r*(R_1-self.R_o)*a_r*self.rho_Fes)
self.Mass=self.Copper+self.Iron+self.Structural_mass
self.I = np.array([0.0, 0.0, 0.0])
# Calculating mass moments of inertia and center of mass
self.I[0] = (0.5*self.Mass*self.R_out**2)
self.I[1] = (0.25*self.Mass*self.R_out**2+(1/12)*self.Mass*self.l_s**2)
self.I[2] = self.I[1]
self.cm = np.array([0.0, 0.0, 0.0])
self.cm[0] = self.main_shaft_cm[0] + self.main_shaft_length/2. + self.l_s/2
self.cm[1] = self.main_shaft_cm[1]
self.cm[2] = self.main_shaft_cm[2]
return(self.B_symax, self.B_tmax, self.B_rymax,self.B_gfm, self.B_g,self.B_pc, self.N_s, self.b_s, \
self.b_t, self.A_Cuscalc, self.A_Curcalc,self.b_p,self.h_p, self.p, self.E_s, self.f,self.I_s, self.R_s, self.L_m, self.A_1,\
self.J_s,self.R_r, self.Losses,self.Load_mmf_ratio,self.Power_ratio,self.n_brushes,self.J_f,self.K_rad, self.gen_eff,\
self.S, self.Slot_aspect_ratio, self.Copper,self.Iron,self.u_Ar,self.y_Ar,self.z_A_r,\
self.u_As,self.y_As,self.z_A_s,self.u_all_r,self.u_all_s,self.y_all,self.z_all_s,self.z_all_r,self.b_all_s, \
self.b_all_r,self.TC1,self.TC2,self.TC3,self.R_out,self.Structural_mass,self.Mass,self.cm,self.I)
####################################################Cost Analysis#######################################################################
class EESG_Cost(Component):
""" Provides a material cost estimate for EESG. Manufacturing costs are excluded"""
def __init__(self):
super(EESG_Cost, self).__init__()
# Inputs
# Specific cost of material by type
self.add_param('C_Cu',val=0.0, desc='Specific cost of copper')
self.add_param('C_Fe',val=0.0,desc='Specific cost of magnetic steel/iron')
self.add_param('C_Fes',val=0.0,desc='Specific cost of structural steel')
# Mass of each material type
self.add_param('Copper',val=0.0, desc='Copper mass')
self.add_param('Iron',val=0.0, desc='Iron mass')
self.add_param('Structural_mass',val=0.0, desc='Structural mass')
# Outputs
self.add_output('Costs',val=0.0,desc='Total cost')
self.gen_costs=generator_costing()
def solve_nonlinear(self,inputs,outputs,resid):
(outputs['Costs'])=self.gen_costs.compute(inputs['Copper'],inputs['C_Cu'], \
inputs['Iron'],inputs['C_Fe'],inputs['C_Fes'],inputs['Structural_mass'])
return outputs
class generator_costing(object):
def __init__(self):
pass
def compute(self,Copper,C_Cu,Iron,C_Fe,C_Fes,Structural_mass):
self.Copper=Copper
self.Iron=Iron
self.Structural_mass=Structural_mass
# Material cost as a function of material mass and specific cost of material
K_gen=self.Copper*C_Cu+self.Iron*C_Fe
Cost_str=C_Fes*self.Structural_mass
Costs=K_gen+Cost_str
return(Costs)
####################################################OPTIMISATION SET_UP ###############################################################
class EESG_Opt(Group):
""" Creates a new Group containing EESG and EESG_Cost"""
def __init__(self):
super(EESG_Opt, self).__init__()
self.add('machine_rating', IndepVarComp('machine_rating',0.0),promotes=['*'])
self.add('Torque',IndepVarComp('Torque', val=0.0),promotes=['*'])
self.add('n_nom', IndepVarComp('n_nom', val=0.0),promotes=['*'])
self.add('main_shaft_cm', IndepVarComp('main_shaft_cm',val=np.array([0.0, 0.0, 0.0])),promotes=['*'])
self.add('main_shaft_length',IndepVarComp('main_shaft_length',val=0.0),promotes=['*'])
self.add('r_s',IndepVarComp('r_s',0.0),promotes=['*'])
self.add('l_s',IndepVarComp('l_s',0.0),promotes=['*'])
self.add('h_s',IndepVarComp('h_s',0.0),promotes=['*'])
self.add('tau_p',IndepVarComp('tau_p',0.0),promotes=['*'])
self.add('I_f',IndepVarComp('I_f',0.0),promotes=['*'])
self.add('N_f',IndepVarComp('N_f',0.0),promotes=['*'])
self.add('h_ys',IndepVarComp('h_ys',0.0),promotes=['*'])
self.add('h_yr',IndepVarComp('h_yr',0.0),promotes=['*'])
self.add('n_s',IndepVarComp('n_s',0.0),promotes=['*'])
self.add('b_st',IndepVarComp('b_st',0.0),promotes=['*'])
self.add('n_r',IndepVarComp('n_r',0.0),promotes=['*'])
self.add('b_r',IndepVarComp('b_r',0.0),promotes=['*'])
self.add('d_r',IndepVarComp('d_r',0.0),promotes=['*'])
self.add('d_s',IndepVarComp('d_s',0.0),promotes=['*'])
self.add('t_wr',IndepVarComp('t_wr',0.0),promotes=['*'])
self.add('t_ws',IndepVarComp('t_ws',0.0),promotes=['*'])
self.add('R_o',IndepVarComp('R_o',0.0),promotes=['*'])
self.add('rho_Fes',IndepVarComp('rho_Fes',0.0),promotes=['*'])
self.add('rho_Fe',IndepVarComp('rho_Fe',0.0),promotes=['*'])
self.add('rho_Copper',IndepVarComp('rho_Copper',0.0),promotes=['*'])
# add EESG component, create constraint equations
self.add('EESG',EESG(),promotes=['*'])
self.add('con_uAs', ExecComp('con_uAs =u_all_s-u_As'),promotes=['*'])
self.add('con_zAs', ExecComp('con_zAs =z_all_s-z_A_s'),promotes=['*'])
self.add('con_yAs', ExecComp('con_yAs =y_all-y_As'),promotes=['*'])
self.add('con_bst', ExecComp('con_bst =b_all_s-b_st'),promotes=['*'])
self.add('con_uAr', ExecComp('con_uAr =u_all_r-u_Ar'),promotes=['*'])
self.add('con_zAr', ExecComp('con_zAr =z_all_r-z_A_r'),promotes=['*'])
self.add('con_yAr', ExecComp('con_yAr =y_all-y_Ar'),promotes=['*'])
self.add('con_br', ExecComp('con_br =b_all_r-b_r'),promotes=['*'])
self.add('con_TC2', ExecComp('con_TC2 =TC2-TC1'),promotes=['*'])
self.add('con_TC3', ExecComp('con_TC3 =TC3-TC1'),promotes=['*'])
# add EESG_Cost component
self.add('EESG_Cost',EESG_Cost(),promotes=['*'])
self.add('C_Cu',IndepVarComp('C_Cu',val=0.0),promotes=['*'])
self.add('C_Fe',IndepVarComp('C_Fe',val=0.0),promotes=['*'])
self.add('C_Fes',IndepVarComp('C_Fes',val=0.0),promotes=['*'])
def EESG_Opt_example():
opt_problem=Problem(root=EESG_Opt())
#Example optimization of an EESG for costs on a 5 MW reference turbine
# add optimizer and set up the problem (using user-defined input for the objective function)
#
opt_problem.driver=pyOptSparseDriver()
opt_problem.driver.options['optimizer'] = 'CONMIN'
opt_problem.driver.add_objective('Costs') # Define Objective
opt_problem.driver.opt_settings['IPRINT'] = 4
opt_problem.driver.opt_settings['ITRM'] = 3
opt_problem.driver.opt_settings['ITMAX'] = 10
opt_problem.driver.opt_settings['DELFUN'] = 1e-3
opt_problem.driver.opt_settings['DABFUN'] = 1e-3
opt_problem.driver.opt_settings['IFILE'] = 'CONMIN_EESG.out'
opt_problem.root.deriv_options['type']='fd'
# Specify target efficiency (%)
Eta_Target = 93.0
# Set bounds for design variables for an EESG designed for a 5MW turbine
opt_problem.driver.add_desvar('r_s',lower=0.5,upper=9.0)
opt_problem.driver.add_desvar('l_s', lower=0.5, upper=2.5)
opt_problem.driver.add_desvar('h_s', lower=0.06, upper=0.15)
opt_problem.driver.add_desvar('tau_p', lower=0.04, upper=0.2)
opt_problem.driver.add_desvar('N_f', lower=10, upper=300)
opt_problem.driver.add_desvar('I_f', lower=1, upper=500)
opt_problem.driver.add_desvar('n_r', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('h_yr', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('h_ys', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('b_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_wr', lower=0.001, upper=0.2)
opt_problem.driver.add_desvar('n_s', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('b_st', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_s', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_ws', lower=0.001, upper=0.2)
# set up constraints for the EESG generator
opt_problem.driver.add_constraint('B_symax',upper=2.0-1.0e-6) #1
opt_problem.driver.add_constraint('B_rymax',upper=2.0-1.0e-6) #2
opt_problem.driver.add_constraint('B_tmax',upper=2.0-1.0e-6) #3
opt_problem.driver.add_constraint('B_gfm',lower=0.617031,upper=1.057768) #4
opt_problem.driver.add_constraint('B_g',lower=0.7,upper=1.2) #5
opt_problem.driver.add_constraint('B_pc',upper=2.0) #6
opt_problem.driver.add_constraint('E_s',lower=500.0,upper=5000.0) #7
opt_problem.driver.add_constraint('con_uAs',lower=0.0+1.0e-6) #8
opt_problem.driver.add_constraint('con_zAs',lower=0.0+1.0e-6) #9
opt_problem.driver.add_constraint('con_yAs',lower=0.0+1.0e-6) #10
opt_problem.driver.add_constraint('con_uAr',lower=0.0+1.0e-6) #11
opt_problem.driver.add_constraint('con_zAr',lower=0.0+1.0e-6) #12
opt_problem.driver.add_constraint('con_yAr',lower=0.0+1.0e-6) #13
opt_problem.driver.add_constraint('con_TC2',lower=0.0+1.0e-6) #14
opt_problem.driver.add_constraint('con_TC3',lower=0.0+1e-6) #15
opt_problem.driver.add_constraint('con_br',lower=0.0+1e-6) #16
opt_problem.driver.add_constraint('con_bst',lower=0.0-1e-6) #17
opt_problem.driver.add_constraint('A_1',upper=60000.0-1e-6) #18
opt_problem.driver.add_constraint('J_s',upper=6.0) #19
opt_problem.driver.add_constraint('J_f',upper=6.0) #20
opt_problem.driver.add_constraint('A_Cuscalc',lower=5.0,upper=300) #22
opt_problem.driver.add_constraint('A_Curcalc',lower=10,upper=300) #23
opt_problem.driver.add_constraint('K_rad',lower=0.2+1e-6,upper=0.27) #24
opt_problem.driver.add_constraint('Slot_aspect_ratio',lower=4.0,upper=10.0)#25
opt_problem.driver.add_constraint('gen_eff',lower=Eta_Target) #26
opt_problem.driver.add_constraint('n_brushes',upper=6) #27
opt_problem.driver.add_constraint('Power_ratio',upper=2-1.0e-6) #28
opt_problem.setup()
# Specify Target machine parameters
opt_problem['machine_rating']=5000000.0
opt_problem['Torque']=4.143289e6
opt_problem['n_nom']=12.1
# Initial design variables
opt_problem['r_s']=3.2
opt_problem['l_s']=1.4
opt_problem['h_s']= 0.060
opt_problem['tau_p']= 0.170
opt_problem['I_f']= 69
opt_problem['N_f']= 100
opt_problem['h_ys']= 0.130
opt_problem['h_yr']= 0.120
opt_problem['n_s']= 5
opt_problem['b_st']= 0.470
opt_problem['n_r']=5
opt_problem['b_r']= 0.480
opt_problem['d_r']= 0.510
opt_problem['d_s']= 0.400
opt_problem['t_wr']=0.140
opt_problem['t_ws']=0.070
opt_problem['R_o']=0.43 #10MW: 0.523950817,#5MW: 0.43, #3MW:0.363882632 #1.5MW: 0.2775 0.75MW: 0.17625
# Costs
opt_problem['C_Cu']=4.786
opt_problem['C_Fe']= 0.556
opt_problem['C_Fes']=0.50139
#Material properties
opt_problem['rho_Fe']= 7700 #Magnetic Steel/iron density
opt_problem['rho_Fes']= 7850 #structural Steel density
opt_problem['rho_Copper']=8900 # Kg/m3 copper density
opt_problem['main_shaft_cm']=np.array([0.0, 0.0, 0.0])
opt_problem['main_shaft_length'] =2.0
#Run optimization
opt_problem.run()
"""Uncomment to print solution to screen/an excel file
raw_data = {'Parameters': ['Rating','Stator Arms', 'Stator Axial arm dimension','Stator Circumferential arm dimension',' Stator arm Thickness' ,'Rotor Arms', 'Rotor Axial arm dimension','Rotor Circumferential arm dimension',\
'Rotor Arm thickness', ' Rotor Radial deflection', 'Rotor Axial deflection','Rotor circum deflection', 'Stator Radial deflection',' Stator Axial deflection',' Stator Circumferential deflection','Air gap diameter', 'Stator length',\
'l/D ratio', 'Pole pitch', 'Stator slot height','Stator slot width','Slot aspect ratio','Stator tooth width', 'Stator yoke height', 'Rotor yoke height', 'Rotor pole height', 'Rotor pole width', 'Average no load flux density', \
'Peak air gap flux density','Peak stator yoke flux density','Peak rotor yoke flux density','Stator tooth flux density','Rotor pole core flux density','Pole pairs', 'Generator output frequency', 'Generator output phase voltage(rms value)', \
'Generator Output phase current', 'Stator resistance', 'Synchronous inductance','Stator slots','Stator turns','Stator conductor cross-section','Stator Current density ','Specific current loading','Field turns','Conductor cross-section',\
'Field Current','D.C Field resistance','MMF ratio at rated load(Rotor/Stator)','Excitation Power (% of Rated Power)','Number of brushes/polarity','Field Current density','Generator Efficiency', 'Iron mass', 'Copper mass','Mass of Arms','Total Mass','Total Cost'],\
'Values': [opt_problem['machine_rating']/1e6,opt_problem['n_s'],opt_problem['d_s']*1000,opt_problem['b_st']*1000,opt_problem['t_ws']*1000,opt_problem['n_r'],opt_problem['d_r']*1000,opt_problem['b_r']*1000,opt_problem['t_wr']*1000,opt_problem['u_Ar']*1000,\
opt_problem['y_Ar']*1000,opt_problem['z_A_r']*1000,opt_problem['u_As']*1000,opt_problem['y_As']*1000,opt_problem['z_A_s']*1000,2*opt_problem['r_s'],opt_problem['l_s'],opt_problem['K_rad'],opt_problem['tau_p']*1000,opt_problem['h_s']*1000,opt_problem['b_s']*1000,\
opt_problem['Slot_aspect_ratio'],opt_problem['b_t']*1000,opt_problem['h_ys']*1000,opt_problem['h_yr']*1000,opt_problem['h_p']*1000,opt_problem['b_p']*1000,opt_problem['B_gfm'],opt_problem['B_g'],opt_problem['B_symax'],opt_problem['B_rymax'],opt_problem['B_tmax'],\
opt_problem['B_pc'],opt_problem['p'],opt_problem['f'],opt_problem['E_s'],opt_problem['I_s'],opt_problem['R_s'],opt_problem['L_m'],opt_problem['S'],opt_problem['N_s'],opt_problem['A_Cuscalc'],opt_problem['J_s'],opt_problem['A_1']/1000,opt_problem['N_f'],opt_problem['A_Curcalc'],\
opt_problem['I_f'],opt_problem['R_r'],opt_problem['Load_mmf_ratio'],opt_problem['Power_ratio'],opt_problem['n_brushes'],opt_problem['J_f'],opt_problem['gen_eff'],opt_problem['Iron']/1000,opt_problem['Copper']/1000,opt_problem['Structural_mass']/1000,\
opt_problem['Mass']/1000,opt_problem['Costs']/1000],
'Limit': ['','','',opt_problem['b_all_s']*1000,'','','',opt_problem['b_all_r']*1000,'',opt_problem['u_all_r']*1000,opt_problem['y_all']*1000,opt_problem['z_all_r']*1000,opt_problem['u_all_s']*1000,opt_problem['y_all']*1000,opt_problem['z_all_s']*1000,\
'','','(0.2-0.27)','','','','(4-10)','','','','','','(0.62-1.05)','1.2','2','2','2','2','','(10-60)','','','','','','','','(3-6)','<60','','','','','','<2%','','(3-6)',Eta_Target,'','','','',''],
'Units':['MW','unit','mm','mm','mm','unit','mm','mm','mm','mm','mm','mm','mm','mm','mm','m','m','','','mm','mm','mm','mm','mm','mm','mm','mm','T','T','T','T','T','T','-','Hz','V','A','om/phase',\
'p.u','slots','turns','mm^2','A/mm^2','kA/m','turns','mm^2','A','ohm','%','%','brushes','A/mm^2','turns','%','tons','tons','tons','1000$']}
df=pandas.DataFrame(raw_data, columns=['Parameters','Values','Limit','Units'])
print df
df.to_excel('EESG_'+str(opt_problem['machine_rating']/1e6)+'MW_1.7.x.xlsx')
"""
if __name__=="__main__":
# Run an example optimization of EESG generator on cost
EESG_Opt_example()
|
[
"openmdao.api.ExecComp",
"math.tan",
"openmdao.api.IndepVarComp",
"math.sqrt",
"math.cos",
"numpy.array",
"openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver",
"math.sin",
"math.atan"
] |
[((30236, 30255), 'openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver', 'pyOptSparseDriver', ([], {}), '()\n', (30253, 30255), False, 'from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver\n'), ((35050, 35075), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (35058, 35075), True, 'import numpy as np\n'), ((11885, 11906), 'math.sin', 'sin', (['(y_tau_p * pi / 2)'], {}), '(y_tau_p * pi / 2)\n', (11888, 11906), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15751, 15790), 'math.atan', 'atan', (['(om_e * L_qm * self.I_s / self.E_s)'], {}), '(om_e * L_qm * self.I_s / self.E_s)\n', (15755, 15790), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20151, 20166), 'math.sqrt', 'sqrt', (['(I_r / A_r)'], {}), '(I_r / A_r)\n', (20155, 20166), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22265, 22282), 'math.sqrt', 'sqrt', (['(I_st / A_st)'], {}), '(I_st / A_st)\n', (22269, 22282), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((24269, 24294), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (24277, 24294), True, 'import numpy as np\n'), ((24520, 24545), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (24528, 24545), True, 'import numpy as np\n'), ((11948, 11964), 'math.sin', 'sin', (['(pi / 6 / q1)'], {}), '(pi / 6 / q1)\n', (11951, 11964), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((13731, 13738), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (13735, 13738), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15845, 15857), 'math.sin', 'sin', (['delta_m'], {}), '(delta_m)\n', (15848, 15857), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15875, 15887), 'math.cos', 'cos', (['delta_m'], {}), '(delta_m)\n', (15878, 15887), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15939, 15986), 'math.sqrt', 'sqrt', (['(self.E_s ** 2 - (om_e * L_qm * I_sq) ** 2)'], {}), '(self.E_s ** 2 - (om_e * L_qm * I_sq) ** 2)\n', (15943, 15986), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((27278, 27313), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""machine_rating"""', '(0.0)'], {}), "('machine_rating', 0.0)\n", (27290, 27313), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27350, 27381), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""Torque"""'], {'val': '(0.0)'}), "('Torque', val=0.0)\n", (27362, 27381), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27419, 27449), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_nom"""'], {'val': '(0.0)'}), "('n_nom', val=0.0)\n", (27431, 27449), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27607, 27649), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""main_shaft_length"""'], {'val': '(0.0)'}), "('main_shaft_length', val=0.0)\n", (27619, 27649), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27687, 27711), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""r_s"""', '(0.0)'], {}), "('r_s', 0.0)\n", (27699, 
27711), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27745, 27769), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""l_s"""', '(0.0)'], {}), "('l_s', 0.0)\n", (27757, 27769), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27803, 27827), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_s"""', '(0.0)'], {}), "('h_s', 0.0)\n", (27815, 27827), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27863, 27889), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tau_p"""', '(0.0)'], {}), "('tau_p', 0.0)\n", (27875, 27889), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27923, 27947), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""I_f"""', '(0.0)'], {}), "('I_f', 0.0)\n", (27935, 27947), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27981, 28005), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""N_f"""', '(0.0)'], {}), "('N_f', 0.0)\n", (27993, 28005), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28044, 28069), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_ys"""', '(0.0)'], {}), "('h_ys', 0.0)\n", (28056, 28069), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28104, 28129), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_yr"""', '(0.0)'], {}), "('h_yr', 0.0)\n", (28116, 28129), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28163, 28187), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_s"""', '(0.0)'], {}), "('n_s', 0.0)\n", (28175, 28187), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28222, 28247), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""b_st"""', '(0.0)'], {}), "('b_st', 0.0)\n", (28234, 28247), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28281, 28305), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_r"""', '(0.0)'], {}), "('n_r', 0.0)\n", (28293, 28305), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28339, 28363), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""b_r"""', '(0.0)'], {}), "('b_r', 0.0)\n", (28351, 28363), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28397, 28421), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""d_r"""', '(0.0)'], {}), "('d_r', 0.0)\n", (28409, 28421), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28455, 28479), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""d_s"""', '(0.0)'], {}), "('d_s', 0.0)\n", (28467, 28479), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28514, 28539), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_wr"""', '(0.0)'], {}), "('t_wr', 0.0)\n", (28526, 28539), False, 'from openmdao.api import Group, Problem, 
Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28574, 28599), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_ws"""', '(0.0)'], {}), "('t_ws', 0.0)\n", (28586, 28599), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28633, 28657), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""R_o"""', '(0.0)'], {}), "('R_o', 0.0)\n", (28645, 28657), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28699, 28727), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fes"""', '(0.0)'], {}), "('rho_Fes', 0.0)\n", (28711, 28727), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28764, 28791), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fe"""', '(0.0)'], {}), "('rho_Fe', 0.0)\n", (28776, 28791), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28832, 28863), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Copper"""', '(0.0)'], {}), "('rho_Copper', 0.0)\n", (28844, 28863), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29005, 29038), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAs =u_all_s-u_As"""'], {}), "('con_uAs =u_all_s-u_As')\n", (29013, 29038), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29078, 29112), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_zAs =z_all_s-z_A_s"""'], {}), "('con_zAs =z_all_s-z_A_s')\n", (29086, 29112), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29152, 29183), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAs =y_all-y_As"""'], {}), "('con_yAs =y_all-y_As')\n", (29160, 29183), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29223, 29256), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_bst =b_all_s-b_st"""'], {}), "('con_bst =b_all_s-b_st')\n", (29231, 29256), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29296, 29329), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAr =u_all_r-u_Ar"""'], {}), "('con_uAr =u_all_r-u_Ar')\n", (29304, 29329), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29369, 29403), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_zAr =z_all_r-z_A_r"""'], {}), "('con_zAr =z_all_r-z_A_r')\n", (29377, 29403), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29443, 29474), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAr =y_all-y_Ar"""'], {}), "('con_yAr =y_all-y_Ar')\n", (29451, 29474), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29513, 29544), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_br =b_all_r-b_r"""'], {}), "('con_br =b_all_r-b_r')\n", (29521, 29544), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29584, 29612), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC2 =TC2-TC1"""'], {}), "('con_TC2 
=TC2-TC1')\n", (29592, 29612), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29652, 29680), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC3 =TC3-TC1"""'], {}), "('con_TC3 =TC3-TC1')\n", (29660, 29680), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29801, 29830), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Cu"""'], {'val': '(0.0)'}), "('C_Cu', val=0.0)\n", (29813, 29830), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29865, 29894), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fe"""'], {'val': '(0.0)'}), "('C_Fe', val=0.0)\n", (29877, 29894), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29930, 29960), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fes"""'], {'val': '(0.0)'}), "('C_Fes', val=0.0)\n", (29942, 29960), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((6362, 6387), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6370, 6387), True, 'import numpy as np\n'), ((6510, 6535), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6518, 6535), True, 'import numpy as np\n'), ((6652, 6677), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6660, 6677), True, 'import numpy as np\n'), ((11935, 11946), 'math.sin', 'sin', (['(pi / 6)'], {}), '(pi / 6)\n', (11938, 11946), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((13358, 13395), 'math.sin', 'sin', (['(0.5 * self.b_p * pi / self.tau_p)'], {}), '(0.5 * self.b_p * pi / self.tau_p)\n', (13361, 13395), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23259, 23267), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23262, 23267), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((12644, 12651), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (12648, 12651), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((14243, 14255), 'math.cos', 'cos', (['delta_m'], {}), '(delta_m)\n', (14246, 14255), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15397, 15436), 'math.sin', 'sin', (['(0.5 * (self.b_p / self.tau_p) * pi)'], {}), '(0.5 * (self.b_p / self.tau_p) * pi)\n', (15400, 15436), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15544, 15575), 'math.sin', 'sin', (['(pi * self.b_p / self.tau_p)'], {}), '(pi * self.b_p / self.tau_p)\n', (15547, 15575), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15656, 15691), 'math.cos', 'cos', (['(self.b_p * pi / 2 * self.tau_p)'], {}), '(self.b_p * pi / 2 * self.tau_p)\n', (15659, 15691), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20629, 20641), 'math.tan', 'tan', (['theta_r'], {}), '(theta_r)\n', (20632, 20641), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20929, 20937), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (20932, 20937), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((21082, 21090), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', 
(21085, 21090), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22769, 22781), 'math.tan', 'tan', (['theta_s'], {}), '(theta_s)\n', (22772, 22781), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23515, 23523), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23518, 23523), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((27532, 27557), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (27540, 27557), True, 'import numpy as np\n'), ((15619, 15650), 'math.sin', 'sin', (['(pi * self.b_p / self.tau_p)'], {}), '(pi * self.b_p / self.tau_p)\n', (15622, 15650), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20562, 20574), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20565, 20574), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20609, 20621), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20612, 20621), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22701, 22713), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22704, 22713), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22749, 22761), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22752, 22761), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23428, 23436), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23431, 23436), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20539, 20551), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20542, 20551), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22678, 22690), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22681, 22690), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20501, 20513), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20504, 20513), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22640, 22652), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22643, 22652), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20523, 20535), 'math.cos', 'cos', (['theta_r'], {}), '(theta_r)\n', (20526, 20535), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22662, 22674), 'math.cos', 'cos', (['theta_s'], {}), '(theta_s)\n', (22665, 22674), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n')]
|
#!/usr/bin/env python
"""A more advanced Reducer, using Python iterators and generators."""
from itertools import groupby
from operator import itemgetter
import sys
def read_mapper_output(file, separator='\t'):
for line in file:
yield line.rstrip().split(separator, 1)
def main(separator='\t'):
data = read_mapper_output(sys.stdin, separator=separator)
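    # Hadoop streaming sorts the mapper output by key before it reaches the
    # reducer, so groupby can collect each key's values in a single pass.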
for key, value in groupby(data, itemgetter(0)):
print("{}\t{}".format(key, len(list(value))))
if __name__ == "__main__":
main()
|
[
"operator.itemgetter"
] |
[((410, 423), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (420, 423), False, 'from operator import itemgetter\n')]
|
import copy
import logging
import os
from typing import Dict, List, Tuple
import checksumdir
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ..adapter import download_object
logger = logging.getLogger("fastface.dataset")
class _IdentitiyTransforms:
"""Dummy tranforms"""
def __call__(self, img: np.ndarray, targets: Dict) -> Tuple:
return img, targets
def default_collate_fn(batch):
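    # Stack the images into a contiguous NCHW float tensor and convert any
    # numpy arrays inside the target dicts to torch tensors.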
batch, targets = zip(*batch)
batch = np.stack(batch, axis=0).astype(np.float32)
batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()
for i, target in enumerate(targets):
for k, v in target.items():
if isinstance(v, np.ndarray):
targets[i][k] = torch.from_numpy(v)
return batch, targets
class BaseDataset(Dataset):
def __init__(self, ids: List[str], targets: List[Dict], transforms=None, **kwargs):
super().__init__()
assert isinstance(ids, list), "given `ids` must be list"
        assert isinstance(targets, list), "given `targets` must be list"
        assert len(ids) == len(targets), "length of both lists must be equal"
self.ids = ids
self.targets = targets
self.transforms = _IdentitiyTransforms() if transforms is None else transforms
# set given kwargs to the dataset
for key, value in kwargs.items():
if hasattr(self, key):
# log warning
continue
setattr(self, key, value)
def __getitem__(self, idx: int) -> Tuple:
img = self._load_image(self.ids[idx])
targets = copy.deepcopy(self.targets[idx])
# apply transforms
img, targets = self.transforms(img, targets)
# clip boxes
targets["target_boxes"] = self._clip_boxes(
targets["target_boxes"], img.shape[:2]
)
# discard zero sized boxes
targets["target_boxes"] = self._discard_zero_size_boxes(targets["target_boxes"])
return (img, targets)
def __len__(self) -> int:
return len(self.ids)
@staticmethod
def _clip_boxes(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
        """Clip (x1, y1, x2, y2) boxes to lie within the given (height, width) image shape."""
height, width = shape
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(min=0, max=width - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(min=0, max=height - 1)
return boxes
@staticmethod
def _discard_zero_size_boxes(boxes: np.ndarray) -> np.ndarray:
        """Drop boxes whose width or height is not strictly positive."""
scale = (boxes[:, [2, 3]] - boxes[:, [0, 1]]).min(axis=1)
return boxes[scale > 0]
@staticmethod
def _load_image(img_file_path: str):
"""loads rgb image using given file path
Args:
            img_file_path (str): image file path to load
Returns:
np.ndarray: rgb image as np.ndarray
"""
img = imageio.imread(img_file_path)
if not img.flags["C_CONTIGUOUS"]:
# if img is not contiguous than fix it
img = np.ascontiguousarray(img, dtype=img.dtype)
        if len(img.shape) == 3 and img.shape[2] == 4:
# found RGBA, converting to => RGB
img = img[:, :, :3]
elif len(img.shape) == 2:
# found GRAYSCALE, converting to => RGB
img = np.stack([img, img, img], axis=-1)
return np.array(img, dtype=np.uint8)
def get_dataloader(
self,
batch_size: int = 1,
shuffle: bool = False,
num_workers: int = 0,
collate_fn=default_collate_fn,
pin_memory: bool = False,
**kwargs
):
return DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
**kwargs
)
def get_mean_std(self) -> Dict:
# TODO pydoc
mean_sum, mean_sq_sum = np.zeros(3), np.zeros(3)
for img, _ in tqdm(
self, total=len(self), desc="calculating mean and std for the dataset"
):
d = img.astype(np.float32) / 255
mean_sum[0] += np.mean(d[:, :, 0])
mean_sum[1] += np.mean(d[:, :, 1])
mean_sum[2] += np.mean(d[:, :, 2])
mean_sq_sum[0] += np.mean(d[:, :, 0] ** 2)
mean_sq_sum[1] += np.mean(d[:, :, 1] ** 2)
mean_sq_sum[2] += np.mean(d[:, :, 2] ** 2)
mean = mean_sum / len(self)
std = (mean_sq_sum / len(self) - mean ** 2) ** 0.5
return {"mean": mean.tolist(), "std": std.tolist()}
def get_normalized_boxes(self) -> np.ndarray:
# TODO pydoc
normalized_boxes = []
for img, targets in tqdm(
self, total=len(self), desc="computing normalized target boxes"
):
if targets["target_boxes"].shape[0] == 0:
continue
max_size = max(img.shape)
normalized_boxes.append(targets["target_boxes"] / max_size)
return np.concatenate(normalized_boxes, axis=0)
def get_box_scale_histogram(self) -> Tuple[np.ndarray, np.ndarray]:
bins = map(lambda x: 2 ** x, range(10))
total_boxes = []
for _, targets in tqdm(self, total=len(self), desc="getting box sizes"):
if targets["target_boxes"].shape[0] == 0:
continue
total_boxes.append(targets["target_boxes"])
total_boxes = np.concatenate(total_boxes, axis=0)
areas = (total_boxes[:, 2] - total_boxes[:, 0]) * (
total_boxes[:, 3] - total_boxes[:, 1]
)
return np.histogram(np.sqrt(areas), bins=list(bins))
    def download(self, urls: Dict, target_dir: str):
for k, v in urls.items():
keys = list(v["check"].items())
checked_keys = []
for key, md5hash in keys:
target_sub_dir = os.path.join(target_dir, key)
if not os.path.exists(target_sub_dir):
checked_keys.append(False)
else:
checked_keys.append(
checksumdir.dirhash(target_sub_dir, hashfunc="md5") == md5hash
)
if sum(checked_keys) == len(keys):
logger.debug("found {} at {}".format(k, target_dir))
continue
# download
adapter = v.get("adapter")
kwargs = v.get("kwargs", {})
logger.warning(
"{} not found in the {}, downloading...".format(k, target_dir)
)
download_object(adapter, dest_path=target_dir, **kwargs)
|
[
"logging.getLogger",
"numpy.mean",
"os.path.exists",
"copy.deepcopy",
"numpy.sqrt",
"os.path.join",
"torch.from_numpy",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.stack",
"numpy.zeros",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"imageio.imread",
"checksumdir.dirhash"
] |
[((261, 298), 'logging.getLogger', 'logging.getLogger', (['"""fastface.dataset"""'], {}), "('fastface.dataset')\n", (278, 298), False, 'import logging\n'), ((1663, 1695), 'copy.deepcopy', 'copy.deepcopy', (['self.targets[idx]'], {}), '(self.targets[idx])\n', (1676, 1695), False, 'import copy\n'), ((2914, 2943), 'imageio.imread', 'imageio.imread', (['img_file_path'], {}), '(img_file_path)\n', (2928, 2943), False, 'import imageio\n'), ((3365, 3394), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (3373, 3394), True, 'import numpy as np\n'), ((3637, 3779), 'torch.utils.data.DataLoader', 'DataLoader', (['self'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory'}), '(self, batch_size=batch_size, shuffle=shuffle, num_workers=\n num_workers, collate_fn=collate_fn, pin_memory=pin_memory, **kwargs)\n', (3647, 3779), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5044, 5084), 'numpy.concatenate', 'np.concatenate', (['normalized_boxes'], {'axis': '(0)'}), '(normalized_boxes, axis=0)\n', (5058, 5084), True, 'import numpy as np\n'), ((5470, 5505), 'numpy.concatenate', 'np.concatenate', (['total_boxes'], {'axis': '(0)'}), '(total_boxes, axis=0)\n', (5484, 5505), True, 'import numpy as np\n'), ((527, 550), 'numpy.stack', 'np.stack', (['batch'], {'axis': '(0)'}), '(batch, axis=0)\n', (535, 550), True, 'import numpy as np\n'), ((3055, 3097), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'img.dtype'}), '(img, dtype=img.dtype)\n', (3075, 3097), True, 'import numpy as np\n'), ((3959, 3970), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3967, 3970), True, 'import numpy as np\n'), ((3972, 3983), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3980, 3983), True, 'import numpy as np\n'), ((4179, 4198), 'numpy.mean', 'np.mean', (['d[:, :, 0]'], {}), '(d[:, :, 0])\n', (4186, 4198), True, 'import numpy as np\n'), ((4226, 4245), 'numpy.mean', 'np.mean', (['d[:, :, 1]'], {}), '(d[:, :, 1])\n', (4233, 4245), True, 'import numpy as np\n'), ((4273, 4292), 'numpy.mean', 'np.mean', (['d[:, :, 2]'], {}), '(d[:, :, 2])\n', (4280, 4292), True, 'import numpy as np\n'), ((4324, 4348), 'numpy.mean', 'np.mean', (['(d[:, :, 0] ** 2)'], {}), '(d[:, :, 0] ** 2)\n', (4331, 4348), True, 'import numpy as np\n'), ((4379, 4403), 'numpy.mean', 'np.mean', (['(d[:, :, 1] ** 2)'], {}), '(d[:, :, 1] ** 2)\n', (4386, 4403), True, 'import numpy as np\n'), ((4434, 4458), 'numpy.mean', 'np.mean', (['(d[:, :, 2] ** 2)'], {}), '(d[:, :, 2] ** 2)\n', (4441, 4458), True, 'import numpy as np\n'), ((5655, 5669), 'numpy.sqrt', 'np.sqrt', (['areas'], {}), '(areas)\n', (5662, 5669), True, 'import numpy as np\n'), ((790, 809), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (806, 809), False, 'import torch\n'), ((3314, 3348), 'numpy.stack', 'np.stack', (['[img, img, img]'], {'axis': '(-1)'}), '([img, img, img], axis=-1)\n', (3322, 3348), True, 'import numpy as np\n'), ((5923, 5952), 'os.path.join', 'os.path.join', (['target_dir', 'key'], {}), '(target_dir, key)\n', (5935, 5952), False, 'import os\n'), ((582, 605), 'torch.from_numpy', 'torch.from_numpy', (['batch'], {}), '(batch)\n', (598, 605), False, 'import torch\n'), ((5976, 6006), 'os.path.exists', 'os.path.exists', (['target_sub_dir'], {}), '(target_sub_dir)\n', (5990, 6006), False, 'import os\n'), ((6142, 6193), 'checksumdir.dirhash', 'checksumdir.dirhash', (['target_sub_dir'], {'hashfunc': '"""md5"""'}), 
"(target_sub_dir, hashfunc='md5')\n", (6161, 6193), False, 'import checksumdir\n')]
|
import pygame
import math
coef_turn = 0.3
coef_drift = 0.07 # ground grip (road adhesion)
coef_vel = 10
class Car:
def __init__(self):
self.dir_target = -1
self.dir = -1
self.posx = 0
self.velx = -1
self.w = 50
self.h = 100
def update(self, dt):
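        # The heading `dir` eases toward `dir_target` at rate coef_turn; the
        # velocity direction `velx` lags behind the heading at rate coef_drift
        # (grip), which produces the drift; the x position integrates `velx`.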
self.dir += dt * coef_turn * (self.dir_target - self.dir)
if abs(self.dir - self.velx) < 0.1:
self.velx = self.dir
self.velx += dt * coef_drift * (self.dir - self.velx)
self.posx += dt * coef_vel * self.velx
def display(self, screen):
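        # Draw the car as a rectangle rotated by the heading angle `theta`,
        # computing its four corners around a fixed anchor point on screen.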
theta = math.atan2(self.dir, 1)
points = []
for i in [1, -1]:
for j in [i, -i]:
x = 1920/2 + i * self.w / 2 * math.cos(theta) - j * self.h / 2 * math.sin(theta)
y = 700 + i * self.w / 2 * math.sin(theta) + j * self.h / 2 * math.cos(theta)
points.append((x, y))
pygame.draw.polygon(screen, (255, 0, 0), points)
|
[
"math.cos",
"math.sin",
"pygame.draw.polygon",
"math.atan2"
] |
[((595, 618), 'math.atan2', 'math.atan2', (['self.dir', '(1)'], {}), '(self.dir, 1)\n', (605, 618), False, 'import math\n'), ((941, 989), 'pygame.draw.polygon', 'pygame.draw.polygon', (['screen', '(255, 0, 0)', 'points'], {}), '(screen, (255, 0, 0), points)\n', (960, 989), False, 'import pygame\n'), ((785, 800), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (793, 800), False, 'import math\n'), ((879, 894), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (887, 894), False, 'import math\n'), ((750, 765), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (758, 765), False, 'import math\n'), ((844, 859), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (852, 859), False, 'import math\n')]
|
import os
import sys
import string
def is_active():
return True
def get_name():
return "WinRT"
def can_build():
if (os.name=="nt"):
#building natively on windows!
if (os.getenv("VSINSTALLDIR")):
return True
return False
def get_opts():
return []
def get_flags():
return []
def configure(env):
env.Append(CPPPATH=['#platform/winrt', '#platform/winrt/include'])
arch = ""
if os.getenv('PLATFORM') == "ARM":
# compiler commandline
# debug: /Yu"pch.h" /MP /GS /analyze- /W3 /wd"4453" /wd"28204" /Zc:wchar_t /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.WindowsPhone\" /I"Generated Files\" /I"ARM\Debug\" /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.Shared\" /ZW:nostdlib /Zi /Gm- /Od /sdl /Fd"ARM\Debug\vc120.pdb" /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /D "_DEBUG" /errorReport:prompt /WX- /Zc:forScope /RTC1 /ZW /Gd /Oy- /MDd /Fa"ARM\Debug\" /EHsc /nologo /Fo"ARM\Debug\" /Fp"ARM\Debug\App2.WindowsPhone.pch"
# release: /Yu"pch.h" /MP /GS /GL /analyze- /W3 /wd"4453" /wd"28204" /Gy /Zc:wchar_t /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.WindowsPhone\" /I"Generated Files\" /I"ARM\Release\" /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.Shared\" /ZW:nostdlib /Zi /Gm- /O2 /sdl /Fd"ARM\Release\vc120.pdb" /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /ZW /Gd /Oy- /Oi /MD /Fa"ARM\Release\" /EHsc /nologo /Fo"ARM\Release\" /Fp"ARM\Release\App2.WindowsPhone.pch"
# linker commandline
# debug: /OUT:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.exe" /MANIFEST:NO /NXCOMPAT /PDB:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.pdb" /DYNAMICBASE "WindowsPhoneCore.lib" "RuntimeObject.lib" "PhoneAppModelHost.lib" /DEBUG /MACHINE:ARM /NODEFAULTLIB:"kernel32.lib" /NODEFAULTLIB:"ole32.lib" /WINMD /APPCONTAINER /INCREMENTAL /PGD:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.pgd" /WINMDFILE:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.winmd" /SUBSYSTEM:WINDOWS /MANIFESTUAC:NO /ManifestFile:"ARM\Debug\App2.WindowsPhone.exe.intermediate.manifest" /ERRORREPORT:PROMPT /NOLOGO /TLBID:1
# release: /OUT:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.exe" /MANIFEST:NO /LTCG /NXCOMPAT /PDB:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.pdb" /DYNAMICBASE "WindowsPhoneCore.lib" "RuntimeObject.lib" "PhoneAppModelHost.lib" /DEBUG /MACHINE:ARM /NODEFAULTLIB:"kernel32.lib" /NODEFAULTLIB:"ole32.lib" /WINMD /APPCONTAINER /OPT:REF /PGD:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.pgd" /WINMDFILE:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.winmd" /SUBSYSTEM:WINDOWS /MANIFESTUAC:NO /ManifestFile:"ARM\Release\App2.WindowsPhone.exe.intermediate.manifest" /OPT:ICF /ERRORREPORT:PROMPT /NOLOGO /TLBID:1
arch = "arm"
env.Append(LINKFLAGS=['/INCREMENTAL:NO', '/MANIFEST:NO', '/NXCOMPAT', '/DYNAMICBASE', "WindowsPhoneCore.lib", "RuntimeObject.lib", "PhoneAppModelHost.lib", "/DEBUG", "/MACHINE:ARM", '/NODEFAULTLIB:"kernel32.lib"', '/NODEFAULTLIB:"ole32.lib"', '/WINMD', '/APPCONTAINER', '/MANIFESTUAC:NO', '/ERRORREPORT:PROMPT', '/NOLOGO', '/TLBID:1'])
env.Append(LIBPATH=['#platform/winrt/ARM/lib'])
env.Append(CCFLAGS=string.split('/MP /GS /wd"4453" /wd"28204" /analyze- /Zc:wchar_t /Zi /Gm- /Od /fp:precise /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /DWINDOWSPHONE_ENABLED /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /Gd /Oy- /Oi /MD /RTC1 /Gd /EHsc /nologo'))
env.Append(CXXFLAGS=string.split('/ZW'))
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
elif (env["target"]=="test"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG', '/D_DEBUG'])
elif (env["target"]=="profile"):
env.Append(CCFLAGS=['-g','-pg'])
env.Append(LINKFLAGS=['-pg'])
env['ENV'] = os.environ;
# fix environment for windows phone 8.1
env['ENV']['WINDOWSPHONEKITDIR'] = env['ENV']['WINDOWSPHONEKITDIR'].replace("8.0", "8.1") # wtf
env['ENV']['INCLUDE'] = env['ENV']['INCLUDE'].replace("8.0", "8.1")
env['ENV']['LIB'] = env['ENV']['LIB'].replace("8.0", "8.1")
env['ENV']['PATH'] = env['ENV']['PATH'].replace("8.0", "8.1")
env['ENV']['LIBPATH'] = env['ENV']['LIBPATH'].replace("8.0\\Windows Metadata", "8.1\\References\\CommonConfiguration\\Neutral")
else:
arch = "x64"
env.Append(LINKFLAGS=['/MANIFEST:NO', '/NXCOMPAT', '/DYNAMICBASE', "kernel32.lib", '/MACHINE:X64', '/WINMD', '/APPCONTAINER', '/MANIFESTUAC:NO', '/ERRORREPORT:PROMPT', '/NOLOGO', '/TLBID:1'])
env.Append(LIBPATH=['#platform/winrt/x64/lib'])
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
elif (env["target"]=="test"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG', '/D_DEBUG'])
elif (env["target"]=="profile"):
env.Append(CCFLAGS=['-g','-pg'])
env.Append(LINKFLAGS=['-pg'])
env.Append(CCFLAGS=string.split('/MP /GS /wd"4453" /wd"28204" /Zc:wchar_t /Gm- /Od /fp:precise /D "_UNICODE" /D "UNICODE" /D "WINAPI_FAMILY=WINAPI_FAMILY_APP" /errorReport:prompt /WX- /Zc:forScope /RTC1 /Gd /MDd /EHsc /nologo'))
env.Append(CXXFLAGS=string.split('/ZW'))
env.Append(CCFLAGS=['/AI', os.environ['VCINSTALLDIR']+'\\vcpackages', '/AI', os.environ['WINDOWSSDKDIR']+'\\References\\CommonConfiguration\\Neutral'])
env.Append(CCFLAGS=['/DWINAPI_FAMILY=WINAPI_FAMILY_APP', '/D_WIN32_WINNT=0x0603', '/DNTDDI_VERSION=0x06030000'])
env['ENV'] = os.environ;
env["PROGSUFFIX"]="."+arch+env["PROGSUFFIX"]
env["OBJSUFFIX"]="."+arch+env["OBJSUFFIX"]
env["LIBSUFFIX"]="."+arch+env["LIBSUFFIX"]
#env.Append(CCFLAGS=['/Gd','/GR','/nologo', '/EHsc'])
#env.Append(CXXFLAGS=['/TP', '/ZW'])
#env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
##env.Append(CCFLAGS=['/I'+os.getenv("WindowsSdkDir")+"/Include"])
env.Append(CCFLAGS=['/DWINRT_ENABLED'])
env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
#env.Append(CCFLAGS=['/DWIN32'])
env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
env.Append(CCFLAGS=['/DGLES2_ENABLED'])
#env.Append(CCFLAGS=['/DGLES1_ENABLED'])
LIBS=[
#'winmm',
'libEGL',
'libGLESv2',
'libANGLE',
#'kernel32','ole32','user32', 'advapi32'
]
env.Append(LINKFLAGS=[p+".lib" for p in LIBS])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
#/c/Program Files (x86)/Windows Phone Kits/8.1/lib/ARM/WindowsPhoneCore.lib
|
[
"string.split",
"os.getenv"
] |
[((182, 207), 'os.getenv', 'os.getenv', (['"""VSINSTALLDIR"""'], {}), "('VSINSTALLDIR')\n", (191, 207), False, 'import os\n'), ((404, 425), 'os.getenv', 'os.getenv', (['"""PLATFORM"""'], {}), "('PLATFORM')\n", (413, 425), False, 'import os\n'), ((3899, 4234), 'string.split', 'string.split', (['"""/MP /GS /wd"4453" /wd"28204" /analyze- /Zc:wchar_t /Zi /Gm- /Od /fp:precise /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /DWINDOWSPHONE_ENABLED /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /Gd /Oy- /Oi /MD /RTC1 /Gd /EHsc /nologo"""'], {}), '(\n \'/MP /GS /wd"4453" /wd"28204" /analyze- /Zc:wchar_t /Zi /Gm- /Od /fp:precise /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /DWINDOWSPHONE_ENABLED /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /Gd /Oy- /Oi /MD /RTC1 /Gd /EHsc /nologo\'\n )\n', (3911, 4234), False, 'import string\n'), ((4248, 4267), 'string.split', 'string.split', (['"""/ZW"""'], {}), "('/ZW')\n", (4260, 4267), False, 'import string\n'), ((6246, 6464), 'string.split', 'string.split', (['"""/MP /GS /wd"4453" /wd"28204" /Zc:wchar_t /Gm- /Od /fp:precise /D "_UNICODE" /D "UNICODE" /D "WINAPI_FAMILY=WINAPI_FAMILY_APP" /errorReport:prompt /WX- /Zc:forScope /RTC1 /Gd /MDd /EHsc /nologo"""'], {}), '(\n \'/MP /GS /wd"4453" /wd"28204" /Zc:wchar_t /Gm- /Od /fp:precise /D "_UNICODE" /D "UNICODE" /D "WINAPI_FAMILY=WINAPI_FAMILY_APP" /errorReport:prompt /WX- /Zc:forScope /RTC1 /Gd /MDd /EHsc /nologo\'\n )\n', (6258, 6464), False, 'import string\n'), ((6478, 6497), 'string.split', 'string.split', (['"""/ZW"""'], {}), "('/ZW')\n", (6490, 6497), False, 'import string\n')]
|
# Copyright 2015-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from webob import response
from monasca_log_api.middleware import role_middleware as rm
from monasca_log_api.tests import base
class SideLogicTestEnsureLowerRoles(base.BaseTestCase):
def test_should_ensure_lower_roles(self):
roles = ['CMM-Admin', ' CmM-User ']
expected = ['cmm-admin', 'cmm-user']
self.assertItemsEqual(expected, rm._ensure_lower_roles(roles))
def test_should_return_empty_array_for_falsy_input_1(self):
roles = []
expected = []
self.assertItemsEqual(expected, rm._ensure_lower_roles(roles))
def test_should_return_empty_array_for_falsy_input_2(self):
roles = None
expected = []
self.assertItemsEqual(expected, rm._ensure_lower_roles(roles))
class SideLogicTestIntersect(base.BaseTestCase):
def test_should_intersect_seqs(self):
seq_1 = [1, 2, 3]
seq_2 = [2]
expected = [2]
self.assertItemsEqual(expected, rm._intersect(seq_1, seq_2))
self.assertItemsEqual(expected, rm._intersect(seq_2, seq_1))
def test_should_intersect_empty(self):
seq_1 = []
seq_2 = []
expected = []
self.assertItemsEqual(expected, rm._intersect(seq_1, seq_2))
self.assertItemsEqual(expected, rm._intersect(seq_2, seq_1))
def test_should_not_intersect_without_common_elements(self):
seq_1 = [1, 2, 3]
seq_2 = [4, 5, 6]
expected = []
self.assertItemsEqual(expected, rm._intersect(seq_1, seq_2))
self.assertItemsEqual(expected, rm._intersect(seq_2, seq_1))
class RolesMiddlewareSideLogicTest(base.BaseTestCase):
def test_should_apply_middleware_for_valid_path(self):
paths = ['/', '/v2.0/', '/v2.0/log/']
instance = rm.RoleMiddleware(None)
instance._path = paths
for p in paths:
req = mock.Mock()
req.method = 'GET'
req.path = p
self.assertTrue(instance._can_apply_middleware(req))
    def test_should_not_apply_middleware_for_invalid_path(self):
paths = ['/v2.0/', '/v2.0/log/']
instance = rm.RoleMiddleware(None)
instance._path = paths
for p in paths:
pp = 'test/%s' % p
req = mock.Mock()
req.method = 'GET'
req.path = pp
self.assertFalse(instance._can_apply_middleware(req))
def test_should_reject_OPTIONS_request(self):
instance = rm.RoleMiddleware(None)
req = mock.Mock()
req.method = 'OPTIONS'
req.path = '/'
self.assertFalse(instance._can_apply_middleware(req))
def test_should_return_true_if_authenticated(self):
instance = rm.RoleMiddleware(None)
req = mock.Mock()
req.headers = {rm._X_IDENTITY_STATUS: rm._CONFIRMED_STATUS}
self.assertTrue(instance._is_authenticated(req))
def test_should_return_false_if_not_authenticated(self):
instance = rm.RoleMiddleware(None)
req = mock.Mock()
req.headers = {rm._X_IDENTITY_STATUS: 'Some_Other_Status'}
self.assertFalse(instance._is_authenticated(req))
def test_should_return_false_if_identity_status_not_found(self):
instance = rm.RoleMiddleware(None)
req = mock.Mock()
req.headers = {}
self.assertFalse(instance._is_authenticated(req))
def test_should_return_true_if_is_agent(self):
roles = 'cmm-admin,cmm-user'
roles_array = roles.split(',')
default_roles = [roles_array[0]]
admin_roles = [roles_array[1]]
instance = rm.RoleMiddleware(None)
instance._default_roles = default_roles
instance._agent_roles = admin_roles
req = mock.Mock()
req.headers = {rm._X_ROLES: roles}
is_agent = instance._is_agent(req)
self.assertTrue(is_agent)
class RolesMiddlewareLogicTest(base.BaseTestCase):
def test_not_process_further_if_cannot_apply_path(self):
roles = 'cmm-admin,cmm-user'
roles_array = roles.split(',')
default_roles = [roles_array[0]]
admin_roles = [roles_array[1]]
instance = rm.RoleMiddleware(None)
instance._default_roles = default_roles
instance._agent_roles = admin_roles
instance._path = ['/test']
# spying
instance._is_authenticated = mock.Mock()
instance._is_agent = mock.Mock()
req = mock.Mock()
req.headers = {rm._X_ROLES: roles}
req.path = '/different/test'
instance.process_request(req=req)
self.assertFalse(instance._is_authenticated.called)
self.assertFalse(instance._is_agent.called)
def test_not_process_further_if_cannot_apply_method(self):
roles = 'cmm-admin,cmm-user'
roles_array = roles.split(',')
default_roles = [roles_array[0]]
admin_roles = [roles_array[1]]
instance = rm.RoleMiddleware(None)
instance._default_roles = default_roles
instance._agent_roles = admin_roles
instance._path = ['/test']
# spying
instance._is_authenticated = mock.Mock()
instance._is_agent = mock.Mock()
req = mock.Mock()
req.headers = {rm._X_ROLES: roles}
req.path = '/test'
req.method = 'OPTIONS'
instance.process_request(req=req)
self.assertFalse(instance._is_authenticated.called)
self.assertFalse(instance._is_agent.called)
def test_should_produce_json_response_if_not_authenticated(
self):
instance = rm.RoleMiddleware(None)
is_agent = True
is_authenticated = False
instance._can_apply_middleware = mock.Mock(return_value=True)
instance._is_agent = mock.Mock(return_value=is_agent)
instance._is_authenticated = mock.Mock(return_value=is_authenticated)
req = mock.Mock()
req.environ = {}
req.headers = {
'X-Tenant-Id': '11111111'
}
result = instance.process_request(req=req)
self.assertIsNotNone(result)
self.assertIsInstance(result, response.Response)
status = result.status_code
json_body = result.json_body
message = json_body.get('message')
self.assertIn('Failed to authenticate request for', message)
self.assertEqual(401, status)
|
[
"mock.Mock",
"monasca_log_api.middleware.role_middleware._intersect",
"monasca_log_api.middleware.role_middleware.RoleMiddleware",
"monasca_log_api.middleware.role_middleware._ensure_lower_roles"
] |
[((2362, 2385), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (2379, 2385), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((2716, 2739), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (2733, 2739), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((3050, 3073), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (3067, 3073), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((3088, 3099), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3097, 3099), False, 'import mock\n'), ((3292, 3315), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (3309, 3315), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((3331, 3342), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3340, 3342), False, 'import mock\n'), ((3550, 3573), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (3567, 3573), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((3589, 3600), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3598, 3600), False, 'import mock\n'), ((3816, 3839), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (3833, 3839), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((3855, 3866), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3864, 3866), False, 'import mock\n'), ((4180, 4203), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (4197, 4203), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((4311, 4322), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4320, 4322), False, 'import mock\n'), ((4737, 4760), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (4754, 4760), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((4943, 4954), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4952, 4954), False, 'import mock\n'), ((4984, 4995), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4993, 4995), False, 'import mock\n'), ((5011, 5022), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5020, 5022), False, 'import mock\n'), ((5500, 5523), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (5517, 5523), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((5706, 5717), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5715, 5717), False, 'import mock\n'), ((5747, 5758), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5756, 5758), False, 'import mock\n'), ((5774, 5785), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5783, 5785), False, 'import mock\n'), ((6146, 6169), 'monasca_log_api.middleware.role_middleware.RoleMiddleware', 'rm.RoleMiddleware', (['None'], {}), '(None)\n', (6163, 6169), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((6269, 6297), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6278, 6297), False, 'import mock\n'), ((6327, 6359), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'is_agent'}), '(return_value=is_agent)\n', (6336, 6359), False, 'import mock\n'), ((6397, 6437), 'mock.Mock', 'mock.Mock', ([], {'return_value': 
'is_authenticated'}), '(return_value=is_authenticated)\n', (6406, 6437), False, 'import mock\n'), ((6453, 6464), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (6462, 6464), False, 'import mock\n'), ((965, 994), 'monasca_log_api.middleware.role_middleware._ensure_lower_roles', 'rm._ensure_lower_roles', (['roles'], {}), '(roles)\n', (987, 994), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1142, 1171), 'monasca_log_api.middleware.role_middleware._ensure_lower_roles', 'rm._ensure_lower_roles', (['roles'], {}), '(roles)\n', (1164, 1171), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1321, 1350), 'monasca_log_api.middleware.role_middleware._ensure_lower_roles', 'rm._ensure_lower_roles', (['roles'], {}), '(roles)\n', (1343, 1350), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1557, 1584), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_1', 'seq_2'], {}), '(seq_1, seq_2)\n', (1570, 1584), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1626, 1653), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_2', 'seq_1'], {}), '(seq_2, seq_1)\n', (1639, 1653), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1801, 1828), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_1', 'seq_2'], {}), '(seq_1, seq_2)\n', (1814, 1828), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((1870, 1897), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_2', 'seq_1'], {}), '(seq_2, seq_1)\n', (1883, 1897), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((2081, 2108), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_1', 'seq_2'], {}), '(seq_1, seq_2)\n', (2094, 2108), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((2150, 2177), 'monasca_log_api.middleware.role_middleware._intersect', 'rm._intersect', (['seq_2', 'seq_1'], {}), '(seq_2, seq_1)\n', (2163, 2177), True, 'from monasca_log_api.middleware import role_middleware as rm\n'), ((2460, 2471), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2469, 2471), False, 'import mock\n'), ((2845, 2856), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2854, 2856), False, 'import mock\n')]
|
import logging
class NewLogger:
def __init__(self, log_abs_path:str):
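        # Attach a file handler (ERROR level) to the root logger.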
self.logger = logging.getLogger()
handler = logging.FileHandler(log_abs_path)
handler.setLevel(logging.ERROR)
self.logger.addHandler(handler)
def log(self, msg:str):
self.logger.log(logging.ERROR, msg)
|
[
"logging.getLogger",
"logging.FileHandler"
] |
[((99, 118), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (116, 118), False, 'import logging\n'), ((137, 170), 'logging.FileHandler', 'logging.FileHandler', (['log_abs_path'], {}), '(log_abs_path)\n', (156, 170), False, 'import logging\n')]
|
from bast import Route
route = Route()
route.get('/', 'HelloController.index')
|
[
"bast.Route"
] |
[((32, 39), 'bast.Route', 'Route', ([], {}), '()\n', (37, 39), False, 'from bast import Route\n')]
|
import inspect
from fnmatch import fnmatchcase
from ..sched import meta
from .base import BaseFactory
class ModuleFactory(BaseFactory):
"""
Takes an imported module object and extracts callable objects from it.
A valid callable is any object that can be called and has pexen.sched
metadata.
Arguments:
match - also include metadata-less callables that fnmatch this string
"""
def __init__(self, match=None):
super().__init__()
self.match = match
def is_valid_callable(self, objname, obj):
if not callable(obj):
return False
if meta.has_meta(obj):
return True
if self.match and fnmatchcase(objname, self.match):
return True
return False
def extract_from_mod(self, mod):
"""Extract callables from an imported module."""
for name, obj in inspect.getmembers(mod):
if not self.is_valid_callable(name, obj):
continue
self.callpath_burn(obj, name)
yield obj
def __call__(self, mod):
yield from self.extract_from_mod(mod)
|
[
"fnmatch.fnmatchcase",
"inspect.getmembers"
] |
[((885, 908), 'inspect.getmembers', 'inspect.getmembers', (['mod'], {}), '(mod)\n', (903, 908), False, 'import inspect\n'), ((686, 718), 'fnmatch.fnmatchcase', 'fnmatchcase', (['objname', 'self.match'], {}), '(objname, self.match)\n', (697, 718), False, 'from fnmatch import fnmatchcase\n')]
|
import vstruct
from vstruct.primitives import *
EI_NIDENT = 4
EI_PADLEN = 7
class Elf32(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.e_ident = v_bytes(EI_NIDENT)
self.e_class = v_uint8()
self.e_data = v_uint8()
self.e_fileversion = v_uint8()
self.e_osabi = v_uint8()
self.e_abiversio = v_uint8()
self.e_pad = v_bytes(EI_PADLEN)
self.e_type = v_uint16(bigend=bigend)
self.e_machine = v_uint16(bigend=bigend)
self.e_version = v_uint32(bigend=bigend)
self.e_entry = v_uint32(bigend=bigend)
self.e_phoff = v_uint32(bigend=bigend)
self.e_shoff = v_uint32(bigend=bigend)
self.e_flags = v_uint32(bigend=bigend)
self.e_ehsize = v_uint16(bigend=bigend)
self.e_phentsize = v_uint16(bigend=bigend)
self.e_phnum = v_uint16(bigend=bigend)
self.e_shentsize = v_uint16(bigend=bigend)
self.e_shnum = v_uint16(bigend=bigend)
self.e_shstrndx = v_uint16(bigend=bigend)
class Elf32Section(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.sh_name = v_uint32(bigend=bigend)
self.sh_type = v_uint32(bigend=bigend)
self.sh_flags = v_uint32(bigend=bigend)
self.sh_addr = v_uint32(bigend=bigend)
self.sh_offset = v_uint32(bigend=bigend)
self.sh_size = v_uint32(bigend=bigend)
self.sh_link = v_uint32(bigend=bigend)
self.sh_info = v_uint32(bigend=bigend)
self.sh_addralign = v_uint32(bigend=bigend)
self.sh_entsize = v_uint32(bigend=bigend)
class Elf32Pheader(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.p_type = v_uint32(bigend=bigend)
self.p_offset = v_uint32(bigend=bigend)
self.p_vaddr = v_uint32(bigend=bigend)
self.p_paddr = v_uint32(bigend=bigend)
self.p_filesz = v_uint32(bigend=bigend)
self.p_memsz = v_uint32(bigend=bigend)
self.p_flags = v_uint32(bigend=bigend)
self.p_align = v_uint32(bigend=bigend)
class Elf32Reloc(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.r_offset = v_ptr32(bigend=bigend)
self.r_info = v_uint32(bigend=bigend)
def __eq__(self, other):
if self.name != other.name:
return False
if self.r_offset != other.r_offset:
return False
if self.r_info != other.r_info:
return False
return True
class Elf32Reloca(Elf32Reloc):
def __init__(self, bigend=False):
Elf32Reloc.__init__(self)
self.r_addend = v_uint32(bigend=bigend)
def __eq__(self, other):
if self.name != other.name:
return False
if self.r_offset != other.r_offset:
return False
if self.r_info != other.r_info:
return False
if self.r_addend != other.r_addend:
return False
return True
class Elf32Symbol(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.st_name = v_uint32(bigend=bigend)
self.st_value = v_uint32(bigend=bigend)
self.st_size = v_uint32(bigend=bigend)
self.st_info = v_uint8()
self.st_other = v_uint8()
self.st_shndx = v_uint16(bigend=bigend)
def __eq__(self, other):
if self.st_value != other.st_value:
return False
if self.st_name != other.st_name:
return False
if self.st_size != other.st_size:
return False
if self.st_info != other.st_info:
return False
if self.st_other != other.st_other:
return False
if self.st_shndx != other.st_shndx:
return False
return True
class Elf32Dynamic(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.d_tag = v_uint32(bigend=bigend)
self.d_value = v_uint32(bigend=bigend)
def __eq__(self, other):
if self.d_tag != other.d_tag:
return False
if self.d_value != other.d_value:
return False
return True
class Elf64(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.e_ident = v_bytes(EI_NIDENT)
self.e_class = v_uint8()
self.e_data = v_uint8()
self.e_fileversion = v_uint8()
self.e_osabi = v_uint8()
self.e_abiversio = v_uint8()
self.e_pad = v_bytes(EI_PADLEN)
self.e_type = v_uint16(bigend=bigend)
self.e_machine = v_uint16(bigend=bigend)
self.e_version = v_uint32(bigend=bigend)
self.e_entry = v_uint64(bigend=bigend)
self.e_phoff = v_uint64(bigend=bigend)
self.e_shoff = v_uint64(bigend=bigend)
self.e_flags = v_uint32(bigend=bigend)
self.e_ehsize = v_uint16(bigend=bigend)
self.e_phentsize = v_uint16(bigend=bigend)
self.e_phnum = v_uint16(bigend=bigend)
self.e_shentsize = v_uint16(bigend=bigend)
self.e_shnum = v_uint16(bigend=bigend)
self.e_shstrndx = v_uint16(bigend=bigend)
class Elf64Section(Elf32Section):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.sh_name = v_uint32(bigend=bigend)
self.sh_type = v_uint32(bigend=bigend)
self.sh_flags = v_uint64(bigend=bigend)
self.sh_addr = v_uint64(bigend=bigend)
self.sh_offset = v_uint64(bigend=bigend)
self.sh_size = v_uint64(bigend=bigend)
self.sh_link = v_uint32(bigend=bigend)
self.sh_info = v_uint32(bigend=bigend)
self.sh_addralign = v_uint64(bigend=bigend)
self.sh_entsize = v_uint64(bigend=bigend)
class Elf64Pheader(Elf32Pheader):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.p_type = v_uint32(bigend=bigend)
self.p_flags = v_uint32(bigend=bigend)
self.p_offset = v_uint64(bigend=bigend)
self.p_vaddr = v_uint64(bigend=bigend)
self.p_paddr = v_uint64(bigend=bigend)
self.p_filesz = v_uint64(bigend=bigend)
self.p_memsz = v_uint64(bigend=bigend)
self.p_align = v_uint64(bigend=bigend)
class Elf64Reloc(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.r_offset = v_ptr64(bigend=bigend)
self.r_info = v_uint64(bigend=bigend)
def __eq__(self, other):
if self.name != other.name:
return False
if self.r_offset != other.r_offset:
return False
if self.r_info != other.r_info:
return False
return True
class Elf64Reloca(Elf64Reloc):
def __init__(self, bigend=False):
#Elf64Reloc.__init__(self)
vstruct.VStruct.__init__(self)
self.r_offset = v_uint64(bigend=bigend)
self.r_info = v_uint64(bigend=bigend)
self.r_addend = v_uint64(bigend=bigend)
def __eq__(self, other):
if self.name != other.name:
return False
if self.r_offset != other.r_offset:
return False
if self.r_info != other.r_info:
return False
if self.r_addend != other.r_addend:
return False
return True
class Elf64Symbol(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.st_name = v_uint32(bigend=bigend)
self.st_info = v_uint8()
self.st_other = v_uint8()
self.st_shndx = v_uint16(bigend=bigend)
self.st_value = v_uint64(bigend=bigend)
self.st_size = v_uint64(bigend=bigend)
def __eq__(self, other):
if self.st_value != other.st_value:
return False
if self.st_name != other.st_name:
return False
if self.st_size != other.st_size:
return False
if self.st_info != other.st_info:
return False
if self.st_other != other.st_other:
return False
if self.st_shndx != other.st_shndx:
return False
return True
class Elf64Dynamic(Elf32Dynamic):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.d_tag = v_uint64(bigend=bigend)
self.d_value = v_uint64(bigend=bigend)
class ElfNote(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.namesz = v_uint32(bigend=bigend)
self.descsz = v_uint32(bigend=bigend)
self.ntype = v_uint32(bigend=bigend)
self.name = v_bytes()
self.desc = vstruct.VArray()
def pcb_namesz(self):
# padded to 4 byte alignment
namesz = ((self.namesz +3) /4) *4
self['name'].vsSetLength( namesz )
def pcb_descsz(self):
# padded to 4 byte alignment
descct = ((self.descsz +3) /4)
elems = [ v_uint32() for i in xrange(descct) ]
self.desc = vstruct.VArray(elems=elems)
|
[
"vstruct.VArray",
"vstruct.VStruct.__init__"
] |
[((154, 184), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (178, 184), False, 'import vstruct\n'), ((1249, 1279), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (1273, 1279), False, 'import vstruct\n'), ((1884, 1914), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (1908, 1914), False, 'import vstruct\n'), ((2381, 2411), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (2405, 2411), False, 'import vstruct\n'), ((3301, 3331), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (3325, 3331), False, 'import vstruct\n'), ((4134, 4164), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (4158, 4164), False, 'import vstruct\n'), ((4516, 4546), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (4540, 4546), False, 'import vstruct\n'), ((5608, 5638), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (5632, 5638), False, 'import vstruct\n'), ((6240, 6270), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (6264, 6270), False, 'import vstruct\n'), ((6738, 6768), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (6762, 6768), False, 'import vstruct\n'), ((7222, 7252), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (7246, 7252), False, 'import vstruct\n'), ((7794, 7824), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (7818, 7824), False, 'import vstruct\n'), ((8624, 8654), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (8648, 8654), False, 'import vstruct\n'), ((8829, 8859), 'vstruct.VStruct.__init__', 'vstruct.VStruct.__init__', (['self'], {}), '(self)\n', (8853, 8859), False, 'import vstruct\n'), ((9052, 9068), 'vstruct.VArray', 'vstruct.VArray', ([], {}), '()\n', (9066, 9068), False, 'import vstruct\n'), ((9396, 9423), 'vstruct.VArray', 'vstruct.VArray', ([], {'elems': 'elems'}), '(elems=elems)\n', (9410, 9423), False, 'import vstruct\n')]
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saharaclient.api import base
class JobBinaries(base.Resource):
resource_name = 'Job Binary'
class JobBinariesManagerV1(base.ResourceManager):
resource_class = JobBinaries
version = 1.1
def create(self, name, url, description=None, extra=None, is_public=None,
is_protected=None):
"""Create a Job Binary.
:param dict extra: authentication info needed for some job binaries,
containing the keys `user` and `password` for job binary in Swift
or the keys `accesskey`, `secretkey`, and `endpoint` for job
binary in S3
"""
data = {
"name": name,
"url": url
}
self._copy_if_defined(data, description=description, extra=extra,
is_public=is_public, is_protected=is_protected)
return self._create('/job-binaries', data, 'job_binary')
def list(self, search_opts=None, limit=None, marker=None,
sort_by=None, reverse=None):
"""Get a list of Job Binaries."""
query = base.get_query_string(search_opts, limit=limit, marker=marker,
sort_by=sort_by, reverse=reverse)
url = "/job-binaries%s" % query
return self._page(url, 'binaries', limit)
def get(self, job_binary_id):
"""Get information about a Job Binary."""
return self._get('/job-binaries/%s' % job_binary_id, 'job_binary')
def delete(self, job_binary_id):
"""Delete a Job Binary."""
self._delete('/job-binaries/%s' % job_binary_id)
def get_file(self, job_binary_id):
"""Download a Job Binary."""
resp = self.api.get('/job-binaries/%s/data' % job_binary_id)
if resp.status_code != 200:
self._raise_api_exception(resp)
return resp.content
def update(self, job_binary_id, data):
"""Update Job Binary.
:param dict data: dict that contains fields that should be updated
with new values.
Fields that can be updated:
* name
* description
* url
* is_public
* is_protected
* extra - dict with the keys `user` and `password` for job binary
in Swift, or with the keys `accesskey`, `secretkey`, and `endpoint`
for job binary in S3
"""
if self.version >= 2:
UPDATE_FUNC = self._patch
else:
UPDATE_FUNC = self._update
return UPDATE_FUNC(
'/job-binaries/%s' % job_binary_id, data, 'job_binary')
class JobBinariesManagerV2(JobBinariesManagerV1):
version = 2
# NOTE(jfreud): keep this around for backwards compatibility
JobBinariesManager = JobBinariesManagerV1
|
[
"saharaclient.api.base.get_query_string"
] |
[((1660, 1761), 'saharaclient.api.base.get_query_string', 'base.get_query_string', (['search_opts'], {'limit': 'limit', 'marker': 'marker', 'sort_by': 'sort_by', 'reverse': 'reverse'}), '(search_opts, limit=limit, marker=marker, sort_by=\n sort_by, reverse=reverse)\n', (1681, 1761), False, 'from saharaclient.api import base\n')]
|
# -*- coding: utf-8 -*-
import pytest
def test_translator():
def translator(string):
translations = {'String value is too long.': 'Tamanho de texto muito grande.'}
return translations.get(string, string)
from schematics.translator import register_translator
register_translator(translator)
from schematics.types import StringType
from schematics.exceptions import ValidationError
with pytest.raises(ValidationError) as exc:
StringType(max_length=1).validate_length('Abc')
assert exc.value == ['Tamanho de texto muito grande.']
|
[
"schematics.types.StringType",
"schematics.translator.register_translator",
"pytest.raises"
] |
[((290, 321), 'schematics.translator.register_translator', 'register_translator', (['translator'], {}), '(translator)\n', (309, 321), False, 'from schematics.translator import register_translator\n'), ((430, 460), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (443, 460), False, 'import pytest\n'), ((477, 501), 'schematics.types.StringType', 'StringType', ([], {'max_length': '(1)'}), '(max_length=1)\n', (487, 501), False, 'from schematics.types import StringType\n')]
|
"""
Module for managing enemies.
"""
import random
import constants as const
import pygame
import random
import platforms
from spritesheet_functions import SpriteSheet
class Enemy(pygame.sprite.Sprite):
# -- Methods
def __init__(self, x_cord, y_cord,level, x_speed=2, char_type=0):
""" Constructor function """
# Call the parent's constructor
super().__init__()
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
self.direction="R"
self.load_images()
self.image = self.standing_frames[0]
self.current_frame = 0
self.last_update = 0
self.rect = self.image.get_rect()
self.radius= 20
self.walking = False
self.jumping = False
self.type = char_type
# Set a referance to the image rect.
self.rect = self.image.get_rect()
self.radius = 35
#
self.rect.x = x_cord
self.rect.y = y_cord
self.change_x = x_speed
self.change_y = 0
self.sign_direc = 1
# List of sprites we can bump against
self.level = level
self.platforms = level.platform_list
def load_images(self):
sprite_sheet = SpriteSheet("spritesheet_players.png")
self.standing_frames = [sprite_sheet.get_image(156, 101, 45,54,const.WHITE)]
for frame in self.standing_frames:
frame.set_colorkey(const.BLACK)
self.walk_frames_r = [sprite_sheet.get_image(156, 156, 45,54,const.BLACK),
sprite_sheet.get_image(115, 48, 45, 52,const.BLACK),
sprite_sheet.get_image(156, 101, 45, 54,const.BLACK)]
self.walk_frames_l = []
for frame in self.walk_frames_r:
frame.set_colorkey(const.BLACK)
self.walk_frames_l.append(pygame.transform.flip(frame, True, False))
self.jump_frame = sprite_sheet.get_image(156, 101, 45, 54,const.BLACK)
self.jump_frame.set_colorkey(const.BLACK)
def animate(self):
now = pygame.time.get_ticks()
if self.change_x != 0:
self.walking = True
else:
self.walking = False
# show walk animation
if self.walking:
if now - self.last_update > 400:
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.walk_frames_l)
#bottom = self.rect.bottom
if self.sign_direc > 0:
self.image = self.walk_frames_r[self.current_frame]
else:
self.image = self.walk_frames_l[self.current_frame]
#self.rect.bottom = bottom
# show idle animation
if not self.jumping and not self.walking:
if now - self.last_update > 350:
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
#bottom = self.rect.bottom
self.image = self.standing_frames[self.current_frame]
#self.rect = self.image.get_rect()
#self.rect.bottom = bottom
self.mask = pygame.mask.from_surface(self.image)
# Set a referance to the image rect.
#self.rect = self.image.get_rect()
def update(self):
self.animate()
self.calc_grav()
# If the player gets near the right side, shift the world left (-x)
if self.rect.right > const.SCREEN_WIDTH:
self.rect.right = const.SCREEN_WIDTH
self.sign_direc = -self.sign_direc
# If the player gets near the left side, shift the world right (+x)
if self.rect.left < 0:
self.rect.left = 0
self.sign_direc = -self.sign_direc
# If the player gets near the right side, shift the world left (-x)
if self.rect.bottom > const.SCREEN_HEIGHT:
self.rect.bottom = const.SCREEN_HEIGHT
# If the player gets near the left side, shift the world right (+x)
if self.rect.top < 0:
self.rect.top = 0
self.rect.x += self.sign_direc * self.change_x
# Check where is the enemy
# Check if enemy is on the platform
platform_hit_list = pygame.sprite.spritecollide(self, self.platforms, False)
self.rect.x += self.sign_direc * 1
# Check if there if another platform next to
platform_hit_list_2 = pygame.sprite.spritecollide(self, self.platforms, False)
self.rect.y -= 2
self.rect.x -= self.sign_direc * 1
# if enemy is only on one platform we have to check if there is on an edge
if platform_hit_list == platform_hit_list_2 and len(platform_hit_list)==1:
for block in platform_hit_list:
if self.sign_direc > 0:
if self.rect.right >= block.rect.right :
self.sign_direc = -self.sign_direc
#self.image = self.smaller_left
elif self.sign_direc < 0:
if self.rect.left <= block.rect.left :
self.sign_direc = -self.sign_direc
#self.image=self.smaller_right
#self.rect.top = block.rect.bottom
else:
self.sign_direc=self.sign_direc
for block in platform_hit_list:
if self.change_y > 0 and self.rect.bottom >= block.rect.top and self.rect.top <= block.rect.top:
self.rect.bottom = block.rect.top
self.change_y = 0
if self.type and random.uniform(0,1)<0.1 and len(platform_hit_list)>0:
self.change_y = -3
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_stone_list, False)
for block in block_hit_list :
# Reset our position based on the top/bottom of the object.
if self.change_y < 0 and self.rect.top <= block.rect.bottom and self.rect.bottom >= block.rect.bottom:
self.rect.top = block.rect.bottom +2
self.change_y = -self.change_y
block_hit_list = pygame.sprite.spritecollide(self, self.level.block_list, False)
if len(block_hit_list)>=1:
if len(block_hit_list)>=1:
block= block_hit_list[0]
if self.sign_direc > 0:
self.rect.right = block.rect.left
self.sign_direc = -self.sign_direc
elif self.sign_direc < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
self.sign_direc = -self.sign_direc
self.rect.y += self.change_y
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .15
# See if we are on the ground.
if self.rect.y >= const.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = const.SCREEN_HEIGHT - self.rect.height
class Enemy_bubble(pygame.sprite.Sprite):
# -- Methods
def __init__(self, enemy):
""" Constructor function """
# Call the parent's constructor
super().__init__()
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
self.direction= enemy.direction
sprite_sheet_left = SpriteSheet("playerBlue_dead.png")
# Grab the image for this platform
self.image_left = sprite_sheet_left.get_image(0,
0,
45,
47, const.BLACK)
# Set a referance to the image rect.
self.size = self.image_left.get_size()
self.smaller_left = pygame.transform.scale(self.image_left, (int(self.size[0]*0.7), int(self.size[1]*0.7)))
self.type=enemy.type
self.x_speed=enemy.change_x
bub_2 = pygame.image.load("bubble.png").convert()
bub = pygame.Surface([225, 225]).convert()
#bub_2.set_alpha(90)
# Copy the sprite from the large sheet onto the smaller image
bub.blit(bub_2, (0, 0))
bub=pygame.transform.scale(bub, (int(bub.get_size()[0]*0.2), int(bub.get_size()[1]*0.2)))
# Assuming black works as the transparent color
bub.set_colorkey(const.BLACK)
bub.set_alpha(150)
pygame.Surface.blit(bub,self.smaller_left, (6, 4))
#self.smaller_right.blit(bub,(-100,-100))
bub.set_colorkey(const.BLACK)
#bub.set_alpha(90)
self.image = bub
#self.image.set_alpha(90)
# Set a referance to the image rect.
self.rect = self.image.get_rect()
self.radius = 35
#pygame.draw.circle(self.image, RED, self.rect.center , self.radius)
self.start_time = pygame.time.get_ticks()
self.time = self.start_time
self.rect.x = enemy.rect.x
self.rect.y = enemy.rect.y
self.platforms = enemy.platforms
self.change_y = -3
self.level = enemy.level
def update(self):
# If the player gets near the right side, shift the world left (-x)
if self.rect.right > const.SCREEN_WIDTH:
self.rect.right = const.SCREEN_WIDTH
self.speedy=-self.speedy
# If the player gets near the left side, shift the world right (+x)
if self.rect.left < 0:
self.rect.left = 0
# If the player gets near the right side, shift the world left (-x)
if self.rect.bottom > const.SCREEN_HEIGHT:
self.rect.bottom = const.SCREEN_HEIGHT
# If the player gets near the left side, shift the world right (+x)
if self.rect.top < 0:
self.rect.top = 0
# life time
self.time += 1
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_stone_list, False)
for block in block_hit_list :
if self.change_y > 0 and self.rect.bottom >= block.rect.top and self.rect.top <= block.rect.top:
self.rect.bottom = block.rect.top -2
self.change_y = 0
elif self.change_y < 0 and self.rect.top <= block.rect.bottom and self.rect.bottom >= block.rect.bottom:
self.rect.top = block.rect.bottom +2
self.change_y = 0
self.rect.y += self.change_y
if self.time - self.start_time > 500:
enemy=Enemy(self.rect.x, self.rect.y, self.level,self.x_speed,self.type)
self.level.enemy_list.add(enemy)
self.level.active_sprite.add(enemy)
self.level.active_sprite.remove(self)
self.kill()
|
[
"pygame.transform.flip",
"random.uniform",
"pygame.time.get_ticks",
"pygame.sprite.spritecollide",
"spritesheet_functions.SpriteSheet",
"pygame.mask.from_surface",
"pygame.Surface",
"pygame.image.load",
"pygame.Surface.blit"
] |
[((1378, 1416), 'spritesheet_functions.SpriteSheet', 'SpriteSheet', (['"""spritesheet_players.png"""'], {}), "('spritesheet_players.png')\n", (1389, 1416), False, 'from spritesheet_functions import SpriteSheet\n'), ((2212, 2235), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2233, 2235), False, 'import pygame\n'), ((3363, 3399), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.image'], {}), '(self.image)\n', (3387, 3399), False, 'import pygame\n'), ((4575, 4631), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'self.platforms', '(False)'], {}), '(self, self.platforms, False)\n', (4602, 4631), False, 'import pygame\n'), ((4762, 4818), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'self.platforms', '(False)'], {}), '(self, self.platforms, False)\n', (4789, 4818), False, 'import pygame\n'), ((6206, 6278), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'self.level.platform_stone_list', '(False)'], {}), '(self, self.level.platform_stone_list, False)\n', (6233, 6278), False, 'import pygame\n'), ((6689, 6752), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'self.level.block_list', '(False)'], {}), '(self, self.level.block_list, False)\n', (6716, 6752), False, 'import pygame\n'), ((8112, 8146), 'spritesheet_functions.SpriteSheet', 'SpriteSheet', (['"""playerBlue_dead.png"""'], {}), "('playerBlue_dead.png')\n", (8123, 8146), False, 'from spritesheet_functions import SpriteSheet\n'), ((9185, 9236), 'pygame.Surface.blit', 'pygame.Surface.blit', (['bub', 'self.smaller_left', '(6, 4)'], {}), '(bub, self.smaller_left, (6, 4))\n', (9204, 9236), False, 'import pygame\n'), ((9679, 9702), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (9700, 9702), False, 'import pygame\n'), ((10738, 10810), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'self.level.platform_stone_list', '(False)'], {}), '(self, self.level.platform_stone_list, False)\n', (10765, 10810), False, 'import pygame\n'), ((1994, 2035), 'pygame.transform.flip', 'pygame.transform.flip', (['frame', '(True)', '(False)'], {}), '(frame, True, False)\n', (2015, 2035), False, 'import pygame\n'), ((6083, 6103), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6097, 6103), False, 'import random\n'), ((8721, 8752), 'pygame.image.load', 'pygame.image.load', (['"""bubble.png"""'], {}), "('bubble.png')\n", (8738, 8752), False, 'import pygame\n'), ((8777, 8803), 'pygame.Surface', 'pygame.Surface', (['[225, 225]'], {}), '([225, 225])\n', (8791, 8803), False, 'import pygame\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
import botocore
import os
import logging
import time
import json
import datetime
log = logging.getLogger()
log.setLevel('INFO')
bucket = os.environ['BUCKET']
region = os.environ['AWS_REGION']
solution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0')
solution_id = os.environ.get('SOLUTION_ID')
user_agent_config = {
'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}',
'region_name': region
}
default_config = botocore.config.Config(**user_agent_config)
athena_client = boto3.client('athena', config=default_config)
def handler(event, context):
s3_prefix = event['s3_prefix']
table_prefix = event["stackName"]
log.info(f"table_prefix: {table_prefix}, s3_prefix: {s3_prefix}")
table_name = f"{table_prefix}_qc_batch_evaluation_metrics_hist"
view_name = f"{table_prefix}_qc_batch_evaluation_metrics"
ATHENA_OUTPUT_LOCATION = f"s3://{bucket}/{s3_prefix}/athena-out/"
location = f"s3://{bucket}/{s3_prefix}/batch_evaluation_metrics/"
createDBSql = "CREATE DATABASE IF NOT EXISTS qc_db"
dropTableSql = f"DROP TABLE IF EXISTS qc_db.{table_name}"
createTableSql = f'''
CREATE EXTERNAL TABLE IF NOT EXISTS qc_db.{table_name} (
Execution_Id string,
Compute_Type string,
Resolver string,
Complexity integer,
End_To_End_Time float,
Running_Time float,
Time_Info string,
Start_Time string,
Experiment_Name string,
Task_Id string,
Model_Name string,
Model_FileName string,
Scenario string,
Resource string,
Model_Param string,
Opt_Param string,
Create_Time string,
Result_Detail string,
Result_Location string
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n' LOCATION '{location}'
'''
createViewSql = f"CREATE OR REPLACE VIEW qc_db.{view_name} AS SELECT h1.* FROM qc_db.{table_name} h1, (SELECT DISTINCT Execution_Id, Start_Time FROM qc_db.{table_name} ORDER BY Start_Time DESC LIMIT 20) h2 WHERE (h1.Execution_Id = h2.Execution_Id)" #nosec B608
querySql = f"SELECT * FROM qc_db.{view_name}" #nosec B608
sqlStmSeq = [createDBSql, dropTableSql, createTableSql, createViewSql, querySql]
for sqlStm in sqlStmSeq:
log.info(sqlStm)
response = athena_client.start_query_execution(
QueryString=sqlStm,
ResultConfiguration={
'OutputLocation': ATHENA_OUTPUT_LOCATION
}
)
execution_id = response['QueryExecutionId']
wait_for_complete(execution_id)
log.info("all done")
return {
'queryResult': ATHENA_OUTPUT_LOCATION,
'endTime': datetime.datetime.utcnow().isoformat()
}
def wait_for_complete(execution_id):
log.info("execution_id:{}".format(execution_id))
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
while True:
status = response['QueryExecution']['Status']
log.info("State: {}".format(status['State']))
if status['State'] == 'SUCCEEDED':
return status
elif status['State'] in ['QUEUED', 'RUNNING']:
time.sleep(3)
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
else:
log.error(json.dumps(response, default=str))
raise Exception(json.dumps(response, default=str))
|
[
"logging.getLogger",
"boto3.client",
"datetime.datetime.utcnow",
"botocore.config.Config",
"json.dumps",
"os.environ.get",
"time.sleep"
] |
[((209, 228), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (226, 228), False, 'import logging\n'), ((335, 379), 'os.environ.get', 'os.environ.get', (['"""SOLUTION_VERSION"""', '"""v1.0.0"""'], {}), "('SOLUTION_VERSION', 'v1.0.0')\n", (349, 379), False, 'import os\n'), ((394, 423), 'os.environ.get', 'os.environ.get', (['"""SOLUTION_ID"""'], {}), "('SOLUTION_ID')\n", (408, 423), False, 'import os\n'), ((565, 608), 'botocore.config.Config', 'botocore.config.Config', ([], {}), '(**user_agent_config)\n', (587, 608), False, 'import botocore\n'), ((626, 671), 'boto3.client', 'boto3.client', (['"""athena"""'], {'config': 'default_config'}), "('athena', config=default_config)\n", (638, 671), False, 'import boto3\n'), ((2827, 2853), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2851, 2853), False, 'import datetime\n'), ((3318, 3331), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3328, 3331), False, 'import time\n'), ((3486, 3519), 'json.dumps', 'json.dumps', (['response'], {'default': 'str'}), '(response, default=str)\n', (3496, 3519), False, 'import json\n'), ((3549, 3582), 'json.dumps', 'json.dumps', (['response'], {'default': 'str'}), '(response, default=str)\n', (3559, 3582), False, 'import json\n')]
|
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
# This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
logger.warn('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
logger.warn('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in Preprint.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
assert not custom_provider.subjects.exists(), 'Provider aldready has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
'\n"exclude": [<list of children to exclude from included trees>],'
'\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]'
'\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
|
[
"logging.getLogger",
"osf.models.provider.rules_to_subjects",
"json.loads",
"django.db.transaction.atomic",
"osf.models.validators.validate_subject_hierarchy",
"osf.models.Preprint.objects.filter",
"osf.models.Subject",
"osf.models.Subject.objects.filter",
"website.preprints.tasks.on_preprint_updated",
"scripts.utils.add_file_logger",
"osf.models.Subject.objects.get",
"osf.models.AbstractProvider.objects.filter"
] |
[((411, 438), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (428, 438), False, 'import logging\n'), ((4490, 4552), 'osf.models.Subject.objects.get', 'Subject.objects.get', ([], {'provider': 'BEPRESS_PROVIDER', 'text': 'root_text'}), '(provider=BEPRESS_PROVIDER, text=root_text)\n', (4509, 4552), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((4571, 4669), 'osf.models.Subject', 'Subject', ([], {'text': 'root_text', 'parent': 'parent', 'bepress_subject': 'bepress_subj', 'provider': 'custom_provider'}), '(text=root_text, parent=parent, bepress_subject=bepress_subj,\n provider=custom_provider)\n', (4578, 4669), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((7162, 7222), 'osf.models.Subject.objects.get', 'Subject.objects.get', ([], {'provider': 'BEPRESS_PROVIDER', 'text': 'mapping'}), '(provider=BEPRESS_PROVIDER, text=mapping)\n', (7181, 7222), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((7305, 7409), 'osf.models.Subject', 'Subject', ([], {'provider': 'custom_provider', 'text': 'name', 'parent': 'parent_subject', 'bepress_subject': 'bepress_subject'}), '(provider=custom_provider, text=name, parent=parent_subject,\n bepress_subject=bepress_subject)\n', (7312, 7409), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((8421, 8470), 'osf.models.Preprint.objects.filter', 'Preprint.objects.filter', ([], {'provider': 'custom_provider'}), '(provider=custom_provider)\n', (8444, 8470), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((1076, 1130), 'osf.models.provider.rules_to_subjects', 'rules_to_subjects', (['custom_provider.subjects_acceptable'], {}), '(custom_provider.subjects_acceptable)\n', (1093, 1130), False, 'from osf.models.provider import rules_to_subjects\n'), ((5314, 5368), 'osf.models.provider.rules_to_subjects', 'rules_to_subjects', (['custom_provider.subjects_acceptable'], {}), '(custom_provider.subjects_acceptable)\n', (5331, 5368), False, 'from osf.models.provider import rules_to_subjects\n'), ((13948, 13983), 'json.loads', 'json.loads', (["(options['data'] or '{}')"], {}), "(options['data'] or '{}')\n", (13958, 13983), False, 'import json\n'), ((768, 839), 'osf.models.AbstractProvider.objects.filter', 'AbstractProvider.objects.filter', ([], {'_id': '"""osf"""', 'type': '"""osf.preprintprovider"""'}), "(_id='osf', type='osf.preprintprovider')\n", (799, 839), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((9403, 9452), 'osf.models.validators.validate_subject_hierarchy', 'validate_subject_hierarchy', (['[s._id for s in hier]'], {}), '([s._id for s in hier])\n', (9429, 9452), False, 'from osf.models.validators import validate_subject_hierarchy\n'), ((9593, 9653), 'website.preprints.tasks.on_preprint_updated', 'on_preprint_updated', (['preprint._id'], {'old_subjects': 'old_subjects'}), '(preprint._id, old_subjects=old_subjects)\n', (9612, 9653), False, 'from website.preprints.tasks import on_preprint_updated\n'), ((10398, 10463), 'osf.models.AbstractProvider.objects.filter', 'AbstractProvider.objects.filter', ([], {'_id': 'provider', 'type': 'provider_type'}), '(_id=provider, type=provider_type)\n', (10429, 10463), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((14329, 14375), 'scripts.utils.add_file_logger', 'script_utils.add_file_logger', (['logger', '__file__'], {}), '(logger, __file__)\n', (14357, 14375), True, 'from scripts import utils as script_utils\n'), ((14389, 14409), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (14407, 14409), False, 'from django.db import transaction\n'), ((1457, 1525), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'provider': 'BEPRESS_PROVIDER', 'text__in': 'includes'}), '(provider=BEPRESS_PROVIDER, text__in=includes)\n', (1479, 1525), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((1665, 1722), 'osf.models.Subject.objects.get', 'Subject.objects.get', ([], {'provider': 'BEPRESS_PROVIDER', 'text': 'text'}), '(provider=BEPRESS_PROVIDER, text=text)\n', (1684, 1722), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((7030, 7091), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'provider': 'custom_provider', 'text': 'parent'}), '(provider=custom_provider, text=parent)\n', (7052, 7091), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((9193, 9243), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'id__in': 'aliased_subject_ids'}), '(id__in=aliased_subject_ids)\n', (9215, 9243), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((10296, 10367), 'osf.models.AbstractProvider.objects.filter', 'AbstractProvider.objects.filter', ([], {'_id': '"""osf"""', 'type': '"""osf.preprintprovider"""'}), "(_id='osf', type='osf.preprintprovider')\n", (10327, 10367), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((13772, 13843), 'osf.models.AbstractProvider.objects.filter', 'AbstractProvider.objects.filter', ([], {'_id': '"""osf"""', 'type': '"""osf.preprintprovider"""'}), "(_id='osf', type='osf.preprintprovider')\n", (13803, 13843), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((1298, 1358), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'provider': 'BEPRESS_PROVIDER', 'text': 'text'}), '(provider=BEPRESS_PROVIDER, text=text)\n', (1320, 1358), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((8996, 9093), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'bepress_subject__id__in': 'subject_ids_to_map', 'provider': 'custom_provider'}), '(bepress_subject__id__in=subject_ids_to_map, provider\n =custom_provider)\n', (9018, 9093), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((14190, 14260), 'osf.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'provider': 'BEPRESS_PROVIDER', 'parent__isnull': '(True)'}), '(provider=BEPRESS_PROVIDER, parent__isnull=True)\n', (14212, 14260), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n'), ((3581, 3630), 'osf.models.Preprint.objects.filter', 'Preprint.objects.filter', ([], {'provider': 'custom_provider'}), '(provider=custom_provider)\n', (3604, 3630), False, 'from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject\n')]
|
import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
try:
feat = [torch.load(feat_path)]
except:
print(Back.RED + feat_path)
return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]
offsets = np.append(id_select, id_expand)
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
frames = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_feats = self._load_feature(record.path, p)
frames.extend(seg_feats)
if p < record.num_frames:
p += 1
# process_data = self.transform(frames)
process_data = torch.stack(frames)
return process_data, record.label
def __len__(self):
return len(self.video_list)
class VideoDataset(data.Dataset):
def __init__(
self,
folder,
n_frames,
frame_size=224,
separator="_"
):
self.folder = folder
self.num_segments = n_frames
self.frame_size = frame_size
self.data_transform = transforms.Compose(
[
transforms.Resize(self.frame_size),
transforms.CenterCrop(self.frame_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
self.separator = separator
self.classes = [c for c in sorted(listdir(folder))]
self.videos_with_classes = []
for c_index, c in enumerate(self.classes):
c_path = join(self.folder, c)
videos = listdir(c_path)
for v in videos:
v_path = join(c_path, v)
num_frames = len(listdir(v_path))
if num_frames >= self.num_segments:
pair = (v_path, c_index)
self.videos_with_classes.append(pair)
def _get_test_indices(self, num_frames):
num_min = self.num_segments
num_select = num_frames
if num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]
) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = (
np.ones(self.num_segments - num_select, dtype=int)
* id_select[id_select[0] - 1]
)
offsets = np.append(id_select, id_expand)
return offsets
def __getitem__(self, index):
video, label = self.videos_with_classes[index]
frames_temp = sorted(
listdir(video),
key=lambda path: int(path.split(self.separator)[-1].split(".")[0]),
)
frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]
num_frames = len(frames)
data = []
segment_indices = self._get_test_indices(num_frames)
for index in segment_indices:
frame = frames[index]
frame_path = join(video, frame)
frame_img = Image.open(frame_path)
frame_feat = self.data_transform(frame_img)
data.append(frame_feat)
tensor = torch.stack(data)
return tensor, label
def __len__(self):
return len(self.videos_with_classes)
|
[
"torchvision.transforms.CenterCrop",
"torch.utils.data.append",
"os.listdir",
"PIL.Image.open",
"numpy.ones",
"torch.load",
"torch.stack",
"os.path.join",
"numpy.append",
"numpy.zeros",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"colorama.init"
] |
[((505, 525), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (509, 525), False, 'from colorama import init\n'), ((5481, 5500), 'torch.stack', 'torch.stack', (['frames'], {}), '(frames)\n', (5492, 5500), False, 'import torch\n'), ((8333, 8350), 'torch.stack', 'torch.stack', (['data'], {}), '(data)\n', (8344, 8350), False, 'import torch\n'), ((3855, 3885), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3863, 3885), True, 'import numpy as np\n'), ((4659, 4690), 'numpy.append', 'np.append', (['id_select', 'id_expand'], {}), '(id_select, id_expand)\n', (4668, 4690), True, 'import numpy as np\n'), ((6438, 6458), 'os.path.join', 'join', (['self.folder', 'c'], {}), '(self.folder, c)\n', (6442, 6458), False, 'from os.path import join, splitext\n'), ((6480, 6495), 'os.listdir', 'listdir', (['c_path'], {}), '(c_path)\n', (6487, 6495), False, 'from os import listdir\n'), ((7567, 7598), 'numpy.append', 'np.append', (['id_select', 'id_expand'], {}), '(id_select, id_expand)\n', (7576, 7598), True, 'import numpy as np\n'), ((7756, 7770), 'os.listdir', 'listdir', (['video'], {}), '(video)\n', (7763, 7770), False, 'from os import listdir\n'), ((8158, 8176), 'os.path.join', 'join', (['video', 'frame'], {}), '(video, frame)\n', (8162, 8176), False, 'from os.path import join, splitext\n'), ((8201, 8223), 'PIL.Image.open', 'Image.open', (['frame_path'], {}), '(frame_path)\n', (8211, 8223), False, 'from PIL import Image, ImageFilter, ImageFile\n'), ((8292, 8315), 'torch.utils.data.append', 'data.append', (['frame_feat'], {}), '(frame_feat)\n', (8303, 8315), True, 'import torch.utils.data as data\n'), ((3151, 3200), 'numpy.random.randint', 'randint', (['average_duration'], {'size': 'self.num_segments'}), '(average_duration, size=self.num_segments)\n', (3158, 3200), False, 'from numpy.random import randint\n'), ((3393, 3423), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3401, 3423), True, 'import numpy as np\n'), ((4563, 4613), 'numpy.ones', 'np.ones', (['(self.num_segments - num_select)'], {'dtype': 'int'}), '(self.num_segments - num_select, dtype=int)\n', (4570, 4613), True, 'import numpy as np\n'), ((5945, 5979), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.frame_size'], {}), '(self.frame_size)\n', (5962, 5979), False, 'from torchvision import transforms\n'), ((5997, 6035), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['self.frame_size'], {}), '(self.frame_size)\n', (6018, 6035), False, 'from torchvision import transforms\n'), ((6053, 6074), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6072, 6074), False, 'from torchvision import transforms\n'), ((6092, 6167), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6112, 6167), False, 'from torchvision import transforms\n'), ((6550, 6565), 'os.path.join', 'join', (['c_path', 'v'], {}), '(c_path, v)\n', (6554, 6565), False, 'from os.path import join, splitext\n'), ((7434, 7484), 'numpy.ones', 'np.ones', (['(self.num_segments - num_select)'], {'dtype': 'int'}), '(self.num_segments - num_select, dtype=int)\n', (7441, 7484), True, 'import numpy as np\n'), ((2015, 2036), 'torch.load', 'torch.load', (['feat_path'], {}), '(feat_path)\n', (2025, 2036), False, 'import torch\n'), ((3283, 3355), 'numpy.random.randint', 'randint', (['(record.num_frames - self.new_length + 1)'], {'size': 'self.num_segments'}), '(record.num_frames - self.new_length + 1, size=self.num_segments)\n', (3290, 3355), False, 'from numpy.random import randint\n'), ((6309, 6324), 'os.listdir', 'listdir', (['folder'], {}), '(folder)\n', (6316, 6324), False, 'from os import listdir\n'), ((6599, 6614), 'os.listdir', 'listdir', (['v_path'], {}), '(v_path)\n', (6606, 6614), False, 'from os import listdir\n')]
|
# -*- coding: utf-8 -*-
from django.http import HttpResponseForbidden
from django.template import loader
from django.utils.translation import ugettext_lazy as _
# Regular user
def _requred_forbid(msg):
t = loader.get_template('limit_ip.html')
content = t.render({'message': msg })
return HttpResponseForbidden(content)
_msg = _(u'请求太频繁,请等待30s后重试(Request too often)。')
limitip_requred_forbid = _requred_forbid(_msg)
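# Usage sketch (illustrative, not part of the original module): the prebuilt
# 403 response above is meant to be returned from a rate-limiting view or
# middleware, e.g.
#
#     def dispatch(self, request, *args, **kwargs):
#         if too_many_requests(request):      # hypothetical rate check
#             return limitip_requred_forbid
#         return super().dispatch(request, *args, **kwargs)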
|
[
"django.http.HttpResponseForbidden",
"django.utils.translation.ugettext_lazy",
"django.template.loader.get_template"
] |
[((333, 374), 'django.utils.translation.ugettext_lazy', '_', (['u"""请求太频繁,请等待30s后重试(Request too often)。"""'], {}), "(u'请求太频繁,请等待30s后重试(Request too often)。')\n", (334, 374), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((204, 240), 'django.template.loader.get_template', 'loader.get_template', (['"""limit_ip.html"""'], {}), "('limit_ip.html')\n", (223, 240), False, 'from django.template import loader\n'), ((294, 324), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['content'], {}), '(content)\n', (315, 324), False, 'from django.http import HttpResponseForbidden\n')]
|
import math
import aerospike
from aerospike import predicates as p
from aerospike import exception as ex
from flask import current_app
aerospike_host = current_app.config['AEROSPIKE_HOST']
aerospike_port = current_app.config['AEROSPIKE_PORT']
namespace = current_app.config['AEROSPIKE_NAMESPACE']
set_name = current_app.config['AEROSPIKE_SET_NAME']
n_replicas = 1
config = {
'hosts': [
(aerospike_host, aerospike_port)
],
'policies': {
'timeout': 1000 # milliseconds
}
}
client = aerospike.client(config).connect()
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
def init_app(app):
pass
# if there is no more record, return -1 as next
def list(limit=10, cursor=None):
if cursor:
start = int(cursor)
else:
start = 0
end = start + limit
records = []
for i in range(start, end):
rec = read(str(i))
if rec:
records.append(rec)
if end >= __get_objs_cnt__():
next_key = -1
else:
        next_key = end  # resume offset for the next page
return records, next_key
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
# if there is no more record, return -1 as next
def list_by_user(user_id, limit=10, cursor=None):
if cursor:
start = cursor
else:
start = 0
query = client.query(namespace, set_name)
query.where(p.equals('createdById', user_id))
records = []
results = query.results()
if cursor:
start = cursor
else:
start = 0
cnt = 0
records = []
for i, result in enumerate(results):
if cnt >= limit:
break
if i < start:
continue
else:
rec = result[2]
records.append(rec)
cnt += 1
if cnt == limit:
        next_key = start + cnt  # resume offset for the next page
else:
next_key = -1
return records, next_key
def __get_objs_cnt__():
info = client.info("sets" + "/" + namespace + "/" + set_name)
for value in info.values():
info_str = value[1]
try:
start_idx = info_str.index("=") + 1
end_idx = info_str.index(":")
n_str = info_str[start_idx:end_idx]
return math.ceil(int(n_str) / n_replicas)
except ValueError:
return 0
def create(data, id=None):
if id:
key = str(id)
else:
key = str(__get_objs_cnt__())
data['id'] = key
client.put((namespace, set_name, key), data)
return read(key)
def read(id):
try:
(key, metadata) = client.exists((namespace, set_name, id))
(key, metadata, record) = client.get((namespace, set_name, id))
return record
except ex.RecordNotFound:
print("Record not found:", id)
return None
except ex.AerospikeError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
return None
def update(data, id):
if client.exists((namespace, set_name, id)):
delete(id)
return create(data, id)
def delete(id):
client.remove((namespace, set_name, id))
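# Usage sketch (added for illustration; assumes a reachable Aerospike server
# configured via the Flask settings above and records written with create()).
# It walks every page using the offset-style cursor returned by list(), which
# reports -1 once the last page has been read.
if __name__ == '__main__':
    cursor = None
    while True:
        page, cursor = list(limit=10, cursor=cursor)
        print('fetched %d record(s)' % len(page))
        if cursor == -1:
            break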
|
[
"aerospike.client",
"aerospike.predicates.equals"
] |
[((520, 544), 'aerospike.client', 'aerospike.client', (['config'], {}), '(config)\n', (536, 544), False, 'import aerospike\n'), ((1928, 1960), 'aerospike.predicates.equals', 'p.equals', (['"""createdById"""', 'user_id'], {}), "('createdById', user_id)\n", (1936, 1960), True, 'from aerospike import predicates as p\n')]
|
"""
author: @nimrobotics
description: calculates the effective connectivity between regions and plots them
"""
import numpy as np
import scipy.io
import glob
import sys
sys.path.append('../utils')
from plots import plotData
dir = "./process3/" #directory of the data
outdir = 'process3/' #directory to save the plots
regions = 3 #number of regions
files = glob.glob(dir+'/*_.mat') # get all the files in the directory
for file in files:
print('Processing condition: ', file)
data = scipy.io.loadmat(file) #load data from the directory
fval = data['fval'] #fval
pval = data['pval'] #pval
sig = data['sig'] #sig
cd = data['cd'] #cd
print('fval shape: ',fval.shape)
print('\nfval \n',fval)
print('pval shape: ',pval.shape)
print('sig shape: ',sig.shape)
print('\nsig \n',sig)
print(cd.shape)
# elementwise multiplication of fval and sig(0/1)
fval_sig = np.multiply(fval, sig)
print(fval_sig.shape)
print('\nfval_sig \n',fval_sig)
# fval_sig = np.mean(fval_sig, axis=2) # average over files
# print(fval_sig.shape)
# fval = np.mean(fval, axis=2)
labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions
condition = file.split('/')[-1].split('.')[0] #get the condition name
plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png')
plot.matrixPlot()
plot.circularPlot()
|
[
"numpy.multiply",
"plots.plotData",
"sys.path.append",
"glob.glob"
] |
[((170, 197), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (185, 197), False, 'import sys\n'), ((358, 384), 'glob.glob', 'glob.glob', (["(dir + '/*_.mat')"], {}), "(dir + '/*_.mat')\n", (367, 384), False, 'import glob\n'), ((909, 931), 'numpy.multiply', 'np.multiply', (['fval', 'sig'], {}), '(fval, sig)\n', (920, 931), True, 'import numpy as np\n'), ((1268, 1399), 'plots.plotData', 'plotData', (['fval_sig', 'labels', 'outdir'], {'colormap': '"""viridis"""', 'dpi': '(300)', 'title': "('EC: ' + condition)", 'filename': "('EC_' + condition + '.png')"}), "(fval_sig, labels, outdir, colormap='viridis', dpi=300, title=\n 'EC: ' + condition, filename='EC_' + condition + '.png')\n", (1276, 1399), False, 'from plots import plotData\n')]
|
from django.db import models
from sorl.thumbnail import ImageField
# Create your models here.
class Post(models.Model):
text = models.CharField(max_length=140, blank=False, null=False)
image = ImageField()
def __str__(self):
return self.text
|
[
"sorl.thumbnail.ImageField",
"django.db.models.CharField"
] |
[((132, 189), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)', 'blank': '(False)', 'null': '(False)'}), '(max_length=140, blank=False, null=False)\n', (148, 189), False, 'from django.db import models\n'), ((202, 214), 'sorl.thumbnail.ImageField', 'ImageField', ([], {}), '()\n', (212, 214), False, 'from sorl.thumbnail import ImageField\n')]
|
#!/usr/bin/env python3
import pyglet
import glooey
import autoprop
import datetime
from pyglet.gl import *
from vecrec import Vector, Rect
@autoprop
class LineClock(glooey.Widget):
custom_radius = 50
custom_color = 'green'
custom_hour_hand_width = 3
custom_minute_hand_width = 2
custom_second_hand_width = 1
custom_face_border_width = 3
def __init__(self):
super().__init__()
# User-controlled attributes:
self._radius = self.custom_radius
self._color = self.custom_color
# Internal attributes:
self._face = None
self._hands = {
'hour': glooey.drawing.Rectangle(),
'min': glooey.drawing.Rectangle(),
'sec': glooey.drawing.Rectangle(),
}
def get_radius(self):
return self._radius
def set_radius(self, radius):
self._radius = radius
self._repack()
def get_color(self):
return self._color
def set_color(self, color):
self._color = color
self._draw()
def on_update(self, dt):
self._draw()
def do_attach(self):
# Update the clock ten times a second.
pyglet.clock.schedule_interval(self.on_update, 1/10)
def do_detach(self):
pyglet.clock.unschedule(self.on_update)
def do_claim(self):
width = height = 2 * self.radius
return width, height
def do_regroup(self):
if self._face is not None:
self.batch.migrate(
self._face, GL_TRIANGLE_STRIP, self.group, self.batch)
for k in self._hands:
self._hands[k].batch = self.batch
self._hands[k].group = HandGroup(self)
def do_draw(self):
self.do_draw_face()
self.do_draw_hands()
def do_draw_face(self):
N = 48
vertices = []
for i in range(N + 2):
direction = Vector.from_degrees(360 * i / N)
radius = self._radius - (i % 2 * self.custom_face_border_width)
vertex = self.rect.center + radius * direction
vertices += vertex.tuple
# Insert duplicate vertices at the beginning and end of the list,
# otherwise this triangle strip will end up connected to any other
# triangle strips in the scene.
vertices = vertices[:2] + vertices + vertices[-2:]
num_vertices = len(vertices) // 2
color = glooey.drawing.Color.from_anything(self._color)
colors = num_vertices * color.rgb
# The vertex list for the face may or may not exist yet, e.g. if the
# clock is being drawn for the first time or was previously being
# hidden. So create the vertex list if we need to, otherwise just
# update its coordinates.
if self._face is None:
self._face = self.batch.add(
num_vertices,
GL_TRIANGLE_STRIP,
self.group,
('v2f', vertices),
('c3B', colors),
)
else:
self._face.vertices = vertices
self._face.colors = colors
def do_draw_hands(self):
# We're hard-coding the radii of the hands here. Probably it would be
# better to make separate attributes for these, but I think that would
# start to detract from the clarity of the example.
rects = {
'hour': Rect.from_size(self.custom_hour_hand_width, self.radius/2),
'min': Rect.from_size(self.custom_minute_hand_width, self.radius),
'sec': Rect.from_size(self.custom_second_hand_width, self.radius),
}
# The clock hands all start pointing towards 12:00, and the rotations
# are clockwise, so 90° is 3:00, 180° is 6:00, 270° is 9:00, etc.
now = datetime.datetime.now()
angles = {
'hour': 360 * now.hour / 12,
'min': 360 * now.minute / 60,
'sec': 360 * now.second / 60,
}
for k in self._hands:
rects[k].bottom = 0
rects[k].center_x = 0
self._hands[k].rect = rects[k]
self._hands[k].group.angle = angles[k]
self._hands[k].color = self._color
self._hands[k].show()
def do_undraw(self):
if self._face is not None:
self._face.delete()
self._face = None
for k in self._hands:
self._hands[k].hide()
class HandGroup(pyglet.graphics.Group):
def __init__(self, clock):
super().__init__(parent=clock.group)
self.clock = clock
self.angle = 0
def set_state(self):
x, y = self.clock.rect.center
clockwise = -1
glPushMatrix()
glLoadIdentity()
glTranslatef(x, y, 0)
glRotatef(self.angle, 0, 0, clockwise)
def unset_state(self):
glPopMatrix()
window = pyglet.window.Window()
gui = glooey.Gui(window)
gui.add(LineClock())
pyglet.app.run()
|
[
"pyglet.clock.unschedule",
"pyglet.clock.schedule_interval",
"pyglet.app.run",
"glooey.Gui",
"vecrec.Vector.from_degrees",
"glooey.drawing.Rectangle",
"datetime.datetime.now",
"glooey.drawing.Color.from_anything",
"pyglet.window.Window",
"vecrec.Rect.from_size"
] |
[((4923, 4945), 'pyglet.window.Window', 'pyglet.window.Window', ([], {}), '()\n', (4943, 4945), False, 'import pyglet\n'), ((4952, 4970), 'glooey.Gui', 'glooey.Gui', (['window'], {}), '(window)\n', (4962, 4970), False, 'import glooey\n'), ((4992, 5008), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (5006, 5008), False, 'import pyglet\n'), ((1194, 1248), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.on_update', '(1 / 10)'], {}), '(self.on_update, 1 / 10)\n', (1224, 1248), False, 'import pyglet\n'), ((1281, 1320), 'pyglet.clock.unschedule', 'pyglet.clock.unschedule', (['self.on_update'], {}), '(self.on_update)\n', (1304, 1320), False, 'import pyglet\n'), ((2436, 2483), 'glooey.drawing.Color.from_anything', 'glooey.drawing.Color.from_anything', (['self._color'], {}), '(self._color)\n', (2470, 2483), False, 'import glooey\n'), ((3840, 3863), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3861, 3863), False, 'import datetime\n'), ((642, 668), 'glooey.drawing.Rectangle', 'glooey.drawing.Rectangle', ([], {}), '()\n', (666, 668), False, 'import glooey\n'), ((693, 719), 'glooey.drawing.Rectangle', 'glooey.drawing.Rectangle', ([], {}), '()\n', (717, 719), False, 'import glooey\n'), ((744, 770), 'glooey.drawing.Rectangle', 'glooey.drawing.Rectangle', ([], {}), '()\n', (768, 770), False, 'import glooey\n'), ((1920, 1952), 'vecrec.Vector.from_degrees', 'Vector.from_degrees', (['(360 * i / N)'], {}), '(360 * i / N)\n', (1939, 1952), False, 'from vecrec import Vector, Rect\n'), ((3443, 3503), 'vecrec.Rect.from_size', 'Rect.from_size', (['self.custom_hour_hand_width', '(self.radius / 2)'], {}), '(self.custom_hour_hand_width, self.radius / 2)\n', (3457, 3503), False, 'from vecrec import Vector, Rect\n'), ((3522, 3580), 'vecrec.Rect.from_size', 'Rect.from_size', (['self.custom_minute_hand_width', 'self.radius'], {}), '(self.custom_minute_hand_width, self.radius)\n', (3536, 3580), False, 'from vecrec import Vector, Rect\n'), ((3601, 3659), 'vecrec.Rect.from_size', 'Rect.from_size', (['self.custom_second_hand_width', 'self.radius'], {}), '(self.custom_second_hand_width, self.radius)\n', (3615, 3659), False, 'from vecrec import Vector, Rect\n')]
|
# coding=utf-8
# Generated by Django 2.0.7 on 2018-07-27 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0003_auto_20180522_0820'),
]
operations = [
migrations.AlterField(
model_name='field',
name='help_text',
field=models.CharField(blank=True, max_length=2000, verbose_name='Help text'),
),
migrations.AlterField(
model_name='form',
name='slug',
field=models.SlugField(max_length=100, unique=True, verbose_name='Slug'),
),
]
|
[
"django.db.models.SlugField",
"django.db.models.CharField"
] |
[((351, 422), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(2000)', 'verbose_name': '"""Help text"""'}), "(blank=True, max_length=2000, verbose_name='Help text')\n", (367, 422), False, 'from django.db import migrations, models\n'), ((540, 606), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(100)', 'unique': '(True)', 'verbose_name': '"""Slug"""'}), "(max_length=100, unique=True, verbose_name='Slug')\n", (556, 606), False, 'from django.db import migrations, models\n')]
|
import os
import re
import sys
import json
#upper import
sys.path.append("../../")
from utils import levenshtein
from utils.io import load_json, write_to
def strQ2B(ustring):
"""全角转半角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
        if inside_code == 12288:  # the full-width space maps directly to an ASCII space
inside_code = 32
        elif (inside_code >= 65281 and inside_code <= 65374):  # other full-width characters map to ASCII by a fixed offset
inside_code -= 65248
rstring += chr(inside_code)
return rstring
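# Illustration (not in the original file): strQ2B('ABC123') returns 'ABC123',
# and the ideographic space U+3000 (code 12288) becomes a plain ASCII space.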
def get_sighan_from_json():
all_data = {
"train":None,
"dev":None,
"test":None,
"test14":None,
"test15":None,
}
data_dir = "../../data/rawdata/sighan/csc/"
train_file1 = os.path.join(data_dir, "train_dev.json")
train_file2 = os.path.join(data_dir, "train131415.json")
test14_file = os.path.join(data_dir, "test14.json")
test15_file = os.path.join(data_dir, "test15.json")
#test15_file = "../../data/rawdata/sighan/enchanted/test15.enc.json"
all_data["train"] = load_json(train_file1)
all_data["train"].extend(load_json(train_file2))
all_data["train"] = all_data["train"]
all_data["valid14"] = load_json(test14_file)
all_data["valid"] = load_json(test15_file)
#all_data["test"].extend(load_json(test15_file))
return all_data
def preprocess(sentence):
s = strQ2B(sentence)
back_num = re.findall('\d+', s)
back_eng = re.findall(r'[a-zA-Z]+', s)
#s = re.sub(r'[a-zA-Z]+', 'e', s)
#s = re.sub('\d+', 'n', s)
return s
def json2list(data, need_preprocess):
source, target = [], []
for i, element in enumerate(data):
if need_preprocess:
source.append(preprocess(element["original_text"]))
target.append(preprocess(element["correct_text"]))
assert len(preprocess(element["original_text"])) == len(preprocess(element["correct_text"])), preprocess(element["original_text"])+preprocess(element["correct_text"])
else:
print("ERROR: ABORT !")
exit(0)
source.append(strQ2B((element["original_text"])))
target.append(strQ2B((element["correct_text"])))
return source, target
def generate(need_preprocess=True):
"""
    Split the raw data (train.json) into preprocessed source/target files.
"""
#file = open("../../data/rawdata/ctc2021/train.json", 'r', encoding='utf-8')
data = get_sighan_from_json()
train_source, train_target = json2list(data["train"], need_preprocess)
valid14_source, valid14_target = json2list(data["valid14"], need_preprocess)
valid_source, valid_target = json2list(data["valid"], need_preprocess)
print(train_source[:3], train_target[:3])
print(len(train_source), len(train_target))
print(valid_source[:3], valid_target[:3])
print(len(valid_source), len(valid_target))
need_remove = {}
# cluster all need_remove
for i, sample in enumerate(valid_source):
for j, char in enumerate(sample):
tgt = valid_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
for i, sample in enumerate(valid14_source):
for j, char in enumerate(sample):
tgt = valid14_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
#remove
remove_count = 0
new_train_source, new_train_target = [], []
for i, sample in enumerate(train_source):
skip = False
for j, char in enumerate(sample):
tgt = train_target[i][j]
if char != tgt:
key = (char, tgt)
if key in need_remove:
skip = True
remove_count += 1
break
if not skip:
new_train_source.append(sample)
new_train_target.append(train_target[i])
print("Total Skip: ", remove_count)
train_source, train_target = new_train_source, new_train_target
#f_src = levenstein.tokenize(source, vocab_file_path="vocab.txt")
train_through = levenshtein.convert_from_sentpair_through(train_source, train_target, train_source)
valid14_through = levenshtein.convert_from_sentpair_through(valid14_source, valid14_target, valid14_source)
valid_through = levenshtein.convert_from_sentpair_through(valid_source, valid_target, valid_source)
#print(train_through[0], valid_through[0])
#output_name = "enchanted"
#output_name = "raw"
output_name = "holy"
write_to("../../data/rawdata/sighan/" + output_name + "/train.src", "\n".join(train_source))
write_to("../../data/rawdata/sighan/"+output_name+"/train.tgt", "\n".join(train_target))
#write_to("../../data/rawdata/sighan/std/train.through", "\n".join(train_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.src", "\n".join(valid14_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.tgt", "\n".join(valid14_target))
#write_to("../../data/rawdata/sighan/std/valid14.through", "\n".join(valid14_through))
write_to("../../data/rawdata/sighan/"+output_name+"/test.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/test.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/test.through", "\n".join(valid_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/valid.through", "\n".join(valid_through[:500]))
if __name__ == "__main__":
generate()
|
[
"utils.io.load_json",
"os.path.join",
"re.findall",
"utils.levenshtein.convert_from_sentpair_through",
"sys.path.append"
] |
[((59, 84), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (74, 84), False, 'import sys\n'), ((780, 820), 'os.path.join', 'os.path.join', (['data_dir', '"""train_dev.json"""'], {}), "(data_dir, 'train_dev.json')\n", (792, 820), False, 'import os\n'), ((839, 881), 'os.path.join', 'os.path.join', (['data_dir', '"""train131415.json"""'], {}), "(data_dir, 'train131415.json')\n", (851, 881), False, 'import os\n'), ((901, 938), 'os.path.join', 'os.path.join', (['data_dir', '"""test14.json"""'], {}), "(data_dir, 'test14.json')\n", (913, 938), False, 'import os\n'), ((958, 995), 'os.path.join', 'os.path.join', (['data_dir', '"""test15.json"""'], {}), "(data_dir, 'test15.json')\n", (970, 995), False, 'import os\n'), ((1095, 1117), 'utils.io.load_json', 'load_json', (['train_file1'], {}), '(train_file1)\n', (1104, 1117), False, 'from utils.io import load_json, write_to\n'), ((1241, 1263), 'utils.io.load_json', 'load_json', (['test14_file'], {}), '(test14_file)\n', (1250, 1263), False, 'from utils.io import load_json, write_to\n'), ((1288, 1310), 'utils.io.load_json', 'load_json', (['test15_file'], {}), '(test15_file)\n', (1297, 1310), False, 'from utils.io import load_json, write_to\n'), ((1452, 1473), 're.findall', 're.findall', (['"""\\\\d+"""', 's'], {}), "('\\\\d+', s)\n", (1462, 1473), False, 'import re\n'), ((1488, 1514), 're.findall', 're.findall', (['"""[a-zA-Z]+"""', 's'], {}), "('[a-zA-Z]+', s)\n", (1498, 1514), False, 'import re\n'), ((4120, 4207), 'utils.levenshtein.convert_from_sentpair_through', 'levenshtein.convert_from_sentpair_through', (['train_source', 'train_target', 'train_source'], {}), '(train_source, train_target,\n train_source)\n', (4161, 4207), False, 'from utils import levenshtein\n'), ((4226, 4319), 'utils.levenshtein.convert_from_sentpair_through', 'levenshtein.convert_from_sentpair_through', (['valid14_source', 'valid14_target', 'valid14_source'], {}), '(valid14_source, valid14_target,\n valid14_source)\n', (4267, 4319), False, 'from utils import levenshtein\n'), ((4336, 4423), 'utils.levenshtein.convert_from_sentpair_through', 'levenshtein.convert_from_sentpair_through', (['valid_source', 'valid_target', 'valid_source'], {}), '(valid_source, valid_target,\n valid_source)\n', (4377, 4423), False, 'from utils import levenshtein\n'), ((1147, 1169), 'utils.io.load_json', 'load_json', (['train_file2'], {}), '(train_file2)\n', (1156, 1169), False, 'from utils.io import load_json, write_to\n')]
|
from __future__ import with_statement
from nose.tools import assert_true
from os.path import exists
import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_equal
from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from ..bsa_io import make_bsa_image
from nibabel.tmpdirs import InTemporaryDirectory
def test_parcel_intra_from_3d_images_list():
"""Test that a parcellation is generated, starting from a list of 3D images
"""
# Generate an image
shape = (5, 5, 5)
contrast_id = 'plop'
mask_image = Nifti1Image(np.ones(shape), np.eye(4))
#mask_images = [mask_image for _ in range(5)]
with InTemporaryDirectory() as dir_context:
data_image = ['image_%d.nii' % i for i in range(5)]
for datim in data_image:
surrogate_3d_dataset(mask=mask_image, out_image_file=datim)
#run the algo
landmark, hrois = make_bsa_image(
mask_image, data_image, threshold=10., smin=0, sigma=1.,
prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context,
algorithm='density', contrast_id=contrast_id)
assert_equal(landmark, None)
assert_equal(len(hrois), 5)
assert_true(exists('density_%s.nii' % contrast_id))
assert_true(exists('prevalence_%s.nii' % contrast_id))
assert_true(exists('AR_%s.nii' % contrast_id))
assert_true(exists('CR_%s.nii' % contrast_id))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
[
"os.path.exists",
"numpy.eye",
"numpy.ones",
"numpy.testing.assert_equal",
"nibabel.tmpdirs.InTemporaryDirectory",
"nose.run"
] |
[((1505, 1534), 'nose.run', 'nose.run', ([], {'argv': "['', __file__]"}), "(argv=['', __file__])\n", (1513, 1534), False, 'import nose\n'), ((584, 598), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (591, 598), True, 'import numpy as np\n'), ((600, 609), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (606, 609), True, 'import numpy as np\n'), ((671, 693), 'nibabel.tmpdirs.InTemporaryDirectory', 'InTemporaryDirectory', ([], {}), '()\n', (691, 693), False, 'from nibabel.tmpdirs import InTemporaryDirectory\n'), ((1156, 1184), 'numpy.testing.assert_equal', 'assert_equal', (['landmark', 'None'], {}), '(landmark, None)\n', (1168, 1184), False, 'from numpy.testing import assert_equal\n'), ((1241, 1279), 'os.path.exists', 'exists', (["('density_%s.nii' % contrast_id)"], {}), "('density_%s.nii' % contrast_id)\n", (1247, 1279), False, 'from os.path import exists\n'), ((1301, 1342), 'os.path.exists', 'exists', (["('prevalence_%s.nii' % contrast_id)"], {}), "('prevalence_%s.nii' % contrast_id)\n", (1307, 1342), False, 'from os.path import exists\n'), ((1364, 1397), 'os.path.exists', 'exists', (["('AR_%s.nii' % contrast_id)"], {}), "('AR_%s.nii' % contrast_id)\n", (1370, 1397), False, 'from os.path import exists\n'), ((1419, 1452), 'os.path.exists', 'exists', (["('CR_%s.nii' % contrast_id)"], {}), "('CR_%s.nii' % contrast_id)\n", (1425, 1452), False, 'from os.path import exists\n')]
|
"""Utilities for working with Doxygen tag files.
"""
__all__ = ["get_tag_entity_names"]
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Sequence, Union
try:
from sphinxcontrib.doxylink import doxylink
except ImportError:
print(
"sphinxcontrib.doxylink is missing. Install documenteer with the "
"pipelines extra:\n\n pip install documenteer[pipelines]"
)
def get_tag_entity_names(
tag_path: Union[str, Path], kinds: Optional[Sequence[str]] = None
) -> List[str]:
"""Get the list of API names in a Doxygen tag file.
Parameters
----------
tag_path : `str` or `~pathlib.Path`
File path of the Doxygen tag file.
kinds : sequence of `str`, optional
If provided, a sequence of API kinds to include in the listing.
Doxygen types are:
- namespace
- struct
- class
- file
- define
- group
- variable
- typedef
- enumeration
- function
Returns
-------
names : `list` of `str`
List of API names.
"""
doc = ET.parse(str(tag_path))
symbol_map = doxylink.SymbolMap(doc)
keys = []
for key in symbol_map._mapping.keys():
entry = symbol_map[key]
if kinds:
if entry.kind in kinds:
keys.append(key)
else:
keys.append(key)
keys.sort()
return keys
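# Usage sketch (illustrative; "example.tag" is a hypothetical Doxygen tag file,
# not something bundled with this module):
#
#     all_names = get_tag_entity_names("example.tag")
#     class_names = get_tag_entity_names("example.tag", kinds=["class", "struct"])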
|
[
"sphinxcontrib.doxylink.doxylink.SymbolMap"
] |
[((1176, 1199), 'sphinxcontrib.doxylink.doxylink.SymbolMap', 'doxylink.SymbolMap', (['doc'], {}), '(doc)\n', (1194, 1199), False, 'from sphinxcontrib.doxylink import doxylink\n')]
|
# Shell Game, by <NAME> <EMAIL>
# A random gambling game.
import random, time, sys
print('''SHELL GAME
By <NAME> <EMAIL>
Try to find the diamond!
Press Enter to continue...''')
input()
CUPS = ['diamond', 'pocket lint', 'nothing']
while True:
print()
print('Shuffling the cups', end='')
random.shuffle(CUPS) # This happens instantly.
# We add fake pauses to make it seem more interesting:
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print()
while True:
print('Okay! Pick a cup 1-{}'.format(len(CUPS)))
pickedCup = input()
if pickedCup.isdecimal() and 1 <= int(pickedCup) <= len(CUPS):
break
print('Type a number between 1 and {}.'.format(len(CUPS)))
print()
if CUPS[int(pickedCup) - 1] == 'diamond':
print('You found the cup with the diamond!')
else:
print('Nope! You picked the cup that had {} in it.'.format(CUPS[int(pickedCup) - 1]))
print('Would you like to play again? Y/N')
response = input().upper()
if not response.startswith('Y'):
print('Thanks for playing!')
sys.exit()
|
[
"random.shuffle",
"time.sleep",
"sys.exit"
] |
[((303, 323), 'random.shuffle', 'random.shuffle', (['CUPS'], {}), '(CUPS)\n', (317, 323), False, 'import random, time, sys\n'), ((414, 429), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (424, 429), False, 'import random, time, sys\n'), ((457, 472), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (467, 472), False, 'import random, time, sys\n'), ((500, 515), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (510, 515), False, 'import random, time, sys\n'), ((543, 558), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (553, 558), False, 'import random, time, sys\n'), ((1210, 1220), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1218, 1220), False, 'import random, time, sys\n')]
|
import pyttsx3
import speech_recognition as sr
import openai as op
import os
op.api_key = os.getenv("OPENAI_API_KEY")
engine = pyttsx3.init()
engine.setProperty('rate', 150)
engine.setProperty('volume', 1.0)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def tell(text):
engine.say(text)
engine.runAndWait()
def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print("Please repeat")
return "Nothing"
return query
while True:
query = takecommand()
response = op.Completion.create(
engine="text-davinci-001",
prompt="The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: " + query + "\nAI: ",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
presponse= response["choices"][0]["text"]
print(presponse)
tell(presponse)
|
[
"os.getenv",
"pyttsx3.init",
"speech_recognition.Recognizer",
"speech_recognition.Microphone",
"openai.Completion.create"
] |
[((92, 119), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (101, 119), False, 'import os\n'), ((130, 144), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (142, 144), False, 'import pyttsx3\n'), ((381, 396), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (394, 396), True, 'import speech_recognition as sr\n'), ((836, 1135), 'openai.Completion.create', 'op.Completion.create', ([], {'engine': '"""text-davinci-001"""', 'prompt': '(\n """The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: """\n + query + \'\\nAI: \')', 'temperature': '(0.9)', 'max_tokens': '(150)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0.6)'}), '(engine=\'text-davinci-001\', prompt=\n """The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: """\n + query + \'\\nAI: \', temperature=0.9, max_tokens=150, top_p=1,\n frequency_penalty=0, presence_penalty=0.6)\n', (856, 1135), True, 'import openai as op\n'), ((406, 421), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (419, 421), True, 'import speech_recognition as sr\n')]
|
# To run the job:
# pyats run job BGP_check_job.py --testbed-file <testbed_file.yaml>
# Description: This job file checks that all BGP neighbors are in Established state
import os
# All run() must be inside a main function
def main(runtime):
# Find the location of the script in relation to the job file
bgp_tests = os.path.join(os.path.dirname(__file__),
'BGP_Neighbors_Established.py')
# Execute the testscript
runtime.tasks.run(testscript=bgp_tests)
|
[
"os.path.dirname"
] |
[((338, 363), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'import os\n')]
|
import os
def get_dirs():
cwd = os.path.dirname(os.path.realpath(__file__))
local_savedir = cwd
local_datadir = cwd
local_wandbdir = cwd
return local_savedir, local_datadir, local_wandbdir
def configure_logging(config, name, model):
if config['wandb_on']:
import wandb
wandb.init(name=name,
project='YOUR_PROJECT_NAME',
entity='YOUR_ENTITY_NAME',
dir=config['wandb_dir'],
config=config)
wandb.watch(model)
def log(key, val):
print(f"{key}: {val}")
wandb.log({key: val})
checkpoint_path = os.path.join(wandb.run.dir, 'checkpoint.tar')
else:
def log(key, val):
print(f"{key}: {val}")
checkpoint_path = './checkpoint.tar'
return log, checkpoint_path
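# Usage sketch (illustrative; the config keys mirror the ones read above, and a
# real run would pass the actual model being trained instead of None):
#
#     savedir, datadir, wandbdir = get_dirs()
#     config = {'wandb_on': False, 'wandb_dir': wandbdir}
#     log, ckpt_path = configure_logging(config, name='debug-run', model=None)
#     log('train/loss', 0.123)  # prints, and also logs to wandb when wandb_on is True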
|
[
"wandb.log",
"os.path.join",
"wandb.init",
"os.path.realpath",
"wandb.watch"
] |
[((53, 79), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((315, 437), 'wandb.init', 'wandb.init', ([], {'name': 'name', 'project': '"""YOUR_PROJECT_NAME"""', 'entity': '"""YOUR_ENTITY_NAME"""', 'dir': "config['wandb_dir']", 'config': 'config'}), "(name=name, project='YOUR_PROJECT_NAME', entity=\n 'YOUR_ENTITY_NAME', dir=config['wandb_dir'], config=config)\n", (325, 437), False, 'import wandb\n'), ((519, 537), 'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (530, 537), False, 'import wandb\n'), ((662, 707), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""checkpoint.tar"""'], {}), "(wandb.run.dir, 'checkpoint.tar')\n", (674, 707), False, 'import os\n'), ((613, 634), 'wandb.log', 'wandb.log', (['{key: val}'], {}), '({key: val})\n', (622, 634), False, 'import wandb\n')]
|
import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
class VisualBertClassifier(nn.Module):
def __init__(self,
visual_bert_model,
num_classes: int = 8,
initial_visual_embedding_dim: int = 96,
final_dropout_rate: float = 0.1):
"""
pooler_output (torch.FloatTensor of shape (batch_size, hidden_size))
— Last layer hidden-state of the first token of the sequence (classification token)
after further processing through the layers used for the auxiliary pretraining task.
E.g. for BERT-family of models, this returns the classification token after processing through
a linear layer and a tanh activation function.
The linear layer weights are trained from the next sentence prediction (classification) objective
during pretraining.
@param initial_visual_embedding_dim:
"""
super().__init__()
self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
self.visual_bert = visual_bert_model
self.final_dropout = nn.Dropout(final_dropout_rate)
self.out = nn.Linear(768, num_classes)
def forward(self,
text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask
):
visual_embeds = self.visual_embedding_projection(visual_embeds)
output = self.visual_bert(input_ids=text_input_ids,
token_type_ids=text_token_type_ids,
attention_mask=text_attention_mask,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
visual_attention_mask=visual_attention_mask)
output = self.final_dropout(output.pooler_output)
output = self.out(output)
return output
if __name__ == '__main__':
bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
text_input_ids = inputs.data['input_ids'].to('cuda')
text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
text_attention_mask = inputs.data['attention_mask'].to('cuda')
sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
sample_face_body_embedding = np.load(sample_face_body_embedding_path)
visual_embeds = torch.from_numpy(sample_face_body_embedding)
visual_embeds = visual_embeds.to('cuda')
visual_embeds = torch.unsqueeze(visual_embeds, 0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # VisualBertClassifier requires a VisualBERT backbone as its first argument;
    # the checkpoint below is an illustrative assumption (hidden size 768,
    # visual embedding dim 2048), not something fixed by the original script.
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
classifier.to('cuda')
classifier.forward(text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask)
|
[
"torch.nn.Dropout",
"torch.unsqueeze",
"transformers.BertTokenizer.from_pretrained",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.load",
"torch.ones"
] |
[((2161, 2211), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (2190, 2211), False, 'from transformers import BertTokenizer, VisualBertModel, VisualBertConfig\n'), ((2689, 2729), 'numpy.load', 'np.load', (['sample_face_body_embedding_path'], {}), '(sample_face_body_embedding_path)\n', (2696, 2729), True, 'import numpy as np\n'), ((2750, 2794), 'torch.from_numpy', 'torch.from_numpy', (['sample_face_body_embedding'], {}), '(sample_face_body_embedding)\n', (2766, 2794), False, 'import torch\n'), ((2860, 2893), 'torch.unsqueeze', 'torch.unsqueeze', (['visual_embeds', '(0)'], {}), '(visual_embeds, 0)\n', (2875, 2893), False, 'import torch\n'), ((1070, 1115), 'torch.nn.Linear', 'nn.Linear', (['initial_visual_embedding_dim', '(2048)'], {}), '(initial_visual_embedding_dim, 2048)\n', (1079, 1115), False, 'from torch import nn\n'), ((1190, 1220), 'torch.nn.Dropout', 'nn.Dropout', (['final_dropout_rate'], {}), '(final_dropout_rate)\n', (1200, 1220), False, 'from torch import nn\n'), ((1240, 1267), 'torch.nn.Linear', 'nn.Linear', (['(768)', 'num_classes'], {}), '(768, num_classes)\n', (1249, 1267), False, 'from torch import nn\n'), ((2922, 2976), 'torch.ones', 'torch.ones', (['visual_embeds.shape[:-1]'], {'dtype': 'torch.long'}), '(visual_embeds.shape[:-1], dtype=torch.long)\n', (2932, 2976), False, 'import torch\n'), ((3016, 3071), 'torch.ones', 'torch.ones', (['visual_embeds.shape[:-1]'], {'dtype': 'torch.float'}), '(visual_embeds.shape[:-1], dtype=torch.float)\n', (3026, 3071), False, 'import torch\n')]
|
from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result
from narrative2vec.logging_instance.reasoning_task import ReasoningTask
from narrative2vec.ontology.neemNarrativeDefinitions import QUATERNION
from narrative2vec.ontology.ontologyHandler import get_knowrob_uri
class Pose(LoggingInstance):
def get_translation(self):
read_translation = self._get_property_('translation')
return read_translation.strip().split()
def get_quaternion(self):
read_orientation = self._get_property_(QUATERNION)
return read_orientation.strip().split()
def get_reasoning_task__id(self):
reasoning_task_property = self._graph_.subjects(get_knowrob_uri('parameter2'), self.context)
reasoning_task = _get_first_rdf_query_result(reasoning_task_property)
if reasoning_task and not reasoning_task.startswith('file://'):
return ReasoningTask(reasoning_task, self._graph_).get_id()
return ''
|
[
"narrative2vec.logging_instance.logging_instance._get_first_rdf_query_result",
"narrative2vec.logging_instance.reasoning_task.ReasoningTask",
"narrative2vec.ontology.ontologyHandler.get_knowrob_uri"
] |
[((790, 842), 'narrative2vec.logging_instance.logging_instance._get_first_rdf_query_result', '_get_first_rdf_query_result', (['reasoning_task_property'], {}), '(reasoning_task_property)\n', (817, 842), False, 'from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result\n'), ((720, 749), 'narrative2vec.ontology.ontologyHandler.get_knowrob_uri', 'get_knowrob_uri', (['"""parameter2"""'], {}), "('parameter2')\n", (735, 749), False, 'from narrative2vec.ontology.ontologyHandler import get_knowrob_uri\n'), ((935, 978), 'narrative2vec.logging_instance.reasoning_task.ReasoningTask', 'ReasoningTask', (['reasoning_task', 'self._graph_'], {}), '(reasoning_task, self._graph_)\n', (948, 978), False, 'from narrative2vec.logging_instance.reasoning_task import ReasoningTask\n')]
|
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME>. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME> (张仪). Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
def u(x,y):
return +np.cos(np.pi*x) * np.sin(np.pi*y)
def v(x,y):
return -np.sin(np.pi*x) * np.cos(np.pi*y)
def r_u(x,y):
return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y)
def r_v(x,y):
return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y)
# %% define the mesh
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
|
[
"inner_product.inner",
"forms.Form",
"function_space.FunctionSpace",
"numpy.cos",
"numpy.sin",
"mesh.CrazyMesh"
] |
[((1661, 1707), 'mesh.CrazyMesh', 'CrazyMesh', (['(2)', '(2, 2)', '((-1, 1), (-1, 1))', '(0.05)'], {}), '(2, (2, 2), ((-1, 1), (-1, 1)), 0.05)\n', (1670, 1707), False, 'from mesh import CrazyMesh\n'), ((1732, 1786), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-gauss"""', '(5, 5)'], {'is_inner': '(False)'}), "(mesh, '1-gauss', (5, 5), is_inner=False)\n", (1745, 1786), False, 'from function_space import FunctionSpace\n'), ((1809, 1865), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-lobatto"""', '(5, 5)'], {'is_inner': '(False)'}), "(mesh, '1-lobatto', (5, 5), is_inner=False)\n", (1822, 1865), False, 'from function_space import FunctionSpace\n'), ((1884, 1907), 'forms.Form', 'Form', (['func_space_gauss1'], {}), '(func_space_gauss1)\n', (1888, 1907), False, 'from forms import Form\n'), ((1925, 1950), 'forms.Form', 'Form', (['func_space_lobatto1'], {}), '(func_space_lobatto1)\n', (1929, 1950), False, 'from forms import Form\n'), ((1956, 2003), 'inner_product.inner', 'inner', (['form_1_lobatto.basis', 'form_1_gauss.basis'], {}), '(form_1_lobatto.basis, form_1_gauss.basis)\n', (1961, 2003), False, 'from inner_product import inner\n'), ((1404, 1421), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1410, 1421), True, 'import numpy as np\n'), ((1462, 1479), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1468, 1479), True, 'import numpy as np\n'), ((1539, 1556), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1545, 1556), True, 'import numpy as np\n'), ((1616, 1633), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1622, 1633), True, 'import numpy as np\n'), ((1386, 1403), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1392, 1403), True, 'import numpy as np\n'), ((1444, 1461), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1450, 1461), True, 'import numpy as np\n'), ((1521, 1538), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1527, 1538), True, 'import numpy as np\n'), ((1598, 1615), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1604, 1615), True, 'import numpy as np\n')]
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerTokenizer(AllenNlpTestCase):
def test_splits_roberta(self):
tokenizer = PretrainedTransformerTokenizer("roberta-base")
sentence = "A, <mask> AllenNLP sentence."
expected_tokens = ["<s>", "A", ",", "<mask>", "Allen", "N", "LP", "Ġsentence", ".", "</s>"]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, <mask> AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"<s>",
"A",
",",
"<mask>",
"Allen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
"</s>",
"A",
"Ġsentence",
".",
"</s>",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_cased_bert(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, [MASK] AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
"A",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_uncased_bert(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
|
[
"allennlp.data.tokenizers.PretrainedTransformerTokenizer"
] |
[((238, 284), 'allennlp.data.tokenizers.PretrainedTransformerTokenizer', 'PretrainedTransformerTokenizer', (['"""roberta-base"""'], {}), "('roberta-base')\n", (268, 284), False, 'from allennlp.data.tokenizers import PretrainedTransformerTokenizer\n'), ((1175, 1224), 'allennlp.data.tokenizers.PretrainedTransformerTokenizer', 'PretrainedTransformerTokenizer', (['"""bert-base-cased"""'], {}), "('bert-base-cased')\n", (1205, 1224), False, 'from allennlp.data.tokenizers import PretrainedTransformerTokenizer\n'), ((2527, 2578), 'allennlp.data.tokenizers.PretrainedTransformerTokenizer', 'PretrainedTransformerTokenizer', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (2557, 2578), False, 'from allennlp.data.tokenizers import PretrainedTransformerTokenizer\n')]
|
# -*- coding: UTF-8 -*-
"""
collector.xhn - Xinhuanet data collector
Official site: http://www.xinhuanet.com/
API notes:
    1. Get the article list
        http://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000
    Xinhua all-media headlines
http://www.xinhuanet.com/politics/qmtt/index.htm
====================================================================
"""
import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup
from zb.crawlers.utils import get_header
import traceback
import pandas as pd
from tqdm import tqdm
import tma
home_url = "http://www.xinhuanet.com/"
def get_website_map():
wzdt_url = "http://www.xinhuanet.com/wzdt2014.htm"
html = requests.get(wzdt_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
map_raw = bsobj.find('div', {'class': "content_left"})
raise NotImplementedError
def get_special_topics(pgnum=1):
"""获取专题列表"""
url = "http://qc.wa.news.cn/nodeart/list?" \
"nid=115093&pgnum=%s&cnt=200" % str(pgnum)
res = requests.get(url).text
res = res.replace("null", "\'\'")
res = eval(res)
assert res['status'] == 0, "获取文章列表失败"
data = res['data']['list']
specials = []
for a in data:
special = {
"Abstract": a['Abstract'],
"Author": a['Author'],
"LinkUrl": a['LinkUrl'],
"PubTime": a['PubTime'],
"Title": a['Title'],
"allPics": a['allPics'],
}
specials.append(special)
return specials
def get_article_detail(article_url):
"""获取新华网article_url中的文章内容
:param article_url: 文章url
:return:
{
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
"""
# article_url = "http://www.xinhuanet.com/fortune/2018-06/20/c_129897476.htm"
html = requests.get(article_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
    # Parse the title
cols = bsobj.find('div', {"class": "h-news"}).text.strip().split("\r\n")
title = cols[0].strip()
pub_time = cols[1].strip()
source = cols[-1].strip()
    # Parse the body content
content = bsobj.find('div', {"id": "p-detail"}).text.strip()
content = content.replace("\u3000\u3000", "")
content = [x.strip() for x in content.split("\n")]
content = [x for x in content if x != ""]
content = "\n".join(content)
return {
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
class HomePage(object):
"""新华网首页"""
def __init__(self):
self.home_url = "http://www.xinhuanet.com/"
@staticmethod
def _get_date_from_url(url):
pat = re.compile("(\d{4}-\d{2}[/-]\d{2})")
res = pat.findall(url)
if res is not None and len(res) == 1:
return res[0].replace('/', "-")
else:
return None
def get_article_list(self, d=None):
"""获取首页的头条文章列表"""
html = requests.get(self.home_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
a_list = []
for a in bsobj.find_all("a"):
try:
url = a['href']
title = a.text.strip()
date_ = self._get_date_from_url(url)
a_list.append([url, title, date_])
except:
if tma.DEBUG:
traceback.print_exc()
continue
a_list = [a for a in a_list if
a[0] != ""
and a[0].strip("/") != "http://xhgy.xinhuanet.com"
and a[0].startswith("http")
and a[1] != ""
and a[1] != "视频MP4地址"
and "c_" in a[0]
and a[2] != ""
# and 'photo' not in a[0]
# and 'video' not in a[0]
]
        # Deduplicate by URL
df = pd.DataFrame(a_list, columns=['url', 'title', 'date'])
df.drop_duplicates('url', inplace=True)
res = [list(x) for x in list(df.values)]
if d is None:
date_list = [datetime.now().date().__str__()]
else:
date_list = d
res = [a for a in res if a[2] in date_list]
res = sorted(res, key=lambda x: x[2], reverse=True)
return res
def get_articles(self, d=None):
"""获取首页文章内容
:param d: list
限定获取文章的日期,默认是当日日期,可以指定多个离散的日期
:return: list
"""
        # Get the homepage article URLs, filter by publish date, deduplicate by URL
res = self.get_article_list(d)
a_list = [a[0] for a in res]
a_list = list(set(a_list))
articles = []
for a in tqdm(a_list, ncols=100, desc="xhn.get_articles"):
try:
article = get_article_detail(a)
articles.append(article)
except:
if tma.DEBUG:
traceback.print_exc()
return articles
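# Usage sketch (illustrative): collect today's headline articles from the
# Xinhuanet homepage, or restrict the crawl to specific dates.
#
#     hp = HomePage()
#     todays_articles = hp.get_articles()                 # defaults to today
#     older_articles = hp.get_articles(d=['2018-06-20'])  # explicit date list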
class Fortune(object):
def __init__(self):
self.url1 = "http://www.xinhuanet.com/fortune/"
self.url2 = "http://www.xinhuanet.com/fortune/caiyan.htm"
self.url3 = "http://www.xinhuanet.com/fortune/cfx.htm"
self.url4 = "http://www.xinhuanet.com/fortune/bcxc.htm"
|
[
"re.compile",
"tqdm.tqdm",
"requests.get",
"datetime.datetime.now",
"pandas.DataFrame",
"traceback.print_exc",
"zb.crawlers.utils.get_header"
] |
[((986, 1003), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (998, 1003), False, 'import requests\n'), ((2738, 2777), 're.compile', 're.compile', (['"""(\\\\d{4}-\\\\d{2}[/-]\\\\d{2})"""'], {}), "('(\\\\d{4}-\\\\d{2}[/-]\\\\d{2})')\n", (2748, 2777), False, 'import re\n'), ((3967, 4021), 'pandas.DataFrame', 'pd.DataFrame', (['a_list'], {'columns': "['url', 'title', 'date']"}), "(a_list, columns=['url', 'title', 'date'])\n", (3979, 4021), True, 'import pandas as pd\n'), ((4716, 4764), 'tqdm.tqdm', 'tqdm', (['a_list'], {'ncols': '(100)', 'desc': '"""xhn.get_articles"""'}), "(a_list, ncols=100, desc='xhn.get_articles')\n", (4720, 4764), False, 'from tqdm import tqdm\n'), ((655, 667), 'zb.crawlers.utils.get_header', 'get_header', ([], {}), '()\n', (665, 667), False, 'from zb.crawlers.utils import get_header\n'), ((1881, 1893), 'zb.crawlers.utils.get_header', 'get_header', ([], {}), '()\n', (1891, 1893), False, 'from zb.crawlers.utils import get_header\n'), ((3052, 3064), 'zb.crawlers.utils.get_header', 'get_header', ([], {}), '()\n', (3062, 3064), False, 'from zb.crawlers.utils import get_header\n'), ((3455, 3476), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3474, 3476), False, 'import traceback\n'), ((4942, 4963), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4961, 4963), False, 'import traceback\n'), ((4167, 4181), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4179, 4181), False, 'from datetime import datetime\n')]
|
import unittest
from plugins.onetv import OneTV
class TestPluginPerviyKanal(unittest.TestCase):
def test_can_handle_url(self):
regex_test_list = [
"https://media.1tv.ru/embed/ctcmedia/ctc-che.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-dom.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-love.html?start=auto",
"https://stream.1tv.ru/live",
"https://www.1tv.ru/embedlive?start=auto",
"https://www.1tv.ru/live",
"https://www.chetv.ru/online/",
"https://www.ctc.ru/online/",
"https://www.ctclove.ru/online/",
"https://domashniy.ru/online",
"https://ren.tv/live",
"https://media.1tv.ru/embed/nmg/nmg-ren.html",
"https://www.5-tv.ru/live/",
"https://media.1tv.ru/embed/nmg/nmg-5tv.html",
]
for url in regex_test_list:
self.assertTrue(OneTV.can_handle_url(url))
|
[
"plugins.onetv.OneTV.can_handle_url"
] |
[((968, 993), 'plugins.onetv.OneTV.can_handle_url', 'OneTV.can_handle_url', (['url'], {}), '(url)\n', (988, 993), False, 'from plugins.onetv import OneTV\n')]
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from random import uniform
from compas.geometry import transform_points
from compas.geometry import centroid_points
from compas.geometry import bounding_box
from compas.geometry import Primitive
from compas.geometry import Point
__all__ = ['Pointcloud']
class Pointcloud(Primitive):
"""Class for working with pointclouds.
Parameters
----------
points : sequence[point]
A sequence of points to add to the cloud.
**kwargs : dict[str, Any], optional
Additional keyword arguments collected in a dict.
Attributes
----------
points : list[:class:`compas.geometry.Point`]
The points of the cloud.
Examples
--------
>>>
"""
def __init__(self, points, **kwargs):
super(Pointcloud, self).__init__(**kwargs)
self._points = None
self.points = points
@property
def DATASCHEMA(self):
from schema import Schema
from compas.data import is_float3
return Schema({
'points': lambda points: all(is_float3(point) for point in points)
})
@property
def JSONSCHEMANAME(self):
return 'pointcloud'
@property
def data(self):
return {'points': [point.data for point in self.points]}
@data.setter
def data(self, data):
self._points = [Point.from_data(point) for point in data['points']]
@classmethod
def from_data(cls, data):
return cls(data['points'])
# ==========================================================================
# properties
# ==========================================================================
@property
def points(self):
return self._points
@points.setter
def points(self, points):
self._points = [Point(*point) for point in points]
@property
def centroid(self):
return centroid_points(self.points)
@property
def bounding_box(self):
return bounding_box(self.points)
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Pointcloud({0!r})'.format(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
if key > len(self) - 1:
raise KeyError
return self.points[key]
def __setitem__(self, key, value):
if key > len(self) - 1:
raise KeyError
self.points[key] = value
def __iter__(self):
return iter(self.points)
def __eq__(self, other):
"""Is this pointcloud equal to the other pointcloud?
Two pointclouds are considered equal if they have the same number of points
and if the XYZ coordinates of the corresponding points are identical.
Parameters
----------
other : :class:`compas.geometry.Pointcloud` | list[[float, float, float] | :class:`compas.geometry.Point`]
The pointcloud to compare.
Returns
-------
bool
True if the pointclouds are equal.
False otherwise.
"""
if len(self) != len(other):
return False
A = sorted(self, key=lambda point: (point[0], point[1], point[2]))
B = sorted(other, key=lambda point: (point[0], point[1], point[2]))
return all(a == b for a, b in zip(A, B))
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_ply(cls, filepath):
"""Construct a pointcloud from a PLY file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PLY file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_pcd(cls, filepath):
"""Construct a pointcloud from a PCD file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PCD file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_bounds(cls, x, y, z, n):
"""Construct a point cloud within a given box.
Parameters
----------
x : float | tuple[float, float]
Size of the cloud in the X direction.
If a single value, the size is (0, x).
If a pair of values, the size is (x[0], x[1]).
y : float | tuple[float, float]
Size of the cloud in the Y direction.
If a single value, the size is (0, y).
If a pair of values, the size is (y[0], y[1]).
z : float | tuple[float, float]
Size of the cloud in the Z direction.
If a single value, the size is (0, z).
If a pair of values, the size is (z[0], z[1]).
n : int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Notes
-----
        The XYZ coordinates of the `n` points are randomly chosen within the provided `x`, `y`, and `z` bounds.
        Therefore, there is no guarantee that the bounds are part of the resulting coordinates.
Examples
--------
>>>
"""
try:
len(x)
except TypeError:
xmin = 0
xmax = x
else:
xmin, xmax = x
try:
len(y)
except TypeError:
ymin = 0
ymax = y
else:
ymin, ymax = y
try:
len(z)
except TypeError:
zmin = 0
zmax = z
else:
zmin, zmax = z
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
@classmethod
def from_box(cls, box, n):
"""Construct a point cloud within a given box.
Parameters
----------
box: :class:`compas.geometry.Box`
The axis aligned bounding box of the cloud.
n: int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Examples
--------
>>> from compas.geometry import Box
>>> cloud = Pointcloud.from_box(Box.from_width_height_depth(10, 3, 5), 100)
>>> all((-5 < x < +5) and (-2.5 < y < +2.5) and (-1.5 < z < +1.5) for x, y, z in cloud.points)
True
"""
points = box.points
x, y, z = zip(*points)
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
zmin, zmax = min(z), max(z)
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Apply a transformation to the pointcloud.
Parameters
----------
T : :class:`compas.geometry.Transformation`
The transformation.
Returns
-------
None
The cloud is modified in place.
"""
for index, point in enumerate(transform_points(self.points, T)):
self.points[index].x = point[0]
self.points[index].y = point[1]
self.points[index].z = point[2]
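# A minimal, illustrative usage sketch for the Pointcloud class above.
# It assumes compas is installed (the class builds on compas.geometry primitives);
# the numbers are arbitrary and only meant to show the API.
if __name__ == "__main__":
    cloud = Pointcloud.from_bounds(10, 5, 3, 100)   # 100 random points in [0, 10] x [0, 5] x [0, 3]
    print(len(cloud))                               # 100
    print(cloud.centroid)                           # roughly [5, 2.5, 1.5] for a large uniform sample
    print(cloud.bounding_box)                       # 8 corners of the axis-aligned bounding box
    # Equality ignores point order: same coordinates, shuffled, still compare equal.
    print(Pointcloud([[0, 0, 0], [1, 1, 1]]) == Pointcloud([[1, 1, 1], [0, 0, 0]]))  # True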
|
[
"compas.geometry.Point.from_data",
"compas.geometry.centroid_points",
"compas.geometry.Point",
"random.uniform",
"compas.geometry.bounding_box",
"compas.data.is_float3",
"compas.geometry.transform_points"
] |
[((1972, 2000), 'compas.geometry.centroid_points', 'centroid_points', (['self.points'], {}), '(self.points)\n', (1987, 2000), False, 'from compas.geometry import centroid_points\n'), ((2059, 2084), 'compas.geometry.bounding_box', 'bounding_box', (['self.points'], {}), '(self.points)\n', (2071, 2084), False, 'from compas.geometry import bounding_box\n'), ((1429, 1451), 'compas.geometry.Point.from_data', 'Point.from_data', (['point'], {}), '(point)\n', (1444, 1451), False, 'from compas.geometry import Point\n'), ((1883, 1896), 'compas.geometry.Point', 'Point', (['*point'], {}), '(*point)\n', (1888, 1896), False, 'from compas.geometry import Point\n'), ((6013, 6032), 'random.uniform', 'uniform', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (6020, 6032), False, 'from random import uniform\n'), ((6065, 6084), 'random.uniform', 'uniform', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (6072, 6084), False, 'from random import uniform\n'), ((6117, 6136), 'random.uniform', 'uniform', (['zmin', 'zmax'], {}), '(zmin, zmax)\n', (6124, 6136), False, 'from random import uniform\n'), ((7058, 7077), 'random.uniform', 'uniform', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (7065, 7077), False, 'from random import uniform\n'), ((7110, 7129), 'random.uniform', 'uniform', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (7117, 7129), False, 'from random import uniform\n'), ((7162, 7181), 'random.uniform', 'uniform', (['zmin', 'zmax'], {}), '(zmin, zmax)\n', (7169, 7181), False, 'from random import uniform\n'), ((7773, 7805), 'compas.geometry.transform_points', 'transform_points', (['self.points', 'T'], {}), '(self.points, T)\n', (7789, 7805), False, 'from compas.geometry import transform_points\n'), ((1139, 1155), 'compas.data.is_float3', 'is_float3', (['point'], {}), '(point)\n', (1148, 1155), False, 'from compas.data import is_float3\n')]
|
"""Handle regex conversions."""
from builtins import object
import re
import operator
from functools import reduce
import oa.errors
# Map of perl flags and the corresponding re ones.
FLAGS = {
"i": re.IGNORECASE,
"s": re.DOTALL,
"m": re.MULTILINE,
"x": re.VERBOSE,
}
DELIMS = {
"/": "/",
"{": "}",
"%": "%",
"<": ">",
"'": "'",
"~": "~",
",": ",",
"!": "!",
";": ";",
}
# Regex substitution for Perl -> Python compatibility
_CONVERTS = (
(re.compile(r"""
# Python does not support local extensions so remove those. For example:
# (?i:test) becomes (?:test)
(?<=\(\?) # Look-behind and match (?
(([adlupimsx-]*?)|(\^[?^alupimsx]*?)) # Capture the extension
(?=:) # Look-ahead and match the :
""", re.VERBOSE), r""),
(re.compile(r"""
# Python doesn't have support for expression such as \b?
# Replace it with (\b)?
(\\b) # Capture group that matches \b or \B
(?=\?) # Look-ahead that matches ?
""", re.VERBOSE | re.IGNORECASE), r"(\1)"),
(re.compile(r"""
# Python doesn't have support for "independent" subexpression (?>)
# Replace those with non capturing groups (?:)
(?<=\(\?) # Look-behind and match (?
(>) # Match >
""", re.VERBOSE), r":"),
)
class Pattern(object):
"""Abstract class for rule regex matching."""
def __init__(self, pattern):
self._pattern = pattern
def match(self, text):
raise NotImplementedError()
class MatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 1 if self._pattern.search(text) else 0
class NotMatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 0 if self._pattern.search(text) else 1
def perl2re(pattern, match_op="=~"):
"""Convert a Perl type regex to a Python one."""
# We don't need to consider the pre-flags
pattern = pattern.strip().lstrip("mgs")
delim = pattern[0]
try:
rev_delim = DELIMS[delim]
except KeyError:
raise oa.errors.InvalidRegex("Invalid regex delimiter %r in %r" %
(delim, pattern))
try:
pattern, flags_str = pattern.lstrip(delim).rsplit(rev_delim, 1)
except ValueError:
raise oa.errors.InvalidRegex("Invalid regex %r. Please make sure you "
"have escaped all the special characters "
"when you defined the regex in "
"configuration file" % pattern)
for conv_p, repl in _CONVERTS:
pattern = conv_p.sub(repl, pattern)
flags = reduce(operator.or_, (FLAGS.get(flag, 0) for flag in flags_str), 0)
try:
if match_op == "=~":
return MatchPattern(re.compile(pattern, flags))
        elif match_op == "!~":
            return NotMatchPattern(re.compile(pattern, flags))
except re.error as e:
raise oa.errors.InvalidRegex("Invalid regex %r: %s" % (pattern, e))
class Regex(object):
"""Customised regex class to work in lazy mode"""
compiled = None
def __init__(self, pattern, flags=0):
self.pattern = pattern
self.flags = flags
def compile(self):
from oa.config import LAZY_MODE
if LAZY_MODE:
return re.compile(self.pattern, self.flags)
elif not self.compiled:
self.compiled = re.compile(self.pattern, self.flags)
return self.compiled
def search(self, string):
return self.compile().search(string)
def match(self, string):
return self.compile().match(string)
def fullmatch(self, string):
return self.compile().fullmatch(string)
def sub(self, repl, string, count=0):
return self.compile().sub(repl, string, count)
def subn(self, repl, string, count=0):
        return self.compile().subn(repl, string, count)
def split(self, string, maxsplit=0):
return self.compile().split(string, maxsplit)
def findall(self, string):
return self.compile().findall(string)
def finditer(self, string):
return self.compile().finditer(string)
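# A minimal, illustrative sketch of how perl2re above is meant to be used.
# The patterns are made-up examples, not rules from any real configuration.
if __name__ == "__main__":
    matcher = perl2re(r"/foo\s+bar/i")             # Perl-style /.../i -> case-insensitive Python regex
    print(matcher.match("FOO   bar"))             # 1: MatchPattern.search found a hit
    print(perl2re("/foo/", "!~").match("baz"))    # 1: NotMatchPattern, pattern is absent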
|
[
"re.compile"
] |
[((501, 874), 're.compile', 're.compile', (['"""\n # Python does not support local extensions so remove those. For example:\n # (?i:test) becomes (?:test)\n\n (?<=\\\\(\\\\?) # Look-behind and match (?\n (([adlupimsx-]*?)|(\\\\^[?^alupimsx]*?)) # Capture the extension\n (?=:) # Look-ahead and match the :\n"""', 're.VERBOSE'], {}), '(\n """\n # Python does not support local extensions so remove those. For example:\n # (?i:test) becomes (?:test)\n\n (?<=\\\\(\\\\?) # Look-behind and match (?\n (([adlupimsx-]*?)|(\\\\^[?^alupimsx]*?)) # Capture the extension\n (?=:) # Look-ahead and match the :\n"""\n , re.VERBOSE)\n', (511, 874), False, 'import re\n'), ((876, 1138), 're.compile', 're.compile', (['"""\n # Python doesn\'t have support for expression such as \\\\b?\n # Replace it with (\\\\b)?\n\n (\\\\\\\\b) # Capture group that matches \\\\b or \\\\B\n (?=\\\\?) # Look-ahead that matches ?\n"""', '(re.VERBOSE | re.IGNORECASE)'], {}), '(\n """\n # Python doesn\'t have support for expression such as \\\\b?\n # Replace it with (\\\\b)?\n\n (\\\\\\\\b) # Capture group that matches \\\\b or \\\\B\n (?=\\\\?) # Look-ahead that matches ?\n"""\n , re.VERBOSE | re.IGNORECASE)\n', (886, 1138), False, 'import re\n'), ((1140, 1385), 're.compile', 're.compile', (['"""\n # Python doesn\'t have support for "independent" subexpression (?>)\n # Replace those with non capturing groups (?:)\n\n (?<=\\\\(\\\\?) # Look-behind and match (?\n (>) # Match >\n"""', 're.VERBOSE'], {}), '(\n """\n # Python doesn\'t have support for "independent" subexpression (?>)\n # Replace those with non capturing groups (?:)\n\n (?<=\\\\(\\\\?) # Look-behind and match (?\n (>) # Match >\n"""\n , re.VERBOSE)\n', (1150, 1385), False, 'import re\n'), ((3520, 3556), 're.compile', 're.compile', (['self.pattern', 'self.flags'], {}), '(self.pattern, self.flags)\n', (3530, 3556), False, 'import re\n'), ((3005, 3031), 're.compile', 're.compile', (['pattern', 'flags'], {}), '(pattern, flags)\n', (3015, 3031), False, 'import re\n'), ((3617, 3653), 're.compile', 're.compile', (['self.pattern', 'self.flags'], {}), '(self.pattern, self.flags)\n', (3627, 3653), False, 'import re\n'), ((3087, 3113), 're.compile', 're.compile', (['pattern', 'flags'], {}), '(pattern, flags)\n', (3097, 3113), False, 'import re\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from kalman_filter import KalmanFilter
raw_data = np.loadtxt("barometer_data.txt")
# Truncate raw data (it's super long)
raw_data = raw_data[:raw_data.size//4]
raw_data_step = np.loadtxt("barometer_data_step.txt")
t1 = np.arange(0, raw_data.size/12.5, 1/12.5)
t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)
fig1 = plt.figure("Data")
ax1 = fig1.add_subplot(121)
ax2 = fig1.add_subplot(122)
fig1.subplots_adjust(bottom=0.25)
[unfiltered_raw_line] = ax1.plot(t1, raw_data)
[unfiltered_step_line] = ax2.plot(t2, raw_data_step)
def filter_data(data, x0, P, Q, R):
filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R)
x_out = np.zeros(data.size)
P_out = np.zeros(data.size)
for k in np.arange(1, data.size):
x_out[k], P_out[k] = filter1.update(0, data[k])
return x_out, P_out
P0 = 2
Q0 = 1e-4
[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])
[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])
P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])
Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])
P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)
Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)
def sliders_on_changed(val):
P = P_slider.val
Q = Q_slider.val
x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var())
filtered_raw_line.set_ydata(x_raw_new)
x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var())
    filtered_step_line.set_ydata(x_step_new)
    fig1.canvas.draw_idle()  # request a redraw so the refiltered lines refresh immediately
P_slider.on_changed(sliders_on_changed)
Q_slider.on_changed(sliders_on_changed)
plt.show()
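# Hedged sketch of the scalar predict/update step that kalman_filter.KalmanFilter
# presumably implements, inferred from the constructor KalmanFilter(x0, P, A, B, H, Q, R)
# and the update(u, z) call used above; the actual module may differ.
class ScalarKalmanSketch:
    def __init__(self, x0, P, A, B, H, Q, R):
        self.x, self.P = x0, P
        self.A, self.B, self.H, self.Q, self.R = A, B, H, Q, R

    def update(self, u, z):
        # Predict with the process model x_k = A*x_{k-1} + B*u.
        x_pred = self.A * self.x + self.B * u
        P_pred = self.A * self.P * self.A + self.Q
        # Correct with the measurement z = H*x + noise (variance R).
        K = P_pred * self.H / (self.H * P_pred * self.H + self.R)
        self.x = x_pred + K * (z - self.H * x_pred)
        self.P = (1.0 - K * self.H) * P_pred
        return self.x, self.P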
|
[
"kalman_filter.KalmanFilter",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.widgets.Slider",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((140, 172), 'numpy.loadtxt', 'np.loadtxt', (['"""barometer_data.txt"""'], {}), "('barometer_data.txt')\n", (150, 172), True, 'import numpy as np\n'), ((266, 303), 'numpy.loadtxt', 'np.loadtxt', (['"""barometer_data_step.txt"""'], {}), "('barometer_data_step.txt')\n", (276, 303), True, 'import numpy as np\n'), ((309, 353), 'numpy.arange', 'np.arange', (['(0)', '(raw_data.size / 12.5)', '(1 / 12.5)'], {}), '(0, raw_data.size / 12.5, 1 / 12.5)\n', (318, 353), True, 'import numpy as np\n'), ((355, 404), 'numpy.arange', 'np.arange', (['(0)', '(raw_data_step.size / 12.5)', '(1 / 12.5)'], {}), '(0, raw_data_step.size / 12.5, 1 / 12.5)\n', (364, 404), True, 'import numpy as np\n'), ((409, 427), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Data"""'], {}), "('Data')\n", (419, 427), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1276), 'matplotlib.widgets.Slider', 'Slider', (['P_slider_ax', '"""P"""', '(0.5)', '(5)'], {'valinit': 'P0'}), "(P_slider_ax, 'P', 0.5, 5, valinit=P0)\n", (1238, 1276), False, 'from matplotlib.widgets import Slider\n'), ((1288, 1339), 'matplotlib.widgets.Slider', 'Slider', (['Q_slider_ax', '"""Q"""', '(0.0001)', '(0.001)'], {'valinit': 'Q0'}), "(Q_slider_ax, 'Q', 0.0001, 0.001, valinit=Q0)\n", (1294, 1339), False, 'from matplotlib.widgets import Slider\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((671, 705), 'kalman_filter.KalmanFilter', 'KalmanFilter', (['x0', 'P', '(1)', '(0)', '(1)', 'Q', 'R'], {}), '(x0, P, 1, 0, 1, Q, R)\n', (683, 705), False, 'from kalman_filter import KalmanFilter\n'), ((723, 742), 'numpy.zeros', 'np.zeros', (['data.size'], {}), '(data.size)\n', (731, 742), True, 'import numpy as np\n'), ((755, 774), 'numpy.zeros', 'np.zeros', (['data.size'], {}), '(data.size)\n', (763, 774), True, 'import numpy as np\n'), ((797, 820), 'numpy.arange', 'np.arange', (['(1)', 'data.size'], {}), '(1, data.size)\n', (806, 820), True, 'import numpy as np\n')]
|
import heapq
from typing import List
class Solution:
def get_number_of_backlog_orders(self, orders: List[List[int]]) -> int:
sell_backlog = []
buy_backlog = []
for price, amount, order_type in orders:
if order_type == 0:
while amount > 0:
if sell_backlog and sell_backlog[0][0] <= price:
sell_price, sell_amount = heapq.heappop(sell_backlog)
if sell_amount > amount:
heapq.heappush(sell_backlog,
(sell_price, sell_amount - amount))
amount = 0
else:
amount -= sell_amount
else:
heapq.heappush(buy_backlog, (-price, amount))
amount = 0
else:
while amount > 0:
if buy_backlog and -buy_backlog[0][0] >= price:
buy_price, buy_amount = heapq.heappop(buy_backlog)
if buy_amount > amount:
heapq.heappush(buy_backlog,
(buy_price, buy_amount - amount))
amount = 0
else:
amount -= buy_amount
else:
heapq.heappush(sell_backlog, (price, amount))
amount = 0
result = 0
for _, amount in sell_backlog:
result += amount
for _, amount in buy_backlog:
result += amount
return result % (10**9 + 7)
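# A small illustrative check using the classic example for this backlog-orders
# problem: two buys and two sells partially match, leaving 6 orders in the backlog.
if __name__ == "__main__":
    orders = [[10, 5, 0], [15, 2, 1], [25, 1, 1], [30, 4, 0]]
    print(Solution().get_number_of_backlog_orders(orders))  # 6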
|
[
"heapq.heappop",
"heapq.heappush"
] |
[((416, 443), 'heapq.heappop', 'heapq.heappop', (['sell_backlog'], {}), '(sell_backlog)\n', (429, 443), False, 'import heapq\n'), ((798, 843), 'heapq.heappush', 'heapq.heappush', (['buy_backlog', '(-price, amount)'], {}), '(buy_backlog, (-price, amount))\n', (812, 843), False, 'import heapq\n'), ((1047, 1073), 'heapq.heappop', 'heapq.heappop', (['buy_backlog'], {}), '(buy_backlog)\n', (1060, 1073), False, 'import heapq\n'), ((1423, 1468), 'heapq.heappush', 'heapq.heappush', (['sell_backlog', '(price, amount)'], {}), '(sell_backlog, (price, amount))\n', (1437, 1468), False, 'import heapq\n'), ((521, 585), 'heapq.heappush', 'heapq.heappush', (['sell_backlog', '(sell_price, sell_amount - amount)'], {}), '(sell_backlog, (sell_price, sell_amount - amount))\n', (535, 585), False, 'import heapq\n'), ((1150, 1211), 'heapq.heappush', 'heapq.heappush', (['buy_backlog', '(buy_price, buy_amount - amount)'], {}), '(buy_backlog, (buy_price, buy_amount - amount))\n', (1164, 1211), False, 'import heapq\n')]
|