code stringlengths 22–1.05M | apis listlengths 1–3.31k | extract_api stringlengths 75–3.25M |
---|---|---|
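Each row below pairs a source file (code) with the fully-qualified calls found in it (apis) and a string of extraction tuples (extract_api). As a rough guide to the record layout, here is a minimal Python sketch for decoding one extract_api cell; the tuple field names are inferred from the rows that follow and are illustrative, not an official schema:

import ast

def decode_extract_api(cell: str):
    # The cell is a Python-literal list of tuples, so literal_eval is sufficient.
    for entry in ast.literal_eval(cell):
        # Inferred layout: call span, qualified API name, local call name,
        # (positional args, keyword args), call source, name span, flag, import line.
        call_span, api, name, argspec, call_src, name_span, flag, import_stmt = entry
        yield {
            "span": call_span,
            "api": api,
            "call": name + call_src.strip(),
            "import": import_stmt.strip(),
        }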
from random import choice, randint
from re import sub, split, findall
from string import ascii_letters
from subprocess import PIPE, Popen
from sys import argv, executable, stderr
from .responses import pronouns, reactions, remarks
def owoify(text):
if type(text) == bytes:
text = str(text)[2:-1].replace("\\n", "\n")
text = sub("[rlv]", "w", text)
text = sub("[RLV]", "W", text)
text = sub("ee", "wee", text)
# This is to convert the string into an array whilst maintaining whitespace
words = split(r"\s+", text)
whitespace = findall(r"\s+", text)
text = [None] * (len(words) + len(whitespace))
text[::2], text[1::2] = words, whitespace
# Random stutter
for idx, word in enumerate(text):
if len(word) > 0:
if word[0] in ascii_letters and word[0].lower() not in "aeiouw":
if randint(1, 10) == 1:
text[idx] = f"{word[0]}-{word}"
text = "".join(text)
return text
def main():
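# Re-run the given command (argv[1:]) under the current interpreter, owoify every stderr line it emits, and prepend a remark whenever a traceback starts.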
process = Popen([executable] + argv[1:], stderr=PIPE)
while process.poll() is None:
for line in iter(process.stderr.readline, b""):
if line == b"Traceback (most recent call last):\n":
# Easter egg :)
if randint(1, 10) == 1:
stderr.write(f"{choice(pronouns)}, {choice(remarks)}, you sussy baka {choice(reactions)}\n")
else:
stderr.write(f"{choice(pronouns)}, {choice(remarks)} {choice(reactions)}\n")
stderr.write(owoify(line))
if __name__ == "__main__":
main()
|
[
"re.split",
"random.choice",
"subprocess.Popen",
"re.sub",
"re.findall",
"random.randint"
] |
[((344, 367), 're.sub', 'sub', (['"""[rlv]"""', '"""w"""', 'text'], {}), "('[rlv]', 'w', text)\n", (347, 367), False, 'from re import sub, split, findall\n'), ((379, 402), 're.sub', 'sub', (['"""[RLV]"""', '"""W"""', 'text'], {}), "('[RLV]', 'W', text)\n", (382, 402), False, 'from re import sub, split, findall\n'), ((414, 436), 're.sub', 'sub', (['"""ee"""', '"""wee"""', 'text'], {}), "('ee', 'wee', text)\n", (417, 436), False, 'from re import sub, split, findall\n'), ((529, 548), 're.split', 'split', (['"""\\\\s+"""', 'text'], {}), "('\\\\s+', text)\n", (534, 548), False, 'from re import sub, split, findall\n'), ((566, 587), 're.findall', 'findall', (['"""\\\\s+"""', 'text'], {}), "('\\\\s+', text)\n", (573, 587), False, 'from re import sub, split, findall\n'), ((1012, 1055), 'subprocess.Popen', 'Popen', (['([executable] + argv[1:])'], {'stderr': 'PIPE'}), '([executable] + argv[1:], stderr=PIPE)\n', (1017, 1055), False, 'from subprocess import PIPE, Popen\n'), ((868, 882), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (875, 882), False, 'from random import choice, randint\n'), ((1262, 1276), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1269, 1276), False, 'from random import choice, randint\n'), ((1319, 1335), 'random.choice', 'choice', (['pronouns'], {}), '(pronouns)\n', (1325, 1335), False, 'from random import choice, randint\n'), ((1339, 1354), 'random.choice', 'choice', (['remarks'], {}), '(remarks)\n', (1345, 1354), False, 'from random import choice, randint\n'), ((1373, 1390), 'random.choice', 'choice', (['reactions'], {}), '(reactions)\n', (1379, 1390), False, 'from random import choice, randint\n'), ((1454, 1470), 'random.choice', 'choice', (['pronouns'], {}), '(pronouns)\n', (1460, 1470), False, 'from random import choice, randint\n'), ((1474, 1489), 'random.choice', 'choice', (['remarks'], {}), '(remarks)\n', (1480, 1489), False, 'from random import choice, randint\n'), ((1492, 1509), 'random.choice', 'choice', (['reactions'], {}), '(reactions)\n', (1498, 1509), False, 'from random import choice, randint\n')]
|
from app import App
import requests
import json
import polling2
from behave import step
from openshift import Openshift
from util import substitute_scenario_id
from string import Template
class GenericTestApp(App):
deployment_name_pattern = "{name}"
def __init__(self, name, namespace, app_image="ghcr.io/multi-arch/sbo-generic-test-app:latest"):
App.__init__(self, name, namespace, app_image, "8080")
def get_env_var_value(self, name):
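# Poll the app's /env/{name} endpoint until it answers 200 (value present) or 404 (not set), tolerating connection errors while the route becomes reachable.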
resp = polling2.poll(lambda: requests.get(url=f"http://{self.route_url}/env/{name}"),
check_success=lambda r: r.status_code in [200, 404], step=5, timeout=400, ignore_exceptions=(requests.exceptions.ConnectionError,))
print(f'env endpoint response: {resp.text} code: {resp.status_code}')
if resp.status_code == 200:
return json.loads(resp.text)
else:
return None
def format_pattern(self, pattern):
return pattern.format(name=self.name)
def get_file_value(self, file_path):
resp = polling2.poll(lambda: requests.get(url=f"http://{self.route_url}{file_path}"),
check_success=lambda r: r.status_code == 200, step=5, timeout=400, ignore_exceptions=(requests.exceptions.ConnectionError,))
print(f'file endpoint response: {resp.text} code: {resp.status_code}')
return resp.text
def assert_file_not_exist(self, file_path):
polling2.poll(lambda: requests.get(url=f"http://{self.route_url}{file_path}"),
check_success=lambda r: r.status_code == 404, step=5, timeout=400, ignore_exceptions=(requests.exceptions.ConnectionError,))
def set_label(self, label):
self.openshift.set_label(self.name, label, self.namespace)
@step(u'Generic test application "{application_name}" is running')
@step(u'Generic test application "{application_name}" is running with binding root as "{bindingRoot}"')
@step(u'Generic test application is running')
@step(u'Generic test application is running with binding root as "{bindingRoot}"')
def is_running(context, application_name=None, bindingRoot=None, asDeploymentConfig=False):
if application_name is None:
application_name = substitute_scenario_id(context)
application = GenericTestApp(application_name, context.namespace.name)
if asDeploymentConfig:
application.resource = "deploymentconfig"
if not application.is_running():
print("application is not running, trying to import it")
application.install(bindingRoot=bindingRoot)
context.application = application
# save the generation number
context.original_application_generation = application.get_generation()
context.latest_application_generation = application.get_generation()
@step(u'Generic test application is running as deployment config')
def is_running_deployment_config(context):
is_running(context, asDeploymentConfig=True)
@step(u'The application env var "{name}" has value "{value}"')
def check_env_var_value(context, name, value):
value = substitute_scenario_id(context, value)
found = polling2.poll(lambda: context.application.get_env_var_value(name) == value, step=5, timeout=400)
assert found, f'Env var "{name}" should contain value "{value}"'
@step(u'The env var "{name}" is not available to the application')
def check_env_var_existence(context, name):
output = polling2.poll(lambda: context.application.get_env_var_value(name) is None, step=5, timeout=400)
assert output, f'Env var "{name}" should not exist'
@step(u'Content of file "{file_path}" in application pod is')
def check_file_value(context, file_path):
value = Template(context.text.strip()).substitute(NAMESPACE=context.namespace.name)
resource = substitute_scenario_id(context, file_path)
polling2.poll(lambda: context.application.get_file_value(resource) == value, step=5, timeout=400)
@step(u'File "{file_path}" exists in application pod')
def check_file_exists(context, file_path):
resource = substitute_scenario_id(context, file_path)
polling2.poll(lambda: context.application.get_file_value(resource) != "", step=5, timeout=400)
@step(u'File "{file_path}" is unavailable in application pod')
def check_file_unavailable(context, file_path):
context.application.assert_file_not_exist(file_path)
@step(u'Test applications "{first_app_name}" and "{second_app_name}" is running')
def are_two_apps_running(context, first_app_name, second_app_name, bindingRoot=None):
application1 = GenericTestApp(first_app_name, context.namespace.name)
if not application1.is_running():
print("application1 is not running, trying to import it")
application1.install(bindingRoot=bindingRoot)
context.application1 = application1
application2 = GenericTestApp(second_app_name, context.namespace.name)
if not application2.is_running():
print("application2 is not running, trying to import it")
application2.install(bindingRoot=bindingRoot)
context.application2 = application2
@step(u'The common label "{label}" is set for both apps')
def set_common_label(context, label):
context.application1.set_label(f"{label}")
context.application2.set_label(f"{label}")
@step(u'The application env var "{name}" has value "{value}" in both apps')
def check_env_var_value_in_both_apps(context, name, value):
polling2.poll(lambda: context.application1.get_env_var_value(name) == value, step=5, timeout=400)
polling2.poll(lambda: context.application2.get_env_var_value(name) == value, step=5, timeout=400)
@step(u'The container declared in application resource contains env "{envVar}" set only once')
@step(u'The container declared in application "{app_name}" resource contains env "{envVar}" set only once')
def check_env_var_count_set_on_container(context, envVar, app_name=None):
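# Read the deployment's container env entries and assert that the variable is declared exactly once.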
openshift = Openshift()
if app_name is None:
app_name = context.application.name
app_name = substitute_scenario_id(context, app_name)
env = openshift.get_deployment_env_info(app_name, context.namespace.name)
assert str(env).count(envVar) == 1
|
[
"json.loads",
"behave.step",
"util.substitute_scenario_id",
"requests.get",
"openshift.Openshift",
"app.App.__init__"
] |
[((1776, 1841), 'behave.step', 'step', (['u"""Generic test application "{application_name}" is running"""'], {}), '(u\'Generic test application "{application_name}" is running\')\n', (1780, 1841), False, 'from behave import step\n'), ((1843, 1955), 'behave.step', 'step', (['u"""Generic test application "{application_name}" is running with binding root as "{bindingRoot}\\""""'], {}), '(\n u\'Generic test application "{application_name}" is running with binding root as "{bindingRoot}"\'\n )\n', (1847, 1955), False, 'from behave import step\n'), ((1947, 1991), 'behave.step', 'step', (['u"""Generic test application is running"""'], {}), "(u'Generic test application is running')\n", (1951, 1991), False, 'from behave import step\n'), ((1993, 2084), 'behave.step', 'step', (['u"""Generic test application is running with binding root as "{bindingRoot}\\""""'], {}), '(\n u\'Generic test application is running with binding root as "{bindingRoot}"\'\n )\n', (1997, 2084), False, 'from behave import step\n'), ((2789, 2854), 'behave.step', 'step', (['u"""Generic test application is running as deployment config"""'], {}), "(u'Generic test application is running as deployment config')\n", (2793, 2854), False, 'from behave import step\n'), ((2950, 3011), 'behave.step', 'step', (['u"""The application env var "{name}" has value "{value}\\""""'], {}), '(u\'The application env var "{name}" has value "{value}"\')\n', (2954, 3011), False, 'from behave import step\n'), ((3291, 3356), 'behave.step', 'step', (['u"""The env var "{name}" is not available to the application"""'], {}), '(u\'The env var "{name}" is not available to the application\')\n', (3295, 3356), False, 'from behave import step\n'), ((3569, 3629), 'behave.step', 'step', (['u"""Content of file "{file_path}" in application pod is"""'], {}), '(u\'Content of file "{file_path}" in application pod is\')\n', (3573, 3629), False, 'from behave import step\n'), ((3923, 3976), 'behave.step', 'step', (['u"""File "{file_path}" exists in application pod"""'], {}), '(u\'File "{file_path}" exists in application pod\')\n', (3927, 3976), False, 'from behave import step\n'), ((4180, 4241), 'behave.step', 'step', (['u"""File "{file_path}" is unavailable in application pod"""'], {}), '(u\'File "{file_path}" is unavailable in application pod\')\n', (4184, 4241), False, 'from behave import step\n'), ((4350, 4435), 'behave.step', 'step', (['u"""Test applications "{first_app_name}" and "{second_app_name}" is running"""'], {}), '(u\'Test applications "{first_app_name}" and "{second_app_name}" is running\'\n )\n', (4354, 4435), False, 'from behave import step\n'), ((5066, 5122), 'behave.step', 'step', (['u"""The common label "{label}" is set for both apps"""'], {}), '(u\'The common label "{label}" is set for both apps\')\n', (5070, 5122), False, 'from behave import step\n'), ((5258, 5332), 'behave.step', 'step', (['u"""The application env var "{name}" has value "{value}" in both apps"""'], {}), '(u\'The application env var "{name}" has value "{value}" in both apps\')\n', (5262, 5332), False, 'from behave import step\n'), ((5600, 5703), 'behave.step', 'step', (['u"""The container declared in application resource contains env "{envVar}" set only once"""'], {}), '(\n u\'The container declared in application resource contains env "{envVar}" set only once\'\n )\n', (5604, 5703), False, 'from behave import step\n'), ((5695, 5811), 'behave.step', 'step', (['u"""The container declared in application "{app_name}" resource contains env "{envVar}" set only once"""'], {}), '(\n u\'The 
container declared in application "{app_name}" resource contains env "{envVar}" set only once\'\n )\n', (5699, 5811), False, 'from behave import step\n'), ((3071, 3109), 'util.substitute_scenario_id', 'substitute_scenario_id', (['context', 'value'], {}), '(context, value)\n', (3093, 3109), False, 'from util import substitute_scenario_id\n'), ((3775, 3817), 'util.substitute_scenario_id', 'substitute_scenario_id', (['context', 'file_path'], {}), '(context, file_path)\n', (3797, 3817), False, 'from util import substitute_scenario_id\n'), ((4035, 4077), 'util.substitute_scenario_id', 'substitute_scenario_id', (['context', 'file_path'], {}), '(context, file_path)\n', (4057, 4077), False, 'from util import substitute_scenario_id\n'), ((5892, 5903), 'openshift.Openshift', 'Openshift', ([], {}), '()\n', (5901, 5903), False, 'from openshift import Openshift\n'), ((5988, 6029), 'util.substitute_scenario_id', 'substitute_scenario_id', (['context', 'app_name'], {}), '(context, app_name)\n', (6010, 6029), False, 'from util import substitute_scenario_id\n'), ((367, 421), 'app.App.__init__', 'App.__init__', (['self', 'name', 'namespace', 'app_image', '"""8080"""'], {}), "(self, name, namespace, app_image, '8080')\n", (379, 421), False, 'from app import App\n'), ((2227, 2258), 'util.substitute_scenario_id', 'substitute_scenario_id', (['context'], {}), '(context)\n', (2249, 2258), False, 'from util import substitute_scenario_id\n'), ((850, 871), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (860, 871), False, 'import json\n'), ((499, 554), 'requests.get', 'requests.get', ([], {'url': 'f"""http://{self.route_url}/env/{name}"""'}), "(url=f'http://{self.route_url}/env/{name}')\n", (511, 554), False, 'import requests\n'), ((1075, 1130), 'requests.get', 'requests.get', ([], {'url': 'f"""http://{self.route_url}{file_path}"""'}), "(url=f'http://{self.route_url}{file_path}')\n", (1087, 1130), False, 'import requests\n'), ((1469, 1524), 'requests.get', 'requests.get', ([], {'url': 'f"""http://{self.route_url}{file_path}"""'}), "(url=f'http://{self.route_url}{file_path}')\n", (1481, 1524), False, 'import requests\n')]
|
import email
import logging
import re
from imapclient import IMAPClient
class IMAPError(IOError):
pass
class ImapWrapper:
"""A wrapper around imaplib, since that's a bit
lower-level than I'd prefer to work with."""
#This regex is:
# list of flags in parens
# quoted delimiter
# possible-quoted folder name
list_matcher = re.compile(r'^\(([^()]*)\) "([^"]*)" (([^" ]+)|"([^"]*)")$')
def __init__(self, host, user, pw, **kwargs):
"""kwargs: Paassed through to IMAPClient"""
self.M = IMAPClient(host, **kwargs)
self.M.login(user, pw)
self._selected_folder = None
self._update_folders()
def logout(self):
self.M.logout()
def _update_folders(self):
listing = self.M.list_folders()
self.folder_list = [name for (flags, delim, name) in listing]
def ensure_folder(self, name):
"""Return True if the folder was created, False if it already existed."""
l = logging.getLogger(__name__)
search_name = name[:-1] if name.endswith('/') else name
if not any(n == search_name for n in self.folder_list):
rslt = self.M.create_folder(name)
l.info(f"Folder create result: {rslt}")
self.folder_list.append(search_name)
return True
else:
return False
def fetch_messages(self, folder, *search_args):
l = logging.getLogger(__name__)
ret = []
self.select_folder(folder)
message_ids = self.M.search(search_args)
message_dict = self.M.fetch(message_ids, 'RFC822')
for msg in message_dict.values():
l.debug("Got message: %s", msg)
msg = email.message_from_string(msg[b'RFC822'].decode('UTF-8'))
ret.append(msg)
return ret
def check_folder_for_message_ids(self, folder, msgids):
self.select_folder(folder)
search_ids = []
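# IMAP's OR search key takes exactly two operands, so each extra Message-Id is chained by prepending another OR.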
for msgid in msgids:
if len(search_ids) > 0:
search_ids.insert(0, 'OR')
search_ids.append(['HEADER', 'Message-Id', msgid])
message_numbers = self.M.search(['NOT', 'DELETED', search_ids])
message_envelopes = self.M.fetch(message_numbers, 'ENVELOPE')
have_ids = []
for msgdata in message_envelopes.values():
envelope = msgdata[b'ENVELOPE']
have_ids.append(envelope.message_id)
return have_ids
def append(self, folder_name, email):
response = self.M.append(folder_name, str(email).encode('utf-8'))
logging.getLogger(__name__).debug("Append response: %s", response)
# FIXME sets the context folder
def select_folder(self, name):
if self._selected_folder == name:
return
dtl = self.M.select_folder(name)
logging.getLogger(__name__).debug("select_folder = %s", dtl)
self._selected_folder = name
def create_subscribe_folder(self, name):
created = self.ensure_folder(name)
if created:
res = self.M.subscribe_folder(name)
logging.getLogger(__name__).debug("Subscribe result: %s", res)
|
[
"logging.getLogger",
"imapclient.IMAPClient",
"re.compile"
] |
[((357, 418), 're.compile', 're.compile', (['"""^\\\\(([^()]*)\\\\) "([^"]*)" (([^" ]+)|"([^"]*)")$"""'], {}), '(\'^\\\\(([^()]*)\\\\) "([^"]*)" (([^" ]+)|"([^"]*)")$\')\n', (367, 418), False, 'import re\n'), ((537, 563), 'imapclient.IMAPClient', 'IMAPClient', (['host'], {}), '(host, **kwargs)\n', (547, 563), False, 'from imapclient import IMAPClient\n'), ((982, 1009), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (999, 1009), False, 'import logging\n'), ((1413, 1440), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1430, 1440), False, 'import logging\n'), ((2563, 2590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2580, 2590), False, 'import logging\n'), ((2812, 2839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2829, 2839), False, 'import logging\n'), ((3079, 3106), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3096, 3106), False, 'import logging\n')]
|
import sys
from . import app
sys.path.append(str(app.config['LIB_PATH']))
from musicautobot.music_transformer import *
from musicautobot.config import *
from flask import Response, send_from_directory, send_file, request, jsonify
from .save import to_s3
import torch
import traceback
torch.set_num_threads(4)
data = load_data(app.config['DATA_PATH'], app.config['DATA_SAVE_NAME'], num_workers=1)
learn = music_model_learner(data, pretrained_path=app.config['MUSIC_MODEL_PATH'])
if torch.cuda.is_available(): learn.model.cuda()
# learn.to_fp16(loss_scale=512) # fp16 not supported for cpu - https://github.com/pytorch/pytorch/issues/17699
@app.route('/predict/midi', methods=['POST'])
def predict_midi():
args = request.form.to_dict()
midi = request.files['midi'].read()
print('THE ARGS PASSED:', args)
bpm = float(args['bpm']) # (AS) TODO: get bpm from midi file instead
temperatures = (float(args.get('noteTemp', 1.2)), float(args.get('durationTemp', 0.8)))
n_words = int(args.get('nSteps', 200))
seed_len = int(args.get('seedLen', 12))
# debugging 1 - send exact midi back
# with open('/tmp/test.mid', 'wb') as f:
# f.write(midi)
# return send_from_directory('/tmp', 'test.mid', mimetype='audio/midi')
# debugging 2 - test music21 conversion
# stream = file2stream(midi) # 1.
# debugging 3 - test npenc conversion
# seed_np = midi2npenc(midi) # music21 can handle bytes directly
# stream = npenc2stream(seed_np, bpm=bpm)
# debugging 4 - midi in, convert, midi out
# stream = file2stream(midi) # 1.
# midi_in = Path(stream.write("musicxml"))
# print('Midi in:', midi_in)
# stream_sep = separate_melody_chord(stream)
# midi_out = Path(stream_sep.write("midi"))
# print('Midi out:', midi_out)
# s3_id = to_s3(midi_out, args)
# result = {
# 'result': s3_id
# }
# return jsonify(result)
# Main logic
try:
full = predict_from_midi(learn, midi=midi, n_words=n_words, seed_len=seed_len, temperatures=temperatures)
stream = separate_melody_chord(full.to_stream(bpm=bpm))
midi_out = Path(stream.write("midi"))
print('Wrote to temporary file:', midi_out)
except Exception as e:
traceback.print_exc()
return jsonify({'error': f'Failed to predict: {e}'})
s3_id = to_s3(midi_out, args)
result = {
'result': s3_id
}
return jsonify(result)
# return send_from_directory(midi_out.parent, midi_out.name, mimetype='audio/midi')
# @app.route('/midi/song/<path:sid>')
# def get_song_midi(sid):
# return send_from_directory(file_path/data_dir, htlist[sid]['midi'], mimetype='audio/midi')
@app.route('/midi/convert', methods=['POST'])
def convert_midi():
args = request.form.to_dict()
if 'midi' in request.files:
midi = request.files['midi'].read()
elif 'midi_path' in args:
midi = args['midi_path']
stream = file2stream(midi) # 1.
# stream = file2stream(midi).chordify() # 1.
stream_out = Path(stream.write('musicxml'))
return send_from_directory(stream_out.parent, stream_out.name, mimetype='xml')
|
[
"flask.send_from_directory",
"torch.set_num_threads",
"flask.request.form.to_dict",
"torch.cuda.is_available",
"traceback.print_exc",
"flask.jsonify"
] |
[((287, 311), 'torch.set_num_threads', 'torch.set_num_threads', (['(4)'], {}), '(4)\n', (308, 311), False, 'import torch\n'), ((486, 511), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (509, 511), False, 'import torch\n'), ((721, 743), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (741, 743), False, 'from flask import Response, send_from_directory, send_file, request, jsonify\n'), ((2426, 2441), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (2433, 2441), False, 'from flask import Response, send_from_directory, send_file, request, jsonify\n'), ((2771, 2793), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (2791, 2793), False, 'from flask import Response, send_from_directory, send_file, request, jsonify\n'), ((3077, 3148), 'flask.send_from_directory', 'send_from_directory', (['stream_out.parent', 'stream_out.name'], {'mimetype': '"""xml"""'}), "(stream_out.parent, stream_out.name, mimetype='xml')\n", (3096, 3148), False, 'from flask import Response, send_from_directory, send_file, request, jsonify\n'), ((2252, 2273), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2271, 2273), False, 'import traceback\n'), ((2289, 2334), 'flask.jsonify', 'jsonify', (["{'error': f'Failed to predict: {e}'}"], {}), "({'error': f'Failed to predict: {e}'})\n", (2296, 2334), False, 'from flask import Response, send_from_directory, send_file, request, jsonify\n')]
|
from __future__ import absolute_import, division, print_function
from tests.core import mock
from trakt import Trakt
from httmock import HTTMock
import pytest
def test_likes():
with HTTMock(mock.fixtures, mock.unknown):
with Trakt.configuration.auth('mock', 'mock'):
likes = Trakt['users'].likes()
assert likes is not None
likes = list(likes)
assert len(likes) == 3
assert likes[0].keys == [
('trakt', 1519)
]
assert likes[1].keys == [
('trakt', '1238362'),
('slug', 'star-wars-machete')
]
assert likes[2].keys == [
('trakt', '840781'),
('slug', 'star-wars-timeline')
]
def test_likes_invalid_response():
with HTTMock(mock.fixtures, mock.unknown):
likes = Trakt['users'].likes()
assert likes is None
def test_likes_invalid_type():
with HTTMock(mock.fixtures, mock.unknown):
with pytest.raises(ValueError):
likes = Trakt['users'].likes('invalid')
assert likes is not None
likes = list(likes)
|
[
"trakt.Trakt.configuration.auth",
"httmock.HTTMock",
"pytest.raises"
] |
[((190, 226), 'httmock.HTTMock', 'HTTMock', (['mock.fixtures', 'mock.unknown'], {}), '(mock.fixtures, mock.unknown)\n', (197, 226), False, 'from httmock import HTTMock\n'), ((741, 777), 'httmock.HTTMock', 'HTTMock', (['mock.fixtures', 'mock.unknown'], {}), '(mock.fixtures, mock.unknown)\n', (748, 777), False, 'from httmock import HTTMock\n'), ((886, 922), 'httmock.HTTMock', 'HTTMock', (['mock.fixtures', 'mock.unknown'], {}), '(mock.fixtures, mock.unknown)\n', (893, 922), False, 'from httmock import HTTMock\n'), ((241, 281), 'trakt.Trakt.configuration.auth', 'Trakt.configuration.auth', (['"""mock"""', '"""mock"""'], {}), "('mock', 'mock')\n", (265, 281), False, 'from trakt import Trakt\n'), ((937, 962), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (950, 962), False, 'import pytest\n')]
|
import sys
import PyQt5.QtWidgets as qtw
import PyQt5.QtCore as qtc
from Image import Image
from main_layout import Ui_MainWindow
import logging
import os
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(asctime)s - %(message)s')
file_handler = logging.FileHandler('log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class MainWindow(qtw.QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.show()
self.images = {
'1': {
'original': self.ui.image_1_original,
'filtered': self.ui.image_1_after_filter,
'picker': self.ui.image_1_pick
},
'2': {
'original': self.ui.image_2_original,
'filtered': self.ui.image_2_after_filter,
'picker': self.ui.image_2_pick
}
}
self.img = {}
self.modes = {'Output 1': '', 'Output 2': ''}
self.output_channels = {
'Output 1': self.ui.output_1,
'Output 2': self.ui.output_2
}
self.output_channels_controlers = {
'': {
'select1': '',
'select2': '',
'slider1': 0,
'slider2': 0,
'type1': '',
'type2': '',
'percentage1': 0,
'percentage2': 0,
},
'Output 1': {
'select1': '',
'select2': '',
'slider1': 0,
'slider2': 0,
'type1': '',
'type2': '',
'percentage1': 0,
'percentage2': 0,
},
'Output 2': {
'select1': '',
'select2': '',
'slider1': 0,
'slider2': 0,
'type1': '',
'type2': '',
'percentage1': 0,
'percentage2': 0,
},
}
self.output_complementary = {
'': ['', 'Magnitude', 'Phase', 'Real', 'Imaginary', 'Uniform Magnitude', 'Uniform Phase'],
'Magnitude': ['Phase', 'Uniform Phase'],
'Phase': ['Magnitude', 'Uniform Magnitude'],
'Real': ['Imaginary'],
'Imaginary': ['Real'],
'Uniform Magnitude': ['Phase', 'Uniform Phase'],
'Uniform Phase': ['Magnitude', 'Uniform Magnitude'],
}
self.available_images = {
'': ''
}
self.enables = {
'': [self.ui.component_1_select, self.ui.component_2_select, self.ui.component_1_percentage,
self.ui.component_1_slider, self.ui.component_1_type,
self.ui.component_2_percentage, self.ui.component_2_slider, self.ui.component_2_type],
'output-select': [self.ui.component_1_select, self.ui.component_2_select],
'select1': [self.ui.component_1_percentage, self.ui.component_1_type],
'select2': [self.ui.component_2_percentage, self.ui.component_2_type],
'type1': [self.ui.component_1_slider],
'type2': [self.ui.component_2_slider]
}
self.current_output_channel = None
self.ui.action_new.triggered.connect(self.new_instance)
self.ui.action_exit.triggered.connect(self.close)
self.ui.action_open_image_1.triggered.connect(lambda: self.open_image(self.images['1'], 1))
self.ui.action_open_image_2.triggered.connect(lambda: self.open_image(self.images['2'], 2))
self.ui.image_1_pick.currentIndexChanged.connect(lambda: self.display_component(self.img['Image 1']))
self.ui.image_2_pick.currentIndexChanged.connect(lambda: self.display_component(self.img['Image 2']))
self.ui.output_select.currentIndexChanged.connect(lambda: self.pick_mixer_output())
self.ui.component_1_select.currentIndexChanged.connect(lambda: self.select_enable('select1', self.ui.component_1_select.currentText()))
self.ui.component_2_select.currentIndexChanged.connect(lambda: self.select_enable('select2', self.ui.component_2_select.currentText()))
self.ui.component_1_slider.sliderReleased.connect(lambda: self.mixer('slider1', str(self.ui.component_1_slider.value())))
self.ui.component_2_slider.sliderReleased.connect(lambda: self.mixer('slider2', str(self.ui.component_2_slider.value())))
self.ui.component_1_percentage.valueChanged.connect(lambda: self.change_image('percentage1', str(self.ui.component_1_percentage.value())))
self.ui.component_2_percentage.valueChanged.connect(lambda: self.change_image('percentage2', str(self.ui.component_2_percentage.value())))
self.ui.component_1_type.currentIndexChanged.connect(lambda: self.component_1_conplementary())
self.ui.component_1_type.currentIndexChanged.connect(lambda: self.select_enable('type1', str(self.ui.component_1_type.currentText())))
self.ui.component_2_type.currentIndexChanged.connect(lambda: self.select_enable('type2', str(self.ui.component_2_type.currentText())))
def new_instance(self) -> None:
self.child_window = MainWindow()
self.child_window.show()
def open_image(self, imageWidget: dict, channel: int) -> None:
image = Image()
if not image.path:
return
if len(self.img) == 1:
if f'Image {2//channel}' in self.img:
if not image.compare(self.img[f'Image {2//channel}']['image']):
qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
return
else :
self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
if f'Image {channel}' not in self.available_images:
self.available_images[f'Image {channel}'] = f'Image {channel}'
self.append_outputs(isOneChanneled=False)
else :
self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
elif len(self.img) >= 2:
if not image.compare(self.img[f'Image {2//channel}']['image']):
qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
return
self.img[f'Image {channel}']["image"] = image
self.img[f'Image {channel}']["widgets"] = imageWidget
else :
self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
if f'Image {channel}' not in self.available_images:
self.available_images[f'Image {channel}'] = f'Image {channel}'
self.append_outputs(channel=self.available_images[f'Image {channel}'])
imageWidget['original'].setPixmap(image.get_pixmap().scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
imageWidget['picker'].setDisabled(False)
self.ui.output_select.setDisabled(False)
def append_outputs(self, isOneChanneled: bool=True, channel: str='') -> None:
if isOneChanneled:
self.ui.component_1_select.addItem('')
self.ui.component_2_select.addItem('')
self.ui.component_1_select.setItemText(0, '')
self.ui.component_1_select.setItemText(1, channel)
self.ui.component_2_select.setItemText(0, '')
self.ui.component_2_select.setItemText(1, channel)
else:
self.ui.component_1_select.addItem('')
self.ui.component_2_select.addItem('')
self.ui.component_1_select.setItemText(0, '')
self.ui.component_1_select.setItemText(1, 'Image 1')
self.ui.component_1_select.setItemText(2, 'Image 2')
self.ui.component_2_select.setItemText(0, '')
self.ui.component_2_select.setItemText(1, 'Image 1')
self.ui.component_2_select.setItemText(2, 'Image 2')
def display_component(self, imageWidget: dict) -> None:
component = imageWidget['widgets']['picker'].currentText()
imageWidget['widgets']['filtered'].setPixmap(imageWidget['image'].get_component_pixmap(component).scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
try:
os.remove('test.png')
except:
pass
def pick_mixer_output(self) -> None:
self.current_output_channel = self.ui.output_select.currentText()
self.ui.component_1_slider.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['slider1']))
self.ui.component_1_percentage.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['percentage1']))
self.ui.component_1_select.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['select1'])
self.ui.component_1_type.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['type1'])
self.ui.component_2_slider.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['slider2']))
self.ui.component_2_percentage.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['percentage2']))
self.ui.component_2_select.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['select2'])
self.ui.component_2_type.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['type2'])
if self.ui.output_select.currentText() != '':
self.set_mixer_components_disabled(self.enables['output-select'] ,False)
else:
self.set_mixer_components_disabled(self.enables['output-select'], True)
def set_mixer_components_disabled(self, components: list, logic: bool) -> None:
for component in components:
component.setDisabled(logic)
def select_enable(self, component: str, value: str):
self.change_image(component, value)
if value != '':
self.set_mixer_components_disabled(self.enables[component], False)
else:
self.set_mixer_components_disabled(self.enables[component], True)
def change_image(self, component: str, value: str) -> None:
self.output_channels_controlers[self.current_output_channel][component] = value
def component_1_conplementary(self):
self.ui.component_2_type.clear()
self.ui.component_2_type.addItems(self.output_complementary[self.ui.component_1_type.currentText()])
self.ui.component_2_type.update()
self.change_image('type1', self.ui.component_1_type.currentText())
def mixer(self, slider: str, sliderValue: str) -> None:
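# Store the new slider value, then recombine the two selected images using the chosen component types and mixing ratios.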
self.change_image(slider, sliderValue)
channel_1_ratio = float(self.output_channels_controlers[self.current_output_channel]['slider1']) / 100
channel_2_ratio = float(self.output_channels_controlers[self.current_output_channel]['slider2']) / 100
image_1 = self.output_channels_controlers[self.current_output_channel]['select1']
image_2 = self.output_channels_controlers[self.current_output_channel]['select2']
type1 = self.output_channels_controlers[self.current_output_channel]['type1']
type2 = self.output_channels_controlers[self.current_output_channel]['type2']
if image_1 == "" or image_2 == "" or type1 == "" or type2 == "":
return
try:
if (type1 in ['Magnitude', 'Phase', 'Uniform Magnitude', 'Uniform Phase']
and type2 in ['Magnitude', 'Phase', 'Uniform Magnitude', 'Uniform Phase']):
self.modes[self.current_output_channel] = 'mag-phase'
elif (type1 in ['Real', 'Imaginary']and type2 in ['Real', 'Imaginary']):
self.modes[self.current_output_channel] = 'real-imag'
else:
print('Error')
return
self.outImage = self.img[image_1]['image'].mix(self.img[image_2]['image'], self.output_channels_controlers[self.current_output_channel]['type1'], self.output_channels_controlers[self.current_output_channel]['type2'], channel_1_ratio, channel_2_ratio, self.modes[self.current_output_channel])
self.output_channels[self.current_output_channel].setPixmap(self.outImage.scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
except:
pass
try:
os.remove('test.png')
except:
pass
def main_window():
app = qtw.QApplication(sys.argv)
app.setStyle("Fusion")
window = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main_window()
|
[
"logging.getLogger",
"logging.Formatter",
"logging.FileHandler",
"main_layout.Ui_MainWindow",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QMessageBox.warning",
"Image.Image",
"os.remove"
] |
[((165, 192), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'import logging\n'), ((236, 305), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s:%(name)s:%(asctime)s - %(message)s"""'], {}), "('%(levelname)s:%(name)s:%(asctime)s - %(message)s')\n", (253, 305), False, 'import logging\n'), ((322, 348), 'logging.FileHandler', 'logging.FileHandler', (['"""log"""'], {}), "('log')\n", (341, 348), False, 'import logging\n'), ((12832, 12858), 'PyQt5.QtWidgets.QApplication', 'qtw.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (12848, 12858), True, 'import PyQt5.QtWidgets as qtw\n'), ((560, 575), 'main_layout.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (573, 575), False, 'from main_layout import Ui_MainWindow\n'), ((5482, 5489), 'Image.Image', 'Image', ([], {}), '()\n', (5487, 5489), False, 'from Image import Image\n'), ((8522, 8543), 'os.remove', 'os.remove', (['"""test.png"""'], {}), "('test.png')\n", (8531, 8543), False, 'import os\n'), ((12746, 12767), 'os.remove', 'os.remove', (['"""test.png"""'], {}), "('test.png')\n", (12755, 12767), False, 'import os\n'), ((5717, 5803), 'PyQt5.QtWidgets.QMessageBox.warning', 'qtw.QMessageBox.warning', (['self', '"""failed"""', '"""The Two Images Must be of the same size"""'], {}), "(self, 'failed',\n 'The Two Images Must be of the same size')\n", (5740, 5803), True, 'import PyQt5.QtWidgets as qtw\n'), ((6399, 6485), 'PyQt5.QtWidgets.QMessageBox.warning', 'qtw.QMessageBox.warning', (['self', '"""failed"""', '"""The Two Images Must be of the same size"""'], {}), "(self, 'failed',\n 'The Two Images Must be of the same size')\n", (6422, 6485), True, 'import PyQt5.QtWidgets as qtw\n')]
|
from django import forms
# Form for creating sales
class FormSales(forms.Form):
# Attributes for the form
# NOTE: the date_creation and total fields will be created dynamically
name = forms.CharField(label="Nombre",
max_length=50,
required=True,
help_text="Ingrese su nombre")
description = forms.CharField(label="Descripción",
max_length=120,
help_text="Ingrese una descripción de su producto",
widget=forms.Textarea())
count = forms.IntegerField(label="Cantidad",min_value=0,required=True)
price = forms.DecimalField(label="Precio",min_value=0,required=True)
# Default value is True
paid_out = forms.BooleanField(label="Pagado",
initial=True,
help_text="Si es verdadero, está pagado",
widget=forms.CheckboxInput())
|
[
"django.forms.CheckboxInput",
"django.forms.CharField",
"django.forms.Textarea",
"django.forms.IntegerField",
"django.forms.DecimalField"
] |
[((191, 288), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Nombre"""', 'max_length': '(50)', 'required': '(True)', 'help_text': '"""Ingrese su nombre"""'}), "(label='Nombre', max_length=50, required=True, help_text=\n 'Ingrese su nombre')\n", (206, 288), False, 'from django import forms\n'), ((629, 693), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'label': '"""Cantidad"""', 'min_value': '(0)', 'required': '(True)'}), "(label='Cantidad', min_value=0, required=True)\n", (647, 693), False, 'from django import forms\n'), ((704, 766), 'django.forms.DecimalField', 'forms.DecimalField', ([], {'label': '"""Precio"""', 'min_value': '(0)', 'required': '(True)'}), "(label='Precio', min_value=0, required=True)\n", (722, 766), False, 'from django import forms\n'), ((598, 614), 'django.forms.Textarea', 'forms.Textarea', ([], {}), '()\n', (612, 614), False, 'from django import forms\n'), ((1007, 1028), 'django.forms.CheckboxInput', 'forms.CheckboxInput', ([], {}), '()\n', (1026, 1028), False, 'from django import forms\n')]
|
from models import shoe_box
def createHBjsons() -> None:
for i in range(4):
# increment on shoebox dimensions
_width = 4.0 + i*0.5
_height = 3.5 + i*0.1
_depth = 4.0 + i*0.5
# init shoe_box
sb = shoe_box.Shoebox(width=_width , height=_height , depth=_depth )
# set grid size and offset
sb.gridSize = 0.5
sb.gridOffset = 0.75
# set window to wall ratio
sb.wwr = 0.4
# create room
sb.createRoom()
# create model
sb.createModel()
# save to hbjson
sb.saveToHBJson()
# run
if __name__ == "__main__":
# create models
createHBjsons()
|
[
"models.shoe_box.Shoebox"
] |
[((264, 324), 'models.shoe_box.Shoebox', 'shoe_box.Shoebox', ([], {'width': '_width', 'height': '_height', 'depth': '_depth'}), '(width=_width, height=_height, depth=_depth)\n', (280, 324), False, 'from models import shoe_box\n')]
|
from sqlalchemy import engine_from_config
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import MetaData
import zope.sqlalchemy
from .node import Node
NAMING_CONVENTION = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=NAMING_CONVENTION)
Base = declarative_base(metadata=metadata)
def get_session(transaction_manager, dbmaker):
dbsession = dbmaker()
zope.sqlalchemy.register(dbsession,
transaction_manager=transaction_manager)
return dbsession
def get_engine(settings, prefix='sqlalchemy.'):
return engine_from_config(settings, prefix)
def get_dbmaker(engine):
dbmaker = sessionmaker()
dbmaker.configure(bind=engine)
return dbmaker
def includeme(config):
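# Pyramid-style includeme: build the engine and sessionmaker from settings and expose a per-request, transaction-managed dbsession.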
settings = config.get_settings()
dbmaker = get_dbmaker(get_engine(settings))
config.add_request_method(
lambda r: get_session(r.tm, dbmaker),
'dbsession',
reify=True
)
|
[
"sqlalchemy.engine_from_config",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.schema.MetaData",
"sqlalchemy.ext.declarative.declarative_base"
] |
[((498, 543), 'sqlalchemy.schema.MetaData', 'MetaData', ([], {'naming_convention': 'NAMING_CONVENTION'}), '(naming_convention=NAMING_CONVENTION)\n', (506, 543), False, 'from sqlalchemy.schema import MetaData\n'), ((551, 586), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'metadata': 'metadata'}), '(metadata=metadata)\n', (567, 586), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((854, 890), 'sqlalchemy.engine_from_config', 'engine_from_config', (['settings', 'prefix'], {}), '(settings, prefix)\n', (872, 890), False, 'from sqlalchemy import engine_from_config\n'), ((932, 946), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {}), '()\n', (944, 946), False, 'from sqlalchemy.orm import sessionmaker\n')]
|
import typing
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Set,
Tuple,
Union,
)
from ncadquery import Workplane
from OCP.Quantity import Quantity_NameOfColor
from OCP.TCollection import TCollection_ExtendedString
from OCP.TDataStd import TDataStd_Name
from OCP.TDF import TDF_Label, TDF_TagSource
from OCP.TNaming import TNaming_Builder, TNaming_NamedShape
from OCP.TopoDS import TopoDS_Shape
from OCP.TPrsStd import TPrsStd_AISPresentation
from PyQt5.QtCore import QPersistentModelIndex, Qt
from nales.nales_cq_impl import NALES_TYPES, CQMethodCall, Part
from nales.utils import TypeChecker
from nales.widgets.msg_boxs import StdErrorMsgBox
class NNode:
def __init__(self, name=None, parent=None):
self._parent = parent
self._columns_nb = 1
self._childs = []
if parent:
self._row = len(parent._childs)
parent._childs.append(self)
parent._columns_nb = max(self.column, parent.column)
self._label = TDF_TagSource.NewChild_s(parent._label)
self._name = name
TDataStd_Name.Set_s(self._label, TCollection_ExtendedString(name))
else:
self._label = TDF_Label()
self._name = "root"
self._row = 0
def _create_sublabel(self):
"""
Create an additional OCCT label that is needed if you want to display several shapes
(It's one shape per label)
"""
sublabel = TDF_TagSource.NewChild_s(self._label)
TDataStd_Name.Set_s(
sublabel, TCollection_ExtendedString(f"{self.name} subshape")
)
return sublabel
def walk(self, node: "NNode" = None) -> "NNode":
"""
Walks all the node starting from 'node'
If 'node' is None, starts from the called node
"""
base_node = node if node else self
yield base_node
for child in base_node.childs:
yield from self.walk(child)
def find(self, node_name: str, node_type=None) -> "NNode" or None:
for node in self.walk():
if node.name == node_name:
if node_type:
if isinstance(node, node_type):
return node
else:
return node
def data(self, column):
if column >= 0 and column < len(self._data):
return self._data[column]
@property
def column(self):
return self._columns_nb
def child_count(self):
return len(self._childs)
def child(self, row) -> "NNode":
if row >= 0 and row < self.child_count():
return self._childs[row]
def has_children(self):
if len(self._childs) != 0:
return True
else:
return False
@property
def parent(self):
return self._parent
@property
def childs(self):
return self._childs
@childs.setter
def childs(self, new_childs):
self._childs = new_childs
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def root_node(self):
root = self.parent
while True:
if root.parent:
root = root.parent
else:
return root
@property
def row(self):
return self._row
class NPart(NNode):
def __init__(self, name: str, parent):
super().__init__(name, parent=parent)
self.visible = True
self._solid = TopoDS_Shape()
self._active_shape = None
self.display()
@property
def part(self):
return self.childs[-1].part_obj
def _update_display_shapes(self):
try:
solid = self.part._findSolid().wrapped
except ValueError:
solid = TopoDS_Shape()
self._solid = solid
if not (active_shape := self.part._val().wrapped) is solid and isinstance(
active_shape, TopoDS_Shape
):
self._active_shape = active_shape
else:
self._active_shape = None
def hide(self):
self.visible = False
self.ais_solid.Erase(remove=True)
self.ais_active_shape.Erase(remove=True)
self.root_node._viewer.Update()
def display(self, update=False):
"""
Builds the display object and attaches it to the OCAF tree
"""
if update:
self.ais_solid.Erase(remove=True)
if self._active_shape:
self.ais_active_shape.Erase(remove=True)
self._update_display_shapes()
# self.root_node._viewer.Update()
solid_bldr = TNaming_Builder(self._label) # _label is TDF_Label
solid_bldr.Generated(self._solid)
solid_shape_attr = solid_bldr.NamedShape()
self.ais_solid = TPrsStd_AISPresentation.Set_s(solid_shape_attr)
if self._active_shape:
active_shape_bldr = TNaming_Builder(self._create_sublabel())
active_shape_bldr.Generated(self._active_shape)
active_shape_attr = active_shape_bldr.NamedShape()
self.ais_active_shape = TPrsStd_AISPresentation.Set_s(active_shape_attr)
self.ais_active_shape.Display(update=True)
self.root_node._viewer.Update()
# There is color mixing due to overlapping, maybe this can help to solve the issue :
# https://dev.opencascade.org/doc/refman/html/class_a_i_s___interactive_context.html#a1e0f9550cc001adbb52329ac243bb3b2
# It's considered good enough for now
self.ais_solid.SetTransparency(0.9)
self.ais_solid.Display()
self.root_node._viewer.Update()
self.visible = True
def update(self):
"""
When called, this method rebuilds the entire Part by calling each child Operation
"""
child_ops = self.childs
for pos, child_op in enumerate(child_ops):
child_op.update(pos)
def remove_operation(self, row: int):
"""
Remove an operation from the operation tree
"""
ops: List[NOperation] = self.childs
ops.pop(row)
ops[row - 1].update_from_node()
class NShape(NNode):
def __init__(self, name, cq_shape, parent: NNode):
self._occt_shape = shape = cq_shape.wrapped
self.shape = cq_shape
self.visible = True
super().__init__(name, parent=parent)
self.bldr = TNaming_Builder(self._label) # _label is TDF_Label
self.bldr.Generated(shape)
named_shape = self.bldr.NamedShape()
self._label.FindAttribute(TNaming_NamedShape.GetID_s(), named_shape)
self.ais_shape = TPrsStd_AISPresentation.Set_s(named_shape)
self.ais_shape.SetTransparency(0.5)
self.ais_shape.SetColor(Quantity_NameOfColor.Quantity_NOC_ALICEBLUE)
self.ais_shape.Display(update=True)
def hide(self):
self.visible = False
self.ais_shape.Erase()
self.root_node._viewer.Update()
def display(self, update=False):
"""
Builds the display object and attaches it to the OCAF tree
"""
if update:
self.ais_shape.Erase(remove=True)
self.root_node._viewer.Update()
self.bldr = TNaming_Builder(self._label) # _label is TDF_Label
self.bldr.Generated(self._occt_shape)
named_shape = self.bldr.NamedShape()
self._label.FindAttribute(TNaming_NamedShape.GetID_s(), named_shape)
self.ais_shape = TPrsStd_AISPresentation.Set_s(named_shape)
self.ais_shape.SetTransparency(0.5)
self.ais_shape.SetColor(Quantity_NameOfColor.Quantity_NOC_ALICEBLUE)
self.ais_shape.Display(update=True)
self.root_node._viewer.Update()
self.visible = True
def update(self):
"""
Update the shape object
"""
self._occt_shape = self.shape.wrapped
self.display(True)
class NShapeOperation(NNode):
def __init__(self, maker_method: Callable, shape_class, parent=None):
super().__init__(maker_method.__name__, parent)
self.maker_method = maker_method
self.shape_class = shape_class
def update(self) -> None:
args = [child.value for child in self.childs]
self.parent.shape = self.maker_method(self.shape_class, *args)
self.parent.update()
class NOperation(NNode):
def __init__(
self, method_name: str, part_obj: Part, parent: NNode, operation: CQMethodCall
):
super().__init__(method_name, parent=parent)
self.part_obj = part_obj
self.operation = operation
self.method = getattr(part_obj, method_name).__func__
if method_name == "Workplane":
self._root_operation = True
else:
self._root_operation = False
def update_from_node(self):
"""
Update the Part from this node
It recomputes every operation from this node to the end
"""
ops: List[NOperation] = self.parent.childs[self.row :]
for op in ops:
op.update()
self.parent.display(update=True)
def _update_init_part(self):
"""
This method is called when the user tries to update __init__ method arguments
There is special handling because it is a bit different from the regular methods
"""
args = [
child.value if not child.is_linked("obj") else child.linked_obj
for child in self.childs
]
try:
self.method(self.part_obj, *args, internal_call=True)
except Exception as exc:
StdErrorMsgBox(repr(exc))
def update(self) -> bool:
"""
Update the CQ objects stack from param modification in the GUI view
"""
# Special handling of __init__ method
if self.row == 0:
self._update_init_part()
return True
previous_operations: List[NOperation] = self.parent.childs[: self.row]
old_part_obj = previous_operations[-1].part_obj
args = [
child.value if not child.is_linked("obj") else child.linked_obj
for child in self.childs
]
try:
self.part_obj = self.method(old_part_obj, *args, internal_call=True)
return True
except ValueError as exc: # we update parent operations until pending wires have reset
if exc.args[0] == "No pending wires present":
tried_updates = [self]
# recursively call parent ops and store all the failed updates to update them again afterwards
while (tried_update := previous_operations[-1].update()) is False:
tried_updates.append(tried_update)
for tried_update in tried_updates:
tried_update.update()
else:
StdErrorMsgBox(repr(exc))
return False
except Exception as exc:
StdErrorMsgBox(repr(exc))
return False
def _restore_pending_wires(self):
index = 2
previous_ops = self.parent.childs[: self._row]
while len(self.parent.part.ctx.pendingWires) == 0:
op = previous_ops[-index]
op.update(len(previous_ops) - op._row)
index += 1
class NShapeArgument(NNode):
def __init__(self, name=None, parent=None):
super().__init__(name, parent)
class NArgument(NNode):
"""
The underlying data of an Argument is as follows:
name : cq argument name
value : value
linked_param : the name of the parameter linked to this arg, None if not connected to any
type: value type (to see if I keep this)
If the Argument is linked to a Parameter, the Parameter name is displayed
"""
def __init__(self, arg_name: str, value, arg_type, parent: NNode, kwarg=False):
super().__init__(arg_name, parent=parent)
self._name = arg_name
self._type = arg_type
self._value = value
self._typechecker = TypeChecker(arg_type)
self._kwarg = kwarg # Boolean indicating if the arg is a kwarg or not
self._linked_param = None
self._linked_nobj_idx: QPersistentModelIndex = None
self._param_name_pidx = None
self._param_value_pidx = None
def link(
self,
by: Literal["param", "obj"],
value: Union[Tuple, QPersistentModelIndex, Any],
):
"""
Link this parameter to an object available in the data model
"""
if by == "param":
raw_val = value[1]
if not self.is_type_compatible(raw_val):
raise TypeError("Couldn't link the param")
self._linked_param = value[0]
self._value = value[1]
self._param_name_pidx = value[2]
self._param_value_pidx = value[3]
else:
self._linked_nobj_idx = value
def unlink_param(self):
self._linked_param = None
self._param_name_pidx = None
self._param_value_pidx = None
def is_kwarg(self):
return self._kwarg
def is_linked(self, by: str = None):
if by == "obj":
return True if self._linked_nobj_idx else False
elif by == "param":
return True if self._linked_param else False
elif by is None:
if self._linked_param or self._linked_nobj_idx:
return True
else:
return False
else:
raise ValueError("Argument 'by' must be either 'obj' or 'param'")
def is_optional_type(self) -> bool:
"""
Indicates if the NArgument is optional, i.e. the function signature looks something like:
method(nargument:Union[float,None] = None) or method(nargument:Optional[float] = None)
"""
if self.is_kwarg():
origin = typing.get_origin(self._type)
if origin == Optional:
return True
if origin == Union:
for allowed_type in typing.get_args(self._type):
if allowed_type == type(None):
return True
return False
else:
return False
else:
return False
def is_literal_type(self) -> bool:
origin = typing.get_origin(self.type)
if self.type == str or origin == Literal:
return True
if origin == Union:
possible_types = typing.get_args(self.type)
for possible_type in possible_types:
if possible_type == str or possible_type == Literal:
return True
return False
def is_type_compatible(self, value: str) -> bool:
return self._typechecker.check(value)
def _cast(self, value: Any):
if type(value) == self._type:
return value
return self._typechecker.cast(value)
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def linked_param(self):
if self.is_linked():
return self._linked_param
else:
raise ValueError("This argument is not linked to a param")
@property
def linked_node(self):
if not self._linked_nobj_idx:
raise ValueError("This argument isn't linked to any node")
else:
return self._linked_nobj_idx.data(Qt.EditRole)
@property
def linked_obj(self):
if self.is_linked(by="obj"):
if hasattr(self.linked_node, "part"):
return self.linked_node.part
elif hasattr(self.linked_node, "shape"):
return self.linked_node.shape
else:
raise NotImplementedError(
"This argument is linked to a object that is not supported yet"
)
else:
raise ValueError("This argument is not linked to an object")
@property
def columns_nb(self):
return 1
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def value(self):
if self.is_optional_type() and self._value is None:
return None
if self.is_linked(by="param"):
return self._cast(self._param_value_pidx.data())
elif self.is_linked(by="obj"):
return self.linked_obj
elif not isinstance(self._value, str):
# Upon argument creation self._value is already of the right type
return self._value
else:
# If self._value is a string, means the users modified the argument in the GUI
return self._cast(self._value)
@value.setter
def value(self, value):
self._value = value
@property
def linked_param(self):
return self._linked_param
|
[
"OCP.TDF.TDF_Label",
"OCP.TPrsStd.TPrsStd_AISPresentation.Set_s",
"OCP.TNaming.TNaming_Builder",
"OCP.TCollection.TCollection_ExtendedString",
"nales.utils.TypeChecker",
"OCP.TopoDS.TopoDS_Shape",
"OCP.TDF.TDF_TagSource.NewChild_s",
"typing.get_args",
"OCP.TNaming.TNaming_NamedShape.GetID_s",
"typing.get_origin"
] |
[((1523, 1560), 'OCP.TDF.TDF_TagSource.NewChild_s', 'TDF_TagSource.NewChild_s', (['self._label'], {}), '(self._label)\n', (1547, 1560), False, 'from OCP.TDF import TDF_Label, TDF_TagSource\n'), ((3612, 3626), 'OCP.TopoDS.TopoDS_Shape', 'TopoDS_Shape', ([], {}), '()\n', (3624, 3626), False, 'from OCP.TopoDS import TopoDS_Shape\n'), ((4763, 4791), 'OCP.TNaming.TNaming_Builder', 'TNaming_Builder', (['self._label'], {}), '(self._label)\n', (4778, 4791), False, 'from OCP.TNaming import TNaming_Builder, TNaming_NamedShape\n'), ((4934, 4981), 'OCP.TPrsStd.TPrsStd_AISPresentation.Set_s', 'TPrsStd_AISPresentation.Set_s', (['solid_shape_attr'], {}), '(solid_shape_attr)\n', (4963, 4981), False, 'from OCP.TPrsStd import TPrsStd_AISPresentation\n'), ((6540, 6568), 'OCP.TNaming.TNaming_Builder', 'TNaming_Builder', (['self._label'], {}), '(self._label)\n', (6555, 6568), False, 'from OCP.TNaming import TNaming_Builder, TNaming_NamedShape\n'), ((6777, 6819), 'OCP.TPrsStd.TPrsStd_AISPresentation.Set_s', 'TPrsStd_AISPresentation.Set_s', (['named_shape'], {}), '(named_shape)\n', (6806, 6819), False, 'from OCP.TPrsStd import TPrsStd_AISPresentation\n'), ((7362, 7390), 'OCP.TNaming.TNaming_Builder', 'TNaming_Builder', (['self._label'], {}), '(self._label)\n', (7377, 7390), False, 'from OCP.TNaming import TNaming_Builder, TNaming_NamedShape\n'), ((7610, 7652), 'OCP.TPrsStd.TPrsStd_AISPresentation.Set_s', 'TPrsStd_AISPresentation.Set_s', (['named_shape'], {}), '(named_shape)\n', (7639, 7652), False, 'from OCP.TPrsStd import TPrsStd_AISPresentation\n'), ((12148, 12169), 'nales.utils.TypeChecker', 'TypeChecker', (['arg_type'], {}), '(arg_type)\n', (12159, 12169), False, 'from nales.utils import TypeChecker\n'), ((14453, 14481), 'typing.get_origin', 'typing.get_origin', (['self.type'], {}), '(self.type)\n', (14470, 14481), False, 'import typing\n'), ((1059, 1098), 'OCP.TDF.TDF_TagSource.NewChild_s', 'TDF_TagSource.NewChild_s', (['parent._label'], {}), '(parent._label)\n', (1083, 1098), False, 'from OCP.TDF import TDF_Label, TDF_TagSource\n'), ((1248, 1259), 'OCP.TDF.TDF_Label', 'TDF_Label', ([], {}), '()\n', (1257, 1259), False, 'from OCP.TDF import TDF_Label, TDF_TagSource\n'), ((1612, 1663), 'OCP.TCollection.TCollection_ExtendedString', 'TCollection_ExtendedString', (['f"""{self.name} subshape"""'], {}), "(f'{self.name} subshape')\n", (1638, 1663), False, 'from OCP.TCollection import TCollection_ExtendedString\n'), ((5246, 5294), 'OCP.TPrsStd.TPrsStd_AISPresentation.Set_s', 'TPrsStd_AISPresentation.Set_s', (['active_shape_attr'], {}), '(active_shape_attr)\n', (5275, 5294), False, 'from OCP.TPrsStd import TPrsStd_AISPresentation\n'), ((6708, 6736), 'OCP.TNaming.TNaming_NamedShape.GetID_s', 'TNaming_NamedShape.GetID_s', ([], {}), '()\n', (6734, 6736), False, 'from OCP.TNaming import TNaming_Builder, TNaming_NamedShape\n'), ((7541, 7569), 'OCP.TNaming.TNaming_NamedShape.GetID_s', 'TNaming_NamedShape.GetID_s', ([], {}), '()\n', (7567, 7569), False, 'from OCP.TNaming import TNaming_Builder, TNaming_NamedShape\n'), ((14004, 14033), 'typing.get_origin', 'typing.get_origin', (['self._type'], {}), '(self._type)\n', (14021, 14033), False, 'import typing\n'), ((14613, 14639), 'typing.get_args', 'typing.get_args', (['self.type'], {}), '(self.type)\n', (14628, 14639), False, 'import typing\n'), ((1174, 1206), 'OCP.TCollection.TCollection_ExtendedString', 'TCollection_ExtendedString', (['name'], {}), '(name)\n', (1200, 1206), False, 'from OCP.TCollection import TCollection_ExtendedString\n'), ((3910, 3924), 
'OCP.TopoDS.TopoDS_Shape', 'TopoDS_Shape', ([], {}), '()\n', (3922, 3924), False, 'from OCP.TopoDS import TopoDS_Shape\n'), ((14165, 14192), 'typing.get_args', 'typing.get_args', (['self._type'], {}), '(self._type)\n', (14180, 14192), False, 'import typing\n')]
|
import sys
import pygame as pg
import numpy as np
import random
import time
pic = np.zeros(shape=(128,64))
width = 128
height = 64
refresh_rate = 60
interval = 1 / refresh_rate
bootrom_file = "bootrom0"
rom_file = "rom"
# rom_file = "hello_world"
debug = False
pg.display.init()
display = pg.display.set_mode((width*4, height*4), flags=0, depth=8)
screen = pg.Surface((width, height), flags=0, depth=8)
pg.transform.scale(screen, (width*4, height*4), display)
def screen_update(silent=True):
pg.transform.scale(screen, (width*4, height*4), display)
pg.display.flip()
if not silent:
print("Screen Update")
def screen_clear():
screen.fill((0,0,0))
#screen_update()
def screen_draw_line(x, y, pixels):
# print("----------DRAW----------")
# print("x:",x)
# print("y:",y)
# print("pix:",bin(pixels))
j = 0b10000000
for i in range(8):
x_pos = x + i
y_pos = y
if x_pos >= 0 and x_pos < width:
if y_pos >= 0 and y_pos < height:
if pixels & j:
pg.draw.rect(screen, 255, pg.Rect(x_pos,y_pos,1,1))
else:
pg.draw.rect(screen, 0, pg.Rect(x_pos,y_pos,1,1))
j = j >> 1
#screen_update()
screen_clear()
# screen_draw_line(0,0,0b10101011)
# input()
class memByte:
def __init__(self):
self.value = 0x00000000
def write(self, value):
self.value = value & 0xff
def readUpper(self):
return (self.value & 0b11110000) >> 4
def readLower(self):
return self.value & 0b1111
class Flags:
def __init__(self):
self.z = 0
self.n = 0
self.h = 0
self.c = 0
def setZero(self):
self.z = 1
def clearZero(self):
self.z = 0
def setNeg(self):
self.n = 1
def clearNeg(self):
self.n = 0
def setHalf(self):
self.h = 1
def clearHalf(self):
self.h = 0
def setCarry(self):
self.c = 1
def clearCarry(self):
self.c = 0
def clearFlags(self):
self.z = 0
self.n = 0
self.h = 0
self.c = 0
class reg:
def __init__(self):
self.value = 0b00000000
self.value = random.randint(0,255)
def send(self):
sys.stdout.write(chr(self.value))
sys.stdout.flush()
class Dreg:
def __init__(self, r1, r2):
self.r1 = r1
self.r2 = r2
def getvalue(self):
self.value = (self.r1.value << 8) + self.r2.value
def setvalue(self):
self.r1.value = self.value >> 8
self.r2.value = self.value & 0xff
class regPC:
def __init__(self):
self.value = 0x0
def inc(self, length=1):
self.value += length
self.value = self.value & 0xffff
def jump(self, address):
self.value = address & 0xffff
class regSP:
def __init__(self):
self.value = 0xfffe
def inc(self):
self.value += 2
self.value = self.value & 0xffff
def dec(self):
self.value -= 2
def setvalue(self):
#print("SPSET:",hex(self.value))
pass # JUST TO MAKE LDX SIMPLER
ONE_REG = reg()
ONE_REG.value = 1
FL = Flags()
halt = False
A = reg()
B = reg()
C = reg()
D = reg()
E = reg()
H = reg()
L = reg()
BC = Dreg(B, C)
DE = Dreg(D, E)
HL = Dreg(H, L)
#E.value = 0x1 # Randomness loop
PC = regPC()
SP = regSP()
memory = []
jumped = False
print("RESERVING MEMORY...")
for i in range(0x10000):
memory.append(memByte())
print("MEMORY RESERVED.")
print("LOADING MEMORY...")
f = open(bootrom_file, "rb")
rom_data = f.read()
f.close()
for i in range(len(rom_data)):
memory[i+0x0].value = rom_data[i]
f = open(rom_file, "rb")
rom_data = f.read()
f.close()
for i in range(len(rom_data)):
memory[i+0x597].value = rom_data[i]
print("MEMORY LOADED.")
def LDI(R, mem=False):
PC.inc()
if not mem:
R.value = memory[PC.value].value
else:
R.getvalue()
memory[R.value].value = memory[PC.value].value
def LDX(R):
PC.inc()
low = memory[PC.value].value
PC.inc()
R.value = low + (memory[PC.value].value << 8)
R.setvalue()
def PUSH_R(R, mem=False):
if not mem:
memory[SP.value].value = R.value
else:
R.getvalue()
memory[SP.value].value = memory[R.value].value
SP.dec()
def PUSH_RR(RR):
RR.getvalue()
memory[SP.value].value = RR.value & 0xff
memory[SP.value + 1].value = RR.value >> 8
SP.dec()
def POP_R(R, mem=False):
SP.inc()
if not mem:
#print(hex(SP.value))
R.value = memory[SP.value].value
else:
R.getvalue()
memory[R.value].value = memory[SP.value].value
def POP_RR(RR):
SP.inc()
RR.value = memory[SP.value].value + (memory[SP.value + 1].value << 8)
RR.setvalue()
MOV_REGISTERS = [B, C, D, E, H, L, HL, A]
MOVB_OPCODES = [0x09, 0x19, 0x29, 0x39, 0x49, 0x59, 0x69, 0x79]
MOVC_OPCODES = [0x89, 0x99, 0xA9, 0xB9, 0xC9, 0xD9, 0xE9, 0xF9]  # low nibble 9, upper half of the column (pairs with MOVB_OPCODES)
MOVD_OPCODES = [0x0A, 0x1A, 0x2A, 0x3A, 0x4A, 0x5A, 0x6A, 0x7A]
MOVE_OPCODES = [0x8A, 0x9A, 0xAA, 0xBA, 0xCA, 0xDA, 0xEA, 0xFA]
MOVH_OPCODES = [0x0B, 0x1B, 0x2B, 0x3B, 0x4B, 0x5B, 0x6B, 0x7B]
MOVL_OPCODES = [0x8B, 0x9B, 0xAB, 0xBB, 0xCB, 0xDB, 0xEB, 0xFB]
MOVMHL_OPCODES = [0x0C, 0x1C, 0x2C, 0x3C, 0x4C, 0x5C, 0x6C, 0x7C]
MOVA_OPCODES = [0x8C, 0x9C, 0xAC, 0xBC, 0xCC, 0xDC, 0xEC, 0xFC]
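# Decoding example, taken from the tables above: opcode 0x19 appears in
# MOVB_OPCODES at index 1, so it runs MOV(B, 1), i.e. B <- C (MOV_REGISTERS[1]
# is C). Index 6 selects the HL pair, meaning the source operand is the byte in
# memory addressed by HL.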
def MOV(R1, R2index, mem=False):
R2 = MOV_REGISTERS[R2index]
if not mem:
if R2index == 6:
R2.getvalue()
R1.value = memory[R2.value].value
else:
R1.value = R2.value
    else:
        R1.getvalue()  # refresh the 16-bit pair before using it as an address
        memory[R1.value].value = R2.value
def MOV_RR(RR1, RR2):
RR2.getvalue()
RR1.value = RR2.value
RR1.setvalue()
def ADD_8(value1, value2):
nib = (value1 & 0xf) + (value2 & 0xf)
value = value1 + value2
FL.clearFlags()
if value & 0xff == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
if nib & 0xf0:
FL.setHalf()
if value >> 8:
FL.setCarry()
return value & 0xff
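# Worked example of the flag rules above: ADD_8(0xFF, 0x01) wraps to 0x00 and
# sets the zero, half-carry and carry flags while leaving negative clear.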
def ADD_R(R, mem=False):
if not mem:
value = ADD_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = ADD_8(A.value, memory[R.value].value)
memory[R.value].value = value
def ADD_16(value1, value2):
nib = (value1 & 0xf) + (value2 & 0xf)
value = value1 + value2
FL.clearFlags()
if value & 0xffff == 0:
FL.setZero()
if value & 0b1000000000000000:
FL.setNeg()
if nib & 0xf0:
FL.setHalf()
if value >> 16:
FL.setCarry()
return value & 0xffff
def ADDX_RR(RR):
RR.getvalue()
value = ADD_16(A.value, RR.value)
RR.value = value
RR.setvalue()
def SUB_8(value1, value2):
value = value1 - value2
if value < 0:
value += 0x100
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
if (value1 & 0xf) <= (value2 & 0xf):
FL.setHalf()
if value1 <= value2:
FL.setCarry()
return value & 0xff
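# Worked example: SUB_8(0x10, 0x20) returns 0xF0 and sets the negative flag as
# well as half-carry and carry, which here act as borrow indicators.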
def SUB_R(R, compare_only, mem=False):
if not mem:
value = SUB_8(R.value, A.value)
if not compare_only:
R.value = value
else:
R.getvalue()
value = SUB_8(memory[R.value].value, A.value)
if not compare_only:
memory[R.value].value = value
def INC(R, mem=False):
if not mem:
value = ADD_8(ONE_REG.value, R.value)
R.value = value
else:
R.getvalue()
value = ADD_8(ONE_REG.value, memory[R.value].value)
memory[R.value].value = value
def DEC(R, mem=False):
if not mem:
value = SUB_8(R.value, ONE_REG.value)
R.value = value
else:
R.getvalue()
value = SUB_8(memory[R.value].value, ONE_REG.value)
memory[R.value].value = value
def AND_8(value1, value2):
value = value1 & value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def AND_R(R, mem=False):
if not mem:
value = AND_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = AND_8(A.value, memory[R.value].value)
memory[R.value].value = value
def OR_8(value1, value2):
value = value1 | value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def OR_R(R, mem=False):
if not mem:
value = OR_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = OR_8(A.value, memory[R.value].value)
memory[R.value].value = value
def XOR_8(value1, value2):
value = value1 ^ value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def XOR_R(R, mem=False):
if not mem:
value = XOR_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = XOR_8(A.value, memory[R.value].value)
memory[R.value].value = value
def CMPS(R, mem=False):
if not mem:
Rval = R.value
if Rval & 0b10000000:
Rval = - ((0x100 - Rval) & 0xff)
Aval = A.value
if Aval & 0b10000000:
Aval = - ((0x100 - Aval) & 0xff)
FL.clearFlags()
if Rval == Aval:
FL.setZero()
elif Rval < Aval:
FL.setNeg()
else:
R.getvalue()
Rval = memory[R.value].value
if Rval & 0b10000000:
Rval = - ((0x100 - Rval) & 0xff)
Aval = A.value
if Aval & 0b10000000:
Aval = - ((0x100 - Aval) & 0xff)
FL.clearFlags()
if Rval == Aval:
FL.setZero()
elif Rval < Aval:
FL.setNeg()
def JUMP():
PC.inc()
low = memory[PC.value].value
PC.inc()
high = memory[PC.value].value
global jumped
jumped = True
PC.value = (high << 8) + low
print("JUMP:",hex((high << 8) + low))
def REL_JUMP():
PC.inc()
value = memory[PC.value].value
if value & 0b10000000:
value = - ((0x100 - value) & 0xff)
# ACCORDING TO DOCUMENTATION RELATIVE JUMPS USE THE +2 PC INC
PC.value += value
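# Offset example: an operand byte of 0xFE is read as -2; combined with the
# PC.inc() at the end of the main loop, that jump targets the JMP opcode itself,
# since offsets are relative to the address just past the two-byte instruction.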
screen_update()
last_update = time.time()
while not halt:
b_up = memory[PC.value].readUpper()
b_down = memory[PC.value].readLower()
b_val = memory[PC.value].value
jumped = False
if time.time() > last_update + interval:
screen_update()
last_update = time.time()
# Handle pygame events
for event in pg.event.get():
# print("EVENT:",event.type)
# input()
pass
if debug:
pass#input()
if debug or False:
print(hex(PC.value), hex(b_val))
# if b_val in [0x86, 0x96, 0xA6, 0xB6, 0xC6, 0xD6, 0xE6, 0xF6]:
# print("CMP R")
# input()
# if b_val == 0xF7:
# print("CMPI")
# input()
# HCF (HALT)
if b_val == 0x6C:
halt = True
# LDI R, xx
if b_val == 0x20:
LDI(B)
elif b_val == 0x30:
LDI(C)
elif b_val == 0x40:
LDI(D)
elif b_val == 0x50:
LDI(E)
elif b_val == 0x60:
LDI(H)
elif b_val == 0x70:
LDI(L)
elif b_val == 0x80:
LDI(HL, mem=True)
elif b_val == 0x90:
LDI(A)
# LDX RR, xxyy
elif b_val == 0x21:
LDX(BC)
elif b_val == 0x31:
LDX(DE)
elif b_val == 0x41:
LDX(HL)
elif b_val == 0x22:
LDX(SP)
# PUSH R
elif b_val == 0x81:
PUSH_R(B)
elif b_val == 0x91:
PUSH_R(C)
elif b_val == 0xA1:
PUSH_R(D)
elif b_val == 0xB1:
PUSH_R(E)
elif b_val == 0xC1:
PUSH_R(H)
elif b_val == 0xD1:
PUSH_R(L)
elif b_val == 0xC0:
PUSH_R(HL, mem=True)
elif b_val == 0xD0:
PUSH_R(A)
# PUSH RR
elif b_val == 0x51:
PUSH_RR(BC)
elif b_val == 0x61:
PUSH_RR(DE)
elif b_val == 0x71:
PUSH_RR(HL)
# POP R
elif b_val == 0x82:
POP_R(B)
elif b_val == 0x92:
POP_R(C)
elif b_val == 0xA2:
POP_R(D)
elif b_val == 0xB2:
POP_R(E)
elif b_val == 0xC2:
POP_R(H)
elif b_val == 0xD2:
POP_R(L)
elif b_val == 0xC3:
POP_R(HL, mem=True)
elif b_val == 0xD3:
POP_R(A)
# POP RR
elif b_val == 0x52:
POP_RR(BC)
elif b_val == 0x62:
POP_RR(DE)
elif b_val == 0x72:
POP_RR(HL)
# MOV R1, R2
elif b_val in MOVB_OPCODES:
MOV(B, MOVB_OPCODES.index(b_val))
elif b_val in MOVC_OPCODES:
MOV(C, MOVC_OPCODES.index(b_val))
elif b_val in MOVD_OPCODES:
MOV(D, MOVD_OPCODES.index(b_val))
elif b_val in MOVE_OPCODES:
MOV(E, MOVE_OPCODES.index(b_val))
elif b_val in MOVH_OPCODES:
MOV(H, MOVH_OPCODES.index(b_val))
elif b_val in MOVL_OPCODES:
MOV(L, MOVL_OPCODES.index(b_val))
elif b_val in MOVMHL_OPCODES:
MOV(HL, MOVMHL_OPCODES.index(b_val), mem=True)
elif b_val in MOVA_OPCODES:
MOV(A, MOVA_OPCODES.index(b_val))
# MOV RR1, RR2
elif b_val == 0xED:
MOV_RR(HL, BC)
elif b_val == 0xFD:
MOV_RR(HL, DE)
# CLRFLAG
elif b_val == 0x08:
FL.clearFlags()
# SETFLAG f, x
elif b_val == 0x18:
FL.setZero()
elif b_val == 0x28:
FL.clearZero()
elif b_val == 0x38:
FL.setNeg()
elif b_val == 0x48:
FL.clearNeg()
elif b_val == 0x58:
FL.setHalf()
elif b_val == 0x68:
FL.clearHalf()
elif b_val == 0x78:
FL.setCarry()
elif b_val == 0x88:
FL.clearCarry()
# ADD R
elif b_val == 0x04:
ADD_R(B)
elif b_val == 0x14:
ADD_R(C)
elif b_val == 0x24:
ADD_R(D)
elif b_val == 0x34:
ADD_R(E)
elif b_val == 0x44:
ADD_R(H)
elif b_val == 0x54:
ADD_R(L)
elif b_val == 0x64:
ADD_R(HL, mem=True)
elif b_val == 0x74:
ADD_R(A)
# ADDI xx
elif b_val == 0xA7:
PC.inc()
value = ADD_8(A.value, memory[PC.value].value)
A.value = value
# ADDX RR
elif b_val == 0x83:
ADDX_RR(BC)
elif b_val == 0x93:
ADDX_RR(DE)
elif b_val == 0xA3:
ADDX_RR(HL)
# SUB R | CMP R
elif b_val == 0x84 or b_val == 0x86:
SUB_R(B, b_val == 0x86)
elif b_val == 0x94 or b_val == 0x96:
SUB_R(C, b_val == 0x96)
elif b_val == 0xA4 or b_val == 0xA6:
SUB_R(D, b_val == 0xA6)
elif b_val == 0xB4 or b_val == 0xB6:
SUB_R(E, b_val == 0xB6)
elif b_val == 0xC4 or b_val == 0xC6:
SUB_R(H, b_val == 0xC6)
elif b_val == 0xD4 or b_val == 0xD6:
SUB_R(L, b_val == 0xD6)
elif b_val == 0xE4 or b_val == 0xE6:
SUB_R(HL, b_val == 0xE6, mem=True)
elif b_val == 0xF4 or b_val == 0xF6:
SUB_R(A, b_val == 0xF6)
# SUBI xx | CMPI xx
elif b_val == 0xB7 or b_val == 0xF7:
PC.inc()
value = SUB_8(A.value, memory[PC.value].value)
if b_val == 0xB7: # SUBI xx
A.value = value
# INC R
elif b_val == 0x03:
INC(B)
elif b_val == 0x13:
INC(C)
elif b_val == 0x23:
INC(D)
elif b_val == 0x33:
INC(E)
elif b_val == 0x43:
INC(H)
elif b_val == 0x53:
INC(L)
elif b_val == 0x63:
INC(HL, mem=True)
elif b_val == 0x73:
INC(A)
# INX RR
elif b_val == 0xA8:
BC.getvalue()
BC.value += 1
        BC.value &= 0xffff
BC.setvalue()
elif b_val == 0xB8:
DE.getvalue()
DE.value += 1
        DE.value &= 0xffff
DE.setvalue()
elif b_val == 0xC8:
HL.getvalue()
HL.value += 1
        HL.value &= 0xffff
HL.setvalue()
# DEC R
elif b_val == 0x07:
DEC(B)
elif b_val == 0x17:
DEC(C)
elif b_val == 0x27:
DEC(D)
elif b_val == 0x37:
DEC(E)
elif b_val == 0x47:
DEC(H)
elif b_val == 0x57:
DEC(L)
elif b_val == 0x67:
DEC(HL, mem=True)
elif b_val == 0x77:
DEC(A)
# AND R
elif b_val == 0x05:
AND_R(B)
elif b_val == 0x15:
AND_R(C)
elif b_val == 0x25:
AND_R(D)
elif b_val == 0x35:
AND_R(E)
elif b_val == 0x45:
AND_R(H)
elif b_val == 0x55:
AND_R(L)
elif b_val == 0x65:
AND_R(HL, mem=True)
elif b_val == 0x75:
AND_R(A)
# ANDI xx
elif b_val == 0xC7:
PC.inc()
value = AND_8(memory[PC.value].value, A.value)
A.value = value
# OR R
elif b_val == 0x85:
OR_R(B)
elif b_val == 0x95:
OR_R(C)
elif b_val == 0xA5:
OR_R(D)
elif b_val == 0xB5:
OR_R(E)
elif b_val == 0xC5:
OR_R(H)
elif b_val == 0xD5:
OR_R(L)
elif b_val == 0xE5:
OR_R(HL, mem=True)
elif b_val == 0xF5:
OR_R(A)
# ORI xx
elif b_val == 0xD7:
PC.inc()
value = OR_8(memory[PC.value].value, A.value)
A.value = value
# XOR R
elif b_val == 0x06:
XOR_R(B)
elif b_val == 0x16:
XOR_R(C)
elif b_val == 0x26:
XOR_R(D)
elif b_val == 0x36:
XOR_R(E)
elif b_val == 0x46:
XOR_R(H)
elif b_val == 0x56:
XOR_R(L)
elif b_val == 0x66:
XOR_R(HL, mem=True)
elif b_val == 0x76:
XOR_R(A)
# XORI xx
elif b_val == 0xE7:
PC.inc()
value = XOR_8(memory[PC.value].value, A.value)
A.value = value
# CMPS R
elif b_val == 0x0D:
CMPS(B)
elif b_val == 0x1D:
CMPS(C)
elif b_val == 0x2D:
CMPS(D)
elif b_val == 0x3D:
CMPS(E)
elif b_val == 0x4D:
CMPS(H)
elif b_val == 0x5D:
CMPS(L)
elif b_val == 0x6D:
CMPS(HL, mem=True)
elif b_val == 0x7D:
CMPS(A)
# SIN
elif b_val == 0xE0:
A.value = ord(sys.stdin.buffer.read(1)) & 0xff
pass
# SOUT
elif b_val == 0xE1:
print(chr(A.value),end="",flush=True)
if A.value == 7:
print("[BELL]")
pass
# CLRSCR
elif b_val == 0xF0:
screen_clear()
# DRAW
elif b_val == 0xF1:
x = C.value
if x & 0b10000000:
x = - ((0x100 - x) & 0xff)
y = B.value
if y & 0b10000000:
y = - ((0x100 - y) & 0xff)
screen_draw_line(x, y, A.value & 0xff)
# JMP xxyy
elif b_val == 0x0F:
JUMP()
# JMPcc xxyy
elif b_val == 0x1F:
if FL.z:
JUMP()
else:
PC.inc(2)
elif b_val == 0x2F:
if not FL.z:
JUMP()
else:
PC.inc(2)
elif b_val == 0x3F:
if FL.n:
JUMP()
else:
PC.inc(2)
elif b_val == 0x4F:
if not FL.n:
JUMP()
else:
PC.inc(2)
    elif b_val == 0x5F:
        if FL.h:
            JUMP()
        else:
            PC.inc(2)
elif b_val == 0x6F:
if not FL.h:
JUMP()
else:
PC.inc(2)
elif b_val == 0x7F:
if FL.c:
JUMP()
else:
PC.inc(2)
elif b_val == 0x8F:
if not FL.c:
JUMP()
else:
PC.inc(2)
# JMP xx
elif b_val == 0x9F:
REL_JUMP()
# JMPcc xx
elif b_val == 0xAF:
if FL.z:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xBF:
if not FL.z:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xCF:
if FL.n:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xDF:
if not FL.n:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xEF:
if FL.h:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xFF:
if not FL.h:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xEE:
if FL.c:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xFE:
if not FL.c:
REL_JUMP()
else:
PC.inc()
# CALL xxyy
elif b_val == 0x1E:
memory[SP.value].value = (PC.value+3) & 0xff
memory[SP.value + 1].value = (PC.value+3) >> 8
SP.dec()
JUMP()
# RET
elif b_val == 0x0E:
SP.inc()
PC.value = memory[SP.value].value + (memory[SP.value + 1].value << 8)
jumped = True
# NOP
elif b_val == 0x00:
pass
    else:
        print("UNKNOWN:",hex(b_val),"@",hex(PC.value))
if debug:
BC.getvalue()
DE.getvalue()
HL.getvalue()
print("A:",hex(A.value),"B:",hex(B.value),"C:",hex(C.value),"D:",hex(D.value),"E:",hex(E.value),"H:",hex(H.value),
"L:",hex(L.value),"BC:",hex(BC.value),"DE:",hex(DE.value),"HL:",hex(HL.value),"PC:",hex(PC.value),"SP:",hex(SP.value))
if not jumped:
PC.inc()
else:
pass
#print("JUMPED")
|
[
"pygame.display.init",
"pygame.Surface",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.Rect",
"numpy.zeros",
"sys.stdin.buffer.read",
"sys.stdout.flush",
"time.time",
"random.randint",
"pygame.transform.scale"
] |
[((83, 108), 'numpy.zeros', 'np.zeros', ([], {'shape': '(128, 64)'}), '(shape=(128, 64))\n', (91, 108), True, 'import numpy as np\n'), ((264, 281), 'pygame.display.init', 'pg.display.init', ([], {}), '()\n', (279, 281), True, 'import pygame as pg\n'), ((292, 354), 'pygame.display.set_mode', 'pg.display.set_mode', (['(width * 4, height * 4)'], {'flags': '(0)', 'depth': '(8)'}), '((width * 4, height * 4), flags=0, depth=8)\n', (311, 354), True, 'import pygame as pg\n'), ((360, 405), 'pygame.Surface', 'pg.Surface', (['(width, height)'], {'flags': '(0)', 'depth': '(8)'}), '((width, height), flags=0, depth=8)\n', (370, 405), True, 'import pygame as pg\n'), ((406, 466), 'pygame.transform.scale', 'pg.transform.scale', (['screen', '(width * 4, height * 4)', 'display'], {}), '(screen, (width * 4, height * 4), display)\n', (424, 466), True, 'import pygame as pg\n'), ((10272, 10283), 'time.time', 'time.time', ([], {}), '()\n', (10281, 10283), False, 'import time\n'), ((500, 560), 'pygame.transform.scale', 'pg.transform.scale', (['screen', '(width * 4, height * 4)', 'display'], {}), '(screen, (width * 4, height * 4), display)\n', (518, 560), True, 'import pygame as pg\n'), ((561, 578), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (576, 578), True, 'import pygame as pg\n'), ((10587, 10601), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (10599, 10601), True, 'import pygame as pg\n'), ((2225, 2247), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2239, 2247), False, 'import random\n'), ((2317, 2335), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2333, 2335), False, 'import sys\n'), ((10446, 10457), 'time.time', 'time.time', ([], {}), '()\n', (10455, 10457), False, 'import time\n'), ((10530, 10541), 'time.time', 'time.time', ([], {}), '()\n', (10539, 10541), False, 'import time\n'), ((1091, 1118), 'pygame.Rect', 'pg.Rect', (['x_pos', 'y_pos', '(1)', '(1)'], {}), '(x_pos, y_pos, 1, 1)\n', (1098, 1118), True, 'import pygame as pg\n'), ((1183, 1210), 'pygame.Rect', 'pg.Rect', (['x_pos', 'y_pos', '(1)', '(1)'], {}), '(x_pos, y_pos, 1, 1)\n', (1190, 1210), True, 'import pygame as pg\n'), ((18032, 18056), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', (['(1)'], {}), '(1)\n', (18053, 18056), False, 'import sys\n')]
|
# -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTests.test_write -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTests.test_write} to test that
ITransport.write() works for process transports.
"""
__import__('_preamble')
import sys
from twisted.internet import stdio, protocol
from twisted.python import reflect
class WriteChild(protocol.Protocol):
def connectionMade(self):
for ch in 'ok!':
self.transport.write(ch)
self.transport.loseConnection()
def connectionLost(self, reason):
reactor.stop()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(WriteChild())
reactor.run()
|
[
"twisted.internet.reactor.stop",
"twisted.python.reflect.namedAny",
"twisted.internet.reactor.run"
] |
[((837, 850), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (848, 850), False, 'from twisted.internet import reactor\n'), ((669, 683), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (681, 683), False, 'from twisted.internet import reactor\n'), ((717, 746), 'twisted.python.reflect.namedAny', 'reflect.namedAny', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (733, 746), False, 'from twisted.python import reflect\n')]
|
# calculate the load on each org
import threading
# cannot use from wpad_dispatch here, have to import whole module,
# because of circular dependency
import wpad_dispatch
from wpad_utils import *
from wlcg_wpad import getiporg
orgcleanminutes = 5
orgcleantime = 0
# Minute records keep track of the number of requests in each minute
class MinuteRecord:
def __init__(self, now, older):
self.minute = now # minute of the record
self.requests = 0 # number of requests this minute
self.next = None # next MinuteRecord
if older != None:
older.next = self # point older record to this one
class OrgData:
def __init__(self):
self.lock = threading.Lock() # lock for this org
self.overloadminute = 0 # minute last overload triggered
self.total = 0 # total number of requests tracked
self.newest = None # newest MinuteRecord
self.oldest = None # oldest MinuteRecord
orgdata = {}
# lock for adding, deleting, and looking up an org
orgdatalock = threading.Lock()
# return triple of org name, minutes remaining in an overload,
# and percent of limit in the last minutes being tracked
def orgload(remoteip, limit, minutes, persist, now):
global orgcleantime
org = getiporg(remoteip)
if org == None:
return None, 0, 0
# See if this org is excluded
# wlcgwpadconf is occasionally replaced, so use a local variable for it
conf = wpad_dispatch.wlcgwpadconf
if 'overload' in conf and 'excludes' in conf['overload'] and \
org in conf['overload']['excludes']:
return None, 0, 0
now = now / 60 # this function deals only with minutes
orgdatalock.acquire()
if orgcleantime <= now - orgcleanminutes:
# clean out orgs that have had no updates in minutes or overload in
# persist minutes, except current org
orgcleantime = now
numorgs = 0
delorgs = 0
for oldorg in list(orgdata):
numorgs += 1
if org == oldorg:
continue
data = orgdata[oldorg]
if persist < now - data.overloadminute and \
data.newest != None and data.newest.minute < now - minutes:
# Note that there is a race condition where this could
# delete an org from orgdata at the same time as another
# request comes in to another thread to prolong it, but
# that would only result in the loss of one count, it would
# not be fatal. The only way data.newest can equal None
# is if the organization is in the process of being created
# by another thread, leave that one alone.
del orgdata[oldorg]
delorgs += 1
if delorgs > 0:
orgdatalock.release()
logmsg('-', '-', '', 'cleaned load data from ' + str(delorgs) + ' orgs, ' + str(numorgs-delorgs) + ' still active')
orgdatalock.acquire()
# get the data for this org
if org in orgdata:
data = orgdata[org]
else:
data = OrgData()
orgdata[org] = data
orgdatalock.release()
data.lock.acquire()
# remove any minute records that are too old
record = data.oldest
while record != None and record.minute <= now - minutes:
data.total -= record.requests
record = record.next
data.oldest = record
record = data.newest
if record == None or record.minute != now:
# add new minute record
record = MinuteRecord( now, record )
data.newest = record
if data.oldest == None:
data.oldest = record
# add one to this minute and the total
record.requests += 1
data.total = data.total + 1
percent = int(data.total * 100.0 / limit)
if percent > 100:
data.overloadminute = now
overloadminute = data.overloadminute
data.lock.release()
return ( org, persist - (now - overloadminute), percent )
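# Illustrative caller sketch (the limit/minutes/persist values below are
# hypothetical, not taken from the real wpad_dispatch configuration):
#   org, overmins, percent = orgload(remoteip, limit=1000, minutes=5,
#                                    persist=30, now=time.time())
#   if org is not None and overmins > 0:
#       # the org is still inside an overload window; percent is its current
#       # load as a percentage of the configured limit
#       ...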
|
[
"threading.Lock",
"wlcg_wpad.getiporg"
] |
[((1142, 1158), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1156, 1158), False, 'import threading\n'), ((1369, 1387), 'wlcg_wpad.getiporg', 'getiporg', (['remoteip'], {}), '(remoteip)\n', (1377, 1387), False, 'from wlcg_wpad import getiporg\n'), ((751, 767), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (765, 767), False, 'import threading\n')]
|
import os
import sys
sys.path.append(os.path.abspath(''))
# Raises linting error because not at top of file
# Not sure how to resolve this with the pathing
from src import uploadJson # noqa: E402
import src.config as config # noqa: E402
# Taken out of commission until the newly requested geojson format is developed
# def test_main():
# from src import main
# import json
# import geojson
# #from src import main
# main_dict = main.main(['county'])
# for v in main_dict.values():
# v_str = json.dumps(v)
# v_geojson = geojson.loads(v_str)
# assert v_geojson.is_valid == True
def test_requirements():
import pkg_resources
requirements_path = "requirements.txt"
with open(requirements_path) as f:
requirements = pkg_resources.parse_requirements(f)
for r in requirements:
r = str(r)
pkg_resources.require(r)
# breakpoint()
def test_auth():
db = uploadJson.auth_firebase()
cook = db.reference('/county_data/17031').get()
assert cook['NAME'] == 'Cook County, Illinois'
def test_secrets():
assert type(config.CENSUS_KEY) == str
assert type(config.FIREBASE_SERVICE_KEY) == str
assert config.CENSUS_KEY != ''
assert config.FIREBASE_SERVICE_KEY != ''
|
[
"os.path.abspath",
"pkg_resources.parse_requirements",
"pkg_resources.require",
"src.uploadJson.auth_firebase"
] |
[((37, 56), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (52, 56), False, 'import os\n'), ((956, 982), 'src.uploadJson.auth_firebase', 'uploadJson.auth_firebase', ([], {}), '()\n', (980, 982), False, 'from src import uploadJson\n'), ((774, 809), 'pkg_resources.parse_requirements', 'pkg_resources.parse_requirements', (['f'], {}), '(f)\n', (806, 809), False, 'import pkg_resources\n'), ((876, 900), 'pkg_resources.require', 'pkg_resources.require', (['r'], {}), '(r)\n', (897, 900), False, 'import pkg_resources\n')]
|
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB
import pickle
def train_iris_nb():
"""Train a GaussianNB model on iris dataset."""
X, y_train = load_iris(return_X_y=True, as_frame=True)
colnames = X.columns
X_train = X.values
model = GaussianNB()
model.fit(X_train, y_train)
return model
def dump_model(model_path, model):
"""Save model as binary pickle file."""
with open(model_path, 'wb') as file:
pickle.dump(model, file)
def load_model(model_path):
"""Load model to return for future use."""
with open(model_path, 'rb') as file:
model = pickle.load(file)
return model
def main():
model = train_iris_nb()
dump_model('model.pickle', model)
if __name__ == '__main__':
main()
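# Illustrative usage of the helpers above (the sample values are just an example):
#   model = load_model('model.pickle')
#   sample = [[5.1, 3.5, 1.4, 0.2]]        # sepal/petal measurements in cm
#   print(model.predict(sample))           # class 0 (setosa) for this sample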
|
[
"sklearn.datasets.load_iris",
"sklearn.naive_bayes.GaussianNB",
"pickle.load",
"pickle.dump"
] |
[((226, 267), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)', 'as_frame': '(True)'}), '(return_X_y=True, as_frame=True)\n', (235, 267), False, 'from sklearn.datasets import load_iris\n'), ((328, 340), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (338, 340), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((519, 543), 'pickle.dump', 'pickle.dump', (['model', 'file'], {}), '(model, file)\n', (530, 543), False, 'import pickle\n'), ((677, 694), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (688, 694), False, 'import pickle\n')]
|
#Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
import time
import glob
import torch
import torch.nn as nn
from live_plotter import live_plotter
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from functools import partial
from abc import ABCMeta, abstractmethod
import plottingTools
import pytorchModel
import loadData
class pytorchFwdModel(pytorchModel.pytorchModel) :
#######################################################################################################
#Construction functions
#######################################################################################################
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestPyTorchFwdModel"):
super().__init__(learningRate, hyperParameters, nbUnitsPerLayer, nbFactors,
modelName = modelName)
def buildModel(self):
self.fe = pytorchModel.Functional_encoder(self.nbFactors + 1) #Neural network architecture
return
#######################################################################################################
#Evaluation functions
#######################################################################################################
def evalBatch(self, batch, code):
batchLogMoneyness = self.getLogMoneyness(batch)
scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness
logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness
# for j in np.random.choice(len(test[k]), 10):
# filt = test[k].nBizDays >= 10
batchLogMat = self.getLogMaturities(batch)
scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity
logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) , requires_grad=True).float()
scaledFwd = (batch[2].values - self.MeanFwd) / self.StdFwd
fwdTensor = torch.tensor( np.expand_dims(scaledFwd, 1) ).float()
codeTensor = code.repeat(batch[0].shape[0], 1).float()
refVol = torch.tensor(batch[0].values)
inputTensor = torch.cat((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)
outputTensor = self.fe( inputTensor )[:, 0]
loss = torch.mean( (outputTensor - refVol)[~torch.isnan(outputTensor)] ** 2 )#torch.nanmean( (outputTensor - refVol) ** 2 )
return inputTensor, outputTensor, loss, logMaturity, codeTensor, logMoneynessTensor
def commonEvalSingleDayWithoutCalibration(self,
initialValueForFactors,
dataSetList,
computeSensi = False):
#Rebuild tensor graph
self.restoringGraph()
#Build tensor for reconstruction
nbObs = 1 if initialValueForFactors.ndim == 1 else initialValueForFactors.shape[0]
nbPoints = dataSetList[1].shape[0] if dataSetList[1].ndim == 1 else dataSetList[1].shape[1]
nbFactors = self.nbFactors
reshapedValueForFactors = np.reshape([initialValueForFactors],
(nbObs,nbFactors))
self.code = pytorchModel.Code(nbObs, self.nbFactors, initialValue = reshapedValueForFactors) #Latent variables
codeTensor = self.code.code[k, :].repeat(nbPoints, 1)
batchLogMoneyness = self.getLogMoneyness(dataSetList)
scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness
        logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness (scaledMoneyness is already an ndarray here)
scaledFwd = (dataSetList[2].values - self.MeanFwd) / self.StdFwd
fwdTensor = torch.tensor( np.expand_dims(scaledFwd, 1) ).float()
# for j in np.random.choice(len(test[k]), 10):
# filt = test[k].nBizDays >= 10
batchLogMat = self.getLogMaturities(dataSetList)
scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity
logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) ).float()
inputTensor = torch.cat((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)
outputTensor = self.fe( inputTensor )[:, 0]
self.restoreWeights()
#Build tensor for reconstruction
# print("nbPoints : ", nbPoints)
# print("initialValueForFactors : ", initialValueForFactors)
# print("inputFeatures : ", inputFeatures)
# print("outputFeatures : ", outputFeatures)
# print("outputTensor : ", self.outputTensor)
        reconstructedSurface = outputTensor.detach().numpy().reshape(dataSetList[0].shape)
inputTensor = torch.cat((strikes, logMaturity, codeTensor), dim=1)
#if computeSensi :
# inputTensor.requires_grad = True
outputTensor = self.fe( inputTensor )[:, 0]
reshapedJacobian = None
if computeSensi :
reshapedJacobian = np.ones((nbObs, nbPoints, nbFactors)) if initialValueForFactors.ndim != 1 else np.ones((nbPoints, nbFactors))
#for p in range(nbPoints) :
# output.backward()
# jacobian = input.grad.data
# reshapedJacobian = tf.reshape(jacobian, shape = [nbObs, nbPoints, nbFactors])
# if self.verbose :
# print(reshapedJacobian)
calibratedSurfaces = outputTensor
factorSensi = None
if initialValueForFactors.ndim == 1 :
calibratedSurfaces = np.reshape(reconstructedSurface, (nbPoints))
if reshapedJacobian is not None :
factorSensi = np.reshape(reshapedJacobian, (nbPoints, nbFactors))
elif initialValueForFactors.ndim == 2 :
calibratedSurfaces = np.reshape(reconstructedSurface, (nbObs,nbPoints))
if reshapedJacobian is not None :
factorSensi = np.reshape(reshapedJacobian, (nbObs, nbPoints, nbFactors))
return calibratedSurfaces, factorSensi
|
[
"pytorchModel.Functional_encoder",
"numpy.reshape",
"numpy.ones",
"torch.tensor",
"pytorchModel.Code",
"numpy.expand_dims",
"torch.isnan",
"torch.cat"
] |
[((1146, 1197), 'pytorchModel.Functional_encoder', 'pytorchModel.Functional_encoder', (['(self.nbFactors + 1)'], {}), '(self.nbFactors + 1)\n', (1177, 1197), False, 'import pytorchModel\n'), ((2399, 2428), 'torch.tensor', 'torch.tensor', (['batch[0].values'], {}), '(batch[0].values)\n', (2411, 2428), False, 'import torch\n'), ((2462, 2536), 'torch.cat', 'torch.cat', (['(logMoneynessTensor, logMaturity, fwdTensor, codeTensor)'], {'dim': '(1)'}), '((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)\n', (2471, 2536), False, 'import torch\n'), ((3487, 3543), 'numpy.reshape', 'np.reshape', (['[initialValueForFactors]', '(nbObs, nbFactors)'], {}), '([initialValueForFactors], (nbObs, nbFactors))\n', (3497, 3543), True, 'import numpy as np\n'), ((3630, 3708), 'pytorchModel.Code', 'pytorchModel.Code', (['nbObs', 'self.nbFactors'], {'initialValue': 'reshapedValueForFactors'}), '(nbObs, self.nbFactors, initialValue=reshapedValueForFactors)\n', (3647, 3708), False, 'import pytorchModel\n'), ((4601, 4675), 'torch.cat', 'torch.cat', (['(logMoneynessTensor, logMaturity, fwdTensor, codeTensor)'], {'dim': '(1)'}), '((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)\n', (4610, 4675), False, 'import torch\n'), ((5214, 5266), 'torch.cat', 'torch.cat', (['(strikes, logMaturity, codeTensor)'], {'dim': '(1)'}), '((strikes, logMaturity, codeTensor), dim=1)\n', (5223, 5266), False, 'import torch\n'), ((6094, 6136), 'numpy.reshape', 'np.reshape', (['reconstructedSurface', 'nbPoints'], {}), '(reconstructedSurface, nbPoints)\n', (6104, 6136), True, 'import numpy as np\n'), ((5507, 5544), 'numpy.ones', 'np.ones', (['(nbObs, nbPoints, nbFactors)'], {}), '((nbObs, nbPoints, nbFactors))\n', (5514, 5544), True, 'import numpy as np\n'), ((5586, 5616), 'numpy.ones', 'np.ones', (['(nbPoints, nbFactors)'], {}), '((nbPoints, nbFactors))\n', (5593, 5616), True, 'import numpy as np\n'), ((6217, 6268), 'numpy.reshape', 'np.reshape', (['reshapedJacobian', '(nbPoints, nbFactors)'], {}), '(reshapedJacobian, (nbPoints, nbFactors))\n', (6227, 6268), True, 'import numpy as np\n'), ((6352, 6403), 'numpy.reshape', 'np.reshape', (['reconstructedSurface', '(nbObs, nbPoints)'], {}), '(reconstructedSurface, (nbObs, nbPoints))\n', (6362, 6403), True, 'import numpy as np\n'), ((1750, 1784), 'numpy.expand_dims', 'np.expand_dims', (['scaledMoneyness', '(1)'], {}), '(scaledMoneyness, 1)\n', (1764, 1784), True, 'import numpy as np\n'), ((2093, 2121), 'numpy.expand_dims', 'np.expand_dims', (['scaledMat', '(1)'], {}), '(scaledMat, 1)\n', (2107, 2121), True, 'import numpy as np\n'), ((2267, 2295), 'numpy.expand_dims', 'np.expand_dims', (['scaledFwd', '(1)'], {}), '(scaledFwd, 1)\n', (2281, 2295), True, 'import numpy as np\n'), ((4009, 4050), 'numpy.expand_dims', 'np.expand_dims', (['scaledMoneyness.values', '(1)'], {}), '(scaledMoneyness.values, 1)\n', (4023, 4050), True, 'import numpy as np\n'), ((4194, 4222), 'numpy.expand_dims', 'np.expand_dims', (['scaledFwd', '(1)'], {}), '(scaledFwd, 1)\n', (4208, 4222), True, 'import numpy as np\n'), ((4528, 4556), 'numpy.expand_dims', 'np.expand_dims', (['scaledMat', '(1)'], {}), '(scaledMat, 1)\n', (4542, 4556), True, 'import numpy as np\n'), ((6481, 6539), 'numpy.reshape', 'np.reshape', (['reshapedJacobian', '(nbObs, nbPoints, nbFactors)'], {}), '(reshapedJacobian, (nbObs, nbPoints, nbFactors))\n', (6491, 6539), True, 'import numpy as np\n'), ((2653, 2678), 'torch.isnan', 'torch.isnan', (['outputTensor'], {}), '(outputTensor)\n', (2664, 2678), False, 'import torch\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
import warnings
warnings.filterwarnings('ignore')
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
X_train.shape = (60000, 28, 28)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
# output shape is (None, 10), i.e. (batch size, number of classes)
# the model returns raw logits (the logit function is the inverse of the sigmoid);
# probab_model below applies a softmax to turn them into probabilities
logits = model(X_train[:1]).numpy()
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
model.fit(X_train, y_train, epochs=1)
model.evaluate(X_test, y_test, verbose=2)
probab_model = keras.Sequential([
model,
keras.layers.Softmax()
])
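# Illustrative follow-up (not part of the original script): the Softmax wrapper
# turns raw logits into per-class probabilities, so argmax gives the prediction.
probs = probab_model(X_test[:1]).numpy()      # shape (1, 10); each row sums to ~1
print("predicted digit:", probs.argmax(axis=1)[0])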
|
[
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.Flatten",
"warnings.filterwarnings"
] |
[((89, 122), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (112, 122), False, 'import warnings\n'), ((625, 685), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (667, 685), False, 'from tensorflow import keras\n'), ((331, 376), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (354, 376), True, 'import tensorflow as tf\n'), ((382, 427), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (403, 427), True, 'import tensorflow as tf\n'), ((433, 461), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (456, 461), True, 'import tensorflow as tf\n'), ((467, 492), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {}), '(10)\n', (488, 492), True, 'import tensorflow as tf\n'), ((885, 907), 'tensorflow.keras.layers.Softmax', 'keras.layers.Softmax', ([], {}), '()\n', (905, 907), False, 'from tensorflow import keras\n')]
|
import unittest
from rdflib import Graph
def buildQueryArgs(q):
return dict(select="", where="", optional="")
class SPARQLParserTest(unittest.TestCase):
known_issue = True
def setUp(self):
self.graph = Graph()
pass
def tearDown(self):
pass
tests = [
("basic",
"""\
SELECT ?name
WHERE { ?a <http://xmlns.com/foaf/0.1/name> ?name }"""),
("simple_prefix",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { ?a foaf:name ?name }"""),
("base_statement",
"""\
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { ?a <name> ?name }"""),
("prefix_and_colon_only_prefix",
"""\
PREFIX : <http://xmlns.com/foaf/0.1/>
PREFIX vcard: <http://www.w3.org/2001/vcard-rdf/3.0#>
SELECT ?name ?title
WHERE {
?a :name ?name .
?a vcard:TITLE ?title
}"""),
("predicate_object_list_notation",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE {
?x foaf:name ?name ;
foaf:mbox ?mbox .
}"""),
("object_list_notation",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?x
WHERE {
?x foaf:nick "Alice" ,
"Alice_" .
}
"""),
("escaped_literals",
"""\
PREFIX tag: <http://xmlns.com/foaf/0.1/>
PREFIX vcard: <http://www.w3.org/2001/vcard-rdf/3.0#>
SELECT ?name
WHERE {
?a tag:name ?name ;
vcard:TITLE "escape test vcard:TITLE " ;
<tag://test/escaping> "This is a ''' Test \"\"\"" ;
<tag://test/escaping> ?d
}
"""),
("key_word_as_variable",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?PREFIX ?WHERE
WHERE {
?x foaf:name ?PREFIX ;
foaf:mbox ?WHERE .
}"""),
("key_word_as_prefix",
"""\
PREFIX WHERE: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE {
?x WHERE:name ?name ;
WHERE:mbox ?mbox .
}"""),
("some_test_cases_from_grammar_py_1",
"""\
SELECT ?title
WHERE {
<http://example.org/book/book1>
<http://purl.org/dc/elements/1.1/title>
?title .
}"""),
("some_test_cases_from_grammar_py_2",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE { ?person foaf:name ?name .
OPTIONAL { ?person foaf:mbox ?mbox}
}"""),
("some_test_cases_from_grammar_py_3",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?name2
WHERE { ?person foaf:name ?name .
OPTIONAL { ?person foaf:knows ?p2 . ?p2 foaf:name ?name2 . }
}"""),
("some_test_cases_from_grammar_py_4",
"""\
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
#PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?name ?mbox
WHERE
{
{ ?person rdf:type foaf:Person } .
OPTIONAL { ?person foaf:name ?name } .
OPTIONAL {?person foaf:mbox ?mbox} .
}""")
]
def _buildQueryArg(q):
res = buildQueryArgs(q)
if res.get('select', False):
assert res["select"] is not None
if res.get('where', False):
assert res["where"] is not None
if res.get('optional', False):
assert res["optional"] is not None
# result = sparqlGr.query(select, where, optional)
# self.assert_(self.graph.query(q) is not None)
|
[
"rdflib.Graph"
] |
[((225, 232), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (230, 232), False, 'from rdflib import Graph\n')]
|
from panda3d.core import *
# Leave these imports in, they may be used by ptf files.
from panda3d.physics import *
from . import Particles
from . import ForceGroup
from direct.directnotify import DirectNotifyGlobal
class ParticleEffect(NodePath):
notify = DirectNotifyGlobal.directNotify.newCategory('ParticleEffect')
pid = 1
def __init__(self, name=None, particles=None):
if name is None:
name = 'particle-effect-%d' % ParticleEffect.pid
ParticleEffect.pid += 1
NodePath.__init__(self, name)
# Record particle effect name
self.name = name
# Enabled flag
self.fEnabled = 0
# Dictionary of particles and forceGroups
self.particlesDict = {}
self.forceGroupDict = {}
# The effect's particle system
if particles is not None:
self.addParticles(particles)
self.renderParent = None
def cleanup(self):
self.removeNode()
self.disable()
if self.__isValid():
for f in self.forceGroupDict.values():
f.cleanup()
for p in self.particlesDict.values():
p.cleanup()
del self.forceGroupDict
del self.particlesDict
del self.renderParent
def getName(self):
# override NodePath.getName()
return self.name
def reset(self):
self.removeAllForces()
self.removeAllParticles()
self.forceGroupDict = {}
self.particlesDict = {}
def start(self, parent=None, renderParent=None):
assert self.notify.debug('start() - name: %s' % self.name)
self.renderParent = renderParent
self.enable()
if parent is not None:
self.reparentTo(parent)
def enable(self):
# band-aid added for client crash - grw
if self.__isValid():
if self.renderParent:
for p in self.particlesDict.values():
p.setRenderParent(self.renderParent.node())
for f in self.forceGroupDict.values():
f.enable()
for p in self.particlesDict.values():
p.enable()
self.fEnabled = 1
def disable(self):
self.detachNode()
# band-aid added for client crash - grw
if self.__isValid():
for p in self.particlesDict.values():
p.setRenderParent(p.node)
for f in self.forceGroupDict.values():
f.disable()
for p in self.particlesDict.values():
p.disable()
self.fEnabled = 0
def isEnabled(self):
"""
Note: this may be misleading if enable(), disable() not used
"""
return self.fEnabled
def addForceGroup(self, forceGroup):
forceGroup.nodePath.reparentTo(self)
forceGroup.particleEffect = self
self.forceGroupDict[forceGroup.getName()] = forceGroup
# Associate the force group with all particles
for i in range(len(forceGroup)):
self.addForce(forceGroup[i])
def addForce(self, force):
for p in list(self.particlesDict.values()):
p.addForce(force)
def removeForceGroup(self, forceGroup):
# Remove forces from all particles
for i in range(len(forceGroup)):
self.removeForce(forceGroup[i])
forceGroup.nodePath.removeNode()
forceGroup.particleEffect = None
self.forceGroupDict.pop(forceGroup.getName(), None)
def removeForce(self, force):
for p in list(self.particlesDict.values()):
p.removeForce(force)
def removeAllForces(self):
for fg in list(self.forceGroupDict.values()):
self.removeForceGroup(fg)
def addParticles(self, particles):
particles.nodePath.reparentTo(self)
self.particlesDict[particles.getName()] = particles
# Associate all forces in all force groups with the particles
for fg in list(self.forceGroupDict.values()):
for i in range(len(fg)):
particles.addForce(fg[i])
def removeParticles(self, particles):
if particles is None:
self.notify.warning('removeParticles() - particles == None!')
return
particles.nodePath.detachNode()
self.particlesDict.pop(particles.getName(), None)
# Remove all forces from the particles
for fg in list(self.forceGroupDict.values()):
for f in fg:
particles.removeForce(f)
def removeAllParticles(self):
for p in list(self.particlesDict.values()):
self.removeParticles(p)
def getParticlesList(self):
return list(self.particlesDict.values())
def getParticlesNamed(self, name):
return self.particlesDict.get(name, None)
def getParticlesDict(self):
return self.particlesDict
def getForceGroupList(self):
return list(self.forceGroupDict.values())
def getForceGroupNamed(self, name):
return self.forceGroupDict.get(name, None)
def getForceGroupDict(self):
return self.forceGroupDict
def saveConfig(self, filename):
filename = Filename(filename)
with open(filename.toOsSpecific(), 'w') as f:
# Add a blank line
f.write('\n')
# Make sure we start with a clean slate
f.write('self.reset()\n')
pos = self.getPos()
hpr = self.getHpr()
scale = self.getScale()
f.write('self.setPos(%0.3f, %0.3f, %0.3f)\n' %
(pos[0], pos[1], pos[2]))
f.write('self.setHpr(%0.3f, %0.3f, %0.3f)\n' %
(hpr[0], hpr[1], hpr[2]))
f.write('self.setScale(%0.3f, %0.3f, %0.3f)\n' %
(scale[0], scale[1], scale[2]))
# Save all the particles to file
num = 0
for p in list(self.particlesDict.values()):
target = 'p%d' % num
num = num + 1
f.write(target + ' = Particles.Particles(\'%s\')\n' % p.getName())
p.printParams(f, target)
f.write('self.addParticles(%s)\n' % target)
# Save all the forces to file
num = 0
for fg in list(self.forceGroupDict.values()):
target = 'f%d' % num
num = num + 1
f.write(target + ' = ForceGroup.ForceGroup(\'%s\')\n' % \
fg.getName())
fg.printParams(f, target)
f.write('self.addForceGroup(%s)\n' % target)
def loadConfig(self, filename):
vfs = VirtualFileSystem.getGlobalPtr()
data = vfs.readFile(filename, 1)
data = data.replace(b'\r', b'')
try:
exec(data)
except:
self.notify.warning('loadConfig: failed to load particle file: '+ repr(filename))
raise
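    # For reference, the .ptf files consumed here are the plain Python written by
    # saveConfig() above; a minimal one looks like (names and numbers illustrative):
    #   self.reset()
    #   self.setPos(0.000, 0.000, 0.000)
    #   self.setHpr(0.000, 0.000, 0.000)
    #   self.setScale(1.000, 1.000, 1.000)
    #   p0 = Particles.Particles('particles-1')
    #   ...particle parameters written by p.printParams()...
    #   self.addParticles(p0)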
def accelerate(self,time,stepCount = 1,stepTime=0.0):
for particles in self.getParticlesList():
particles.accelerate(time,stepCount,stepTime)
def clearToInitial(self):
for particles in self.getParticlesList():
particles.clearToInitial()
def softStop(self):
for particles in self.getParticlesList():
particles.softStop()
def softStart(self, firstBirthDelay=None):
if self.__isValid():
for particles in self.getParticlesList():
if firstBirthDelay is not None:
particles.softStart(br=-1, first_birth_delay=firstBirthDelay)
else:
particles.softStart()
else:
# Not asserting here since we want to crash live clients for more expedient bugfix
# (Sorry, live clients)
self.notify.error('Trying to start effect(%s) after cleanup.' % (self.getName(),))
def __isValid(self):
return hasattr(self, 'forceGroupDict') and \
hasattr(self, 'particlesDict')
# Snake-case aliases.
is_enabled = isEnabled
add_force_group = addForceGroup
add_force = addForce
remove_force_group = removeForceGroup
remove_force = removeForce
remove_all_forces = removeAllForces
add_particles = addParticles
remove_particles = removeParticles
remove_all_particles = removeAllParticles
get_particles_list = getParticlesList
get_particles_named = getParticlesNamed
get_particles_dict = getParticlesDict
get_force_group_list = getForceGroupList
get_force_group_named = getForceGroupNamed
get_force_group_dict = getForceGroupDict
save_config = saveConfig
load_config = loadConfig
clear_to_initial = clearToInitial
soft_stop = softStop
soft_start = softStart
|
[
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory"
] |
[((264, 325), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""ParticleEffect"""'], {}), "('ParticleEffect')\n", (307, 325), False, 'from direct.directnotify import DirectNotifyGlobal\n')]
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from orchestra.models.fields import PrivateFileField
from orchestra.models.queryset import group_by
from . import settings
from .methods import PaymentMethod
class PaymentSourcesQueryset(models.QuerySet):
def get_default(self):
return self.filter(is_active=True).first()
class PaymentSource(models.Model):
account = models.ForeignKey('accounts.Account', verbose_name=_("account"),
related_name='paymentsources')
method = models.CharField(_("method"), max_length=32,
choices=PaymentMethod.get_choices())
data = JSONField(_("data"), default={})
is_active = models.BooleanField(_("active"), default=True)
objects = PaymentSourcesQueryset.as_manager()
def __str__(self):
return "%s (%s)" % (self.label, self.method_class.verbose_name)
@cached_property
def method_class(self):
return PaymentMethod.get(self.method)
@cached_property
def method_instance(self):
""" Per request lived method_instance """
return self.method_class(self)
@cached_property
def label(self):
return self.method_instance.get_label()
@cached_property
def number(self):
return self.method_instance.get_number()
def get_bill_context(self):
method = self.method_instance
return {
'message': method.get_bill_message(),
}
def get_due_delta(self):
return self.method_instance.due_delta
def clean(self):
self.data = self.method_instance.clean_data()
class TransactionQuerySet(models.QuerySet):
group_by = group_by
def create(self, **kwargs):
source = kwargs.get('source')
if source is None or not hasattr(source.method_class, 'process'):
# Manual payments don't need processing
kwargs['state'] = self.model.WAITTING_EXECUTION
amount = kwargs.get('amount')
if amount == 0:
kwargs['state'] = self.model.SECURED
return super(TransactionQuerySet, self).create(**kwargs)
def secured(self):
return self.filter(state=Transaction.SECURED)
def exclude_rejected(self):
return self.exclude(state=Transaction.REJECTED)
def amount(self):
return next(iter(self.aggregate(models.Sum('amount')).values())) or 0
def processing(self):
return self.filter(state__in=[Transaction.EXECUTED, Transaction.WAITTING_EXECUTION])
class Transaction(models.Model):
WAITTING_PROCESSING = 'WAITTING_PROCESSING' # CREATED
WAITTING_EXECUTION = 'WAITTING_EXECUTION' # PROCESSED
EXECUTED = 'EXECUTED'
SECURED = 'SECURED'
REJECTED = 'REJECTED'
STATES = (
(WAITTING_PROCESSING, _("Waitting processing")),
(WAITTING_EXECUTION, _("Waitting execution")),
(EXECUTED, _("Executed")),
(SECURED, _("Secured")),
(REJECTED, _("Rejected")),
)
STATE_HELP = {
WAITTING_PROCESSING: _("The transaction is created and requires processing by the "
"specific payment method."),
WAITTING_EXECUTION: _("The transaction is processed and its pending execution on "
"the related financial institution."),
EXECUTED: _("The transaction is executed on the financial institution."),
SECURED: _("The transaction ammount is secured."),
REJECTED: _("The transaction has failed and the ammount is lost, a new transaction "
"should be created for recharging."),
}
bill = models.ForeignKey('bills.bill', verbose_name=_("bill"),
related_name='transactions')
source = models.ForeignKey(PaymentSource, null=True, blank=True, on_delete=models.SET_NULL,
verbose_name=_("source"), related_name='transactions')
process = models.ForeignKey('payments.TransactionProcess', null=True, blank=True,
on_delete=models.SET_NULL, verbose_name=_("process"), related_name='transactions')
state = models.CharField(_("state"), max_length=32, choices=STATES,
default=WAITTING_PROCESSING)
amount = models.DecimalField(_("amount"), max_digits=12, decimal_places=2)
currency = models.CharField(max_length=10, default=settings.PAYMENT_CURRENCY)
created_at = models.DateTimeField(_("created"), auto_now_add=True)
modified_at = models.DateTimeField(_("modified"), auto_now=True)
objects = TransactionQuerySet.as_manager()
def __str__(self):
return "#%i" % self.id
@property
def account(self):
return self.bill.account
def clean(self):
if not self.pk:
amount = self.bill.transactions.exclude(state=self.REJECTED).amount()
if amount >= self.bill.total:
raise ValidationError(
_("Bill %(number)s already has valid transactions that cover bill total amount (%(amount)s).") % {
'number': self.bill.number,
'amount': amount,
}
)
def get_state_help(self):
if self.source:
return self.source.method_instance.state_help.get(self.state) or self.STATE_HELP.get(self.state)
return self.STATE_HELP.get(self.state)
def mark_as_processed(self):
self.state = self.WAITTING_EXECUTION
self.save(update_fields=('state', 'modified_at'))
def mark_as_executed(self):
self.state = self.EXECUTED
self.save(update_fields=('state', 'modified_at'))
def mark_as_secured(self):
self.state = self.SECURED
self.save(update_fields=('state', 'modified_at'))
def mark_as_rejected(self):
self.state = self.REJECTED
self.save(update_fields=('state', 'modified_at'))
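    # Typical lifecycle (sketch, inferred from the states and helpers above):
    # WAITTING_PROCESSING -> mark_as_processed() -> WAITTING_EXECUTION
    # -> mark_as_executed() -> EXECUTED -> mark_as_secured() -> SECURED,
    # or mark_as_rejected() -> REJECTED when the charge fails.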
class TransactionProcess(models.Model):
"""
Stores arbitrary data generated by payment methods while processing transactions
"""
CREATED = 'CREATED'
EXECUTED = 'EXECUTED'
ABORTED = 'ABORTED'
COMMITED = 'COMMITED'
STATES = (
(CREATED, _("Created")),
(EXECUTED, _("Executed")),
(ABORTED, _("Aborted")),
(COMMITED, _("Commited")),
)
data = JSONField(_("data"), blank=True)
file = PrivateFileField(_("file"), blank=True)
state = models.CharField(_("state"), max_length=16, choices=STATES, default=CREATED)
created_at = models.DateTimeField(_("created"), auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(_("updated"), auto_now=True)
class Meta:
verbose_name_plural = _("Transaction processes")
def __str__(self):
return '#%i' % self.id
def mark_as_executed(self):
self.state = self.EXECUTED
for transaction in self.transactions.all():
transaction.mark_as_executed()
self.save(update_fields=('state', 'updated_at'))
def abort(self):
self.state = self.ABORTED
for transaction in self.transactions.all():
transaction.mark_as_rejected()
self.save(update_fields=('state', 'updated_at'))
def commit(self):
self.state = self.COMMITED
for transaction in self.transactions.processing():
transaction.mark_as_secured()
self.save(update_fields=('state', 'updated_at'))
|
[
"django.db.models.Sum",
"django.utils.translation.ugettext_lazy",
"django.db.models.CharField"
] |
[((4428, 4494), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'default': 'settings.PAYMENT_CURRENCY'}), '(max_length=10, default=settings.PAYMENT_CURRENCY)\n', (4444, 4494), False, 'from django.db import models\n'), ((692, 703), 'django.utils.translation.ugettext_lazy', '_', (['"""method"""'], {}), "('method')\n", (693, 703), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((786, 795), 'django.utils.translation.ugettext_lazy', '_', (['"""data"""'], {}), "('data')\n", (787, 795), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((845, 856), 'django.utils.translation.ugettext_lazy', '_', (['"""active"""'], {}), "('active')\n", (846, 856), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3199, 3291), 'django.utils.translation.ugettext_lazy', '_', (['"""The transaction is created and requires processing by the specific payment method."""'], {}), "('The transaction is created and requires processing by the specific payment method.'\n )\n", (3200, 3291), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3350, 3452), 'django.utils.translation.ugettext_lazy', '_', (['"""The transaction is processed and its pending execution on the related financial institution."""'], {}), "('The transaction is processed and its pending execution on the related financial institution.'\n )\n", (3351, 3452), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3500, 3562), 'django.utils.translation.ugettext_lazy', '_', (['"""The transaction is executed on the financial institution."""'], {}), "('The transaction is executed on the financial institution.')\n", (3501, 3562), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3581, 3621), 'django.utils.translation.ugettext_lazy', '_', (['"""The transaction ammount is secured."""'], {}), "('The transaction ammount is secured.')\n", (3582, 3621), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3641, 3754), 'django.utils.translation.ugettext_lazy', '_', (['"""The transaction has failed and the ammount is lost, a new transaction should be created for recharging."""'], {}), "('The transaction has failed and the ammount is lost, a new transaction should be created for recharging.'\n )\n", (3642, 3754), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4254, 4264), 'django.utils.translation.ugettext_lazy', '_', (['"""state"""'], {}), "('state')\n", (4255, 4264), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4367, 4378), 'django.utils.translation.ugettext_lazy', '_', (['"""amount"""'], {}), "('amount')\n", (4368, 4378), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4533, 4545), 'django.utils.translation.ugettext_lazy', '_', (['"""created"""'], {}), "('created')\n", (4534, 4545), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4605, 4618), 'django.utils.translation.ugettext_lazy', '_', (['"""modified"""'], {}), "('modified')\n", (4606, 4618), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6457, 6466), 'django.utils.translation.ugettext_lazy', '_', (['"""data"""'], {}), "('data')\n", (6458, 6466), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6508, 6517), 'django.utils.translation.ugettext_lazy', '_', (['"""file"""'], {}), "('file')\n", (6509, 6517), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6560, 6570), 'django.utils.translation.ugettext_lazy', '_', 
(['"""state"""'], {}), "('state')\n", (6561, 6570), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6658, 6670), 'django.utils.translation.ugettext_lazy', '_', (['"""created"""'], {}), "('created')\n", (6659, 6670), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6744, 6756), 'django.utils.translation.ugettext_lazy', '_', (['"""updated"""'], {}), "('updated')\n", (6745, 6756), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6824, 6850), 'django.utils.translation.ugettext_lazy', '_', (['"""Transaction processes"""'], {}), "('Transaction processes')\n", (6825, 6850), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((609, 621), 'django.utils.translation.ugettext_lazy', '_', (['"""account"""'], {}), "('account')\n", (610, 621), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2960, 2984), 'django.utils.translation.ugettext_lazy', '_', (['"""Waitting processing"""'], {}), "('Waitting processing')\n", (2961, 2984), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3016, 3039), 'django.utils.translation.ugettext_lazy', '_', (['"""Waitting execution"""'], {}), "('Waitting execution')\n", (3017, 3039), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3061, 3074), 'django.utils.translation.ugettext_lazy', '_', (['"""Executed"""'], {}), "('Executed')\n", (3062, 3074), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3095, 3107), 'django.utils.translation.ugettext_lazy', '_', (['"""Secured"""'], {}), "('Secured')\n", (3096, 3107), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3129, 3142), 'django.utils.translation.ugettext_lazy', '_', (['"""Rejected"""'], {}), "('Rejected')\n", (3130, 3142), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3841, 3850), 'django.utils.translation.ugettext_lazy', '_', (['"""bill"""'], {}), "('bill')\n", (3842, 3850), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4006, 4017), 'django.utils.translation.ugettext_lazy', '_', (['"""source"""'], {}), "('source')\n", (4007, 4017), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4182, 4194), 'django.utils.translation.ugettext_lazy', '_', (['"""process"""'], {}), "('process')\n", (4183, 4194), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6307, 6319), 'django.utils.translation.ugettext_lazy', '_', (['"""Created"""'], {}), "('Created')\n", (6308, 6319), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6341, 6354), 'django.utils.translation.ugettext_lazy', '_', (['"""Executed"""'], {}), "('Executed')\n", (6342, 6354), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6375, 6387), 'django.utils.translation.ugettext_lazy', '_', (['"""Aborted"""'], {}), "('Aborted')\n", (6376, 6387), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6409, 6422), 'django.utils.translation.ugettext_lazy', '_', (['"""Commited"""'], {}), "('Commited')\n", (6410, 6422), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5054, 5153), 'django.utils.translation.ugettext_lazy', '_', (['"""Bill %(number)s already has valid transactions that cover bill total amount (%(amount)s)."""'], {}), "('Bill %(number)s already has valid transactions that cover bill total amount (%(amount)s).'\n )\n", (5055, 5153), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2526, 2546), 
'django.db.models.Sum', 'models.Sum', (['"""amount"""'], {}), "('amount')\n", (2536, 2546), False, 'from django.db import models\n')]
|
import optparse
import sys
from sys import getsizeof
import logging
from signal import signal, SIGINT
import time
import requests
# MIT License
#
# Copyright (c) 2022 SaicharanKandukuri
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(
level="NOTSET",
format=FORMAT,
datefmt="[%X]",
handlers=[RichHandler()]
)
logging.disable(logging.DEBUG)  # logging.disable expects a numeric level, not a string
log = logging.getLogger("rich")
class WifiUtils:
"""class for wifi utils"""
def __init__(self, username, password, host, port):
self.username = username
self.password = password
self.host = host
self.port = port
@classmethod
def request(cls,
method,
username,
password,
host, port,
timeout) -> list:
"""request method: sends request to wifi host
Args:
method (str): interaction method "login.xml" or "logout.xml". Defaults to "login.xml".
username (str): username assigned by parul university to access wifi
password (str): password assigned by parul university to access wifi
            host (str): hostname of the parul university wifi hotspot/router. Defaults to "10.0.0.11".
port (str): port to send login request. Defaults to "8090".
timeout (int): request timeout. Defaults to 10.
Returns:
list
server_request status[true|false]
response(xml data returned form server)
status_code(web request status code)
"""
url = ("http://"+host+":"+port+"/"+method)
body = ("mode=191&username=" + username + "&password=" + password +
"&a=1630404423764&producttype=0"
)
headers = {
"Host": "http://" + host + ":" + port + "",
"Content-Length": str(getsizeof(body)),
"User-Agent": "Chrome/92.0.4515.159 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"Origin": "http://" + host + ":" + port,
"Referer": "http://" + host + ":" + port + "/",
"Accept-Encoding": "gzip defalte",
"Accept-Language": "en-US,en;q=0.9",
"Connection": "close",
}
body_array = bytearray(body, 'utf-8')
req = requests.post(url,
data=body_array,
headers=headers,
timeout=timeout,
verify=False
)
return [(req.status_code == 200), req.text, req.status_code]
def login(self,
username,
password,
host,
port="8090",
method="login.xml",
timeout=10) -> list:
"""login: uses request method to send login web request with credentials to wifi host
Args:
username (str): username assigned by parul university to access wifi
password (str): password assigned by parul university to access wifi
host (str): hostname of the parul university wifi hotspot/routers
Defaults to "10.0.0.11"
port (str, optional): port to send login request. Defaults to "8090".
method (str, optional): interaction method
"login.xml" or "logout.xml". Defaults to "login.xml".
timeout (int, optional): request timeout. Defaults to 10.
"""
return self.request(method, username, password, host, port, timeout)
def logout(self,
username,
password,
host,
port="8090",
method="logout.xml",
timeout=10) -> list:
"""logout: uses request method to send logout web request with credentials to wifi host
Args:
username (str): username assigned by parul university to access wifi
password (str): password assigned by parul university to access wifi
host (str): hostname of the parul university wifi hotspot/routers
Defaults to "10.0.0.11"
port (str, optional): port to send login request. Defaults to "8090".
method (str, optional): interaction method
"login.xml" or "logout.xml". Defaults to "logout.xml".
timeout (int, optional): request timeout. Defaults to 10.
"""
return self.request(method, username, password, host, port, timeout)
# def get_xml_msg(xml): # for later (●'◡'●)
# return Et.parse(xml).getroot()[1]
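# Illustrative usage sketch of the class above (credentials and host below are
# placeholders, not real values):
#
#   utils = WifiUtils("pu-username", "pu-password", "10.0.0.11", "8090")
#   ok, xml_response, status_code = utils.login("pu-username", "pu-password", "10.0.0.11")
#   ok, xml_response, status_code = utils.logout("pu-username", "pu-password", "10.0.0.11")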
def grey_print(_string):
"""prints outs grey text
Args:
_string (str)
"""
print(f"\033[90m{_string}\033[0m")
def connection_to(url, timeout=10):
"""checks if connection to url is available"""
try:
requests.get(url, timeout=timeout)
return True
except (requests.ConnectionError,
requests.Timeout):
return False
def keep_alive(username, password, host, port):
"""keeps connection alive to wifi host"""
while True:
if connection_to("http://10.0.0.11:8090/"):
log.info("connection to router \"available\"")
else:
log.critical("connection to router \"unavailable\"")
if connection_to("https://google.com"):
log.info("Connected to the internet")
else:
log.warning("Not connected to the internet")
log.info("Tying to login back")
try:
log.info(WifiUtils.login(username, password, host, port))
except (requests.ConnectionError,
requests.Timeout):
log.critical(
"Connection error: \"UNSTABLE CONNECTION TO HOST\"")
time.sleep(5)
def exit_handler(_signal, frame):
"""captures keyboard interrupts and kill signals & exits with messesage"""
log.warning('SIGINT or CTRL-C detected. Exiting gracefully')
grey_print("signal:"+str(_signal))
grey_print("frame:"+str(frame))
sys.exit(0)
if __name__ == '__main__':
signal(SIGINT, exit_handler)
parser = optparse.OptionParser()
parser.add_option('-u', '--username', dest='username',
help='username to login/logout with parul university wifi service')
parser.add_option('-p', '--password', dest='password',
help='password to login/logout with parul university wifi service')
parser.add_option('-H', '--host', dest='host',
default='10.0.0.11', type=str)
parser.add_option('-P', '--port', dest='port',
default='8090', type=str)
parser.add_option('-k', '--keep-alive', action='store_true',
help='keep connecting to wifi when it gets signed out', default=False)
parser.add_option('-o', '--logout', action='store_true',
help='logout from wifi', default=False)
parser.add_option('-l', '--login', action='store_true',
help='login to wifi', default=False)
options, args = parser.parse_args()
WifiUtils = WifiUtils(
options.username, options.password, options.host, options.port)
if options.login:
log.info("=> login <=")
log.info(WifiUtils.login(options.username,
options.password,
options.host, options.port,
))
sys.exit(0)
if options.logout:
log.info("=> logout <=")
log.info(WifiUtils.logout(options.username,
options.password,
options.host, options.port,
))
sys.exit(0)
if options.keep_alive:
log.info("=> keep alive <=")
keep_alive(options.username,
options.password,
options.host, options.port,
)
|
[
"logging.getLogger",
"signal.signal",
"requests.post",
"sys.getsizeof",
"optparse.OptionParser",
"requests.get",
"time.sleep",
"sys.exit",
"rich.logging.RichHandler",
"logging.disable"
] |
[((1420, 1444), 'logging.disable', 'logging.disable', (['"""DEBUG"""'], {}), "('DEBUG')\n", (1435, 1444), False, 'import logging\n'), ((1452, 1477), 'logging.getLogger', 'logging.getLogger', (['"""rich"""'], {}), "('rich')\n", (1469, 1477), False, 'import logging\n'), ((7125, 7136), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7133, 7136), False, 'import sys\n'), ((7171, 7199), 'signal.signal', 'signal', (['SIGINT', 'exit_handler'], {}), '(SIGINT, exit_handler)\n', (7177, 7199), False, 'from signal import signal, SIGINT\n'), ((7214, 7237), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (7235, 7237), False, 'import optparse\n'), ((3420, 3507), 'requests.post', 'requests.post', (['url'], {'data': 'body_array', 'headers': 'headers', 'timeout': 'timeout', 'verify': '(False)'}), '(url, data=body_array, headers=headers, timeout=timeout,\n verify=False)\n', (3433, 3507), False, 'import requests\n'), ((5903, 5937), 'requests.get', 'requests.get', (['url'], {'timeout': 'timeout'}), '(url, timeout=timeout)\n', (5915, 5937), False, 'import requests\n'), ((6853, 6866), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6863, 6866), False, 'import time\n'), ((8517, 8528), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8525, 8528), False, 'import sys\n'), ((8773, 8784), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8781, 8784), False, 'import sys\n'), ((1402, 1415), 'rich.logging.RichHandler', 'RichHandler', ([], {}), '()\n', (1413, 1415), False, 'from rich.logging import RichHandler\n'), ((2929, 2944), 'sys.getsizeof', 'getsizeof', (['body'], {}), '(body)\n', (2938, 2944), False, 'from sys import getsizeof\n')]
|
"""
sphinx-simulink.application
~~~~~~~~~~~~~~~~~~~~~~~
Embed Simulink diagrams on your documentation.
:copyright:
Copyright 2016 by <NAME> <<EMAIL>>.
:license:
MIT, see LICENSE for details.
"""
import matlab.engine
import os
from sphinx.errors import SphinxError
from sphinx.util.osutil import ensuredir
from sphinxsimulink.diagram import directives, nodes
from sphinxsimulink.metadata import __version__
engine = None
class SimulinkDiagramError(SphinxError):
pass
def render_diagram(app, node, docname):
global engine
uri = node['uri']
    # skip rendering if the image has already been generated
    if os.path.exists( uri ):
        return
ensuredir( os.path.dirname( uri ) )
try:
# reuse last engine to save loading time
if engine == None:
engine = matlab.engine.start_matlab()
else:
# clean up used engines
engine.restoredefaultpath(nargout=0)
engine.close('all', nargout=0)
engine.bdclose('all', nargout=0)
engine.clear('classes', nargout=0)
# start engine from document directory
engine.cd( os.path.dirname( app.env.doc2path( docname ) ) )
# then, support changing directory (relative to document)
dir = node.get('dir')
if dir:
engine.cd( dir )
# finally, add the MATLAB paths relative to the changed directory
pathlist = node.get('addpath')
if pathlist:
for path in pathlist:
engine.addpath( path )
# preload script
preload = node.get('preload')
if preload:
engine.eval( preload + ';', nargout=0)
# load system
system = node.get('system')
if system:
engine.load_system( system );
# if subsystem specified, print from this layer
subsystem = node.get('subsystem')
if subsystem:
system = "/".join( [ system, subsystem ] )
# print from Simulink handle to .png
engine.eval(
"print( get_param( '{}', 'Handle' ), '-dpng', '{}' )".
format( system, uri ),
nargout=0
)
except matlab.engine.MatlabExecutionError as err:
raise SimulinkDiagramError('Unable to render Simulink diagram due ' +
'to MATLAB execution error'
)
def process_diagram_nodes(app, doctree, docname):
for node in doctree.traverse(nodes.diagram):
render_diagram(app, node, docname)
node.replace_self(node.children)
def terminate_matlab_engine(app, exception):
global engine
if engine is not None:
engine.quit()
engine = None
def setup(app):
app.add_directive('simulink-diagram', directives.SimulinkDiagramDirective)
app.connect('doctree-resolved', process_diagram_nodes)
app.connect('build-finished', terminate_matlab_engine)
return {'version': __version__}
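# Hypothetical reST usage of the directive registered above (a sketch only: the option
# names mirror the attributes read in render_diagram, but the exact directive signature
# is defined in sphinxsimulink.diagram.directives):
#
#   .. simulink-diagram:: my_model
#      :dir: ../simulink
#      :addpath: ../lib
#      :preload: load_parameters
#      :subsystem: Controller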
|
[
"os.path.dirname",
"os.path.exists"
] |
[((623, 642), 'os.path.exists', 'os.path.exists', (['uri'], {}), '(uri)\n', (637, 642), False, 'import os\n'), ((675, 695), 'os.path.dirname', 'os.path.dirname', (['uri'], {}), '(uri)\n', (690, 695), False, 'import os\n')]
|
"""
Author: <NAME>
Date: 04/02/2020
All rights reserved.
Feel free to use and modify and if you like it give it a star.
Import the Robot's Step Files and Color/Scale/Assemble them using the instructions in /RoboDK/KUKA/KUKA LWR IV+ Description
(for Original=kuka_lwr_model_description.json, for custom=custom_kuka_lwr_model_description, for custom2=custom_kuka_lwr_model_description_2)
before running the code to complete the robot model.
#########################################################################
######### To quickly color and scale use the next lines of code #########
#########################################################################
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
RDK = Robolink()
for station in RDK.ItemList():
for item in station.Childs():
item.Scale(1000)
item.setColor(255/255, 85/255, 0/255, 255/255)
########################################################################
#### For custom2 run these commands before assembling the stl files ####
########################################################################
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
import numpy as np
RDK = Robolink()
for station in RDK.ItemList():
for item in station.Childs():
item.setGeometryPose(item.Pose()*rotz(np.pi))
item.Scale(1000)
item.setColor(255/255, 85/255, 0/255, 255/255)
and after building the mechanism and import it, in order to rotate the robot run:
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
RDK = Robolink()
ref = RDK.Item('reference2')
ref.setPoseAbs(ref.Pose()*rotz(pi))
##############################################################################################
##### The original option is just the robot model without any inverted sense and joints ######
##### home are [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ###########################################
##############################################################################################
##### The custom robot is the real model that has the same limitations, home joints and ######
##### senses as the REAl KUKA LWR but the X and Y axis system are inverted ###################
##############################################################################################
##### The custom2 robot is the same as the custom option but with the X and Y axis being #####
##### the same as the REAL KUKA ROBOT ########################################################
##############################################################################################
"""
# Start the RoboDK API
from robolink.robolink import *
from robodk.robodk import *
import json
import os
# ORIGINAL ROBOT DATA
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'RoboDK/KUKA/KUKA LWR IV+ Description/kuka_lwr_model_description.json')) as config_file:
data = json.load(config_file)
original_robot_name = data['Robot name']
original_robot_dof = data['DOF']
original_robot_joint1 = data['Joint 1']
original_robot_joint2 = data['Joint 2']
original_robot_joint3 = data['Joint 3']
original_robot_joint4 = data['Joint 4']
original_robot_joint5 = data['Joint 5']
original_robot_joint6 = data['Joint 6']
original_robot_joint7 = data['Joint 7']
original_robot_joints_build = [original_robot_joint1["Build joints"], original_robot_joint2["Build joints"], original_robot_joint3["Build joints"],
original_robot_joint4["Build joints"], original_robot_joint5["Build joints"], original_robot_joint6["Build joints"],
original_robot_joint7["Build joints"]]
original_robot_joints_home = [original_robot_joint1["Home"], original_robot_joint2["Home"], original_robot_joint3["Home"],
original_robot_joint4["Home"], original_robot_joint5["Home"], original_robot_joint6["Home"], original_robot_joint7["Home"]]
original_robot_parameters = [data["d1"], data["d3"], data["d5"], data["d7"], data["dtheta1"], data["dtheta2"], data["dtheta3"], data["dtheta4"],
data["dtheta5"], data["dtheta6"], data["dtheta7"]]
original_robot_joint_senses = [original_robot_joint1["Invert Sense"], original_robot_joint2["Invert Sense"], original_robot_joint3["Invert Sense"],
original_robot_joint4["Invert Sense"], original_robot_joint5["Invert Sense"], original_robot_joint6["Invert Sense"],
original_robot_joint7["Invert Sense"]]
original_robot_joint_lower_limit = [original_robot_joint1["Minimum limit"], original_robot_joint2["Minimum limit"], original_robot_joint3["Minimum limit"],
original_robot_joint4["Minimum limit"], original_robot_joint5["Minimum limit"], original_robot_joint6["Minimum limit"],
original_robot_joint7["Minimum limit"]]
original_robot_joint_upper_limit = [original_robot_joint1["Maximum limit"], original_robot_joint2["Maximum limit"], original_robot_joint3["Maximum limit"],
original_robot_joint4["Maximum limit"], original_robot_joint5["Maximum limit"], original_robot_joint6["Maximum limit"],
original_robot_joint7["Maximum limit"]]
original_robot_base_pose = data["Base shift"]
original_robot_tool_pose = data["End-effector shift"]
# CUSTOM ROBOT DATA
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'RoboDK/KUKA/KUKA LWR IV+ Description/custom_lwr_model_description.json')) as config_file:
data = json.load(config_file)
custom_robot_name = data['Robot name']
custom_robot_dof = data['DOF']
custom_robot_joint1 = data['Joint 1']
custom_robot_joint2 = data['Joint 2']
custom_robot_joint3 = data['Joint 3']
custom_robot_joint4 = data['Joint 4']
custom_robot_joint5 = data['Joint 5']
custom_robot_joint6 = data['Joint 6']
custom_robot_joint7 = data['Joint 7']
custom_robot_joints_build = [custom_robot_joint1["Build joints"], custom_robot_joint2["Build joints"], custom_robot_joint3["Build joints"],
custom_robot_joint4["Build joints"], custom_robot_joint5["Build joints"], custom_robot_joint6["Build joints"],
custom_robot_joint7["Build joints"]]
custom_robot_joints_home = [custom_robot_joint1["Home"], custom_robot_joint2["Home"], custom_robot_joint3["Home"],
custom_robot_joint4["Home"], custom_robot_joint5["Home"], custom_robot_joint6["Home"], custom_robot_joint7["Home"]]
custom_robot_parameters = [data["d1"], data["d3"], data["d5"], data["d7"], data["dtheta1"], data["dtheta2"], data["dtheta3"], data["dtheta4"],
data["dtheta5"], data["dtheta6"], data["dtheta7"]]
custom_robot_joint_senses = [custom_robot_joint1["Invert Sense"], custom_robot_joint2["Invert Sense"], custom_robot_joint3["Invert Sense"],
custom_robot_joint4["Invert Sense"], custom_robot_joint5["Invert Sense"], custom_robot_joint6["Invert Sense"],
custom_robot_joint7["Invert Sense"]]
custom_robot_joint_lower_limit = [custom_robot_joint1["Minimum limit"], custom_robot_joint2["Minimum limit"], custom_robot_joint3["Minimum limit"],
custom_robot_joint4["Minimum limit"], custom_robot_joint5["Minimum limit"], custom_robot_joint6["Minimum limit"],
custom_robot_joint7["Minimum limit"]]
custom_robot_joint_upper_limit = [custom_robot_joint1["Maximum limit"], custom_robot_joint2["Maximum limit"], custom_robot_joint3["Maximum limit"],
custom_robot_joint4["Maximum limit"], custom_robot_joint5["Maximum limit"], custom_robot_joint6["Maximum limit"],
custom_robot_joint7["Maximum limit"]]
custom_robot_base_pose = data["Base shift"]
custom_robot_tool_pose = data["End-effector shift"]
# CUSTOM 2 ROBOT DATA
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'RoboDK/KUKA/KUKA LWR IV+ Description/custom_lwr_model_description_2.json')) as config_file:
data = json.load(config_file)
custom_2_robot_name = data['Robot name']
custom_2_robot_dof = data['DOF']
custom_2_robot_joint1 = data['Joint 1']
custom_2_robot_joint2 = data['Joint 2']
custom_2_robot_joint3 = data['Joint 3']
custom_2_robot_joint4 = data['Joint 4']
custom_2_robot_joint5 = data['Joint 5']
custom_2_robot_joint6 = data['Joint 6']
custom_2_robot_joint7 = data['Joint 7']
custom_2_robot_joints_build = [custom_2_robot_joint1["Build joints"], custom_2_robot_joint2["Build joints"], custom_2_robot_joint3["Build joints"],
custom_2_robot_joint4["Build joints"], custom_2_robot_joint5["Build joints"], custom_2_robot_joint6["Build joints"],
custom_2_robot_joint7["Build joints"]]
custom_2_robot_joints_home = [custom_2_robot_joint1["Home"], custom_2_robot_joint2["Home"], custom_2_robot_joint3["Home"],
custom_2_robot_joint4["Home"], custom_2_robot_joint5["Home"], custom_2_robot_joint6["Home"], custom_2_robot_joint7["Home"]]
custom_2_robot_parameters = [data["d1"], data["d3"], data["d5"], data["d7"], data["dtheta1"], data["dtheta2"], data["dtheta3"], data["dtheta4"],
data["dtheta5"], data["dtheta6"], data["dtheta7"]]
custom_2_robot_joint_senses = [custom_2_robot_joint1["Invert Sense"], custom_2_robot_joint2["Invert Sense"], custom_2_robot_joint3["Invert Sense"],
custom_2_robot_joint4["Invert Sense"], custom_2_robot_joint5["Invert Sense"], custom_2_robot_joint6["Invert Sense"],
custom_2_robot_joint7["Invert Sense"]]
custom_2_robot_joint_lower_limit = [custom_2_robot_joint1["Minimum limit"], custom_2_robot_joint2["Minimum limit"], custom_2_robot_joint3["Minimum limit"],
custom_2_robot_joint4["Minimum limit"], custom_2_robot_joint5["Minimum limit"], custom_2_robot_joint6["Minimum limit"],
custom_2_robot_joint7["Minimum limit"]]
custom_2_robot_joint_upper_limit = [custom_2_robot_joint1["Maximum limit"], custom_2_robot_joint2["Maximum limit"], custom_2_robot_joint3["Maximum limit"],
custom_2_robot_joint4["Maximum limit"], custom_2_robot_joint5["Maximum limit"], custom_2_robot_joint6["Maximum limit"],
custom_2_robot_joint7["Maximum limit"]]
custom_2_robot_base_pose = data["Base shift"]
custom_2_robot_tool_pose = data["End-effector shift"]
RDK = Robolink()
custom = False
custom2 = True
if custom:
robot_name = custom_robot_name
DOFs = custom_robot_dof
joints_build = custom_robot_joints_build
joints_home = custom_robot_joints_home
parameters = custom_robot_parameters
joints_senses = custom_robot_joint_senses # -1 = Inverted, +1 = Not Inverted
lower_limits = custom_robot_joint_lower_limit
upper_limits = custom_robot_joint_upper_limit
base_pose = xyzrpw_2_pose(custom_robot_base_pose)
tool_pose = xyzrpw_2_pose(custom_robot_tool_pose)
list_objects = []
elif custom2:
robot_name = custom_2_robot_name
DOFs = custom_2_robot_dof
joints_build = custom_2_robot_joints_build
joints_home = custom_2_robot_joints_home
parameters = custom_2_robot_parameters
joints_senses = custom_2_robot_joint_senses # -1 = Inverted, +1 = Not Inverted
lower_limits = custom_2_robot_joint_lower_limit
upper_limits = custom_2_robot_joint_upper_limit
base_pose = xyzrpw_2_pose(custom_2_robot_base_pose)
tool_pose = xyzrpw_2_pose(custom_2_robot_tool_pose)
list_objects = []
else:
robot_name = original_robot_name
DOFs = original_robot_dof
joints_build = original_robot_joints_build
joints_home = original_robot_joints_home
parameters = original_robot_parameters
joints_senses = original_robot_joint_senses # -1 = Inverted, +1 = Not Inverted
lower_limits = original_robot_joint_lower_limit
upper_limits = original_robot_joint_upper_limit
base_pose = xyzrpw_2_pose(original_robot_base_pose)
tool_pose = xyzrpw_2_pose(original_robot_tool_pose)
list_objects = []
for i in range(DOFs + 1):
if i == 0:
itm = RDK.Item('base', ITEM_TYPE_OBJECT)
else:
itm = RDK.Item('link_'+str(i), ITEM_TYPE_OBJECT)
list_objects.append(itm)
new_robot = RDK.BuildMechanism(MAKE_ROBOT_7DOF, list_objects, parameters, joints_build, joints_home, joints_senses, lower_limits, upper_limits, base_pose, tool_pose, robot_name)
if not new_robot.Valid():
print("Failed to create the robot. Check input values.")
else:
print("Robot/mechanism created: " + new_robot.Name())
|
[
"json.load",
"os.path.realpath"
] |
[((2952, 2974), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (2961, 2974), False, 'import json\n'), ((5745, 5767), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (5754, 5767), False, 'import json\n'), ((8421, 8443), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (8430, 8443), False, 'import json\n'), ((2823, 2849), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2839, 2849), False, 'import os\n'), ((5614, 5640), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5630, 5640), False, 'import os\n'), ((8288, 8314), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8304, 8314), False, 'import os\n')]
|
import unittest
from context import parser
class TVShowFileParserTests(unittest.TestCase):
def setUp(self):
self.filename = parser.Parser("test.2018.S01E01E02.mkv")
def tearDown(self):
self.filename = None
def testisSubtitleFileSRT(self):
self.assertFalse(self.filename.isSubs)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"context.parser.Parser"
] |
[((352, 367), 'unittest.main', 'unittest.main', ([], {}), '()\n', (365, 367), False, 'import unittest\n'), ((139, 179), 'context.parser.Parser', 'parser.Parser', (['"""test.2018.S01E01E02.mkv"""'], {}), "('test.2018.S01E01E02.mkv')\n", (152, 179), False, 'from context import parser\n')]
|
from urllib.parse import quote
from django import template
register = template.Library()
@register.filter
def urlify(value):
return quote(value)
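# Template usage sketch (assumes this module is loaded as a template tag library,
# e.g. {% load urlify_tags %}, where the load name is whatever this file is called):
#   {{ value|urlify }}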
|
[
"urllib.parse.quote",
"django.template.Library"
] |
[((71, 89), 'django.template.Library', 'template.Library', ([], {}), '()\n', (87, 89), False, 'from django import template\n'), ((138, 150), 'urllib.parse.quote', 'quote', (['value'], {}), '(value)\n', (143, 150), False, 'from urllib.parse import quote\n')]
|
# Generated by Django 2.0.6 on 2018-06-11 23:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('producto', '0003_auto_20180611_2248'),
]
operations = [
migrations.RenameModel(
old_name='Venta',
new_name='Ventas',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((228, 287), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Venta"""', 'new_name': '"""Ventas"""'}), "(old_name='Venta', new_name='Ventas')\n", (250, 287), False, 'from django.db import migrations\n')]
|
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: src/test/testcases/testPSUReadSbeMem.py $
#
# OpenPOWER sbe Project
#
# Contributors Listed Below - COPYRIGHT 2017,2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from __future__ import print_function
import sys
sys.path.append("targets/p9_nimbus/sbeTest" )
sys.path.append("targets/p9_axone/sbeTest" )
import testPSUUtil
import testRegistry as reg
import testUtil
import testMemUtil
#-------------------------------
# This is a Test Expected Data
#-------------------------------
def getdoubleword(dataInInt):
hex_string = '0'*(16-len(str(hex(dataInInt))[:18][2:])) + str(hex(dataInInt))[:18][2:]
return hex_string
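# Illustrative examples (not part of the original test): getdoubleword left-pads the
# hex string to a full 8-byte (16 hex digit) doubleword:
#   getdoubleword(0x123)                   -> '0000000000000123'
#   getdoubleword((0x38CA0 << 32) + 0x180) -> '00038ca000000180'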
def readSeeprom(offset, size, destAddr, primStatus, secStatus):
'''
#------------------------------------------------------------------------------------------------------------------------------
# SBE side test data -
#------------------------------------------------------------------------------------------------------------------------------
'''
sbe_test_data = (
#-----------------------------------------------------------------------------------------------------
# OP Reg ValueToWrite size Test Expected Data Description
#-----------------------------------------------------------------------------------------------------
# FFDC Size, Pass CMD Size
["write", reg.REG_MBOX0, "0000010000F0D703", 8, "None", "Writing to MBOX0 address"],
# seeprom offset, Size
["write", reg.REG_MBOX1, getdoubleword((offset<<32)+size), 8, "None", "Writing to MBOX1 address"],
# response Addr
["write", reg.REG_MBOX2, getdoubleword(destAddr), 8, "None", "Writing to MBOX2 address"],
["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"],
)
'''
#---------------------
# Host side test data - SUCCESS
#---------------------
'''
host_test_data_success = (
#----------------------------------------------------------------------------------------------------------------
# OP Reg ValueToWrite size Test Expected Data Description
#----------------------------------------------------------------------------------------------------------------
["read", reg.REG_MBOX4, "0", 8, getdoubleword((primStatus<<48)+(secStatus<<32)+0xF0D703), "Reading Host MBOX4 data to Validate"],
)
'''
#-----------------------------------------------------------------------
# Do not modify - Used to simulate interrupt on Ringing Doorbell on Host
#-----------------------------------------------------------------------
'''
host_polling_data = (
#----------------------------------------------------------------------------------------------------------------
# OP Reg ValueToWrite size Test Expected Data Description
#----------------------------------------------------------------------------------------------------------------
["read", reg.PSU_HOST_DOORBELL_REG_WO_OR, "0", 8, "8000000000000000", "Reading Host Doorbell for Interrupt Bit0"],
)
# Run Simics initially
testUtil.runCycles( 10000000 );
# Intialize the class obj instances
regObj = testPSUUtil.registry() # Registry obj def for operation
# HOST->SBE data set execution
regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data )
print("\n Poll on Host side for INTR ...\n")
#Poll on HOST DoorBell Register for interrupt
regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 )
#SBE->HOST data set execution
regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_success )
#-------------------------
# Main Function
#-------------------------
def main():
# Run Simics initially
testUtil.runCycles( 10000000 );
print("\n Execute SBE Test - Read SBE Mem\n")
'''
Test Case 1
'''
readSeeprom(0, 128, 0x08000000, 0, 0)
print("SUCCESS: read seeprom valid")
# Read data from cache and verify its contents
# seeprom header
seepprmHdr = 'XIP SEPM'
#read from cache
readData = testMemUtil.getmem(0x08000000, 0x80, 0x02)
for byte in range(len(seepprmHdr)):
if( ord(seepprmHdr[byte]) != readData[byte ]):
print("Data mismtach at: ", byte)
print(" expected: ", ord(seepprmHdr[byte]))
print(" Actual: ", readData[byte])
            raise Exception('data mismatch')
'''
Test Case 2
'''
readSeeprom(0x38CA0, 0x180, 0x8973780, 0, 0)
print("SUCCESS: read seeprom HB testcase")
'''
Test Case 3
'''
readSeeprom(0x0, 0x40, 0x08000000, 0x03, 0x19)
print("SUCCESS: read seeprom size not aligned")
'''
Test Case 4
'''
readSeeprom(0x3fe80, 0x180, 0x08000000, 0x03, 0x19)
print("SUCCESS: read seeprom size exceeded")
'''
Test Case 5
'''
readSeeprom(0x7, 0x40, 0x08000000, 0x03, 0x19)
print("SUCCESS: read seeprom offset not aligned")
if __name__ == "__main__":
if testUtil.getMachineName() == "axone":
try:
main()
except:
print ( "\nTest Suite completed with error(s)" )
testUtil.collectFFDC()
            raise
print ( "\nTest Suite completed with no errors" )
    else:
        err = False
        try:
            main()
        except:
            err = True
        if err:
            print ( "\nTest Suite completed with error(s)" )
            #sys.exit(1)
        else:
            print ( "\nTest Suite completed with no errors" )
            #sys.exit(0);
|
[
"testUtil.getMachineName",
"testPSUUtil.registry",
"testUtil.collectFFDC",
"testMemUtil.getmem",
"testUtil.runCycles",
"sys.path.append"
] |
[((865, 909), 'sys.path.append', 'sys.path.append', (['"""targets/p9_nimbus/sbeTest"""'], {}), "('targets/p9_nimbus/sbeTest')\n", (880, 909), False, 'import sys\n'), ((911, 954), 'sys.path.append', 'sys.path.append', (['"""targets/p9_axone/sbeTest"""'], {}), "('targets/p9_axone/sbeTest')\n", (926, 954), False, 'import sys\n'), ((4105, 4133), 'testUtil.runCycles', 'testUtil.runCycles', (['(10000000)'], {}), '(10000000)\n', (4123, 4133), False, 'import testUtil\n'), ((4191, 4213), 'testPSUUtil.registry', 'testPSUUtil.registry', ([], {}), '()\n', (4211, 4213), False, 'import testPSUUtil\n'), ((4741, 4769), 'testUtil.runCycles', 'testUtil.runCycles', (['(10000000)'], {}), '(10000000)\n', (4759, 4769), False, 'import testUtil\n'), ((5078, 5115), 'testMemUtil.getmem', 'testMemUtil.getmem', (['(134217728)', '(128)', '(2)'], {}), '(134217728, 128, 2)\n', (5096, 5115), False, 'import testMemUtil\n'), ((5993, 6018), 'testUtil.getMachineName', 'testUtil.getMachineName', ([], {}), '()\n', (6016, 6018), False, 'import testUtil\n'), ((6152, 6174), 'testUtil.collectFFDC', 'testUtil.collectFFDC', ([], {}), '()\n', (6172, 6174), False, 'import testUtil\n')]
|
#!/usr/bin/env python3
"""Module containing the ClusteringPredict class and the command line interface."""
import argparse
import pandas as pd
import joblib
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.clustering.common import *
class ClusteringPredict(BiobbObject):
"""
| biobb_ml ClusteringPredict
| Makes predictions from an input dataset and a given clustering model.
| Makes predictions from an input dataset (provided either as a file or as a dictionary property) and a given clustering model fitted with `KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_ method.
Args:
input_model_path (str): Path to the input model. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/model_clustering_predict.pkl>`_. Accepted formats: pkl (edam:format_3653).
input_dataset_path (str) (Optional): Path to the dataset to predict. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/input_clustering_predict.csv>`_. Accepted formats: csv (edam:format_3752).
output_results_path (str): Path to the output results file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_results_clustering_predict.csv>`_. Accepted formats: csv (edam:format_3752).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **predictions** (*list*) - (None) List of dictionaries with all values you want to predict targets. It will be taken into account only in case **input_dataset_path** is not provided. Format: [{ 'var1': 1.0, 'var2': 2.0 }, { 'var1': 4.0, 'var2': 2.7 }] for datasets with headers and [[ 1.0, 2.0 ], [ 4.0, 2.7 ]] for datasets without headers.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_ml.clustering.clustering_predict import clustering_predict
prop = {
'predictions': [
{
'var1': 1.0,
'var2': 2.0
},
{
'var1': 4.0,
'var2': 2.7
}
]
}
clustering_predict(input_model_path='/path/to/myModel.pkl',
output_results_path='/path/to/newPredictedResults.csv',
input_dataset_path='/path/to/myDataset.csv',
properties=prop)
Info:
* wrapped_software:
* name: scikit-learn
* version: >=0.24.2
* license: BSD 3-Clause
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_model_path, output_results_path,
input_dataset_path=None, properties=None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
"in": { "input_model_path": input_model_path, "input_dataset_path": input_dataset_path },
"out": { "output_results_path": output_results_path }
}
# Properties specific for BB
self.predictions = properties.get('predictions', [])
self.properties = properties
# Check the properties
self.check_properties(properties)
def check_data_params(self, out_log, err_log):
""" Checks all the input/output paths and parameters """
self.io_dict["in"]["input_model_path"] = check_input_path(self.io_dict["in"]["input_model_path"], "input_model_path", out_log, self.__class__.__name__)
self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"],"output_results_path", False, out_log, self.__class__.__name__)
if self.io_dict["in"]["input_dataset_path"]:
self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
@launchlogger
def launch(self) -> int:
"""Execute the :class:`ClusteringPredict <clustering.clustering_predict.ClusteringPredict>` clustering.clustering_predict.ClusteringPredict object."""
# check input/output paths and parameters
self.check_data_params(self.out_log, self.err_log)
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
fu.log('Getting model from %s' % self.io_dict["in"]["input_model_path"], self.out_log, self.global_log)
with open(self.io_dict["in"]["input_model_path"], "rb") as f:
while True:
try:
m = joblib.load(f)
if (isinstance(m, KMeans)):
new_model = m
if isinstance(m, StandardScaler):
scaler = m
if isinstance(m, dict):
variables = m
except EOFError:
break
if self.io_dict["in"]["input_dataset_path"]:
# load dataset from input_dataset_path file
fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
if 'columns' in variables['predictors']:
labels = getHeader(self.io_dict["in"]["input_dataset_path"])
skiprows = 1
else:
labels = None
skiprows = None
new_data_table = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header = None, sep="\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
else:
# load dataset from properties
if 'columns' in variables['predictors']:
# sorting self.properties in the correct order given by variables['predictors']['columns']
index_map = { v: i for i, v in enumerate(variables['predictors']['columns']) }
predictions = []
for i, pred in enumerate(self.predictions):
sorted_pred = sorted(pred.items(), key=lambda pair: index_map[pair[0]])
predictions.append(dict(sorted_pred))
new_data_table = pd.DataFrame(data=get_list_of_predictors(predictions),columns=get_keys_of_predictors(predictions))
else:
predictions = self.predictions
new_data_table = pd.DataFrame(data=predictions)
if variables['scale']:
fu.log('Scaling dataset', self.out_log, self.global_log)
new_data = scaler.transform(new_data_table)
else: new_data = new_data_table
p = new_model.predict(new_data)
new_data_table['cluster'] = p
fu.log('Predicting results\n\nPREDICTION RESULTS\n\n%s\n' % new_data_table, self.out_log, self.global_log)
fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
new_data_table.to_csv(self.io_dict["out"]["output_results_path"], index = False, header=True, float_format='%.3f')
return 0
def clustering_predict(input_model_path: str, output_results_path: str, input_dataset_path: str = None, properties: dict = None, **kwargs) -> int:
"""Execute the :class:`ClusteringPredict <clustering.clustering_predict.ClusteringPredict>` class and
execute the :meth:`launch() <clustering.clustering_predict.ClusteringPredict.launch>` method."""
return ClusteringPredict(input_model_path=input_model_path,
output_results_path=output_results_path,
input_dataset_path=input_dataset_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Makes predictions from an input dataset and a given clustering model.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('--config', required=False, help='Configuration file')
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_model_path', required=True, help='Path to the input model. Accepted formats: pkl.')
required_args.add_argument('--output_results_path', required=True, help='Path to the output results file. Accepted formats: csv.')
parser.add_argument('--input_dataset_path', required=False, help='Path to the dataset to predict. Accepted formats: csv.')
args = parser.parse_args()
args.config = args.config or "{}"
properties = settings.ConfReader(config=args.config).get_prop_dic()
# Specific call of each building block
clustering_predict(input_model_path=args.input_model_path,
output_results_path=args.output_results_path,
input_dataset_path=args.input_dataset_path,
properties=properties)
if __name__ == '__main__':
main()
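# Example command line (paths are placeholders; the flags match the argparse setup above):
#   python clustering_predict.py --config config.yml \
#       --input_model_path model.pkl \
#       --output_results_path predictions.csv \
#       --input_dataset_path dataset.csv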
|
[
"biobb_common.tools.file_utils.log",
"pandas.read_csv",
"argparse.RawTextHelpFormatter",
"joblib.load",
"pandas.DataFrame",
"biobb_common.configuration.settings.ConfReader"
] |
[((5128, 5235), 'biobb_common.tools.file_utils.log', 'fu.log', (["('Getting model from %s' % self.io_dict['in']['input_model_path'])", 'self.out_log', 'self.global_log'], {}), "('Getting model from %s' % self.io_dict['in']['input_model_path'],\n self.out_log, self.global_log)\n", (5134, 5235), True, 'from biobb_common.tools import file_utils as fu\n'), ((7454, 7563), 'biobb_common.tools.file_utils.log', 'fu.log', (['("""Predicting results\n\nPREDICTION RESULTS\n\n%s\n""" % new_data_table)', 'self.out_log', 'self.global_log'], {}), '("""Predicting results\n\nPREDICTION RESULTS\n\n%s\n""" % new_data_table,\n self.out_log, self.global_log)\n', (7460, 7563), True, 'from biobb_common.tools import file_utils as fu\n'), ((7569, 7679), 'biobb_common.tools.file_utils.log', 'fu.log', (["('Saving results to %s' % self.io_dict['out']['output_results_path'])", 'self.out_log', 'self.global_log'], {}), "('Saving results to %s' % self.io_dict['out']['output_results_path'],\n self.out_log, self.global_log)\n", (7575, 7679), True, 'from biobb_common.tools import file_utils as fu\n'), ((5825, 5936), 'biobb_common.tools.file_utils.log', 'fu.log', (["('Getting dataset from %s' % self.io_dict['in']['input_dataset_path'])", 'self.out_log', 'self.global_log'], {}), "('Getting dataset from %s' % self.io_dict['in']['input_dataset_path'],\n self.out_log, self.global_log)\n", (5831, 5936), True, 'from biobb_common.tools import file_utils as fu\n'), ((6201, 6343), 'pandas.read_csv', 'pd.read_csv', (["self.io_dict['in']['input_dataset_path']"], {'header': 'None', 'sep': '"""\\\\s+|;|:|,|\t"""', 'engine': '"""python"""', 'skiprows': 'skiprows', 'names': 'labels'}), "(self.io_dict['in']['input_dataset_path'], header=None, sep=\n '\\\\s+|;|:|,|\\t', engine='python', skiprows=skiprows, names=labels)\n", (6212, 6343), True, 'import pandas as pd\n'), ((7213, 7269), 'biobb_common.tools.file_utils.log', 'fu.log', (['"""Scaling dataset"""', 'self.out_log', 'self.global_log'], {}), "('Scaling dataset', self.out_log, self.global_log)\n", (7219, 7269), True, 'from biobb_common.tools import file_utils as fu\n'), ((9403, 9442), 'biobb_common.configuration.settings.ConfReader', 'settings.ConfReader', ([], {'config': 'args.config'}), '(config=args.config)\n', (9422, 9442), False, 'from biobb_common.configuration import settings\n'), ((7125, 7155), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'predictions'}), '(data=predictions)\n', (7137, 7155), True, 'import pandas as pd\n'), ((8689, 8737), 'argparse.RawTextHelpFormatter', 'argparse.RawTextHelpFormatter', (['prog'], {'width': '(99999)'}), '(prog, width=99999)\n', (8718, 8737), False, 'import argparse\n'), ((5372, 5386), 'joblib.load', 'joblib.load', (['f'], {}), '(f)\n', (5383, 5386), False, 'import joblib\n')]
|
import pytest
import os
import memcnn.experiment.factory
from memcnn.config import Config
def test_get_attr_from_module():
a = memcnn.experiment.factory.get_attr_from_module('memcnn.experiment.factory.get_attr_from_module')
assert a is memcnn.experiment.factory.get_attr_from_module
def test_load_experiment_config():
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
@pytest.mark.skip(reason="Covered more efficiently by test_train.test_run_experiment")
def test_experiment_config_parser(tmp_path):
tmp_data_dir = tmp_path / "tmpdata"
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
cfg = memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
memcnn.experiment.factory.experiment_config_parser(cfg, str(tmp_data_dir), workers=None)
def test_circular_dependency(tmp_path):
p = str(tmp_path / "circular.json")
content = u'{ "circ": { "base": "circ" } }'
with open(p, 'w') as fh:
fh.write(content)
with open(p, 'r') as fh:
assert fh.read() == content
with pytest.raises(RuntimeError):
memcnn.experiment.factory.load_experiment_config(p, ['circ'])
|
[
"pytest.mark.skip",
"pytest.raises",
"memcnn.config.Config.get_dir"
] |
[((490, 580), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Covered more efficiently by test_train.test_run_experiment"""'}), "(reason=\n 'Covered more efficiently by test_train.test_run_experiment')\n", (506, 580), False, 'import pytest\n'), ((359, 375), 'memcnn.config.Config.get_dir', 'Config.get_dir', ([], {}), '()\n', (373, 375), False, 'from memcnn.config import Config\n'), ((690, 706), 'memcnn.config.Config.get_dir', 'Config.get_dir', ([], {}), '()\n', (704, 706), False, 'from memcnn.config import Config\n'), ((1176, 1203), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1189, 1203), False, 'import pytest\n')]
|
"""
@file: test_search_serial_port.py
@author: <NAME>
@brief: a program that tests search_serial_port.py
"""
from search_serial_port import search_com_ports, search_enabled_com_port
def test_search_com_ports():
search_com_ports()
def test_search_enabled_com_port():
search_enabled_com_port()
|
[
"search_serial_port.search_com_ports",
"search_serial_port.search_enabled_com_port"
] |
[((209, 227), 'search_serial_port.search_com_ports', 'search_com_ports', ([], {}), '()\n', (225, 227), False, 'from search_serial_port import search_com_ports, search_enabled_com_port\n'), ((270, 295), 'search_serial_port.search_enabled_com_port', 'search_enabled_com_port', ([], {}), '()\n', (293, 295), False, 'from search_serial_port import search_com_ports, search_enabled_com_port\n')]
|
import requests
import json
def getguild(guild_id):
guild_id = str(guild_id)
http_response = requests.get(f'https://discord.com/api/guilds/{guild_id}/widget.json')
response_data = http_response.json()
data = json.dumps(response_data)
return data
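# Illustrative call (the guild id is a placeholder; the target guild must have the
# server widget enabled for widget.json to return data):
#   print(getguild(123456789012345678))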
|
[
"json.dumps",
"requests.get"
] |
[((102, 172), 'requests.get', 'requests.get', (['f"""https://discord.com/api/guilds/{guild_id}/widget.json"""'], {}), "(f'https://discord.com/api/guilds/{guild_id}/widget.json')\n", (114, 172), False, 'import requests\n'), ((225, 250), 'json.dumps', 'json.dumps', (['response_data'], {}), '(response_data)\n', (235, 250), False, 'import json\n')]
|
"""p2 core tasks"""
from p2.core.celery import CELERY_APP
from p2.lib.reflection import path_to_class
@CELERY_APP.task(bind=True)
def signal_marshall(self, signal, args=None, kwargs=None):
"""Run signal in task worker"""
if not args:
args = []
if not kwargs:
kwargs = {}
# Lookup PK to model instance
for key, value in kwargs.items():
if 'class' in value and 'pk' in value:
model_class = path_to_class(value.get('class'))
model_instance = model_class.objects.get(pk=value.get('pk'))
kwargs[key] = model_instance
signal_cls = path_to_class(signal)
signal_cls.send(sender=self, *args, **kwargs)
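# Hypothetical dispatch example (signal path, model path and pk are placeholders):
#   signal_marshall.delay(
#       'p2.core.signals.SOME_SIGNAL',
#       kwargs={'instance': {'class': 'p2.core.models.SomeModel', 'pk': 1}},
#   )
# The task re-imports the signal via path_to_class and swaps every {'class', 'pk'}
# kwarg for the matching model instance before sending it.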
|
[
"p2.core.celery.CELERY_APP.task",
"p2.lib.reflection.path_to_class"
] |
[((105, 131), 'p2.core.celery.CELERY_APP.task', 'CELERY_APP.task', ([], {'bind': '(True)'}), '(bind=True)\n', (120, 131), False, 'from p2.core.celery import CELERY_APP\n'), ((611, 632), 'p2.lib.reflection.path_to_class', 'path_to_class', (['signal'], {}), '(signal)\n', (624, 632), False, 'from p2.lib.reflection import path_to_class\n')]
|
import pytest
import numpy as np
import itertools
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test, keras_test
from keras.utils.conv_utils import conv_input_length
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import convolutional, pooling
from keras.models import Sequential
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
else:
_convolution_border_modes = ['valid', 'same']
@keras_test
def test_cosineconvolution_2d():
num_samples = 2
num_filter = 2
stack_size = 3
num_row = 10
num_col = 6
if K.backend() == 'theano':
data_format = 'channels_first'
elif K.backend() == 'tensorflow':
data_format = 'channels_last'
for border_mode in _convolution_border_modes:
for subsample in [(1, 1), (2, 2)]:
for use_bias_mode in [True, False]:
if border_mode == 'same' and subsample != (1, 1):
continue
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format},
input_shape=(num_samples, num_row, num_col, stack_size))
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2'},
input_shape=(num_samples, num_row, num_col, stack_size))
if data_format == 'channels_first':
X = np.random.randn(1, 3, 5, 5)
input_dim = (3, 5, 5)
W0 = X[:, :, ::-1, ::-1]
elif data_format == 'channels_last':
X = np.random.randn(1, 5, 5, 3)
input_dim = (5, 5, 3)
W0 = X[0, :, :, :, None]
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=True,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = W0
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=False,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * W0
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
@keras_test
def test_sub_pixel_upscaling():
num_samples = 2
num_row = 16
num_col = 16
input_dtype = K.floatx()
for scale_factor in [2, 3, 4]:
input_data = np.random.random((num_samples, 4 * (scale_factor ** 2), num_row, num_col))
input_data = input_data.astype(input_dtype)
if K.image_data_format() == 'channels_last':
input_data = input_data.transpose((0, 2, 3, 1))
input_tensor = K.variable(input_data)
expected_output = K.eval(KC.depth_to_space(input_tensor,
scale=scale_factor))
layer_test(convolutional.SubPixelUpscaling,
kwargs={'scale_factor': scale_factor},
input_data=input_data,
expected_output=expected_output,
expected_output_dtype=K.floatx())
if __name__ == '__main__':
pytest.main([__file__])
|
[
"keras.backend.image_data_format",
"numpy.random.random",
"numpy.asarray",
"keras_contrib.utils.test_utils.layer_test",
"keras.backend.floatx",
"pytest.main",
"keras.models.Sequential",
"keras_contrib.layers.convolutional.CosineConvolution2D",
"keras_contrib.backend.depth_to_space",
"keras.backend.variable",
"keras.backend.backend",
"numpy.random.randn"
] |
[((427, 438), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (436, 438), True, 'from keras import backend as K\n'), ((2581, 2593), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2591, 2593), False, 'from keras.models import Sequential\n'), ((2917, 2934), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', (2927, 2934), True, 'import numpy as np\n'), ((3076, 3088), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3086, 3088), False, 'from keras.models import Sequential\n'), ((3655, 3665), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3663, 3665), True, 'from keras import backend as K\n'), ((4439, 4462), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (4450, 4462), False, 'import pytest\n'), ((704, 715), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (713, 715), True, 'from keras import backend as K\n'), ((2333, 2360), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(5)', '(5)'], {}), '(1, 3, 5, 5)\n', (2348, 2360), True, 'import numpy as np\n'), ((2608, 2720), 'keras_contrib.layers.convolutional.CosineConvolution2D', 'convolutional.CosineConvolution2D', (['(1)', '(5, 5)'], {'use_bias': '(True)', 'input_shape': 'input_dim', 'data_format': 'data_format'}), '(1, (5, 5), use_bias=True, input_shape=\n input_dim, data_format=data_format)\n', (2641, 2720), False, 'from keras_contrib.layers import convolutional, pooling\n'), ((3103, 3216), 'keras_contrib.layers.convolutional.CosineConvolution2D', 'convolutional.CosineConvolution2D', (['(1)', '(5, 5)'], {'use_bias': '(False)', 'input_shape': 'input_dim', 'data_format': 'data_format'}), '(1, (5, 5), use_bias=False, input_shape=\n input_dim, data_format=data_format)\n', (3136, 3216), False, 'from keras_contrib.layers import convolutional, pooling\n'), ((3723, 3795), 'numpy.random.random', 'np.random.random', (['(num_samples, 4 * scale_factor ** 2, num_row, num_col)'], {}), '((num_samples, 4 * scale_factor ** 2, num_row, num_col))\n', (3739, 3795), True, 'import numpy as np\n'), ((3988, 4010), 'keras.backend.variable', 'K.variable', (['input_data'], {}), '(input_data)\n', (3998, 4010), True, 'from keras import backend as K\n'), ((777, 788), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (786, 788), True, 'from keras import backend as K\n'), ((2477, 2504), 'numpy.random.randn', 'np.random.randn', (['(1)', '(5)', '(5)', '(3)'], {}), '(1, 5, 5, 3)\n', (2492, 2504), True, 'import numpy as np\n'), ((3862, 3883), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3881, 3883), True, 'from keras import backend as K\n'), ((4044, 4095), 'keras_contrib.backend.depth_to_space', 'KC.depth_to_space', (['input_tensor'], {'scale': 'scale_factor'}), '(input_tensor, scale=scale_factor)\n', (4061, 4095), True, 'from keras_contrib import backend as KC\n'), ((1098, 1369), 'keras_contrib.utils.test_utils.layer_test', 'layer_test', (['convolutional.CosineConvolution2D'], {'kwargs': "{'filters': num_filter, 'kernel_size': (3, 3), 'padding': border_mode,\n 'strides': subsample, 'use_bias': use_bias_mode, 'data_format': data_format\n }", 'input_shape': '(num_samples, num_row, num_col, stack_size)'}), "(convolutional.CosineConvolution2D, kwargs={'filters': num_filter,\n 'kernel_size': (3, 3), 'padding': border_mode, 'strides': subsample,\n 'use_bias': use_bias_mode, 'data_format': data_format}, input_shape=(\n num_samples, num_row, num_col, stack_size))\n", (1108, 1369), False, 'from keras_contrib.utils.test_utils import layer_test, keras_test\n'), 
((1603, 1965), 'keras_contrib.utils.test_utils.layer_test', 'layer_test', (['convolutional.CosineConvolution2D'], {'kwargs': "{'filters': num_filter, 'kernel_size': (3, 3), 'padding': border_mode,\n 'strides': subsample, 'use_bias': use_bias_mode, 'data_format':\n data_format, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2'}", 'input_shape': '(num_samples, num_row, num_col, stack_size)'}), "(convolutional.CosineConvolution2D, kwargs={'filters': num_filter,\n 'kernel_size': (3, 3), 'padding': border_mode, 'strides': subsample,\n 'use_bias': use_bias_mode, 'data_format': data_format,\n 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2'}, input_shape=(num_samples, num_row,\n num_col, stack_size))\n", (1613, 1965), False, 'from keras_contrib.utils.test_utils import layer_test, keras_test\n'), ((3039, 3049), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3047, 3049), True, 'from keras import backend as K\n'), ((4394, 4404), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4402, 4404), True, 'from keras import backend as K\n'), ((3513, 3523), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3521, 3523), True, 'from keras import backend as K\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loop utilities."""
import jax
import jax.numpy as jnp
def _while_loop_scan(cond_fun, body_fun, init_val, max_iter):
"""Scan-based implementation (jit ok, reverse-mode autodiff ok)."""
def _iter(val):
next_val = body_fun(val)
next_cond = cond_fun(next_val)
return next_val, next_cond
def _fun(tup, it):
val, cond = tup
# When cond is met, we start doing no-ops.
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), it
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def _while_loop_python(cond_fun, body_fun, init_val, maxiter):
"""Python based implementation (no jit, reverse-mode autodiff ok)."""
val = init_val
for _ in range(maxiter):
cond = cond_fun(val)
if not cond:
# When condition is met, break (not jittable).
break
val = body_fun(val)
return val
def _while_loop_lax(cond_fun, body_fun, init_val, maxiter):
"""lax.while_loop based implementation (jit by default, no reverse-mode)."""
def _cond_fun(_val):
it, val = _val
return jnp.logical_and(cond_fun(val), it <= maxiter - 1)
def _body_fun(_val):
it, val = _val
val = body_fun(val)
return it+1, val
return jax.lax.while_loop(_cond_fun, _body_fun, (0, init_val))[1]
def while_loop(cond_fun, body_fun, init_val, maxiter, unroll=False, jit=False):
"""A while loop with a bounded number of iterations."""
if unroll:
if jit:
fun = _while_loop_scan
else:
fun = _while_loop_python
else:
if jit:
fun = _while_loop_lax
else:
raise ValueError("unroll=False and jit=False cannot be used together")
if jit and fun is not _while_loop_lax:
# jit of a lax while_loop is redundant, and this jit would only
# constrain maxiter to be static where it is not required.
fun = jax.jit(fun, static_argnums=(0, 1, 3))
return fun(cond_fun, body_fun, init_val, maxiter)
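# Illustrative usage sketch (hypothetical, not part of the original module): halve a
# value until it drops below 1, dispatching to the lax-backed loop above. The names
# `cond` and `body` and the initial value are illustrative assumptions.
if __name__ == "__main__":
  def cond(x):
    return x > 1.0
  def body(x):
    return x / 2.0
  # unroll=False with jit=True selects _while_loop_lax.
  print(while_loop(cond, body, jnp.asarray(100.0), maxiter=50, unroll=False, jit=True))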
|
[
"jax.lax.scan",
"jax.lax.while_loop",
"jax.jit",
"jax.lax.cond"
] |
[((1809, 1864), 'jax.lax.while_loop', 'jax.lax.while_loop', (['_cond_fun', '_body_fun', '(0, init_val)'], {}), '(_cond_fun, _body_fun, (0, init_val))\n', (1827, 1864), False, 'import jax\n'), ((2422, 2460), 'jax.jit', 'jax.jit', (['fun'], {'static_argnums': '(0, 1, 3)'}), '(fun, static_argnums=(0, 1, 3))\n', (2429, 2460), False, 'import jax\n'), ((981, 1033), 'jax.lax.cond', 'jax.lax.cond', (['cond', '_iter', '(lambda x: (x, False))', 'val'], {}), '(cond, _iter, lambda x: (x, False), val)\n', (993, 1033), False, 'import jax\n'), ((1088, 1135), 'jax.lax.scan', 'jax.lax.scan', (['_fun', 'init', 'None'], {'length': 'max_iter'}), '(_fun, init, None, length=max_iter)\n', (1100, 1135), False, 'import jax\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains NcfModelRunner, which can train and evaluate an NCF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import time
import tensorflow as tf
from tensorflow.contrib.compiler import xla
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
class NcfModelRunner(object):
"""Creates a graph to train/evaluate an NCF model, and runs it.
This class builds both a training model and evaluation model in the graph.
The two models share variables, so that during evaluation, the trained
variables are used.
"""
# _TrainModelProperties and _EvalModelProperties store useful properties of
# the training and evaluation models, respectively.
# _SHARED_MODEL_PROPERTY_FIELDS is their shared fields.
_SHARED_MODEL_PROPERTY_FIELDS = (
# A scalar tf.string placeholder tensor, that will be fed the path to the
# directory storing the TFRecord files for the input data.
"record_files_placeholder",
# The tf.data.Iterator to iterate over the input data.
"iterator",
# A scalar float tensor representing the model loss.
"loss",
# The batch size, as a Python int.
"batch_size",
# The op to run the model. For the training model, this trains the model
# for one step. For the evaluation model, this computes the metrics and
# updates the metric variables.
"run_model_op")
_TrainModelProperties = namedtuple("_TrainModelProperties", # pylint: disable=invalid-name
_SHARED_MODEL_PROPERTY_FIELDS)
_EvalModelProperties = namedtuple( # pylint: disable=invalid-name
"_EvalModelProperties", _SHARED_MODEL_PROPERTY_FIELDS + (
# A dict from metric name to (metric, update_op) tuple.
"metrics",
# Initializes the metric variables.
"metric_initializer",))
def __init__(self, ncf_dataset, params):
with tf.Graph().as_default() as self._graph:
if params["use_xla_for_gpu"]:
# The XLA functions we use require resource variables.
tf.enable_resource_variables()
self._ncf_dataset = ncf_dataset
self._global_step = tf.train.create_global_step()
self._train_model_properties = self._build_model(params, is_training=True)
self._eval_model_properties = self._build_model(params, is_training=False)
initializer = tf.global_variables_initializer()
self._graph.finalize()
self._session = tf.Session(graph=self._graph)
self._session.run(initializer)
def _build_model(self, params, is_training):
"""Builds the NCF model.
Args:
params: A dict of hyperparameters.
is_training: If True, build the training model. If False, build the
evaluation model.
Returns:
A _TrainModelProperties if is_training is True, or an _EvalModelProperties
otherwise.
"""
record_files_placeholder = tf.placeholder(tf.string, ())
input_fn, _, _ = \
data_preprocessing.make_input_fn(
ncf_dataset=self._ncf_dataset, is_training=is_training,
record_files=record_files_placeholder)
dataset = input_fn(params)
iterator = dataset.make_initializable_iterator()
model_fn = neumf_model.neumf_model_fn
if params["use_xla_for_gpu"]:
model_fn = xla.estimator_model_fn(model_fn)
if is_training:
features, labels = iterator.get_next()
estimator_spec = model_fn(
features, labels, tf.estimator.ModeKeys.TRAIN, params)
with tf.control_dependencies([estimator_spec.train_op]):
run_model_op = self._global_step.assign_add(1)
return self._TrainModelProperties(
record_files_placeholder, iterator,
estimator_spec.loss, params["batch_size"], run_model_op)
else:
features = iterator.get_next()
estimator_spec = model_fn(
features, None, tf.estimator.ModeKeys.EVAL, params)
run_model_op = tf.group(*(update_op for _, update_op in
estimator_spec.eval_metric_ops.values()))
metric_initializer = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
return self._EvalModelProperties(
record_files_placeholder, iterator, estimator_spec.loss,
params["eval_batch_size"], run_model_op,
estimator_spec.eval_metric_ops, metric_initializer)
def _train_or_eval(self, model_properties, num_steps, is_training):
"""Either trains or evaluates, depending on whether `is_training` is True.
Args:
model_properties: _TrainModelProperties or an _EvalModelProperties
containing the properties of the training or evaluation graph.
num_steps: The number of steps to train or evaluate for.
is_training: If True, run the training model. If False, run the evaluation
model.
Returns:
record_dir: The directory of TFRecords where the training/evaluation input
data was read from.
"""
if self._ncf_dataset is not None:
epoch_metadata, record_dir, template = data_preprocessing.get_epoch_info(
is_training=is_training, ncf_dataset=self._ncf_dataset)
batch_count = epoch_metadata["batch_count"]
if batch_count != num_steps:
raise ValueError(
"Step counts do not match. ({} vs. {}) The async process is "
"producing incorrect shards.".format(batch_count, num_steps))
record_files = os.path.join(record_dir, template.format("*"))
initializer_feed_dict = {
model_properties.record_files_placeholder: record_files}
del batch_count
else:
initializer_feed_dict = None
record_dir = None
self._session.run(model_properties.iterator.initializer,
initializer_feed_dict)
fetches = (model_properties.loss, model_properties.run_model_op)
mode = "Train" if is_training else "Eval"
start = None
for i in range(num_steps):
loss, _, = self._session.run(fetches)
if i % 100 == 0:
if start is None:
# Only start the timer after 100 steps so there is a warmup.
start = time.time()
start_step = i
tf.logging.info("{} Loss = {}".format(mode, loss))
end = time.time()
if start is not None:
print("{} peformance: {} examples/sec".format(
mode, (i - start_step) * model_properties.batch_size / (end - start)))
return record_dir
def train(self, num_train_steps):
"""Trains the graph for a single cycle.
Args:
num_train_steps: The number of steps per cycle to train for.
"""
record_dir = self._train_or_eval(self._train_model_properties,
num_train_steps, is_training=True)
if record_dir:
# We delete the record_dir because each cycle, new TFRecords is generated
# by the async process.
tf.gfile.DeleteRecursively(record_dir)
def eval(self, num_eval_steps):
"""Evaluates the graph on the eval data.
Args:
num_eval_steps: The number of steps to evaluate for.
Returns:
A dict of evaluation results.
"""
self._session.run(self._eval_model_properties.metric_initializer)
self._train_or_eval(self._eval_model_properties, num_eval_steps,
is_training=False)
eval_results = {
'global_step': self._session.run(self._global_step)}
for key, (val, _) in self._eval_model_properties.metrics.items():
val_ = self._session.run(val)
tf.logging.info("{} = {}".format(key, self._session.run(val)))
eval_results[key] = val_
return eval_results
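# Illustrative usage sketch (hypothetical; assumes an `ncf_dataset` produced by
# data_preprocessing and a `params` dict containing at least the keys read above,
# e.g. "use_xla_for_gpu", "batch_size" and "eval_batch_size"):
#   runner = NcfModelRunner(ncf_dataset, params)
#   runner.train(num_train_steps)
#   eval_results = runner.eval(num_eval_steps)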
|
[
"tensorflow.Graph",
"collections.namedtuple",
"tensorflow.contrib.compiler.xla.estimator_model_fn",
"official.recommendation.data_preprocessing.get_epoch_info",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.global_variables_initializer",
"official.recommendation.data_preprocessing.make_input_fn",
"tensorflow.control_dependencies",
"time.time",
"tensorflow.enable_resource_variables",
"tensorflow.train.create_global_step",
"tensorflow.get_collection"
] |
[((2243, 2309), 'collections.namedtuple', 'namedtuple', (['"""_TrainModelProperties"""', '_SHARED_MODEL_PROPERTY_FIELDS'], {}), "('_TrainModelProperties', _SHARED_MODEL_PROPERTY_FIELDS)\n", (2253, 2309), False, 'from collections import namedtuple\n'), ((2404, 2510), 'collections.namedtuple', 'namedtuple', (['"""_EvalModelProperties"""', "(_SHARED_MODEL_PROPERTY_FIELDS + ('metrics', 'metric_initializer'))"], {}), "('_EvalModelProperties', _SHARED_MODEL_PROPERTY_FIELDS + (\n 'metrics', 'metric_initializer'))\n", (2414, 2510), False, 'from collections import namedtuple\n'), ((3268, 3297), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self._graph'}), '(graph=self._graph)\n', (3278, 3297), True, 'import tensorflow as tf\n'), ((3712, 3741), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string', '()'], {}), '(tf.string, ())\n', (3726, 3741), True, 'import tensorflow as tf\n'), ((3771, 3903), 'official.recommendation.data_preprocessing.make_input_fn', 'data_preprocessing.make_input_fn', ([], {'ncf_dataset': 'self._ncf_dataset', 'is_training': 'is_training', 'record_files': 'record_files_placeholder'}), '(ncf_dataset=self._ncf_dataset, is_training\n =is_training, record_files=record_files_placeholder)\n', (3803, 3903), False, 'from official.recommendation import data_preprocessing\n'), ((7030, 7041), 'time.time', 'time.time', ([], {}), '()\n', (7039, 7041), False, 'import time\n'), ((2974, 3003), 'tensorflow.train.create_global_step', 'tf.train.create_global_step', ([], {}), '()\n', (3001, 3003), True, 'import tensorflow as tf\n'), ((3187, 3220), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3218, 3220), True, 'import tensorflow as tf\n'), ((4098, 4130), 'tensorflow.contrib.compiler.xla.estimator_model_fn', 'xla.estimator_model_fn', (['model_fn'], {}), '(model_fn)\n', (4120, 4130), False, 'from tensorflow.contrib.compiler import xla\n'), ((5854, 5948), 'official.recommendation.data_preprocessing.get_epoch_info', 'data_preprocessing.get_epoch_info', ([], {'is_training': 'is_training', 'ncf_dataset': 'self._ncf_dataset'}), '(is_training=is_training, ncf_dataset=self\n ._ncf_dataset)\n', (5887, 5948), False, 'from official.recommendation import data_preprocessing\n'), ((7666, 7704), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['record_dir'], {}), '(record_dir)\n', (7692, 7704), True, 'import tensorflow as tf\n'), ((2879, 2909), 'tensorflow.enable_resource_variables', 'tf.enable_resource_variables', ([], {}), '()\n', (2907, 2909), True, 'import tensorflow as tf\n'), ((4306, 4356), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[estimator_spec.train_op]'], {}), '([estimator_spec.train_op])\n', (4329, 4356), True, 'import tensorflow as tf\n'), ((4908, 4956), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.METRIC_VARIABLES'], {}), '(tf.GraphKeys.METRIC_VARIABLES)\n', (4925, 4956), True, 'import tensorflow as tf\n'), ((2732, 2742), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2740, 2742), True, 'import tensorflow as tf\n'), ((6924, 6935), 'time.time', 'time.time', ([], {}), '()\n', (6933, 6935), False, 'import time\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
def getTransformMatrix(origin, destination):
x = np.zeros(origin.shape[0] + 1) # insert [0]-element for better indexing -> x[1] = first element
x[1:] = origin[:,0]
y = np.copy(x)
y[1:] = origin[:,1]
x_ = np.copy(x)
x_[1:] = destination[:,0]
y_ = np.copy(x)
y_[1:] = destination[:,1]
a11 = (y[1] * (x_[2] - x_[3]) + y[2] * (x_[3] - x_[1]) + y[3] * (x_[1] - x_[2]))
a12 = (x[1] * (x_[3] - x_[2]) + x[2] * (x_[1] - x_[3]) + x[3] * (x_[2] - x_[1]))
a21 = (y[1] * (y_[2] - y_[3]) + y[2] * (y_[3] - y_[1]) + y[3] * (y_[1] - y_[2]))
a22 = (x[1] * (y_[3] - y_[2]) + x[2] * (y_[1] - y_[3]) + x[3] * (y_[2] - y_[1]))
a13 = (x[1] * (y[3]*x_[2] - y[2]*x_[3]) + x[2] * (y[1]*x_[3] - y[3]*x_[1]) + x[3] * (y[2]*x_[1] - y[1]*x_[2]))
a23 = (x[1] * (y[3]*y_[2] - y[2]*y_[3]) + x[2] * (y[1]*y_[3] - y[3]*y_[1]) + x[3] * (y[2]*y_[1] - y[1]*y_[2]))
d = x[1]*(y[3] - y[2]) + x[2]*(y[1] - y[3]) + x[3]*(y[2] - y[1])
return 1/d * np.array([[a11, a12, a13], [a21, a22, a23], [0, 0, 1]])
def transformImage(image, M):
warpedImage = np.zeros(image.shape, dtype=np.int32)
for y, row in enumerate(image):
for x, value in enumerate(row):
newX, newY, _ = np.dot(M, np.array([x,y,1]))
cond1 = newY < warpedImage.shape[0] and newX < warpedImage.shape[1]
cond2 = newY > 0 and newX > 0
if cond1 and cond2:
warpedImage[int(newY)][int(newX)] = value
return warpedImage
def interpolateMissingPixels(image):
#interpImage = np.zeros(image.shape, dtype=np.int32)
interpImage = np.array(image)
for y in range(1, len(image) - 1):
row = interpImage[y]
for x in range(1, len(row) - 1):
            if not row[x].any(): # empty pixel (all channels zero, i.e. never written)
windowPixels = interpImage[y-1:y+2, x-1:x+2] # [rgb], [rgb], [rgb]
# if windowPixels.sum() == 0:
# continue
newPixel = np.array([0,0,0])
for channel in range(3): # interpolate rgb
channelValues = windowPixels[:, :, channel]
temp = channelValues != 0
meancount = temp.sum()
newPixel[channel] = channelValues.sum() / meancount if meancount != 0 else 0
interpImage[y][x] = newPixel
return interpImage
def main():
origin = np.array([[50, 50], [50, 100], [100, 50]])
destination = np.array([[50, 100], [100, 250], [150, 50]])
m = getTransformMatrix(origin, destination)
image = plt.imread("scarlet.jpg")[100:400, 100:400]
warpedImage = transformImage(image, m)
interpImage = interpolateMissingPixels(warpedImage)
fig, ax = plt.subplots(1,3)
ax[0].imshow(image)
ax[1].imshow(warpedImage)
ax[2].imshow(interpImage)
plt.show()
if __name__ == "__main__":
main()
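# Illustrative sanity check (hypothetical, not part of the original script): the matrix
# returned by getTransformMatrix should map each origin point onto its destination,
# e.g. with the points used in main():
#   M = getTransformMatrix(np.array([[50, 50], [50, 100], [100, 50]]),
#                          np.array([[50, 100], [100, 250], [150, 50]]))
#   np.dot(M, np.array([50, 50, 1]))[:2]  # approximately [50., 100.]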
|
[
"numpy.copy",
"matplotlib.pyplot.imread",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((129, 158), 'numpy.zeros', 'np.zeros', (['(origin.shape[0] + 1)'], {}), '(origin.shape[0] + 1)\n', (137, 158), True, 'import numpy as np\n'), ((256, 266), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (263, 266), True, 'import numpy as np\n'), ((301, 311), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (308, 311), True, 'import numpy as np\n'), ((351, 361), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (358, 361), True, 'import numpy as np\n'), ((1278, 1315), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int32'}), '(image.shape, dtype=np.int32)\n', (1286, 1315), True, 'import numpy as np\n'), ((1797, 1812), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1805, 1812), True, 'import numpy as np\n'), ((2584, 2626), 'numpy.array', 'np.array', (['[[50, 50], [50, 100], [100, 50]]'], {}), '([[50, 50], [50, 100], [100, 50]])\n', (2592, 2626), True, 'import numpy as np\n'), ((2646, 2690), 'numpy.array', 'np.array', (['[[50, 100], [100, 250], [150, 50]]'], {}), '([[50, 100], [100, 250], [150, 50]])\n', (2654, 2690), True, 'import numpy as np\n'), ((2913, 2931), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (2925, 2931), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3027, 3029), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1228), 'numpy.array', 'np.array', (['[[a11, a12, a13], [a21, a22, a23], [0, 0, 1]]'], {}), '([[a11, a12, a13], [a21, a22, a23], [0, 0, 1]])\n', (1181, 1228), True, 'import numpy as np\n'), ((2754, 2779), 'matplotlib.pyplot.imread', 'plt.imread', (['"""scarlet.jpg"""'], {}), "('scarlet.jpg')\n", (2764, 2779), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1449), 'numpy.array', 'np.array', (['[x, y, 1]'], {}), '([x, y, 1])\n', (1438, 1449), True, 'import numpy as np\n'), ((2158, 2177), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2166, 2177), True, 'import numpy as np\n')]
|
import argparse
import os
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim.adamax import Adamax
from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset
epochs = 100
batch_size = 64
lr = 3e-4
dataset_filename = os.path.join(
'dsprites',
'multi_dsprites_color_012.npz')
# dataset_filename = os.path.join(
# 'binary_mnist',
# 'multi_binary_mnist_012.npz')
class SimpleBlock(nn.Module):
def __init__(self, ch, kernel, stride=1, dropout=0.25):
super().__init__()
assert kernel % 2 == 1
padding = (kernel - 1) // 2
self.net = nn.Sequential(
nn.Conv2d(ch, ch, kernel, padding=padding, stride=stride),
nn.Dropout2d(dropout),
nn.LeakyReLU(),
nn.BatchNorm2d(ch),
)
def forward(self, x):
return self.net(x)
class Model(nn.Module):
def __init__(self, color_channels, n_classes):
super().__init__()
self.convnet = nn.Sequential(
nn.Conv2d(color_channels, 64, 5, padding=2, stride=2),
nn.LeakyReLU(),
SimpleBlock(64, 3, stride=2),
SimpleBlock(64, 3, stride=2),
SimpleBlock(64, 3, stride=2),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
)
self.fcnet = nn.Sequential(
nn.Linear(64, 64),
nn.LeakyReLU(),
nn.Linear(64, n_classes),
)
def forward(self, x):
x = self.convnet(x) # output is 2x2 for 64x64 images
x = x.sum((2, 3)) # sum over spatial dimensions
x = self.fcnet(x)
return x
def main():
args = parse_args()
path = os.path.join('generated', args.dataset_path)
# Datasets and dataloaders
print("loading dataset...")
train_set = MultiObjectDataset(path, train=True)
test_set = MultiObjectDataset(path, train=False)
train_loader = MultiObjectDataLoader(
train_set, batch_size=batch_size, shuffle=True, drop_last=True)
test_loader = MultiObjectDataLoader(test_set, batch_size=100)
# Model and optimizer
print("initializing model...")
channels = train_set.x.shape[1]
n_classes = 3 # hardcoded for dataset with 0 to 2 objects
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Model(channels, n_classes).to(device)
optimizer = Adamax(model.parameters(), lr=lr)
# Training loop
print("training starts")
step = 0
model.train()
for e in range(1, epochs + 1):
for x, labels in train_loader:
# Run model and compute loss
loss, acc = forward(model, x, labels, device)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
if step % 100 == 0:
print("[{}] loss: {:.2g} acc: {:.2g}".format(
step, loss.item(), acc))
# Test
with torch.no_grad():
model.eval()
loss = acc = 0.
for x, labels in test_loader:
loss_, acc_ = forward(model, x, labels, device)
k = len(x) / len(test_set)
loss += loss_.item() * k
acc += acc_ * k
model.train()
print("TEST [epoch {}] loss: {:.2g} acc: {:.2g}".format(
e, loss, acc))
def forward(model, x, labels, device):
# Forward pass through model
n = labels['n_obj'].to(device)
x = x.to(device)
logits = model(x)
# Loss
loss = F.cross_entropy(logits, n)
# Accuracy
pred = logits.max(1)[1]
accuracy = (n == pred).float().mean().item()
return loss, accuracy
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
allow_abbrev=False)
parser.add_argument('--dataset',
type=str,
default=dataset_filename,
metavar='PATH',
dest='dataset_path',
help="relative path of the dataset")
return parser.parse_args()
if __name__ == '__main__':
main()
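# Illustrative smoke test (hypothetical, not part of the original script): the classifier
# maps a batch of 64x64 RGB images to one logit per possible object count.
#   model = Model(color_channels=3, n_classes=3)
#   logits = model(torch.randn(8, 3, 64, 64))  # shape (8, 3)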
|
[
"torch.nn.BatchNorm2d",
"argparse.ArgumentParser",
"torch.nn.LeakyReLU",
"torch.nn.Dropout2d",
"os.path.join",
"torch.nn.Conv2d",
"multiobject.pytorch.MultiObjectDataset",
"torch.cuda.is_available",
"torch.nn.functional.cross_entropy",
"torch.nn.Linear",
"multiobject.pytorch.MultiObjectDataLoader",
"torch.no_grad"
] |
[((265, 321), 'os.path.join', 'os.path.join', (['"""dsprites"""', '"""multi_dsprites_color_012.npz"""'], {}), "('dsprites', 'multi_dsprites_color_012.npz')\n", (277, 321), False, 'import os\n'), ((1688, 1732), 'os.path.join', 'os.path.join', (['"""generated"""', 'args.dataset_path'], {}), "('generated', args.dataset_path)\n", (1700, 1732), False, 'import os\n'), ((1813, 1849), 'multiobject.pytorch.MultiObjectDataset', 'MultiObjectDataset', (['path'], {'train': '(True)'}), '(path, train=True)\n', (1831, 1849), False, 'from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset\n'), ((1865, 1902), 'multiobject.pytorch.MultiObjectDataset', 'MultiObjectDataset', (['path'], {'train': '(False)'}), '(path, train=False)\n', (1883, 1902), False, 'from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset\n'), ((1922, 2011), 'multiobject.pytorch.MultiObjectDataLoader', 'MultiObjectDataLoader', (['train_set'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_set, batch_size=batch_size, shuffle=True,\n drop_last=True)\n', (1943, 2011), False, 'from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset\n'), ((2035, 2082), 'multiobject.pytorch.MultiObjectDataLoader', 'MultiObjectDataLoader', (['test_set'], {'batch_size': '(100)'}), '(test_set, batch_size=100)\n', (2056, 2082), False, 'from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset\n'), ((3584, 3610), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'n'], {}), '(logits, n)\n', (3599, 3610), True, 'import torch.nn.functional as F\n'), ((3764, 3868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'allow_abbrev': '(False)'}), '(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, allow_abbrev=False)\n', (3787, 3868), False, 'import argparse\n'), ((656, 713), 'torch.nn.Conv2d', 'nn.Conv2d', (['ch', 'ch', 'kernel'], {'padding': 'padding', 'stride': 'stride'}), '(ch, ch, kernel, padding=padding, stride=stride)\n', (665, 713), False, 'from torch import nn\n'), ((727, 748), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (739, 748), False, 'from torch import nn\n'), ((762, 776), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (774, 776), False, 'from torch import nn\n'), ((790, 808), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ch'], {}), '(ch)\n', (804, 808), False, 'from torch import nn\n'), ((1029, 1082), 'torch.nn.Conv2d', 'nn.Conv2d', (['color_channels', '(64)', '(5)'], {'padding': '(2)', 'stride': '(2)'}), '(color_channels, 64, 5, padding=2, stride=2)\n', (1038, 1082), False, 'from torch import nn\n'), ((1096, 1110), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1108, 1110), False, 'from torch import nn\n'), ((1250, 1291), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'padding': '(1)', 'stride': '(2)'}), '(64, 64, 3, padding=1, stride=2)\n', (1259, 1291), False, 'from torch import nn\n'), ((1351, 1368), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(64)'], {}), '(64, 64)\n', (1360, 1368), False, 'from torch import nn\n'), ((1382, 1396), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1394, 1396), False, 'from torch import nn\n'), ((1410, 1434), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'n_classes'], {}), '(64, n_classes)\n', (1419, 1434), False, 'from torch import nn\n'), ((2281, 2306), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2304, 2306), False, 'import 
torch\n'), ((2995, 3010), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3008, 3010), False, 'import torch\n')]
|
import bpy
import random
import math
## Fixed-value settings #####################################################
# List of script file paths to run
FILE_ROOT_PATH = 'D:/blender_battleVR_py/'
setrendr_file_name = FILE_ROOT_PATH + "setting_render.py"
magicobj_file_name = FILE_ROOT_PATH + "magic_model.py"
fieldins_file_name = FILE_ROOT_PATH + "field_model.py"
wizardob_file_name = FILE_ROOT_PATH + "wizard_model.py"
witchcft_file_name = FILE_ROOT_PATH + "witchcraft_model.py"
camerast_file_name = FILE_ROOT_PATH + "camera_setting.py"
# List of sound effect (SE) file paths
SE_ROOT_PATH = FILE_ROOT_PATH + 'se/'
#sound_begin = (SE_ROOT_PATH + "花火・一発_begin.wav", SE_ROOT_PATH + "花火・一発_begin.wav")
#sound_bomb = (SE_ROOT_PATH + "花火・一発_bomb.wav", SE_ROOT_PATH + "nc178345_bomb.wav")
# List of magic circle image file paths
IMG_ROOT_PATH = FILE_ROOT_PATH + 'img/'
witchcraft_img_name = (
IMG_ROOT_PATH + "magic_0.png",
IMG_ROOT_PATH + "magic_1.png",
IMG_ROOT_PATH + "magic_2.png",
IMG_ROOT_PATH + "magic_3.png",
IMG_ROOT_PATH + "magic_4.png"
)
# End frame of the scene
FRAME_END = 500
##########################################################################
# Select all objects
bpy.ops.object.select_all(action='SELECT')
# Delete all objects
bpy.ops.object.delete(True)
# Create the sequence editor (clearing any existing one first)
if bpy.context.scene.sequence_editor:
bpy.context.scene.sequence_editor_clear()
bpy.context.scene.sequence_editor_create()
# Set the end frame
bpy.data.scenes["Scene"].frame_end = FRAME_END
# Rendering settings
exec(compile(open(setrendr_file_name).read().replace("FILE_ROOT_PATH", FILE_ROOT_PATH), setrendr_file_name, 'exec'))
# Create the camera
exec(compile(open(camerast_file_name).read(), camerast_file_name, 'exec'))
# Create the field
exec(compile(open(fieldins_file_name).read(), fieldins_file_name, 'exec'))
# Create the wizard model
exec(compile(open(wizardob_file_name).read(), wizardob_file_name, 'exec'))
# Create the magic circle
exec(compile(open(witchcft_file_name).read().replace("WITCHECRAFT_IMAGES", str(witchcraft_img_name)), witchcft_file_name, 'exec'))
# Create the objects used for the magic effects
exec(compile(open(magicobj_file_name).read(), magicobj_file_name, 'exec'))
|
[
"bpy.ops.object.delete",
"bpy.ops.object.select_all",
"bpy.context.scene.sequence_editor_clear",
"bpy.context.scene.sequence_editor_create"
] |
[((1104, 1146), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (1129, 1146), False, 'import bpy\n'), ((1159, 1186), 'bpy.ops.object.delete', 'bpy.ops.object.delete', (['(True)'], {}), '(True)\n', (1180, 1186), False, 'import bpy\n'), ((1287, 1329), 'bpy.context.scene.sequence_editor_create', 'bpy.context.scene.sequence_editor_create', ([], {}), '()\n', (1327, 1329), False, 'import bpy\n'), ((1245, 1286), 'bpy.context.scene.sequence_editor_clear', 'bpy.context.scene.sequence_editor_clear', ([], {}), '()\n', (1284, 1286), False, 'import bpy\n')]
|
from typing import Union
import kfp
from kfp.components import InputPath, OutputPath
from skit_pipelines import constants as pipeline_constants
def extract_tgz_archive(
tgz_path: InputPath(str),
output_path: OutputPath(str),
):
import tarfile
from loguru import logger
logger.debug(f"Extracting .tgz archive {tgz_path}.")
tar = tarfile.open(tgz_path)
tar.extractall(path=output_path)
tar.close()
logger.debug(f"Extracted successfully.")
extract_tgz_op = kfp.components.create_component_from_func(
extract_tgz_archive, base_image=pipeline_constants.BASE_IMAGE
)
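# Illustrative usage sketch (hypothetical; assumes KFP v1 component semantics): inside a
# @kfp.dsl.pipeline the op factory is called with an upstream .tgz artifact, and later
# steps consume the extracted directory from its output, e.g.
#   extracted = extract_tgz_op(download_task.output)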
|
[
"tarfile.open",
"loguru.logger.debug",
"kfp.components.create_component_from_func",
"kfp.components.InputPath",
"kfp.components.OutputPath"
] |
[((498, 607), 'kfp.components.create_component_from_func', 'kfp.components.create_component_from_func', (['extract_tgz_archive'], {'base_image': 'pipeline_constants.BASE_IMAGE'}), '(extract_tgz_archive, base_image=\n pipeline_constants.BASE_IMAGE)\n', (539, 607), False, 'import kfp\n'), ((295, 347), 'loguru.logger.debug', 'logger.debug', (['f"""Extracting .tgz archive {tgz_path}."""'], {}), "(f'Extracting .tgz archive {tgz_path}.')\n", (307, 347), False, 'from loguru import logger\n'), ((358, 380), 'tarfile.open', 'tarfile.open', (['tgz_path'], {}), '(tgz_path)\n', (370, 380), False, 'import tarfile\n'), ((438, 478), 'loguru.logger.debug', 'logger.debug', (['f"""Extracted successfully."""'], {}), "(f'Extracted successfully.')\n", (450, 478), False, 'from loguru import logger\n'), ((187, 201), 'kfp.components.InputPath', 'InputPath', (['str'], {}), '(str)\n', (196, 201), False, 'from kfp.components import InputPath, OutputPath\n'), ((220, 235), 'kfp.components.OutputPath', 'OutputPath', (['str'], {}), '(str)\n', (230, 235), False, 'from kfp.components import InputPath, OutputPath\n')]
|
import pytest
from dagster import (
DagsterInvalidDefinitionError,
InputDefinition,
Nothing,
Optional,
composite_solid,
execute_pipeline,
execute_solid,
lambda_solid,
pipeline,
)
def test_none():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=None)])
def none_x(x):
return x
result = execute_solid(none_x)
assert result.output_value() == None
def test_none_infer():
@lambda_solid
def none_x(x=None):
return x
result = execute_solid(none_x)
assert result.output_value() == None
def test_int():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
result = execute_solid(int_x)
assert result.output_value() == 1337
def test_int_infer():
@lambda_solid
def int_x(x=1337):
return x
result = execute_solid(int_x)
assert result.output_value() == 1337
def test_early_fail():
with pytest.raises(
DagsterInvalidDefinitionError,
match="Type check failed for the default_value of InputDefinition x of type Int",
):
@lambda_solid(input_defs=[InputDefinition("x", int, default_value="foo")])
def _int_x(x):
return x
with pytest.raises(
DagsterInvalidDefinitionError,
match="Type check failed for the default_value of InputDefinition x of type String",
):
@lambda_solid(input_defs=[InputDefinition("x", str, default_value=1337)])
def _int_x(x):
return x
# we can't catch bad default_values except for scalars until runtime since the type_check function depends on
# a context that has access to resources etc.
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value="number")])
def bad_default(x):
return x
def test_mismatch():
result = execute_solid(bad_default, raise_on_error=False)
assert result.success == False
assert result.input_events_during_compute[0].step_input_data.type_check_data.success == False
def test_env_precedence():
result = execute_solid(
bad_default,
run_config={"solids": {"bad_default": {"inputs": {"x": 1}}}},
raise_on_error=False,
)
assert result.success == True
assert result.output_value() == 1
def test_input_precedence():
@lambda_solid
def emit_one():
return 1
@pipeline
def pipe():
bad_default(emit_one())
result = execute_pipeline(pipe)
assert result.success
assert result.output_for_solid("bad_default") == 1
def test_nothing():
with pytest.raises(DagsterInvalidDefinitionError):
@lambda_solid(input_defs=[InputDefinition("x", Nothing, default_value=None)])
def _nothing():
pass
def test_composite_outer_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int])])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 42
def test_composite_inner_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int])])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 1337
def test_composite_precedence_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 42
def test_composite_mid_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int])])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
@composite_solid(input_defs=[InputDefinition("z", Optional[int])])
def outter_wrap(z):
return wrap(z)
result = execute_solid(outter_wrap)
assert result.success
assert result.output_value() == 42
|
[
"dagster.execute_solid",
"dagster.execute_pipeline",
"pytest.raises",
"dagster.InputDefinition"
] |
[((373, 394), 'dagster.execute_solid', 'execute_solid', (['none_x'], {}), '(none_x)\n', (386, 394), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((534, 555), 'dagster.execute_solid', 'execute_solid', (['none_x'], {}), '(none_x)\n', (547, 555), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((752, 772), 'dagster.execute_solid', 'execute_solid', (['int_x'], {}), '(int_x)\n', (765, 772), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((910, 930), 'dagster.execute_solid', 'execute_solid', (['int_x'], {}), '(int_x)\n', (923, 930), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((1891, 1939), 'dagster.execute_solid', 'execute_solid', (['bad_default'], {'raise_on_error': '(False)'}), '(bad_default, raise_on_error=False)\n', (1904, 1939), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((2115, 2229), 'dagster.execute_solid', 'execute_solid', (['bad_default'], {'run_config': "{'solids': {'bad_default': {'inputs': {'x': 1}}}}", 'raise_on_error': '(False)'}), "(bad_default, run_config={'solids': {'bad_default': {'inputs':\n {'x': 1}}}}, raise_on_error=False)\n", (2128, 2229), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((2492, 2514), 'dagster.execute_pipeline', 'execute_pipeline', (['pipe'], {}), '(pipe)\n', (2508, 2514), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3087, 3106), 'dagster.execute_solid', 'execute_solid', (['wrap'], {}), '(wrap)\n', (3100, 3106), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3460, 3479), 'dagster.execute_solid', 'execute_solid', (['wrap'], {}), '(wrap)\n', (3473, 3479), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3858, 3877), 'dagster.execute_solid', 'execute_solid', (['wrap'], {}), '(wrap)\n', (3871, 3877), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((4346, 4372), 'dagster.execute_solid', 'execute_solid', (['outter_wrap'], {}), '(outter_wrap)\n', (4359, 4372), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((1006, 1137), 'pytest.raises', 'pytest.raises', (['DagsterInvalidDefinitionError'], {'match': '"""Type check failed for the default_value of InputDefinition x of type Int"""'}), "(DagsterInvalidDefinitionError, match=\n 'Type check failed for the default_value of InputDefinition x of 
type Int')\n", (1019, 1137), False, 'import pytest\n'), ((1295, 1434), 'pytest.raises', 'pytest.raises', (['DagsterInvalidDefinitionError'], {'match': '"""Type check failed for the default_value of InputDefinition x of type String"""'}), "(DagsterInvalidDefinitionError, match=\n 'Type check failed for the default_value of InputDefinition x of type String'\n )\n", (1308, 1434), False, 'import pytest\n'), ((2627, 2671), 'pytest.raises', 'pytest.raises', (['DagsterInvalidDefinitionError'], {}), '(DagsterInvalidDefinitionError)\n', (2640, 2671), False, 'import pytest\n'), ((1760, 1819), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {'default_value': '"""number"""'}), "('x', Optional[int], default_value='number')\n", (1775, 1819), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((265, 320), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {'default_value': 'None'}), "('x', Optional[int], default_value=None)\n", (280, 320), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((645, 700), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {'default_value': '(1337)'}), "('x', Optional[int], default_value=1337)\n", (660, 700), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((2869, 2904), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {}), "('x', Optional[int])\n", (2884, 2904), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((2976, 3029), 'dagster.InputDefinition', 'InputDefinition', (['"""y"""', 'Optional[int]'], {'default_value': '(42)'}), "('y', Optional[int], default_value=42)\n", (2991, 3029), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3240, 3295), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {'default_value': '(1337)'}), "('x', Optional[int], default_value=1337)\n", (3255, 3295), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3367, 3402), 'dagster.InputDefinition', 'InputDefinition', (['"""y"""', 'Optional[int]'], {}), "('y', Optional[int])\n", (3382, 3402), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3620, 3675), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {'default_value': '(1337)'}), "('x', Optional[int], default_value=1337)\n", (3635, 3675), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((3747, 3800), 'dagster.InputDefinition', 'InputDefinition', (['"""y"""', 'Optional[int]'], {'default_value': '(42)'}), "('y', Optional[int], default_value=42)\n", (3762, 3800), False, 'from dagster import 
DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((4009, 4044), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Optional[int]'], {}), "('x', Optional[int])\n", (4024, 4044), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((4116, 4169), 'dagster.InputDefinition', 'InputDefinition', (['"""y"""', 'Optional[int]'], {'default_value': '(42)'}), "('y', Optional[int], default_value=42)\n", (4131, 4169), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((4247, 4282), 'dagster.InputDefinition', 'InputDefinition', (['"""z"""', 'Optional[int]'], {}), "('z', Optional[int])\n", (4262, 4282), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((1192, 1238), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'int'], {'default_value': '"""foo"""'}), "('x', int, default_value='foo')\n", (1207, 1238), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((1484, 1529), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'str'], {'default_value': '(1337)'}), "('x', str, default_value=1337)\n", (1499, 1529), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n'), ((2708, 2757), 'dagster.InputDefinition', 'InputDefinition', (['"""x"""', 'Nothing'], {'default_value': 'None'}), "('x', Nothing, default_value=None)\n", (2723, 2757), False, 'from dagster import DagsterInvalidDefinitionError, InputDefinition, Nothing, Optional, composite_solid, execute_pipeline, execute_solid, lambda_solid, pipeline\n')]
|
from config.updateConfig import UpdateConfig
sampleCONF = {
"task": {
"name": "sample",
},
"instructions": {
"text": "Give instructions",
"startPrompt": "Press any key to continue. Press q to quit.",
"alarm": "horn.wav",
"questionnaireReminder": "answerQuestionnaire.wav"
},
"stimuli": {
"backgroundColor": {"versionMain": "black", "versionDemo": "blue", "versionDebug": "gray"},
},
}
sampleTriggers = {
"example": 10
}
updateCofig = UpdateConfig()
updateCofig.addContent(sampleCONF)
updateCofig.addTriggers(sampleTriggers)
CONF = updateCofig.getConfig()
|
[
"config.updateConfig.UpdateConfig"
] |
[((514, 528), 'config.updateConfig.UpdateConfig', 'UpdateConfig', ([], {}), '()\n', (526, 528), False, 'from config.updateConfig import UpdateConfig\n')]
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
def convert_to_onnx_and_check(
job_func,
print_outlier=False,
explicit_init=True,
external_data=False,
ort_optimize=True,
opset=None,
):
check_point = flow.train.CheckPoint()
if explicit_init:
# it is a trick to keep check_point.save() from hanging when there is no variable
@flow.global_function(flow.FunctionConfig())
def add_var():
return flow.get_variable(
name="trick",
shape=(1,),
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
check_point.init()
flow_weight_dir = tempfile.TemporaryDirectory()
check_point.save(flow_weight_dir.name)
# TODO(daquexian): a more elegant way?
while not os.path.exists(os.path.join(flow_weight_dir.name, "snapshot_done")):
pass
onnx_model_dir = tempfile.TemporaryDirectory()
onnx_model_path = os.path.join(onnx_model_dir.name, "model.onnx")
flow.onnx.export(
job_func,
flow_weight_dir.name,
onnx_model_path,
opset=opset,
external_data=external_data,
)
flow_weight_dir.cleanup()
ort_sess_opt = ort.SessionOptions()
ort_sess_opt.graph_optimization_level = (
ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
if ort_optimize
else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
)
sess = ort.InferenceSession(onnx_model_path, sess_options=ort_sess_opt)
onnx_model_dir.cleanup()
assert len(sess.get_outputs()) == 1
assert len(sess.get_inputs()) <= 1
ipt_dict = OrderedDict()
for ipt in sess.get_inputs():
ipt_data = np.random.uniform(low=-10, high=10, size=ipt.shape).astype(
np.float32
)
ipt_dict[ipt.name] = ipt_data
onnx_res = sess.run([], ipt_dict)[0]
oneflow_res = job_func(*ipt_dict.values()).get().numpy()
rtol, atol = 1e-2, 1e-5
if print_outlier:
a = onnx_res.flatten()
b = oneflow_res.flatten()
for i in range(len(a)):
if np.abs(a[i] - b[i]) > atol + rtol * np.abs(b[i]):
print("a[{}]={}, b[{}]={}".format(i, a[i], i, b[i]))
assert np.allclose(onnx_res, oneflow_res, rtol=rtol, atol=atol)
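# Illustrative usage sketch (hypothetical; assumes the legacy lazy-mode oneflow API used
# above, where jobs are declared with flow.global_function):
#   @flow.global_function()
#   def relu_job(x: flow.typing.Numpy.Placeholder((2, 3))) -> flow.typing.Numpy:
#       return flow.math.relu(x)
#   convert_to_onnx_and_check(relu_job)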
|
[
"tempfile.TemporaryDirectory",
"collections.OrderedDict",
"numpy.allclose",
"onnxruntime.SessionOptions",
"oneflow.FunctionConfig",
"numpy.abs",
"os.path.join",
"onnxruntime.InferenceSession",
"oneflow.random_uniform_initializer",
"oneflow.train.CheckPoint",
"numpy.random.uniform",
"oneflow.onnx.export"
] |
[((927, 950), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (948, 950), True, 'import oneflow as flow\n'), ((1396, 1425), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1423, 1425), False, 'import tempfile\n'), ((1629, 1658), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1656, 1658), False, 'import tempfile\n'), ((1681, 1728), 'os.path.join', 'os.path.join', (['onnx_model_dir.name', '"""model.onnx"""'], {}), "(onnx_model_dir.name, 'model.onnx')\n", (1693, 1728), False, 'import os\n'), ((1733, 1845), 'oneflow.onnx.export', 'flow.onnx.export', (['job_func', 'flow_weight_dir.name', 'onnx_model_path'], {'opset': 'opset', 'external_data': 'external_data'}), '(job_func, flow_weight_dir.name, onnx_model_path, opset=\n opset, external_data=external_data)\n', (1749, 1845), True, 'import oneflow as flow\n'), ((1937, 1957), 'onnxruntime.SessionOptions', 'ort.SessionOptions', ([], {}), '()\n', (1955, 1957), True, 'import onnxruntime as ort\n'), ((2156, 2220), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['onnx_model_path'], {'sess_options': 'ort_sess_opt'}), '(onnx_model_path, sess_options=ort_sess_opt)\n', (2176, 2220), True, 'import onnxruntime as ort\n'), ((2344, 2357), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2355, 2357), False, 'from collections import OrderedDict\n'), ((2937, 2993), 'numpy.allclose', 'np.allclose', (['onnx_res', 'oneflow_res'], {'rtol': 'rtol', 'atol': 'atol'}), '(onnx_res, oneflow_res, rtol=rtol, atol=atol)\n', (2948, 2993), True, 'import numpy as np\n'), ((1093, 1114), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1112, 1114), True, 'import oneflow as flow\n'), ((1541, 1592), 'os.path.join', 'os.path.join', (['flow_weight_dir.name', '"""snapshot_done"""'], {}), "(flow_weight_dir.name, 'snapshot_done')\n", (1553, 1592), False, 'import os\n'), ((2411, 2462), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10)', 'high': '(10)', 'size': 'ipt.shape'}), '(low=-10, high=10, size=ipt.shape)\n', (2428, 2462), True, 'import numpy as np\n'), ((2807, 2826), 'numpy.abs', 'np.abs', (['(a[i] - b[i])'], {}), '(a[i] - b[i])\n', (2813, 2826), True, 'import numpy as np\n'), ((1297, 1330), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (1328, 1330), True, 'import oneflow as flow\n'), ((2843, 2855), 'numpy.abs', 'np.abs', (['b[i]'], {}), '(b[i])\n', (2849, 2855), True, 'import numpy as np\n')]
|
import asyncio
import os
import signal
import tomodachi
from typing import Any, Dict, Tuple, Callable, Union # noqa
from aiohttp import web
from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler
from tomodachi.discovery.dummy_registry import DummyRegistry
async def middleware_function(func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any) -> Any:
if request.headers.get('X-Use-Middleware') == 'Set':
service.middleware_called = True
if request.headers.get('X-Use-Middleware') == 'Before':
return 'before'
return_value = await func()
if request.headers.get('X-Use-Middleware') == 'After':
return 'after'
return return_value
@tomodachi.service
class HttpService(tomodachi.Service):
name = 'test_http'
discovery = [DummyRegistry]
options = {
'http': {
'port': None,
'access_log': True,
'real_ip_from': '127.0.0.1'
}
}
uuid = None
closer = asyncio.Future() # type: Any
http_middleware = [middleware_function]
slow_request = False
middleware_called = False
function_triggered = False
websocket_connected = False
websocket_received_data = None
@http('GET', r'/test/?')
async def test(self, request: web.Request) -> str:
return_value = 'test'
return return_value
@http('GET', r'/test/(?P<id>[^/]+?)/?')
async def test_with_id(self, request: web.Request, id: str) -> str:
return 'test {}'.format(id)
@http('GET', r'/middleware-before/?')
async def middleware_before(self, request: web.Request) -> str:
self.function_triggered = True
return 'test'
@http('GET', r'/slow/?')
async def test_slow(self, request: web.Request) -> str:
await asyncio.sleep(2.0)
self.slow_request = True
return 'test'
@http(['GET'], r'/dict/?')
async def test_dict(self, request: web.Request) -> Dict:
return {
'status': 200,
'body': 'test dict',
'headers': {
'X-Dict': 'test'
}
}
@http('GET', r'/tuple/?')
async def test_tuple(self, request: web.Request) -> Tuple:
return (200, 'test tuple', {
'X-Tuple': 'test'
})
@http('GET', r'/aiohttp/?')
async def test_aiohttp(self, request: web.Request) -> web.Response:
return web.Response(body='test aiohttp', status=200, headers={
'X-Aiohttp': 'test'
})
@http('GET', r'/response/?')
async def test_response_object(self, request: web.Request) -> Response:
return Response(body='test tomodachi response', status=200, headers={
'X-Tomodachi-Response': 'test'
})
@http('GET', r'/exception/?')
async def test_exception(self, request: web.Request) -> None:
raise Exception('test')
@http('GET', r'/slow-exception/?')
async def test_slow_exception(self, request: web.Request) -> None:
await asyncio.sleep(2.0)
raise Exception('test')
@http('GET', r'/test-weird-content-type/?')
async def test_weird_content_type(self, request: web.Request) -> web.Response:
return web.Response(body='test', status=200, headers={
'Content-Type': 'text/plain; '
})
@http('GET', r'/test-charset/?')
async def test_charset(self, request: web.Request) -> web.Response:
return web.Response(body='test', status=200, headers={
'Content-Type': 'text/plain; charset=utf-8'
})
@http('GET', r'/test-charset-encoding-correct/?')
async def test_charset_encoding_correct(self, request: web.Request) -> Response:
return Response(body='test \xe5\xe4\xf6', status=200, headers={
'Content-Type': 'text/plain; charset=iso-8859-1'
})
@http('GET', r'/test-charset-encoding-error/?')
async def test_charset_encoding_error(self, request: web.Request) -> Response:
return Response(body='test 友達', status=200, headers={
'Content-Type': 'text/plain; charset=iso-8859-1'
})
@http('GET', r'/test-charset-invalid/?')
async def test_charset_invalid(self, request: web.Request) -> Response:
return Response(body='test', status=200, headers={
'Content-Type': 'text/plain; charset=utf-9'
})
@http('GET', r'/empty-data/?')
async def empty_data(self, request: web.Request) -> str:
return ''
@http('GET', r'/byte-data/?')
async def byte_data(self, request: web.Request) -> bytes:
return b'test \xc3\xa5\xc3\xa4\xc3\xb6'
@http('GET', r'/none-data/?')
async def none_data(self, request: web.Request) -> None:
return None
@http('GET', r'/forwarded-for/?')
async def forwarded_for(self, request: web.Request) -> str:
return RequestHandler.get_request_ip(request) or ''
@http('GET', r'/authorization/?')
async def authorization(self, request: web.Request) -> str:
return request._cache.get('auth').login if request._cache.get('auth') else ''
@http_static('../static_files', r'/static/')
async def static_files_filename_append(self) -> None:
pass
@http_static('../static_files', r'/download/(?P<filename>[^/]+?)/image')
async def static_files_filename_existing(self) -> None:
pass
@http_error(status_code=404)
async def test_404(self, request: web.Request) -> str:
return 'test 404'
@websocket(r'/websocket-simple')
async def websocket_simple(self, websocket: web.WebSocketResponse) -> None:
self.websocket_connected = True
@websocket(r'/websocket-data')
async def websocket_data(self, websocket: web.WebSocketResponse) -> Callable:
async def _receive(data: Union[str, bytes]) -> None:
self.websocket_received_data = data
return _receive
async def _started_service(self) -> None:
async def _async() -> None:
async def sleep_and_kill() -> None:
await asyncio.sleep(10.0)
if not self.closer.done():
self.closer.set_result(None)
task = asyncio.ensure_future(sleep_and_kill())
await self.closer
if not task.done():
task.cancel()
os.kill(os.getpid(), signal.SIGINT)
asyncio.ensure_future(_async())
def stop_service(self) -> None:
if not self.closer.done():
self.closer.set_result(None)
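# Illustrative usage note (hypothetical): a service module like this is typically started
# with the tomodachi CLI, e.g. `tomodachi run <this file>`, which serves the HTTP and
# websocket handlers registered above on the configured port.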
|
[
"tomodachi.transport.http.Response",
"aiohttp.web.Response",
"tomodachi.transport.http.http_error",
"os.getpid",
"asyncio.sleep",
"tomodachi.transport.http.RequestHandler.get_request_ip",
"tomodachi.transport.http.http_static",
"tomodachi.transport.http.http",
"tomodachi.transport.http.websocket",
"asyncio.Future"
] |
[((1055, 1071), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (1069, 1071), False, 'import asyncio\n'), ((1288, 1310), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test/?"""'], {}), "('GET', '/test/?')\n", (1292, 1310), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((1431, 1468), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test/(?P<id>[^/]+?)/?"""'], {}), "('GET', '/test/(?P<id>[^/]+?)/?')\n", (1435, 1468), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((1584, 1619), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/middleware-before/?"""'], {}), "('GET', '/middleware-before/?')\n", (1588, 1619), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((1756, 1778), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/slow/?"""'], {}), "('GET', '/slow/?')\n", (1760, 1778), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((1934, 1958), 'tomodachi.transport.http.http', 'http', (["['GET']", '"""/dict/?"""'], {}), "(['GET'], '/dict/?')\n", (1938, 1958), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2186, 2209), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/tuple/?"""'], {}), "('GET', '/tuple/?')\n", (2190, 2209), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2358, 2383), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/aiohttp/?"""'], {}), "('GET', '/aiohttp/?')\n", (2362, 2383), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2577, 2603), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/response/?"""'], {}), "('GET', '/response/?')\n", (2581, 2603), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2819, 2846), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/exception/?"""'], {}), "('GET', '/exception/?')\n", (2823, 2846), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2952, 2984), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/slow-exception/?"""'], {}), "('GET', '/slow-exception/?')\n", (2956, 2984), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((3128, 3169), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test-weird-content-type/?"""'], {}), "('GET', '/test-weird-content-type/?')\n", (3132, 3169), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((3377, 3407), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test-charset/?"""'], {}), "('GET', '/test-charset/?')\n", (3381, 3407), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((3617, 3664), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test-charset-encoding-correct/?"""'], {}), "('GET', '/test-charset-encoding-correct/?')\n", (3621, 3664), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), 
((3901, 3946), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test-charset-encoding-error/?"""'], {}), "('GET', '/test-charset-encoding-error/?')\n", (3905, 3946), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4171, 4209), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test-charset-invalid/?"""'], {}), "('GET', '/test-charset-invalid/?')\n", (4175, 4209), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4419, 4447), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/empty-data/?"""'], {}), "('GET', '/empty-data/?')\n", (4423, 4447), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4534, 4561), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/byte-data/?"""'], {}), "('GET', '/byte-data/?')\n", (4538, 4561), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4679, 4706), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/none-data/?"""'], {}), "('GET', '/none-data/?')\n", (4683, 4706), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4795, 4826), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/forwarded-for/?"""'], {}), "('GET', '/forwarded-for/?')\n", (4799, 4826), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4958, 4989), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/authorization/?"""'], {}), "('GET', '/authorization/?')\n", (4962, 4989), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((5147, 5189), 'tomodachi.transport.http.http_static', 'http_static', (['"""../static_files"""', '"""/static/"""'], {}), "('../static_files', '/static/')\n", (5158, 5189), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((5268, 5338), 'tomodachi.transport.http.http_static', 'http_static', (['"""../static_files"""', '"""/download/(?P<filename>[^/]+?)/image"""'], {}), "('../static_files', '/download/(?P<filename>[^/]+?)/image')\n", (5279, 5338), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((5419, 5446), 'tomodachi.transport.http.http_error', 'http_error', ([], {'status_code': '(404)'}), '(status_code=404)\n', (5429, 5446), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((5538, 5568), 'tomodachi.transport.http.websocket', 'websocket', (['"""/websocket-simple"""'], {}), "('/websocket-simple')\n", (5547, 5568), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((5696, 5724), 'tomodachi.transport.http.websocket', 'websocket', (['"""/websocket-data"""'], {}), "('/websocket-data')\n", (5705, 5724), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((2472, 2548), 'aiohttp.web.Response', 'web.Response', ([], {'body': '"""test aiohttp"""', 'status': '(200)', 'headers': "{'X-Aiohttp': 'test'}"}), "(body='test aiohttp', status=200, headers={'X-Aiohttp': 'test'})\n", (2484, 2548), False, 'from aiohttp import 
web\n'), ((2696, 2795), 'tomodachi.transport.http.Response', 'Response', ([], {'body': '"""test tomodachi response"""', 'status': '(200)', 'headers': "{'X-Tomodachi-Response': 'test'}"}), "(body='test tomodachi response', status=200, headers={\n 'X-Tomodachi-Response': 'test'})\n", (2704, 2795), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((3269, 3348), 'aiohttp.web.Response', 'web.Response', ([], {'body': '"""test"""', 'status': '(200)', 'headers': "{'Content-Type': 'text/plain; '}"}), "(body='test', status=200, headers={'Content-Type': 'text/plain; '})\n", (3281, 3348), False, 'from aiohttp import web\n'), ((3496, 3592), 'aiohttp.web.Response', 'web.Response', ([], {'body': '"""test"""', 'status': '(200)', 'headers': "{'Content-Type': 'text/plain; charset=utf-8'}"}), "(body='test', status=200, headers={'Content-Type':\n 'text/plain; charset=utf-8'})\n", (3508, 3592), False, 'from aiohttp import web\n'), ((3766, 3867), 'tomodachi.transport.http.Response', 'Response', ([], {'body': '"""test åäö"""', 'status': '(200)', 'headers': "{'Content-Type': 'text/plain; charset=iso-8859-1'}"}), "(body='test åäö', status=200, headers={'Content-Type':\n 'text/plain; charset=iso-8859-1'})\n", (3774, 3867), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4046, 4146), 'tomodachi.transport.http.Response', 'Response', ([], {'body': '"""test 友達"""', 'status': '(200)', 'headers': "{'Content-Type': 'text/plain; charset=iso-8859-1'}"}), "(body='test 友達', status=200, headers={'Content-Type':\n 'text/plain; charset=iso-8859-1'})\n", (4054, 4146), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((4302, 4394), 'tomodachi.transport.http.Response', 'Response', ([], {'body': '"""test"""', 'status': '(200)', 'headers': "{'Content-Type': 'text/plain; charset=utf-9'}"}), "(body='test', status=200, headers={'Content-Type':\n 'text/plain; charset=utf-9'})\n", (4310, 4394), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((1854, 1872), 'asyncio.sleep', 'asyncio.sleep', (['(2.0)'], {}), '(2.0)\n', (1867, 1872), False, 'import asyncio\n'), ((3071, 3089), 'asyncio.sleep', 'asyncio.sleep', (['(2.0)'], {}), '(2.0)\n', (3084, 3089), False, 'import asyncio\n'), ((4907, 4945), 'tomodachi.transport.http.RequestHandler.get_request_ip', 'RequestHandler.get_request_ip', (['request'], {}), '(request)\n', (4936, 4945), False, 'from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler\n'), ((6379, 6390), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6388, 6390), False, 'import os\n'), ((6095, 6114), 'asyncio.sleep', 'asyncio.sleep', (['(10.0)'], {}), '(10.0)\n', (6108, 6114), False, 'import asyncio\n')]
|
# -*- coding: utf-8 -*-
# Copyright © 2018 PyHelp Project Contributors
# https://github.com/jnsebgosselin/pyhelp
#
# This file is part of PyHelp.
# Licensed under the terms of the GNU General Public License.
# ---- Standard Library Imports
import os
import os.path as osp
# ---- Third Party imports
import numpy as np
import geopandas as gpd
import netCDF4
import pandas as pd
# ---- Local Libraries Imports
from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs
from pyhelp.processing import run_help_allcells
from pyhelp.utils import savedata_to_hdf5
from pyhelp.weather_reader import (
save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP,
read_cweeds_file, join_daily_cweeds_wy2_and_wy3)
FNAME_CONN_TABLES = 'connect_table.npy'
class HELPManager(object):
def __init__(self, workdir, year_range, path_togrid=None):
super(HELPManager, self).__init__()
self.year_range = year_range
self.set_workdir(workdir)
self._setup_connect_tables()
if path_togrid is not None:
self.load_grid(path_togrid)
else:
self.grid = None
@property
def cellnames(self):
if self.grid is not None:
return self.grid['cid'].tolist()
else:
return []
@property
def inputdir(self):
"""
Return the path to the folder where the HELP input files are going to
be saved in the working directory. This folder is created in case it
doesn't already exist in the file system.
"""
inputdir = osp.join(self.workdir, 'help_input_files')
if not osp.exists(inputdir):
os.makedirs(inputdir)
return inputdir
@property
def workdir(self):
"""Return the path to the current working directory."""
return os.getcwd()
def set_workdir(self, dirname):
"""Set the working directory of the manager."""
if not osp.exists(dirname):
os.makedirs(dirname)
os.chdir(dirname)
# ---- Connect tables
@property
def path_connect_tables(self):
return osp.join(self.inputdir, FNAME_CONN_TABLES)
def _setup_connect_tables(self):
"""Setup the connect tables dictionary."""
if osp.exists(self.path_connect_tables):
self.connect_tables = np.load(self.path_connect_tables).item()
else:
self.connect_tables = {}
def _save_connect_tables(self):
"""Save the connect tables dictionary to a numpy binary file."""
np.save(self.path_connect_tables, self.connect_tables)
# ---- HELP grid
def load_grid(self, path_togrid):
"""
        Load the grid that contains the information required to evaluate regional
groundwater recharge with HELP.
"""
self.grid = load_grid_from_csv(path_togrid)
return self.grid
# ---- Input files creation
def generate_d13_from_cweeds(self, d13fname, fpath_cweed2, fpath_cweed3,
cellnames=None):
"""
Generate the HELP D13 input file for solar radiation from wy2 and
wy3 CWEEDS files at a given location.
"""
d13fpath = osp.join(self.inputdir, d13fname)
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
print('Reading CWEEDS files...', end=' ')
daily_wy2 = read_cweeds_file(fpath_cweed2, format_to_daily=True)
daily_wy3 = read_cweeds_file(fpath_cweed3, format_to_daily=True)
wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)
indexes = np.where((wy23_df['Years'] >= self.year_range[0]) &
(wy23_df['Years'] <= self.year_range[1]))[0]
print('done')
print('Generating HELP D13 file for solar radiation...', end=' ')
save_solrad_to_HELP(d13fpath,
wy23_df['Years'][indexes],
wy23_df['Irradiance'][indexes],
'CAN_QC_MONTREAL-INTL-A_7025251',
wy23_df['Latitude'])
print('done')
if self.year_range[1] > np.max(wy23_df['Years']):
print("Warning: there is no solar radiation data after year %d."
% np.max(wy23_df['Years']))
if self.year_range[0] < np.min(wy23_df['Years']):
print("Warning: there is no solar radiation data before year %d."
% np.min(wy23_df['Years']))
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
d13_connect_table = {cid: d13fpath for cid in cellnames}
self.connect_tables['D13'] = d13_connect_table
self._save_connect_tables()
print("done")
def generate_d10d11_input_files(self, cellnames=None, sf_edepth=1,
sf_ulai=1):
"""Prepare the D10 and D11 input datafiles for each cell."""
d10d11_inputdir = osp.join(self.inputdir, 'd10d11_input_files')
if not osp.exists(d10d11_inputdir):
os.makedirs(d10d11_inputdir)
# Only keep the cells that are going to be run in HELP because we
# don't need the D10 or D11 input files for those that aren't.
cellnames = self.get_run_cellnames(cellnames)
d10data, d11data = format_d10d11_inputs(self.grid, cellnames,
sf_edepth, sf_ulai)
# Write the D10 and D11 input files.
d10_conn_tbl, d11_conn_tbl = write_d10d11_allcells(
d10d11_inputdir, d10data, d11data)
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D10'] = d10_conn_tbl
self.connect_tables['D11'] = d11_conn_tbl
self._save_connect_tables()
print("done")
def generate_d4d7_from_MDELCC_grid(self, path_netcdf_dir, cellnames=None):
"""
Prepare the D4 and D7 input datafiles for each cell from the
interpolated grid of the MDDELCC.
"""
d4d7_inputdir = osp.join(self.inputdir, 'd4d7_input_files')
if not osp.exists(d4d7_inputdir):
os.makedirs(d4d7_inputdir)
cellnames = self.get_run_cellnames(cellnames)
N = len(cellnames)
# Get the latitudes and longitudes of the resulting cells.
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
# Generate the connectivity table between the HELP grid and the
# MDDELCC interpolated daily weather grid.
print('Generating the connectivity table for each cell...', end=' ')
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
d4_conn_tbl = {}
d7_conn_tbl = {}
data = []
for i, cellname in enumerate(cellnames):
lat_idx, lon_idx = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
d4fname = osp.join(
d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))
d7fname = osp.join(
d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))
d4_conn_tbl[cellnames[i]] = d4fname
d7_conn_tbl[cellnames[i]] = d7fname
data.append([lat_idx, lon_idx, d4fname, d7fname])
print('done')
# Fetch the daily weather data from the netCDF files.
data = np.unique(data, axis=0)
lat_indx = data[:, 0].astype(int)
lon_idx = data[:, 1].astype(int)
years = range(self.year_range[0], self.year_range[1]+1)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_idx, years)
# Convert and save the weather data to D4 and D7 HELP input files.
N = len(data)
for i in range(N):
print(("\rGenerating HELP D4 and D7 files for location " +
"%d of %d (%0.1f%%)...") % (i+1, N, (i+1)/N * 100), end=' ')
lat = meteo_manager.lat[lat_indx[i]]
lon = meteo_manager.lon[lon_idx[i]]
d4fname, d7fname = data[i, 2], data[i, 3]
city = 'Meteo Grid at lat/lon %0.1f ; %0.1f' % (lat, lon)
# Fill -999 with 0 in daily precip.
precip_i = precip[:, i]
precip_i[precip_i == -999] = 0
# Fill -999 with linear interpolation in daily air temp.
tasavg_i = tasavg[:, i]
time_ = np.arange(len(tasavg_i))
indx = np.where(tasavg_i != -999)[0]
tasavg_i = np.interp(time_, time_[indx], tasavg_i[indx])
if not osp.exists(d4fname):
save_precip_to_HELP(d4fname, years, precip_i, city)
if not osp.exists(d7fname):
save_airtemp_to_HELP(d7fname, years, tasavg_i, city)
print('done')
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D4'] = d4_conn_tbl
self.connect_tables['D7'] = d7_conn_tbl
self._save_connect_tables()
print('done')
def run_help_for(self, path_outfile=None, cellnames=None, tfsoil=0):
"""
Run help for the cells listed in cellnames and save the result in
an hdf5 file.
"""
        # Convert from Celsius to Fahrenheit
tfsoil = (tfsoil * 1.8) + 32
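        # Worked example (the numbers are hypothetical): tfsoil = 0 degC maps
        # to 32 degF and tfsoil = 10 degC to 50 degF, the unit in which the
        # threshold is forwarded to run_help_allcells below.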
tempdir = osp.join(self.inputdir, ".temp")
if not osp.exists(tempdir):
os.makedirs(tempdir)
run_cellnames = self.get_run_cellnames(cellnames)
cellparams = {}
for cellname in run_cellnames:
fpath_d4 = self.connect_tables['D4'][cellname]
fpath_d7 = self.connect_tables['D7'][cellname]
fpath_d13 = self.connect_tables['D13'][cellname]
fpath_d10 = self.connect_tables['D10'][cellname]
fpath_d11 = self.connect_tables['D11'][cellname]
fpath_out = osp.abspath(osp.join(tempdir, str(cellname) + '.OUT'))
daily_out = 0
monthly_out = 1
yearly_out = 0
summary_out = 0
unit_system = 2 # IP if 1 else SI
simu_nyear = self.year_range[1] - self.year_range[0] + 1
cellparams[cellname] = (fpath_d4, fpath_d7, fpath_d13, fpath_d11,
fpath_d10, fpath_out, daily_out,
monthly_out, yearly_out, summary_out,
unit_system, simu_nyear, tfsoil)
output = run_help_allcells(cellparams)
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
def calc_surf_water_cells(self, evp_surf, path_netcdf_dir,
path_outfile=None, cellnames=None):
cellnames = self.get_water_cellnames(cellnames)
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
N = len(cellnames)
lat_indx = np.empty(N).astype(int)
lon_indx = np.empty(N).astype(int)
for i, cellname in enumerate(cellnames):
lat_indx[i], lon_indx[i] = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
year_range = np.arange(
self.year_range[0], self.year_range[1] + 1).astype(int)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_indx, year_range)
# Fill -999 with 0 in daily precip.
precip[precip == -999] = 0
nyr = len(year_range)
output = {}
for i, cellname in enumerate(cellnames):
data = {}
data['years'] = year_range
data['rain'] = np.zeros(nyr)
data['evapo'] = np.zeros(nyr) + evp_surf
data['runoff'] = np.zeros(nyr)
for k, year in enumerate(year_range):
indx = np.where(years == year)[0]
data['rain'][k] = np.sum(precip[indx, i])
data['runoff'][k] = data['rain'][k] - evp_surf
output[cellname] = data
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
# # For cells for which the context is 2, convert recharge and deep
# # subrunoff into superfical subrunoff.
# cellnames_con_2 = cellnames[self.grid[fcon] == 2].tolist()
# for cellname in cellnames_con_2:
# output[cellname]['subrun1'] += output[cellname]['subrun2']
# output[cellname]['subrun1'] += output[cellname]['recharge']
# output[cellname]['subrun2'][:] = 0
# output[cellname]['recharge'][:] = 0
# # For cells for which the context is 3, convert recharge into
# # deep runoff.
# cellnames_con_3 = cellnames[self.grid[fcon] == 3].tolist()
# for cellname in cellnames_con_3:
# output[cellname]['subrun2'] += output[cellname]['recharge']
# output[cellname]['recharge'][:] = 0
    # # Compute water budget for cells for which the context is 0.
# cellnames_con_2 = cellnames[self.grid[fcon] == 0].tolist()
# # meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
# # for cellname in cellnames_run0:
# Save the result to an hdf5 file.
# ---- Utilities
def get_water_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are considered
to be in a surface water area.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells for which context is 0.
cellnames = self.grid['cid'][cellnames][self.grid['context'] == 0]
return cellnames.tolist()
def get_run_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are in the grid
and for which HELP can be run.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells that are going to be run in HELP because we
# don't need the D4 or D7 input files for those that aren't.
cellnames = self.grid['cid'][cellnames][self.grid['run'] == 1].tolist()
return cellnames
def get_latlon_for_cellnames(self, cells):
"""
        Return a numpy array with the latitudes and longitudes of the provided
        cell cids. Latitude and longitude for cids that are missing from
the grid are set to nan.
"""
lat = np.array(self.grid['lat_dd'].reindex(cells).tolist())
lon = np.array(self.grid['lon_dd'].reindex(cells).tolist())
return lat, lon
class NetCDFMeteoManager(object):
def __init__(self, dirpath_netcdf):
super(NetCDFMeteoManager, self).__init__()
self.dirpath_netcdf = dirpath_netcdf
self.lat = []
self.lon = []
self.setup_ncfile_list()
self.setup_latlon_grid()
def setup_ncfile_list(self):
"""Read all the available netCDF files in dirpath_netcdf."""
self.ncfilelist = []
for file in os.listdir(self.dirpath_netcdf):
if file.endswith('.nc'):
self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))
def setup_latlon_grid(self):
if self.ncfilelist:
netcdf_dset = netCDF4.Dataset(self.ncfilelist[0], 'r+')
self.lat = np.array(netcdf_dset['lat'])
self.lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
def get_idx_from_latlon(self, latitudes, longitudes, unique=False):
"""
Get the i and j indexes of the grid meshes from a list of latitude
and longitude coordinates. If unique is True, only the unique pairs of
i and j indexes will be returned.
"""
try:
lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]
lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]
if unique:
ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})
lat_idx = ijdx[:, 0].tolist()
lon_idx = ijdx[:, 1].tolist()
except TypeError:
lat_idx = np.argmin(np.abs(self.lat - latitudes))
lon_idx = np.argmin(np.abs(self.lon - longitudes))
return lat_idx, lon_idx
def get_data_from_latlon(self, latitudes, longitudes, years):
"""
        Return the daily average air temperature, the daily precipitation and
        the corresponding years for the given latitude and longitude coordinates.
"""
lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)
return self.get_data_from_idx(lat_idx, lon_idx, years)
def get_data_from_idx(self, lat_idx, lon_idx, years):
try:
len(lat_idx)
except TypeError:
lat_idx, lon_idx = [lat_idx], [lon_idx]
tasmax_stacks = []
tasmin_stacks = []
precip_stacks = []
years_stack = []
for year in years:
print('\rFetching daily weather data for year %d...' % year,
end=' ')
filename = osp.join(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
tasmax_stacks.append(
np.array(netcdf_dset['tasmax'])[:, lat_idx, lon_idx])
tasmin_stacks.append(
np.array(netcdf_dset['tasmin'])[:, lat_idx, lon_idx])
precip_stacks.append(
np.array(netcdf_dset['pr'])[:, lat_idx, lon_idx])
years_stack.append(
np.zeros(len(precip_stacks[-1][:])).astype(int) + year)
netcdf_dset.close()
print('done')
tasmax = np.vstack(tasmax_stacks)
tasmin = np.vstack(tasmin_stacks)
precip = np.vstack(precip_stacks)
years = np.hstack(years_stack)
return (tasmax + tasmin)/2, precip, years
def load_grid_from_csv(path_togrid):
"""
    Load the csv that contains the information required to evaluate regional
groundwater recharge with HELP.
"""
print('Reading HELP grid from csv...', end=' ')
grid = pd.read_csv(path_togrid)
print('done')
fname = osp.basename(path_togrid)
req_keys = ['cid', 'lat_dd', 'lon_dd', 'run']
for key in req_keys:
if key not in grid.keys():
raise KeyError("No attribute '%s' found in %s" % (key, fname))
# Make sure that cid is a str.
grid['cid'] = np.array(grid['cid']).astype(str)
# Set 'cid' as the index of the dataframe.
grid.set_index(['cid'], drop=False, inplace=True)
return grid
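# Hedged usage sketch of the workflow implemented above (the paths, file names
# and year range are made up, not taken from a real case):
#   manager = HELPManager('help_example', year_range=(2000, 2010),
#                         path_togrid='input/grid.csv')
#   manager.generate_d13_from_cweeds('solrad.D13', 'station.WY2', 'station.WY3')
#   manager.generate_d10d11_input_files()
#   manager.generate_d4d7_from_MDELCC_grid('input/daily_weather_netcdf')
#   output = manager.run_help_for('output/help_results.out')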
|
[
"pyhelp.weather_reader.save_airtemp_to_HELP",
"pandas.read_csv",
"numpy.hstack",
"numpy.array",
"pyhelp.processing.run_help_allcells",
"numpy.save",
"numpy.arange",
"os.path.exists",
"os.listdir",
"numpy.where",
"netCDF4.Dataset",
"pyhelp.weather_reader.read_cweeds_file",
"numpy.max",
"numpy.empty",
"numpy.vstack",
"numpy.min",
"pyhelp.weather_reader.save_solrad_to_HELP",
"pyhelp.preprocessing.write_d10d11_allcells",
"numpy.abs",
"pyhelp.weather_reader.save_precip_to_HELP",
"pyhelp.weather_reader.join_daily_cweeds_wy2_and_wy3",
"numpy.interp",
"numpy.unique",
"os.makedirs",
"pyhelp.preprocessing.format_d10d11_inputs",
"os.path.join",
"os.getcwd",
"os.chdir",
"numpy.sum",
"numpy.zeros",
"os.path.basename",
"numpy.load",
"pyhelp.utils.savedata_to_hdf5"
] |
[((18515, 18539), 'pandas.read_csv', 'pd.read_csv', (['path_togrid'], {}), '(path_togrid)\n', (18526, 18539), True, 'import pandas as pd\n'), ((18571, 18596), 'os.path.basename', 'osp.basename', (['path_togrid'], {}), '(path_togrid)\n', (18583, 18596), True, 'import os.path as osp\n'), ((1587, 1629), 'os.path.join', 'osp.join', (['self.workdir', '"""help_input_files"""'], {}), "(self.workdir, 'help_input_files')\n", (1595, 1629), True, 'import os.path as osp\n'), ((1842, 1853), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1851, 1853), False, 'import os\n'), ((2024, 2041), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (2032, 2041), False, 'import os\n'), ((2134, 2176), 'os.path.join', 'osp.join', (['self.inputdir', 'FNAME_CONN_TABLES'], {}), '(self.inputdir, FNAME_CONN_TABLES)\n', (2142, 2176), True, 'import os.path as osp\n'), ((2277, 2313), 'os.path.exists', 'osp.exists', (['self.path_connect_tables'], {}), '(self.path_connect_tables)\n', (2287, 2313), True, 'import os.path as osp\n'), ((2559, 2613), 'numpy.save', 'np.save', (['self.path_connect_tables', 'self.connect_tables'], {}), '(self.path_connect_tables, self.connect_tables)\n', (2566, 2613), True, 'import numpy as np\n'), ((3216, 3249), 'os.path.join', 'osp.join', (['self.inputdir', 'd13fname'], {}), '(self.inputdir, d13fname)\n', (3224, 3249), True, 'import os.path as osp\n'), ((3535, 3587), 'pyhelp.weather_reader.read_cweeds_file', 'read_cweeds_file', (['fpath_cweed2'], {'format_to_daily': '(True)'}), '(fpath_cweed2, format_to_daily=True)\n', (3551, 3587), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3608, 3660), 'pyhelp.weather_reader.read_cweeds_file', 'read_cweeds_file', (['fpath_cweed3'], {'format_to_daily': '(True)'}), '(fpath_cweed3, format_to_daily=True)\n', (3624, 3660), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3679, 3730), 'pyhelp.weather_reader.join_daily_cweeds_wy2_and_wy3', 'join_daily_cweeds_wy2_and_wy3', (['daily_wy2', 'daily_wy3'], {}), '(daily_wy2, daily_wy3)\n', (3708, 3730), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3979, 4132), 'pyhelp.weather_reader.save_solrad_to_HELP', 'save_solrad_to_HELP', (['d13fpath', "wy23_df['Years'][indexes]", "wy23_df['Irradiance'][indexes]", '"""CAN_QC_MONTREAL-INTL-A_7025251"""', "wy23_df['Latitude']"], {}), "(d13fpath, wy23_df['Years'][indexes], wy23_df[\n 'Irradiance'][indexes], 'CAN_QC_MONTREAL-INTL-A_7025251', wy23_df[\n 'Latitude'])\n", (3998, 4132), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((5115, 5160), 'os.path.join', 'osp.join', (['self.inputdir', '"""d10d11_input_files"""'], {}), "(self.inputdir, 'd10d11_input_files')\n", (5123, 5160), True, 'import os.path as osp\n'), ((5474, 5536), 'pyhelp.preprocessing.format_d10d11_inputs', 'format_d10d11_inputs', (['self.grid', 'cellnames', 'sf_edepth', 'sf_ulai'], {}), '(self.grid, cellnames, sf_edepth, sf_ulai)\n', (5494, 5536), False, 'from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs\n'), ((5668, 5724), 'pyhelp.preprocessing.write_d10d11_allcells', 'write_d10d11_allcells', (['d10d11_inputdir', 'd10data', 'd11data'], {}), 
'(d10d11_inputdir, d10data, d11data)\n', (5689, 5724), False, 'from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs\n'), ((6236, 6279), 'os.path.join', 'osp.join', (['self.inputdir', '"""d4d7_input_files"""'], {}), "(self.inputdir, 'd4d7_input_files')\n", (6244, 6279), True, 'import os.path as osp\n'), ((7524, 7547), 'numpy.unique', 'np.unique', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7533, 7547), True, 'import numpy as np\n'), ((9483, 9515), 'os.path.join', 'osp.join', (['self.inputdir', '""".temp"""'], {}), "(self.inputdir, '.temp')\n", (9491, 9515), True, 'import os.path as osp\n'), ((10623, 10652), 'pyhelp.processing.run_help_allcells', 'run_help_allcells', (['cellparams'], {}), '(cellparams)\n', (10640, 10652), False, 'from pyhelp.processing import run_help_allcells\n'), ((15478, 15509), 'os.listdir', 'os.listdir', (['self.dirpath_netcdf'], {}), '(self.dirpath_netcdf)\n', (15488, 15509), False, 'import os\n'), ((18091, 18115), 'numpy.vstack', 'np.vstack', (['tasmax_stacks'], {}), '(tasmax_stacks)\n', (18100, 18115), True, 'import numpy as np\n'), ((18133, 18157), 'numpy.vstack', 'np.vstack', (['tasmin_stacks'], {}), '(tasmin_stacks)\n', (18142, 18157), True, 'import numpy as np\n'), ((18175, 18199), 'numpy.vstack', 'np.vstack', (['precip_stacks'], {}), '(precip_stacks)\n', (18184, 18199), True, 'import numpy as np\n'), ((18216, 18238), 'numpy.hstack', 'np.hstack', (['years_stack'], {}), '(years_stack)\n', (18225, 18238), True, 'import numpy as np\n'), ((1645, 1665), 'os.path.exists', 'osp.exists', (['inputdir'], {}), '(inputdir)\n', (1655, 1665), True, 'import os.path as osp\n'), ((1679, 1700), 'os.makedirs', 'os.makedirs', (['inputdir'], {}), '(inputdir)\n', (1690, 1700), False, 'import os\n'), ((1962, 1981), 'os.path.exists', 'osp.exists', (['dirname'], {}), '(dirname)\n', (1972, 1981), True, 'import os.path as osp\n'), ((1995, 2015), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (2006, 2015), False, 'import os\n'), ((3750, 3847), 'numpy.where', 'np.where', (["((wy23_df['Years'] >= self.year_range[0]) & (wy23_df['Years'] <= self.\n year_range[1]))"], {}), "((wy23_df['Years'] >= self.year_range[0]) & (wy23_df['Years'] <=\n self.year_range[1]))\n", (3758, 3847), True, 'import numpy as np\n'), ((4290, 4314), 'numpy.max', 'np.max', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4296, 4314), True, 'import numpy as np\n'), ((4471, 4495), 'numpy.min', 'np.min', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4477, 4495), True, 'import numpy as np\n'), ((5176, 5203), 'os.path.exists', 'osp.exists', (['d10d11_inputdir'], {}), '(d10d11_inputdir)\n', (5186, 5203), True, 'import os.path as osp\n'), ((5217, 5245), 'os.makedirs', 'os.makedirs', (['d10d11_inputdir'], {}), '(d10d11_inputdir)\n', (5228, 5245), False, 'import os\n'), ((6295, 6320), 'os.path.exists', 'osp.exists', (['d4d7_inputdir'], {}), '(d4d7_inputdir)\n', (6305, 6320), True, 'import os.path as osp\n'), ((6334, 6360), 'os.makedirs', 'os.makedirs', (['d4d7_inputdir'], {}), '(d4d7_inputdir)\n', (6345, 6360), False, 'import os\n'), ((7086, 7146), 'os.path.join', 'osp.join', (['d4d7_inputdir', "('%03d_%03d.D4' % (lat_idx, lon_idx))"], {}), "(d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))\n", (7094, 7146), True, 'import os.path as osp\n'), ((7186, 7246), 'os.path.join', 'osp.join', (['d4d7_inputdir', "('%03d_%03d.D7' % (lat_idx, lon_idx))"], {}), "(d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))\n", (7194, 7246), True, 'import os.path as osp\n'), ((8646, 8691), 
'numpy.interp', 'np.interp', (['time_', 'time_[indx]', 'tasavg_i[indx]'], {}), '(time_, time_[indx], tasavg_i[indx])\n', (8655, 8691), True, 'import numpy as np\n'), ((9531, 9550), 'os.path.exists', 'osp.exists', (['tempdir'], {}), '(tempdir)\n', (9541, 9550), True, 'import os.path as osp\n'), ((9564, 9584), 'os.makedirs', 'os.makedirs', (['tempdir'], {}), '(tempdir)\n', (9575, 9584), False, 'import os\n'), ((10691, 10729), 'pyhelp.utils.savedata_to_hdf5', 'savedata_to_hdf5', (['output', 'path_outfile'], {}), '(output, path_outfile)\n', (10707, 10729), False, 'from pyhelp.utils import savedata_to_hdf5\n'), ((11819, 11832), 'numpy.zeros', 'np.zeros', (['nyr'], {}), '(nyr)\n', (11827, 11832), True, 'import numpy as np\n'), ((11915, 11928), 'numpy.zeros', 'np.zeros', (['nyr'], {}), '(nyr)\n', (11923, 11928), True, 'import numpy as np\n'), ((12224, 12262), 'pyhelp.utils.savedata_to_hdf5', 'savedata_to_hdf5', (['output', 'path_outfile'], {}), '(output, path_outfile)\n', (12240, 12262), False, 'from pyhelp.utils import savedata_to_hdf5\n'), ((15712, 15753), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.ncfilelist[0]', '"""r+"""'], {}), "(self.ncfilelist[0], 'r+')\n", (15727, 15753), False, 'import netCDF4\n'), ((15777, 15805), 'numpy.array', 'np.array', (["netcdf_dset['lat']"], {}), "(netcdf_dset['lat'])\n", (15785, 15805), True, 'import numpy as np\n'), ((15829, 15857), 'numpy.array', 'np.array', (["netcdf_dset['lon']"], {}), "(netcdf_dset['lon'])\n", (15837, 15857), True, 'import numpy as np\n'), ((17494, 17546), 'os.path.join', 'osp.join', (['self.dirpath_netcdf', "('GCQ_v2_%d.nc' % year)"], {}), "(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)\n", (17502, 17546), True, 'import os.path as osp\n'), ((17573, 17604), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename', '"""r+"""'], {}), "(filename, 'r+')\n", (17588, 17604), False, 'import netCDF4\n'), ((18836, 18857), 'numpy.array', 'np.array', (["grid['cid']"], {}), "(grid['cid'])\n", (18844, 18857), True, 'import numpy as np\n'), ((8593, 8619), 'numpy.where', 'np.where', (['(tasavg_i != -999)'], {}), '(tasavg_i != -999)\n', (8601, 8619), True, 'import numpy as np\n'), ((8712, 8731), 'os.path.exists', 'osp.exists', (['d4fname'], {}), '(d4fname)\n', (8722, 8731), True, 'import os.path as osp\n'), ((8749, 8800), 'pyhelp.weather_reader.save_precip_to_HELP', 'save_precip_to_HELP', (['d4fname', 'years', 'precip_i', 'city'], {}), '(d4fname, years, precip_i, city)\n', (8768, 8800), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((8820, 8839), 'os.path.exists', 'osp.exists', (['d7fname'], {}), '(d7fname)\n', (8830, 8839), True, 'import os.path as osp\n'), ((8857, 8909), 'pyhelp.weather_reader.save_airtemp_to_HELP', 'save_airtemp_to_HELP', (['d7fname', 'years', 'tasavg_i', 'city'], {}), '(d7fname, years, tasavg_i, city)\n', (8877, 8909), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((11113, 11124), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (11121, 11124), True, 'import numpy as np\n'), ((11156, 11167), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (11164, 11167), True, 'import numpy as np\n'), ((11363, 11416), 'numpy.arange', 'np.arange', (['self.year_range[0]', '(self.year_range[1] + 1)'], {}), '(self.year_range[0], self.year_range[1] + 1)\n', (11372, 11416), True, 'import numpy as np\n'), ((11861, 11874), 'numpy.zeros', 
'np.zeros', (['nyr'], {}), '(nyr)\n', (11869, 11874), True, 'import numpy as np\n'), ((12063, 12086), 'numpy.sum', 'np.sum', (['precip[indx, i]'], {}), '(precip[indx, i])\n', (12069, 12086), True, 'import numpy as np\n'), ((2349, 2382), 'numpy.load', 'np.load', (['self.path_connect_tables'], {}), '(self.path_connect_tables)\n', (2356, 2382), True, 'import numpy as np\n'), ((4413, 4437), 'numpy.max', 'np.max', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4419, 4437), True, 'import numpy as np\n'), ((4595, 4619), 'numpy.min', 'np.min', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4601, 4619), True, 'import numpy as np\n'), ((12002, 12025), 'numpy.where', 'np.where', (['(years == year)'], {}), '(years == year)\n', (12010, 12025), True, 'import numpy as np\n'), ((15587, 15622), 'os.path.join', 'osp.join', (['self.dirpath_netcdf', 'file'], {}), '(self.dirpath_netcdf, file)\n', (15595, 15622), True, 'import os.path as osp\n'), ((16229, 16251), 'numpy.abs', 'np.abs', (['(self.lat - lat)'], {}), '(self.lat - lat)\n', (16235, 16251), True, 'import numpy as np\n'), ((16308, 16330), 'numpy.abs', 'np.abs', (['(self.lon - lon)'], {}), '(self.lon - lon)\n', (16314, 16330), True, 'import numpy as np\n'), ((16605, 16633), 'numpy.abs', 'np.abs', (['(self.lat - latitudes)'], {}), '(self.lat - latitudes)\n', (16611, 16633), True, 'import numpy as np\n'), ((16667, 16696), 'numpy.abs', 'np.abs', (['(self.lon - longitudes)'], {}), '(self.lon - longitudes)\n', (16673, 16696), True, 'import numpy as np\n'), ((17656, 17687), 'numpy.array', 'np.array', (["netcdf_dset['tasmax']"], {}), "(netcdf_dset['tasmax'])\n", (17664, 17687), True, 'import numpy as np\n'), ((17760, 17791), 'numpy.array', 'np.array', (["netcdf_dset['tasmin']"], {}), "(netcdf_dset['tasmin'])\n", (17768, 17791), True, 'import numpy as np\n'), ((17864, 17891), 'numpy.array', 'np.array', (["netcdf_dset['pr']"], {}), "(netcdf_dset['pr'])\n", (17872, 17891), True, 'import numpy as np\n')]
|
from decimal import Decimal
import pytest
from bson import Decimal128
from momapper import MappedClass, Field
from momapper.mongodb.collection import MappedCollection
from momapper.types import (
DecimalType,
ValidationError,
IntType,
FloatType,
StringType,
ByteType,
BoolType,
ListType,
DictType,
)
@pytest.mark.parametrize("value, exception", [(0, None), (object(), ValidationError)])
def test_int_type(mongo_client, value, exception):
class DocWithInt(MappedClass):
value = Field("value", type_=IntType)
if exception:
with pytest.raises(exception):
DocWithInt(value=value)
else:
doc = DocWithInt(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithInt
)
collection.insert_one(doc)
@pytest.mark.parametrize("value, exception", [(0.0, None), (object(), ValidationError)])
def test_float_type(mongo_client, value, exception):
class DocWithFloat(MappedClass):
value = Field("value", type_=FloatType)
if exception:
with pytest.raises(exception):
DocWithFloat(value=value)
else:
doc = DocWithFloat(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithFloat
)
collection.insert_one(doc)
@pytest.mark.parametrize("amount", [0, 0.0, Decimal("10")])
def test_decimal_type(mongo_client, amount):
class DocWithDecimal(MappedClass):
amount = Field("amount", type_=DecimalType)
doc = DocWithDecimal(amount=amount)
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimal
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
def test_decimal_type_if_missing(mongo_client):
class DocWithDecimalRequired(MappedClass):
amount = Field(
"amount", type_=DecimalType, required=True, if_missing=Decimal(5)
)
doc = DocWithDecimalRequired()
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimalRequired
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
@pytest.mark.parametrize(
"value, exception", [("value", None), (object(), ValidationError)]
)
def test_string_type(mongo_client, value, exception):
class DocWithString(MappedClass):
value = Field("value", type_=StringType)
if exception:
with pytest.raises(exception):
DocWithString(value=value)
else:
doc = DocWithString(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithString
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(b"value", None), (object(), ValidationError)]
)
def test_bytes_type(mongo_client, value, exception):
class DocWithBytes(MappedClass):
value = Field("value", type_=ByteType)
if exception:
with pytest.raises(exception):
DocWithBytes(value=value)
else:
doc = DocWithBytes(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBytes
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(False, None), (True, None), (object(), ValidationError)]
)
def test_bool_type(mongo_client, value, exception):
class DocWithBool(MappedClass):
value = Field("value", type_=BoolType)
if exception:
with pytest.raises(exception):
DocWithBool(value=value)
else:
doc = DocWithBool(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBool
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(["value"], None), (object(), ValidationError)]
)
def test_list_type(mongo_client, value, exception):
class DocWithList(MappedClass):
value = Field("value", type_=ListType)
if exception:
with pytest.raises(exception):
DocWithList(value=value)
else:
doc = DocWithList(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithList
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [({"value": "value"}, None), (object(), ValidationError)]
)
def test_dict_type(mongo_client, value, exception):
class DocWithDict(MappedClass):
value = Field("value", type_=DictType)
if exception:
with pytest.raises(exception):
DocWithDict(value=value)
else:
doc = DocWithDict(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDict
)
collection.insert_one(doc)
|
[
"momapper.mongodb.collection.MappedCollection",
"pytest.raises",
"decimal.Decimal",
"momapper.Field"
] |
[((1907, 1986), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithDecimal'}), '(mongo_client.db, mongo_client.collection, impl=DocWithDecimal)\n', (1923, 1986), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((2636, 2728), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithDecimalRequired'}), '(mongo_client.db, mongo_client.collection, impl=\n DocWithDecimalRequired)\n', (2652, 2728), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((529, 558), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'IntType'}), "('value', type_=IntType)\n", (534, 558), False, 'from momapper import MappedClass, Field\n'), ((803, 878), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithInt'}), '(mongo_client.db, mongo_client.collection, impl=DocWithInt)\n', (819, 878), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((1133, 1164), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'FloatType'}), "('value', type_=FloatType)\n", (1138, 1164), False, 'from momapper import MappedClass, Field\n'), ((1413, 1490), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithFloat'}), '(mongo_client.db, mongo_client.collection, impl=DocWithFloat)\n', (1429, 1490), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((1711, 1745), 'momapper.Field', 'Field', (['"""amount"""'], {'type_': 'DecimalType'}), "('amount', type_=DecimalType)\n", (1716, 1745), False, 'from momapper import MappedClass, Field\n'), ((1594, 1607), 'decimal.Decimal', 'Decimal', (['"""10"""'], {}), "('10')\n", (1601, 1607), False, 'from decimal import Decimal\n'), ((3217, 3249), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'StringType'}), "('value', type_=StringType)\n", (3222, 3249), False, 'from momapper import MappedClass, Field\n'), ((3500, 3578), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithString'}), '(mongo_client.db, mongo_client.collection, impl=DocWithString)\n', (3516, 3578), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((3844, 3874), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'ByteType'}), "('value', type_=ByteType)\n", (3849, 3874), False, 'from momapper import MappedClass, Field\n'), ((4123, 4200), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithBytes'}), '(mongo_client.db, mongo_client.collection, impl=DocWithBytes)\n', (4139, 4200), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((4475, 4505), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'BoolType'}), "('value', type_=BoolType)\n", (4480, 4505), False, 'from momapper import MappedClass, Field\n'), ((4752, 4828), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithBool'}), '(mongo_client.db, mongo_client.collection, impl=DocWithBool)\n', (4768, 4828), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((5093, 5123), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'ListType'}), "('value', type_=ListType)\n", (5098, 5123), 
False, 'from momapper import MappedClass, Field\n'), ((5370, 5446), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithList'}), '(mongo_client.db, mongo_client.collection, impl=DocWithList)\n', (5386, 5446), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((5720, 5750), 'momapper.Field', 'Field', (['"""value"""'], {'type_': 'DictType'}), "('value', type_=DictType)\n", (5725, 5750), False, 'from momapper import MappedClass, Field\n'), ((5997, 6073), 'momapper.mongodb.collection.MappedCollection', 'MappedCollection', (['mongo_client.db', 'mongo_client.collection'], {'impl': 'DocWithDict'}), '(mongo_client.db, mongo_client.collection, impl=DocWithDict)\n', (6013, 6073), False, 'from momapper.mongodb.collection import MappedCollection\n'), ((591, 615), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (604, 615), False, 'import pytest\n'), ((1197, 1221), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (1210, 1221), False, 'import pytest\n'), ((3282, 3306), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (3295, 3306), False, 'import pytest\n'), ((3907, 3931), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (3920, 3931), False, 'import pytest\n'), ((4538, 4562), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (4551, 4562), False, 'import pytest\n'), ((5156, 5180), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (5169, 5180), False, 'import pytest\n'), ((5783, 5807), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (5796, 5807), False, 'import pytest\n'), ((2459, 2469), 'decimal.Decimal', 'Decimal', (['(5)'], {}), '(5)\n', (2466, 2469), False, 'from decimal import Decimal\n')]
|
import ctypes as ct
import time
import copy
import numpy as np
import sharpy.aero.utils.mapping as mapping
import sharpy.utils.cout_utils as cout
import sharpy.utils.solver_interface as solver_interface
import sharpy.utils.controller_interface as controller_interface
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
import sharpy.structure.utils.xbeamlib as xbeam
import sharpy.utils.exceptions as exc
@solver
class DynamicCoupled(BaseSolver):
"""
The ``DynamicCoupled`` solver couples the aerodynamic and structural solvers of choice to march forward in time
the aeroelastic system's solution.
Using the ``DynamicCoupled`` solver requires that an instance of the ``StaticCoupled`` solver is called in the
SHARPy solution ``flow`` when defining the problem case.
"""
solver_id = 'DynamicCoupled'
solver_classification = 'Coupled'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Write status to screen'
settings_types['structural_solver'] = 'str'
settings_default['structural_solver'] = None
settings_description['structural_solver'] = 'Structural solver to use in the coupled simulation'
settings_types['structural_solver_settings'] = 'dict'
settings_default['structural_solver_settings'] = None
settings_description['structural_solver_settings'] = 'Dictionary of settings for the structural solver'
settings_types['aero_solver'] = 'str'
settings_default['aero_solver'] = None
settings_description['aero_solver'] = 'Aerodynamic solver to use in the coupled simulation'
settings_types['aero_solver_settings'] = 'dict'
settings_default['aero_solver_settings'] = None
settings_description['aero_solver_settings'] = 'Dictionary of settings for the aerodynamic solver'
settings_types['n_time_steps'] = 'int'
settings_default['n_time_steps'] = None
settings_description['n_time_steps'] = 'Number of time steps for the simulation'
settings_types['dt'] = 'float'
settings_default['dt'] = None
settings_description['dt'] = 'Time step'
settings_types['fsi_substeps'] = 'int'
settings_default['fsi_substeps'] = 70
settings_description['fsi_substeps'] = 'Max iterations in the FSI loop'
settings_types['fsi_tolerance'] = 'float'
settings_default['fsi_tolerance'] = 1e-5
settings_description['fsi_tolerance'] = 'Convergence threshold for the FSI loop'
settings_types['structural_substeps'] = 'int'
settings_default['structural_substeps'] = 0 # 0 is normal coupled sim.
settings_description['structural_substeps'] = 'Number of extra structural time steps per aero time step. 0 is a fully coupled simulation.'
settings_types['relaxation_factor'] = 'float'
settings_default['relaxation_factor'] = 0.2
settings_description['relaxation_factor'] = 'Relaxation parameter in the FSI iteration. 0 is no relaxation and -> 1 is very relaxed'
settings_types['final_relaxation_factor'] = 'float'
settings_default['final_relaxation_factor'] = 0.0
settings_description['final_relaxation_factor'] = 'Relaxation factor reached in ``relaxation_steps`` with ``dynamic_relaxation`` on'
settings_types['minimum_steps'] = 'int'
settings_default['minimum_steps'] = 3
settings_description['minimum_steps'] = 'Number of minimum FSI iterations before convergence'
settings_types['relaxation_steps'] = 'int'
settings_default['relaxation_steps'] = 100
settings_description['relaxation_steps'] = 'Length of the relaxation factor ramp between ``relaxation_factor`` and ``final_relaxation_factor`` with ``dynamic_relaxation`` on'
settings_types['dynamic_relaxation'] = 'bool'
settings_default['dynamic_relaxation'] = False
settings_description['dynamic_relaxation'] = 'Controls if relaxation factor is modified during the FSI iteration process'
settings_types['postprocessors'] = 'list(str)'
settings_default['postprocessors'] = list()
settings_description['postprocessors'] = 'List of the postprocessors to run at the end of every time step'
settings_types['postprocessors_settings'] = 'dict'
settings_default['postprocessors_settings'] = dict()
    settings_description['postprocessors_settings'] = 'Dictionary with the applicable settings for every ``postprocessor``. Every ``postprocessor`` needs its entry, even if empty'
settings_types['controller_id'] = 'dict'
settings_default['controller_id'] = dict()
settings_description['controller_id'] = 'Dictionary of id of every controller (key) and its type (value)'
settings_types['controller_settings'] = 'dict'
settings_default['controller_settings'] = dict()
settings_description['controller_settings'] = 'Dictionary with settings (value) of every controller id (key)'
settings_types['cleanup_previous_solution'] = 'bool'
settings_default['cleanup_previous_solution'] = False
settings_description['cleanup_previous_solution'] = 'Controls if previous ``timestep_info`` arrays are reset before running the solver'
settings_types['include_unsteady_force_contribution'] = 'bool'
settings_default['include_unsteady_force_contribution'] = False
settings_description['include_unsteady_force_contribution'] = 'If on, added mass contribution is added to the forces. This depends on the time derivative of the bound circulation. Check ``filter_gamma_dot`` in the aero solver'
settings_types['steps_without_unsteady_force'] = 'int'
settings_default['steps_without_unsteady_force'] = 0
    settings_description['steps_without_unsteady_force'] = 'Number of initial timesteps that don\'t include unsteady force contributions. This avoids oscillations due to not perfectly trimmed initial conditions'
settings_types['pseudosteps_ramp_unsteady_force'] = 'int'
settings_default['pseudosteps_ramp_unsteady_force'] = 0
settings_description['pseudosteps_ramp_unsteady_force'] = 'Length of the ramp with which unsteady force contribution is introduced every time step during the FSI iteration process'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.data = None
self.settings = None
self.structural_solver = None
self.aero_solver = None
self.print_info = False
self.res = 0.0
self.res_dqdt = 0.0
self.res_dqddt = 0.0
self.previous_force = None
self.dt = 0.
self.substep_dt = 0.
self.initial_n_substeps = None
self.predictor = False
self.residual_table = None
self.postprocessors = dict()
self.with_postprocessors = False
self.controllers = None
self.time_aero = 0.
self.time_struc = 0.
def get_g(self):
"""
Getter for ``g``, the gravity value
"""
return self.structural_solver.settings['gravity'].value
def set_g(self, new_g):
"""
Setter for ``g``, the gravity value
"""
self.structural_solver.settings['gravity'] = ct.c_double(new_g)
def get_rho(self):
"""
Getter for ``rho``, the density value
"""
return self.aero_solver.settings['rho'].value
def set_rho(self, new_rho):
"""
Setter for ``rho``, the density value
"""
self.aero_solver.settings['rho'] = ct.c_double(new_rho)
def initialise(self, data, custom_settings=None):
"""
Controls the initialisation process of the solver, including processing
the settings and initialising the aero and structural solvers, postprocessors
and controllers.
"""
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings,
self.settings_types,
self.settings_default)
self.original_settings = copy.deepcopy(self.settings)
self.dt = self.settings['dt']
self.substep_dt = (
self.dt.value/(self.settings['structural_substeps'].value + 1))
self.initial_n_substeps = self.settings['structural_substeps'].value
self.print_info = self.settings['print_info']
if self.settings['cleanup_previous_solution']:
# if there's data in timestep_info[>0], copy the last one to
# timestep_info[0] and remove the rest
self.cleanup_timestep_info()
self.structural_solver = solver_interface.initialise_solver(
self.settings['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
self.aero_solver = solver_interface.initialise_solver(
self.settings['aero_solver'])
self.aero_solver.initialise(self.structural_solver.data,
self.settings['aero_solver_settings'])
self.data = self.aero_solver.data
# initialise postprocessors
self.postprocessors = dict()
if self.settings['postprocessors']:
self.with_postprocessors = True
for postproc in self.settings['postprocessors']:
self.postprocessors[postproc] = solver_interface.initialise_solver(
postproc)
self.postprocessors[postproc].initialise(
self.data, self.settings['postprocessors_settings'][postproc])
# initialise controllers
self.controllers = dict()
self.with_controllers = False
if self.settings['controller_id']:
self.with_controllers = True
for controller_id, controller_type in self.settings['controller_id'].items():
self.controllers[controller_id] = (
controller_interface.initialise_controller(controller_type))
self.controllers[controller_id].initialise(
self.settings['controller_settings'][controller_id],
controller_id)
# print information header
if self.print_info:
self.residual_table = cout.TablePrinter(8, 12, ['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e'])
self.residual_table.field_length[0] = 5
self.residual_table.field_length[1] = 6
self.residual_table.field_length[2] = 4
self.residual_table.print_header(['ts', 't', 'iter', 'struc ratio', 'iter time', 'residual vel',
'FoR_vel(x)', 'FoR_vel(z)'])
def cleanup_timestep_info(self):
if max(len(self.data.aero.timestep_info), len(self.data.structure.timestep_info)) > 1:
# copy last info to first
self.data.aero.timestep_info[0] = self.data.aero.timestep_info[-1]
self.data.structure.timestep_info[0] = self.data.structure.timestep_info[-1]
# delete all the rest
while len(self.data.aero.timestep_info) - 1:
del self.data.aero.timestep_info[-1]
while len(self.data.structure.timestep_info) - 1:
del self.data.structure.timestep_info[-1]
self.data.ts = 0
def process_controller_output(self, controlled_state):
"""
        This function modifies the solver properties and parameters as
        requested by the controller.
        This keeps the main loop much cleaner, while allowing for flexibility.
        Please, if you add options in here, always handle the possibility of
        that specific option not being present, without the code complaining to
        the user.
        If possible, use the same key for the new setting as for the
        setting in the solver. For example, if you want to modify the
        `structural_substeps` variable in settings, use that key in the
        `info` dictionary.
        As a convention: a value of None resets the value to the initial
        one specified in settings, while a key that is not in the dict
        is ignored, so if any change was made before, it will stay there.
"""
try:
info = controlled_state['info']
except KeyError:
return controlled_state['structural'], controlled_state['aero']
# general copy-if-exists, restore if == None
for info_k, info_v in info.items():
if info_k in self.settings:
if info_v is not None:
self.settings[info_k] = info_v
else:
self.settings[info_k] = self.original_settings[info_k]
# specifics of every option
for info_k, info_v in info.items():
if info_k in self.settings:
if info_k == 'structural_substeps':
if info_v is not None:
self.substep_dt = (
self.settings['dt'].value/(
self.settings['structural_substeps'].value + 1))
if info_k == 'structural_solver':
if info_v is not None:
self.structural_solver = solver_interface.initialise_solver(
info['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
return controlled_state['structural'], controlled_state['aero']
def run(self):
"""
Run the time stepping procedure with controllers and postprocessors
included.
"""
# dynamic simulations start at tstep == 1, 0 is reserved for the initial state
for self.data.ts in range(
len(self.data.structure.timestep_info),
self.settings['n_time_steps'].value + len(self.data.structure.timestep_info)):
initial_time = time.perf_counter()
structural_kstep = self.data.structure.timestep_info[-1].copy()
aero_kstep = self.data.aero.timestep_info[-1].copy()
# Add the controller here
if self.with_controllers:
state = {'structural': structural_kstep,
'aero': aero_kstep}
for k, v in self.controllers.items():
state = v.control(self.data, state)
# this takes care of the changes in options for the solver
structural_kstep, aero_kstep = self.process_controller_output(
state)
self.time_aero = 0.0
self.time_struc = 0.0
# Copy the controlled states so that the interpolation does not
# destroy the previous information
controlled_structural_kstep = structural_kstep.copy()
controlled_aero_kstep = aero_kstep.copy()
k = 0
for k in range(self.settings['fsi_substeps'].value + 1):
if (k == self.settings['fsi_substeps'].value and
self.settings['fsi_substeps']):
cout.cout_wrap('The FSI solver did not converge!!!')
break
# generate new grid (already rotated)
aero_kstep = controlled_aero_kstep.copy()
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
# compute unsteady contribution
force_coeff = 0.0
unsteady_contribution = False
if self.settings['include_unsteady_force_contribution'].value:
if self.data.ts > self.settings['steps_without_unsteady_force'].value:
unsteady_contribution = True
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
force_coeff = k/self.settings['pseudosteps_ramp_unsteady_force'].value
else:
force_coeff = 1.
# run the solver
ini_time_aero = time.perf_counter()
self.data = self.aero_solver.run(aero_kstep,
structural_kstep,
convect_wake=True,
unsteady_contribution=unsteady_contribution)
self.time_aero += time.perf_counter() - ini_time_aero
previous_kstep = structural_kstep.copy()
structural_kstep = controlled_structural_kstep.copy()
                # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep,
aero_kstep)
self.map_forces(aero_kstep,
structural_kstep,
force_coeff)
# relaxation
relax_factor = self.relaxation_factor(k)
relax(self.data.structure,
structural_kstep,
previous_kstep,
relax_factor)
# check if nan anywhere.
# if yes, raise exception
if np.isnan(structural_kstep.steady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in steady_applied_forces!')
if np.isnan(structural_kstep.unsteady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in unsteady_applied_forces!')
copy_structural_kstep = structural_kstep.copy()
ini_time_struc = time.perf_counter()
for i_substep in range(
self.settings['structural_substeps'].value + 1):
# run structural solver
coeff = ((i_substep + 1)/
(self.settings['structural_substeps'].value + 1))
structural_kstep = self.interpolate_timesteps(
step0=self.data.structure.timestep_info[-1],
step1=copy_structural_kstep,
out_step=structural_kstep,
coeff=coeff)
self.data = self.structural_solver.run(
structural_step=structural_kstep,
dt=self.substep_dt)
self.time_struc += time.perf_counter() - ini_time_struc
# check convergence
if self.convergence(k,
structural_kstep,
previous_kstep):
# move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
break
            # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep, aero_kstep)
self.aero_solver.add_step()
self.data.aero.timestep_info[-1] = aero_kstep.copy()
self.structural_solver.add_step()
self.data.structure.timestep_info[-1] = structural_kstep.copy()
final_time = time.perf_counter()
if self.print_info:
self.residual_table.print_line([self.data.ts,
self.data.ts*self.dt.value,
k,
self.time_struc/(self.time_aero + self.time_struc),
final_time - initial_time,
np.log10(self.res_dqdt),
structural_kstep.for_vel[0],
structural_kstep.for_vel[2],
np.sum(structural_kstep.steady_applied_forces[:, 0]),
np.sum(structural_kstep.steady_applied_forces[:, 2])])
self.structural_solver.extract_resultants()
# run postprocessors
if self.with_postprocessors:
for postproc in self.postprocessors:
self.data = self.postprocessors[postproc].run(online=True)
if self.print_info:
cout.cout_wrap('...Finished', 1)
return self.data
def convergence(self, k, tstep, previous_tstep):
r"""
Check convergence in the FSI loop.
Convergence is determined as:
        .. math:: \epsilon_q^k = \frac{|| q^k - q^{k - 1} ||}{|| q^0 ||}
        .. math:: \epsilon_\dot{q}^k = \frac{|| \dot{q}^k - \dot{q}^{k - 1} ||}{|| \dot{q}^0 ||}
        The FSI loop is considered converged if :math:`\epsilon_q^k < \mathrm{FSI\ tolerance}` and :math:`\epsilon_\dot{q}^k < \mathrm{FSI\ tolerance}`
"""
# check for non-convergence
if not all(np.isfinite(tstep.q)):
import pdb
pdb.set_trace()
raise Exception(
'***Not converged! There is a NaN value in the forces!')
if not k:
# save the value of the vectors for normalising later
self.base_q = np.linalg.norm(tstep.q.copy())
self.base_dqdt = np.linalg.norm(tstep.dqdt.copy())
if self.base_dqdt == 0:
self.base_dqdt = 1.
return False
# relative residuals
self.res = (np.linalg.norm(tstep.q-
previous_tstep.q)/
self.base_q)
self.res_dqdt = (np.linalg.norm(tstep.dqdt-
previous_tstep.dqdt)/
self.base_dqdt)
# we don't want this to converge before introducing the gamma_dot forces!
if self.settings['include_unsteady_force_contribution'].value:
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
return False
# convergence
if k > self.settings['minimum_steps'].value - 1:
if self.res < self.settings['fsi_tolerance'].value:
if self.res_dqdt < self.settings['fsi_tolerance'].value:
return True
return False
def map_forces(self, aero_kstep, structural_kstep, unsteady_forces_coeff=1.0):
# set all forces to 0
structural_kstep.steady_applied_forces.fill(0.0)
structural_kstep.unsteady_applied_forces.fill(0.0)
# aero forces to structural forces
struct_forces = mapping.aero2struct_force_mapping(
aero_kstep.forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
dynamic_struct_forces = unsteady_forces_coeff*mapping.aero2struct_force_mapping(
aero_kstep.dynamic_forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
# prescribed forces + aero forces
try:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = (
(dynamic_struct_forces + self.data.structure.dynamic_input[max(self.data.ts - 1, 0)]['dynamic_forces']).
astype(dtype=ct.c_double, order='F', copy=True))
except KeyError:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = dynamic_struct_forces
def relaxation_factor(self, k):
initial = self.settings['relaxation_factor'].value
if not self.settings['dynamic_relaxation'].value:
return initial
final = self.settings['final_relaxation_factor'].value
if k >= self.settings['relaxation_steps'].value:
return final
value = initial + (final - initial)/self.settings['relaxation_steps'].value*k
return value
@staticmethod
def interpolate_timesteps(step0, step1, out_step, coeff):
"""
Performs a linear interpolation between step0 and step1 based on coeff
in [0, 1]. 0 means info in out_step == step0 and 1 out_step == step1.
Quantities interpolated:
* `steady_applied_forces`
* `unsteady_applied_forces`
* `velocity` input in Lagrange constraints
"""
if not 0.0 <= coeff <= 1.0:
return out_step
# forces
out_step.steady_applied_forces[:] = (
(1.0 - coeff)*step0.steady_applied_forces +
(coeff)*(step1.steady_applied_forces))
out_step.unsteady_applied_forces[:] = (
(1.0 - coeff)*step0.unsteady_applied_forces +
(coeff)*(step1.unsteady_applied_forces))
# multibody if necessary
if out_step.mb_dict is not None:
for key in step1.mb_dict.keys():
if 'constraint_' in key:
try:
out_step.mb_dict[key]['velocity'][:] = (
(1.0 - coeff)*step0.mb_dict[key]['velocity'] +
(coeff)*step1.mb_dict[key]['velocity'])
except KeyError:
pass
return out_step
def relax(beam, timestep, previous_timestep, coeff):
timestep.steady_applied_forces[:] = ((1.0 - coeff)*timestep.steady_applied_forces +
coeff*previous_timestep.steady_applied_forces)
timestep.unsteady_applied_forces[:] = ((1.0 - coeff)*timestep.unsteady_applied_forces +
coeff*previous_timestep.unsteady_applied_forces)
def normalise_quaternion(tstep):
tstep.dqdt[-4:] = algebra.unit_vector(tstep.dqdt[-4:])
tstep.quat = tstep.dqdt[-4:].astype(dtype=ct.c_double, order='F', copy=True)
|
[
"sharpy.utils.algebra.unit_vector",
"sharpy.utils.cout_utils.TablePrinter",
"sharpy.utils.cout_utils.cout_wrap",
"numpy.log10",
"sharpy.utils.exceptions.NotConvergedSolver",
"sharpy.utils.controller_interface.initialise_controller",
"sharpy.utils.settings.SettingsTable",
"time.perf_counter",
"numpy.linalg.norm",
"numpy.sum",
"numpy.isfinite",
"ctypes.c_double",
"pdb.set_trace",
"copy.deepcopy",
"numpy.isnan",
"sharpy.utils.solver_interface.initialise_solver",
"sharpy.utils.settings.to_custom_types"
] |
[((6294, 6318), 'sharpy.utils.settings.SettingsTable', 'settings.SettingsTable', ([], {}), '()\n', (6316, 6318), True, 'import sharpy.utils.settings as settings\n'), ((26759, 26795), 'sharpy.utils.algebra.unit_vector', 'algebra.unit_vector', (['tstep.dqdt[-4:]'], {}), '(tstep.dqdt[-4:])\n', (26778, 26795), True, 'import sharpy.utils.algebra as algebra\n'), ((7341, 7359), 'ctypes.c_double', 'ct.c_double', (['new_g'], {}), '(new_g)\n', (7352, 7359), True, 'import ctypes as ct\n'), ((7654, 7674), 'ctypes.c_double', 'ct.c_double', (['new_rho'], {}), '(new_rho)\n', (7665, 7674), True, 'import ctypes as ct\n'), ((8130, 8218), 'sharpy.utils.settings.to_custom_types', 'settings.to_custom_types', (['self.settings', 'self.settings_types', 'self.settings_default'], {}), '(self.settings, self.settings_types, self.\n settings_default)\n', (8154, 8218), True, 'import sharpy.utils.settings as settings\n'), ((8314, 8342), 'copy.deepcopy', 'copy.deepcopy', (['self.settings'], {}), '(self.settings)\n', (8327, 8342), False, 'import copy\n'), ((8872, 8942), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["self.settings['structural_solver']"], {}), "(self.settings['structural_solver'])\n", (8906, 8942), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((9094, 9158), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["self.settings['aero_solver']"], {}), "(self.settings['aero_solver'])\n", (9128, 9158), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((9617, 9661), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (['postproc'], {}), '(postproc)\n', (9651, 9661), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((10152, 10211), 'sharpy.utils.controller_interface.initialise_controller', 'controller_interface.initialise_controller', (['controller_type'], {}), '(controller_type)\n', (10194, 10211), True, 'import sharpy.utils.controller_interface as controller_interface\n'), ((10475, 10541), 'sharpy.utils.cout_utils.TablePrinter', 'cout.TablePrinter', (['(8)', '(12)', "['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e']"], {}), "(8, 12, ['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e'])\n", (10492, 10541), True, 'import sharpy.utils.cout_utils as cout\n'), ((14188, 14207), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14205, 14207), False, 'import time\n'), ((19636, 19655), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19653, 19655), False, 'import time\n'), ((20788, 20820), 'sharpy.utils.cout_utils.cout_wrap', 'cout.cout_wrap', (['"""...Finished"""', '(1)'], {}), "('...Finished', 1)\n", (20802, 20820), True, 'import sharpy.utils.cout_utils as cout\n'), ((21410, 21425), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (21423, 21425), False, 'import pdb\n'), ((21880, 21922), 'numpy.linalg.norm', 'np.linalg.norm', (['(tstep.q - previous_tstep.q)'], {}), '(tstep.q - previous_tstep.q)\n', (21894, 21922), True, 'import numpy as np\n'), ((22016, 22064), 'numpy.linalg.norm', 'np.linalg.norm', (['(tstep.dqdt - previous_tstep.dqdt)'], {}), '(tstep.dqdt - previous_tstep.dqdt)\n', (22030, 22064), True, 'import numpy as np\n'), ((16366, 16385), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (16383, 16385), False, 'import time\n'), ((17984, 18003), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18001, 18003), False, 'import time\n'), ((21352, 21372), 'numpy.isfinite', 'np.isfinite', (['tstep.q'], {}), 
'(tstep.q)\n', (21363, 21372), True, 'import numpy as np\n'), ((15372, 15424), 'sharpy.utils.cout_utils.cout_wrap', 'cout.cout_wrap', (['"""The FSI solver did not converge!!!"""'], {}), "('The FSI solver did not converge!!!')\n", (15386, 15424), True, 'import sharpy.utils.cout_utils as cout\n'), ((16710, 16729), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (16727, 16729), False, 'import time\n'), ((17657, 17718), 'sharpy.utils.exceptions.NotConvergedSolver', 'exc.NotConvergedSolver', (['"""NaN found in steady_applied_forces!"""'], {}), "('NaN found in steady_applied_forces!')\n", (17679, 17718), True, 'import sharpy.utils.exceptions as exc\n'), ((17822, 17885), 'sharpy.utils.exceptions.NotConvergedSolver', 'exc.NotConvergedSolver', (['"""NaN found in unsteady_applied_forces!"""'], {}), "('NaN found in unsteady_applied_forces!')\n", (17844, 17885), True, 'import sharpy.utils.exceptions as exc\n'), ((18763, 18782), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18780, 18782), False, 'import time\n'), ((13442, 13503), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["info['structural_solver']"], {}), "(info['structural_solver'])\n", (13476, 13503), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((17575, 17623), 'numpy.isnan', 'np.isnan', (['structural_kstep.steady_applied_forces'], {}), '(structural_kstep.steady_applied_forces)\n', (17583, 17623), True, 'import numpy as np\n'), ((17738, 17788), 'numpy.isnan', 'np.isnan', (['structural_kstep.unsteady_applied_forces'], {}), '(structural_kstep.unsteady_applied_forces)\n', (17746, 17788), True, 'import numpy as np\n'), ((20101, 20124), 'numpy.log10', 'np.log10', (['self.res_dqdt'], {}), '(self.res_dqdt)\n', (20109, 20124), True, 'import numpy as np\n'), ((20328, 20380), 'numpy.sum', 'np.sum', (['structural_kstep.steady_applied_forces[:, 0]'], {}), '(structural_kstep.steady_applied_forces[:, 0])\n', (20334, 20380), True, 'import numpy as np\n'), ((20430, 20482), 'numpy.sum', 'np.sum', (['structural_kstep.steady_applied_forces[:, 2]'], {}), '(structural_kstep.steady_applied_forces[:, 2])\n', (20436, 20482), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.core import local
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
class TestGeneratorDbSession(unittest.TestCase):
def setUp(self):
db = Database()
class Account(db.Entity):
id = PrimaryKey(int)
amount = Required(int)
setup_database(db)
self.db = db
self.Account = Account
with db_session:
a1 = Account(id=1, amount=1000)
a2 = Account(id=2, amount=2000)
a3 = Account(id=3, amount=3000)
def tearDown(self):
teardown_database(self.db)
assert local.db_session is None
self.db = self.Account = None
@raises_exception(TypeError, 'db_session with `retry` option cannot be applied to generator function')
def test1(self):
@db_session(retry=3)
def f(): yield
@raises_exception(TypeError, 'db_session with `ddl` option cannot be applied to generator function')
def test2(self):
@db_session(ddl=True)
def f(): yield
@raises_exception(TypeError, 'db_session with `serializable` option cannot be applied to generator function')
def test3(self):
@db_session(serializable=True)
def f(): yield
def test4(self):
@db_session(immediate=True)
def f(): yield
@raises_exception(TransactionError, '@db_session-wrapped generator cannot be used inside another db_session')
def test5(self):
@db_session
def f(): yield
with db_session:
next(f())
def test6(self):
@db_session
def f():
x = local.db_session
self.assertTrue(x is not None)
yield self.db._get_cache()
self.assertEqual(local.db_session, x)
a1 = self.Account[1]
yield a1.amount
self.assertEqual(local.db_session, x)
a2 = self.Account[2]
yield a2.amount
gen = f()
cache = next(gen)
self.assertTrue(cache.is_alive)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 1000)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 2000)
self.assertEqual(local.db_session, None)
try: next(gen)
except StopIteration:
self.assertFalse(cache.is_alive)
else:
self.fail()
def test7(self):
@db_session
def f(id1):
a1 = self.Account[id1]
id2 = yield a1.amount
a2 = self.Account[id2]
amount = yield a2.amount
a1.amount -= amount
a2.amount += amount
commit()
gen = f(1)
amount1 = next(gen)
self.assertEqual(amount1, 1000)
amount2 = gen.send(2)
self.assertEqual(amount2, 2000)
try:
gen.send(100)
except StopIteration:
pass
else:
self.fail()
with db_session:
a1 = self.Account[1]
self.assertEqual(a1.amount, 900)
a2 = self.Account[2]
self.assertEqual(a2.amount, 2100)
@raises_exception(TransactionError, 'You need to manually commit() changes before suspending the generator')
def test8(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
yield a1.amount
for amount in f(1):
pass
def test9(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
commit()
yield a1.amount
for amount in f(1):
pass
def test10(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
a1.amount += 100
with db_session:
a = self.Account[1].amount
for amount in f(1):
pass
with db_session:
b = self.Account[1].amount
self.assertEqual(b, a + 100)
def test12(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.close()
@raises_exception(TypeError, 'error message')
def test13(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.throw(TypeError('error message'))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pony.orm.tests.teardown_database",
"pony.orm.tests.setup_database"
] |
[((4718, 4733), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4731, 4733), False, 'import unittest\n'), ((449, 467), 'pony.orm.tests.setup_database', 'setup_database', (['db'], {}), '(db)\n', (463, 467), False, 'from pony.orm.tests import setup_database, teardown_database\n'), ((712, 738), 'pony.orm.tests.teardown_database', 'teardown_database', (['self.db'], {}), '(self.db)\n', (729, 738), False, 'from pony.orm.tests import setup_database, teardown_database\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-05 01:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('private_sharing', '0007_auto_20171220_2038'),
]
operations = [
migrations.CreateModel(
name='FeaturedProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='private_sharing.DataRequestProject')),
],
),
]
|
[
"django.db.models.AutoField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
] |
[((404, 497), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (420, 497), False, 'from django.db import migrations, models\n'), ((528, 556), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (544, 556), False, 'from django.db import migrations, models\n'), ((587, 695), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""private_sharing.DataRequestProject"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'private_sharing.DataRequestProject')\n", (604, 695), False, 'from django.db import migrations, models\n')]
|
# coding: utf-8
"""
Test Pyleecan optimization module using Zitzler–Deb–Thiele's function N. 3
"""
import pytest
from ....definitions import PACKAGE_NAME
from ....Tests.Validation.Machine.SCIM_001 import SCIM_001
from ....Classes.InputCurrent import InputCurrent
from ....Classes.MagFEMM import MagFEMM
from ....Classes.Simu1 import Simu1
from ....Classes.Output import Output
from ....Classes.OptiDesignVar import OptiDesignVar
from ....Classes.OptiObjFunc import OptiObjFunc
from ....Classes.OptiConstraint import OptiConstraint
from ....Classes.OptiProblem import OptiProblem
from ....Classes.ImportMatrixVal import ImportMatrixVal
from ....Classes.ImportGenVectLin import ImportGenVectLin
from ....Classes.OptiGenAlgNsga2Deap import OptiGenAlgNsga2Deap
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import random
@pytest.mark.validation
@pytest.mark.optimization
def test_zdt3():
# ### Defining reference Output
# Definition of the enforced output of the electrical module
Nt = 2
Nr = ImportMatrixVal(value=np.ones(Nt) * 3000)
Is = ImportMatrixVal(
value=np.array(
[
[6.97244193e-06, 2.25353053e02, -2.25353060e02],
[-2.60215295e02, 1.30107654e02, 1.30107642e02],
# [-6.97244208e-06, -2.25353053e02, 2.25353060e02],
# [2.60215295e02, -1.30107654e02, -1.30107642e02],
]
)
)
Ir = ImportMatrixVal(value=np.zeros(30))
time = ImportGenVectLin(start=0, stop=0.015, num=Nt, endpoint=True)
angle = ImportGenVectLin(
start=0, stop=2 * np.pi, num=64, endpoint=False
) # num=1024
# Definition of the simulation
simu = Simu1(name="Test_machine", machine=SCIM_001)
simu.input = InputCurrent(
Is=Is,
Ir=Ir, # zero current for the rotor
Nr=Nr,
angle_rotor=None, # Will be computed
time=time,
angle=angle,
angle_rotor_initial=0.5216 + np.pi,
)
# Definition of the magnetic simulation
simu.mag = MagFEMM(
is_stator_linear_BH=2,
is_rotor_linear_BH=2,
is_symmetry_a=True,
is_antiper_a=False,
)
simu.mag.Kmesh_fineness = 0.01
# simu.mag.Kgeo_fineness=0.02
simu.mag.sym_a = 4
simu.struct = None
output = Output(simu=simu)
# ### Design variable
my_vars = {}
for i in range(30):
my_vars["var_" + str(i)] = OptiDesignVar(
name="output.simu.input.Ir.value[" + str(i) + "]",
type_var="interval",
space=[0, 1],
function=lambda space: np.random.uniform(*space),
)
# ### Objectives
objs = {
"obj1": OptiObjFunc(
description="Maximization of the torque average",
func=lambda output: output.mag.Tem_av,
),
"obj2": OptiObjFunc(
description="Minimization of the torque ripple",
func=lambda output: output.mag.Tem_rip,
),
}
# ### Evaluation
def evaluate(output):
x = output.simu.input.Ir.value
f1 = lambda x: x[0]
g = lambda x: 1 + (9 / 29) * np.sum(x[1:])
h = lambda f1, g: 1 - np.sqrt(f1 / g) - (f1 / g) * np.sin(10 * np.pi * f1)
output.mag.Tem_av = f1(x)
output.mag.Tem_rip = g(x) * h(f1(x), g(x))
# ### Defining the problem
my_prob = OptiProblem(
output=output, design_var=my_vars, obj_func=objs, eval_func=evaluate
)
solver = OptiGenAlgNsga2Deap(problem=my_prob, size_pop=40, nb_gen=100, p_mutate=0.5)
res = solver.solve()
def plot_pareto(self):
"""Plot every fitness values with the pareto front for 2 fitness
Parameters
----------
self : OutputMultiOpti
"""
        # TODO Add a feature to return the design_variables of each individual from the Pareto front
# Get fitness and ngen
is_valid = np.array(self.is_valid)
fitness = np.array(self.fitness)
ngen = np.array(self.ngen)
# Keep only valid values
indx = np.where(is_valid)[0]
fitness = fitness[indx]
ngen = ngen[indx]
# Get pareto front
pareto = list(np.unique(fitness, axis=0))
# Get dominated values
to_remove = []
N = len(pareto)
for i in range(N):
for j in range(N):
if all(pareto[j] <= pareto[i]) and any(pareto[j] < pareto[i]):
to_remove.append(pareto[i])
break
# Remove dominated values
for i in to_remove:
for l in range(len(pareto)):
if all(i == pareto[l]):
pareto.pop(l)
break
pareto = np.array(pareto)
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
# Plot Pareto front
axs[0].scatter(
pareto[:, 0],
pareto[:, 1],
facecolors="b",
edgecolors="b",
s=0.8,
label="Pareto Front",
)
axs[0].autoscale()
axs[0].legend()
axs[0].set_title("Pyleecan results")
axs[0].set_xlabel(r"$f_1(x)$")
axs[0].set_ylabel(r"$f_2(x)$")
try:
img_to_find = img.imread(
"pyleecan\\Tests\\Validation\\Optimization\\zdt3.jpg", format="jpg"
)
axs[1].imshow(img_to_find, aspect="auto")
axs[1].axis("off")
axs[1].set_title("Pareto front of the problem")
except (TypeError, ValueError):
print("Pillow is needed to import jpg files")
return fig
fig = plot_pareto(res)
fig.savefig(PACKAGE_NAME + "/Tests/Results/Validation/test_zdt3.png")
|
[
"numpy.sqrt",
"numpy.unique",
"numpy.ones",
"numpy.where",
"matplotlib.image.imread",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.uniform",
"numpy.sin",
"matplotlib.pyplot.subplots"
] |
[((3954, 3977), 'numpy.array', 'np.array', (['self.is_valid'], {}), '(self.is_valid)\n', (3962, 3977), True, 'import numpy as np\n'), ((3996, 4018), 'numpy.array', 'np.array', (['self.fitness'], {}), '(self.fitness)\n', (4004, 4018), True, 'import numpy as np\n'), ((4034, 4053), 'numpy.array', 'np.array', (['self.ngen'], {}), '(self.ngen)\n', (4042, 4053), True, 'import numpy as np\n'), ((4774, 4790), 'numpy.array', 'np.array', (['pareto'], {}), '(pareto)\n', (4782, 4790), True, 'import numpy as np\n'), ((4811, 4846), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (4823, 4846), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1224), 'numpy.array', 'np.array', (['[[6.97244193e-06, 225.353053, -225.35306], [-260.215295, 130.107654, \n 130.107642]]'], {}), '([[6.97244193e-06, 225.353053, -225.35306], [-260.215295, \n 130.107654, 130.107642]])\n', (1136, 1224), True, 'import numpy as np\n'), ((1501, 1513), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (1509, 1513), True, 'import numpy as np\n'), ((4103, 4121), 'numpy.where', 'np.where', (['is_valid'], {}), '(is_valid)\n', (4111, 4121), True, 'import numpy as np\n'), ((4234, 4260), 'numpy.unique', 'np.unique', (['fitness'], {'axis': '(0)'}), '(fitness, axis=0)\n', (4243, 4260), True, 'import numpy as np\n'), ((5284, 5363), 'matplotlib.image.imread', 'img.imread', (['"""pyleecan\\\\Tests\\\\Validation\\\\Optimization\\\\zdt3.jpg"""'], {'format': '"""jpg"""'}), "('pyleecan\\\\Tests\\\\Validation\\\\Optimization\\\\zdt3.jpg', format='jpg')\n", (5294, 5363), True, 'import matplotlib.image as img\n'), ((1068, 1079), 'numpy.ones', 'np.ones', (['Nt'], {}), '(Nt)\n', (1075, 1079), True, 'import numpy as np\n'), ((2641, 2666), 'numpy.random.uniform', 'np.random.uniform', (['*space'], {}), '(*space)\n', (2658, 2666), True, 'import numpy as np\n'), ((3177, 3190), 'numpy.sum', 'np.sum', (['x[1:]'], {}), '(x[1:])\n', (3183, 3190), True, 'import numpy as np\n'), ((3221, 3236), 'numpy.sqrt', 'np.sqrt', (['(f1 / g)'], {}), '(f1 / g)\n', (3228, 3236), True, 'import numpy as np\n'), ((3250, 3273), 'numpy.sin', 'np.sin', (['(10 * np.pi * f1)'], {}), '(10 * np.pi * f1)\n', (3256, 3273), True, 'import numpy as np\n')]
|
from typing import Union, List
import pika
import pika.exceptions
import time
import logging
def CreateDurableQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str,
settings: dict = None):
if settings is None:
settings = {}
channel.queue_declare(queue,
passive=settings.get('passive', False),
durable=settings.get('durable', True),
exclusive=settings.get('exclusive', False),
auto_delete=settings.get('auto_delete', False),
arguments=settings.get('arguments', None))
def CreateExchange(channel: pika.adapters.blocking_connection.BlockingChannel, exchange: str,
settings: dict = None):
if settings is None:
settings = {}
channel.exchange_declare(exchange,
exchange_type=settings.get('exchange_type', 'direct'),
passive=settings.get('passive', False),
durable=settings.get('durable', True),
auto_delete=settings.get('auto_delete', False),
internal=settings.get('internal', False),
arguments=settings.get('arguments', None))
def BindQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str, exchange: str, topic: str,
arguments: dict = None):
channel.queue_bind(queue, exchange, routing_key=topic, arguments=arguments)
def UnbindQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str, exchange: str, topic: str,
arguments: dict = None):
channel.queue_unbind(queue, exchange, routing_key=topic, arguments=arguments)
def AssertDurableQueueExists(connection: pika.BlockingConnection, queue: str, retries: int = 0, logger=logging.getLogger(__name__)):
count = 0
while count <= retries:
channel: pika.adapters.blocking_connection.BlockingChannel = connection.channel()
try:
channel.queue_declare(queue, durable=True, passive=True)
channel.close()
return
except Exception as e:
count += 1
if count <= retries:
time.sleep(1)
msg = f"Queue {queue} does not exist!"
logger.error(msg)
raise Exception(msg)
def SafeCloseChannel(channel: pika.BlockingConnection.channel, acceptAllFailures: bool = True):
if channel.is_closed:
return
try:
channel.close()
except pika.exceptions.ChannelWrongStateError:
# channel already closed
pass
except:
if not acceptAllFailures:
raise
def SafeCloseConnection(connection: pika.BlockingConnection, acceptAllFailures: bool = True):
if connection.is_closed:
return
try:
connection.close()
except pika.exceptions.ConnectionWrongStateError:
# connection already closed
pass
except:
if not acceptAllFailures:
raise
def BasicSend(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, destination: str, body: bytes,
properties: pika.spec.BasicProperties = None,
mandatory: bool = True):
BindQueue(channel, queue=destination, exchange=exchange, topic=destination)
channel.basic_publish(exchange, destination, body, properties=properties, mandatory=mandatory)
def BasicPublish(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: str, body: bytes,
properties: pika.spec.BasicProperties = None,
mandatory: bool = True):
channel.basic_publish(exchange, topic, body, properties=properties, mandatory=mandatory)
def BasicSubscribe(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: Union[List[str], str], queue: str,
arguments: dict = None):
if isinstance(topic, list):
topics = topic
else:
topics = [topic]
for topic in topics:
if isinstance(topic, dict):
arguments = topic.get('arguments', None)
topic = topic.get('topic', None)
BindQueue(channel, queue, exchange, topic, arguments=arguments)
def BasicUnsubscribe(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: Union[List[str], str], queue: str,
arguments: dict = None):
if isinstance(topic, list):
topics = topic
else:
topics = [topic]
for topic in topics:
if isinstance(topic, dict):
arguments = topic.get('arguments', None)
topic = topic.get('topic', None)
UnbindQueue(channel, queue, exchange, topic, arguments=arguments)
|
[
"logging.getLogger",
"time.sleep"
] |
[((1907, 1934), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1924, 1934), False, 'import logging\n'), ((2301, 2314), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2311, 2314), False, 'import time\n')]
|
# Copyright 2013 by <NAME>.
# Revisions copyright 2015 by <NAME>.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import unittest
import warnings
from os import path
from Bio import BiopythonParserWarning
from Bio import GenBank
from Bio import SeqIO
class GenBankTests(unittest.TestCase):
def test_invalid_product_line_raises_value_error(self):
"""Test GenBank parsing invalid product line raises ValueError"""
def parse_invalid_product_line():
rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'),
'genbank')
self.assertRaises(ValueError, parse_invalid_product_line)
def test_genbank_read(self):
with open(path.join("GenBank", "NC_000932.gb")) as handle:
record = GenBank.read(handle)
self.assertEqual(['NC_000932'], record.accession)
def test_genbank_read_multirecord(self):
with open(path.join("GenBank", "cor6_6.gb")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
def test_genbank_read_invalid(self):
with open(path.join("GenBank", "NC_000932.faa")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
def test_genbank_read_no_origin_no_end(self):
with open(path.join("GenBank", "no_origin_no_end.gb")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
# Evil hack with 000 to manipulate sort order to ensure this is tested
# first (otherwise something silences the warning)
def test_000_genbank_bad_loc_wrap_warning(self):
with warnings.catch_warnings():
warnings.simplefilter("error", BiopythonParserWarning)
with open(path.join("GenBank", "bad_loc_wrap.gb")) as handle:
# self.assertRaises(BiopythonParserWarning, GenBank.read, handle)
try:
record = GenBank.read(handle)
except BiopythonParserWarning as e:
self.assertEqual(str(e), "Non-standard feature line wrapping (didn't break on comma)?")
else:
self.assertTrue(False, "Expected specified BiopythonParserWarning here.")
# Similar hack as we also want to catch that warning here
def test_001_negative_location_warning(self):
with warnings.catch_warnings():
warnings.simplefilter("error", BiopythonParserWarning)
try:
SeqIO.read(path.join("GenBank", "negative_location.gb"), "genbank")
except BiopythonParserWarning as e:
self.assertEqual(str(e), "Couldn't parse feature location: '-2..492'")
else:
self.assertTrue(False, "Expected specified BiopythonParserWarning here.")
def test_genbank_bad_loc_wrap_parsing(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
with open(path.join("GenBank", "bad_loc_wrap.gb")) as handle:
record = GenBank.read(handle)
self.assertEqual(1, len(record.features))
loc = record.features[0].location
self.assertEqual(loc, "join(3462..3615,3698..3978,4077..4307,4408..4797,4876..5028,5141..5332)")
def test_negative_location(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec = SeqIO.read(path.join("GenBank", "negative_location.gb"), "genbank")
self.assertEqual(None, rec.features[-1].location)
def test_dot_lineage(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec = SeqIO.read("GenBank/bad_loc_wrap.gb", "genbank")
self.assertEqual(rec.annotations["organism"], ".")
self.assertEqual(rec.annotations["taxonomy"], [])
def test_dblink(self):
"""GenBank record with old DBLINK project entry."""
record = SeqIO.read("GenBank/NC_005816.gb", "gb")
self.assertEqual(record.dbxrefs, ["Project:58037"])
embl = record.format("embl")
self.assertTrue("XX\nPR Project:58037;\nXX\n" in embl, embl)
def test_dblink_two(self):
"""GenBank record with old and new DBLINK project entries."""
record = SeqIO.read("GenBank/NP_416719.gbwithparts", "gb")
self.assertEqual(record.dbxrefs,
["Project:57779", "BioProject:PRJNA57779"])
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA57779;\nXX\n" in embl, embl)
def test_dbline_gb_embl(self):
"""GenBank / EMBL paired records with PR project entry: GenBank"""
record = SeqIO.read("GenBank/DS830848.gb", "gb")
self.assertTrue("BioProject:PRJNA16232" in record.dbxrefs, record.dbxrefs)
gb = record.format("gb")
self.assertTrue("\nDBLINK BioProject:PRJNA16232\n" in gb, gb)
# Also check EMBL output
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA16232;\nXX\n" in embl, embl)
def test_dbline_embl_gb(self):
"""GenBank / EMBL paired records with PR project entry: EMBL"""
record = SeqIO.read("EMBL/DS830848.embl", "embl")
# TODO: Should we map this to BioProject:PRJNA16232
self.assertTrue("Project:PRJNA16232" in record.dbxrefs, record.dbxrefs)
gb = record.format("gb")
self.assertTrue("\nDBLINK Project:PRJNA16232\n" in gb, gb)
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA16232;\nXX\n" in embl, embl)
def test_structured_comment_parsing(self):
# GISAID_EpiFlu(TM)Data, HM138502.gbk has both 'comment' and 'structured_comment'
record = SeqIO.read(path.join('GenBank', 'HM138502.gbk'), 'genbank')
self.assertEqual(record.annotations['comment'],
"Swine influenza A (H1N1) virus isolated during human swine flu\noutbreak of 2009.")
self.assertEqual(record.annotations['structured_comment']['GISAID_EpiFlu(TM)Data']['Lineage'], 'swl')
self.assertEqual(len(record.annotations['structured_comment']['GISAID_EpiFlu(TM)Data']), 3)
# FluData structured comment
record = SeqIO.read(path.join('GenBank', 'EU851978.gbk'), 'genbank')
self.assertEqual(record.annotations['structured_comment']['FluData']['LabID'], '2008704957')
self.assertEqual(len(record.annotations['structured_comment']['FluData']), 5)
# Assembly-Data structured comment
record = SeqIO.read(path.join('GenBank', 'KF527485.gbk'), 'genbank')
self.assertEqual(record.annotations['structured_comment']['Assembly-Data']['Assembly Method'], 'Lasergene v. 10')
self.assertEqual(len(record.annotations['structured_comment']['Assembly-Data']), 2)
# No structured comment in NC_000932.gb, just a regular comment
record = SeqIO.read(path.join('GenBank', 'NC_000932.gb'), 'genbank')
self.assertFalse("structured_comment" in record.annotations)
self.assertEqual(record.annotations['comment'],
'REVIEWED REFSEQ: This record has been curated by NCBI staff. The\n'
'reference sequence was derived from AP000423.\n'
'COMPLETENESS: full length.')
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
[
"warnings.catch_warnings",
"os.path.join",
"warnings.simplefilter",
"Bio.SeqIO.read",
"unittest.main",
"Bio.GenBank.read",
"unittest.TextTestRunner"
] |
[((7472, 7508), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7495, 7508), False, 'import unittest\n'), ((7513, 7545), 'unittest.main', 'unittest.main', ([], {'testRunner': 'runner'}), '(testRunner=runner)\n', (7526, 7545), False, 'import unittest\n'), ((4089, 4129), 'Bio.SeqIO.read', 'SeqIO.read', (['"""GenBank/NC_005816.gb"""', '"""gb"""'], {}), "('GenBank/NC_005816.gb', 'gb')\n", (4099, 4129), False, 'from Bio import SeqIO\n'), ((4417, 4466), 'Bio.SeqIO.read', 'SeqIO.read', (['"""GenBank/NP_416719.gbwithparts"""', '"""gb"""'], {}), "('GenBank/NP_416719.gbwithparts', 'gb')\n", (4427, 4466), False, 'from Bio import SeqIO\n'), ((4818, 4857), 'Bio.SeqIO.read', 'SeqIO.read', (['"""GenBank/DS830848.gb"""', '"""gb"""'], {}), "('GenBank/DS830848.gb', 'gb')\n", (4828, 4857), False, 'from Bio import SeqIO\n'), ((5320, 5360), 'Bio.SeqIO.read', 'SeqIO.read', (['"""EMBL/DS830848.embl"""', '"""embl"""'], {}), "('EMBL/DS830848.embl', 'embl')\n", (5330, 5360), False, 'from Bio import SeqIO\n'), ((891, 911), 'Bio.GenBank.read', 'GenBank.read', (['handle'], {}), '(handle)\n', (903, 911), False, 'from Bio import GenBank\n'), ((1704, 1729), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1727, 1729), False, 'import warnings\n'), ((1743, 1797), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'BiopythonParserWarning'], {}), "('error', BiopythonParserWarning)\n", (1764, 1797), False, 'import warnings\n'), ((2427, 2452), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2450, 2452), False, 'import warnings\n'), ((2466, 2520), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'BiopythonParserWarning'], {}), "('error', BiopythonParserWarning)\n", (2487, 2520), False, 'import warnings\n'), ((2928, 2953), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2951, 2953), False, 'import warnings\n'), ((2967, 3022), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'BiopythonParserWarning'], {}), "('ignore', BiopythonParserWarning)\n", (2988, 3022), False, 'import warnings\n'), ((3416, 3441), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3439, 3441), False, 'import warnings\n'), ((3455, 3510), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'BiopythonParserWarning'], {}), "('ignore', BiopythonParserWarning)\n", (3476, 3510), False, 'import warnings\n'), ((3705, 3730), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3728, 3730), False, 'import warnings\n'), ((3744, 3799), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'BiopythonParserWarning'], {}), "('ignore', BiopythonParserWarning)\n", (3765, 3799), False, 'import warnings\n'), ((3818, 3866), 'Bio.SeqIO.read', 'SeqIO.read', (['"""GenBank/bad_loc_wrap.gb"""', '"""genbank"""'], {}), "('GenBank/bad_loc_wrap.gb', 'genbank')\n", (3828, 3866), False, 'from Bio import SeqIO\n'), ((5885, 5921), 'os.path.join', 'path.join', (['"""GenBank"""', '"""HM138502.gbk"""'], {}), "('GenBank', 'HM138502.gbk')\n", (5894, 5921), False, 'from os import path\n'), ((6362, 6398), 'os.path.join', 'path.join', (['"""GenBank"""', '"""EU851978.gbk"""'], {}), "('GenBank', 'EU851978.gbk')\n", (6371, 6398), False, 'from os import path\n'), ((6669, 6705), 'os.path.join', 'path.join', (['"""GenBank"""', '"""KF527485.gbk"""'], {}), "('GenBank', 'KF527485.gbk')\n", (6678, 6705), False, 'from os import path\n'), ((7032, 7068), 
'os.path.join', 'path.join', (['"""GenBank"""', '"""NC_000932.gb"""'], {}), "('GenBank', 'NC_000932.gb')\n", (7041, 7068), False, 'from os import path\n'), ((619, 661), 'os.path.join', 'path.join', (['"""GenBank"""', '"""invalid_product.gb"""'], {}), "('GenBank', 'invalid_product.gb')\n", (628, 661), False, 'from os import path\n'), ((821, 857), 'os.path.join', 'path.join', (['"""GenBank"""', '"""NC_000932.gb"""'], {}), "('GenBank', 'NC_000932.gb')\n", (830, 857), False, 'from os import path\n'), ((1034, 1067), 'os.path.join', 'path.join', (['"""GenBank"""', '"""cor6_6.gb"""'], {}), "('GenBank', 'cor6_6.gb')\n", (1043, 1067), False, 'from os import path\n'), ((1204, 1241), 'os.path.join', 'path.join', (['"""GenBank"""', '"""NC_000932.faa"""'], {}), "('GenBank', 'NC_000932.faa')\n", (1213, 1241), False, 'from os import path\n'), ((1387, 1430), 'os.path.join', 'path.join', (['"""GenBank"""', '"""no_origin_no_end.gb"""'], {}), "('GenBank', 'no_origin_no_end.gb')\n", (1396, 1430), False, 'from os import path\n'), ((3122, 3142), 'Bio.GenBank.read', 'GenBank.read', (['handle'], {}), '(handle)\n', (3134, 3142), False, 'from Bio import GenBank\n'), ((3540, 3584), 'os.path.join', 'path.join', (['"""GenBank"""', '"""negative_location.gb"""'], {}), "('GenBank', 'negative_location.gb')\n", (3549, 3584), False, 'from os import path\n'), ((1820, 1859), 'os.path.join', 'path.join', (['"""GenBank"""', '"""bad_loc_wrap.gb"""'], {}), "('GenBank', 'bad_loc_wrap.gb')\n", (1829, 1859), False, 'from os import path\n'), ((2004, 2024), 'Bio.GenBank.read', 'GenBank.read', (['handle'], {}), '(handle)\n', (2016, 2024), False, 'from Bio import GenBank\n'), ((2565, 2609), 'os.path.join', 'path.join', (['"""GenBank"""', '"""negative_location.gb"""'], {}), "('GenBank', 'negative_location.gb')\n", (2574, 2609), False, 'from os import path\n'), ((3045, 3084), 'os.path.join', 'path.join', (['"""GenBank"""', '"""bad_loc_wrap.gb"""'], {}), "('GenBank', 'bad_loc_wrap.gb')\n", (3054, 3084), False, 'from os import path\n')]
|
import turtle
import math
from time import sleep
def calculate_points(pos, r1, r2, angles):
points = []
for a in angles:
x = pos[0] + (r1 * math.cos(math.radians(a)))
y = pos[1] + (r2 * math.sin(math.radians(a)))
points.append((x, y))
return points
def draw_pyramid(tur, draw=False):
y_points = calculate_points(pyramid_pos, 0, pyramid_height//2, pyramid_y_angles)
x_points = calculate_points(y_points[1], pyramid_width, pyramid_width*math.cos(math.radians(pyramid_y_angles[1])), pyramid_x_angles)
if draw:
screen.tracer(1)
jump(tur, x_points[0])
for p in x_points:
tur.goto(p)
tur.goto(y_points[0])
        jump(tur, p)
tur.goto(x_points[0])
screen.tracer(0)
def jump(tur, pos):
tur.pu()
tur.goto(pos)
tur.pd()
screen = turtle.Screen()
t1 = turtle.Turtle()
t1.hideturtle()
pyramid_base_sides = 4
pyramid_height = 200
pyramid_width = 100
spin_x = True
spin_y = True
pyramid_pos = [0, 0]
pyramid_x_angles = [x for x in range(15, 375, 360//pyramid_base_sides)]
pyramid_y_angles = [80, 260]
draw_pyramid(t1, True)
while True:
draw_pyramid(t1)
if spin_x:
for i in range(len(pyramid_x_angles)):
pyramid_x_angles[i] += 1
if pyramid_x_angles[i] >= 360:
pyramid_x_angles[i] -= 360
if spin_y:
for i in range(len(pyramid_y_angles)):
pyramid_y_angles[i] += 1
if pyramid_y_angles[i] >= 360:
pyramid_y_angles[i] -= 360
screen.update()
sleep(.01)
t1.clear()
|
[
"turtle.Screen",
"turtle.Turtle",
"time.sleep",
"math.radians"
] |
[((825, 840), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (838, 840), False, 'import turtle\n'), ((846, 861), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (859, 861), False, 'import turtle\n'), ((1545, 1556), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (1550, 1556), False, 'from time import sleep\n'), ((490, 523), 'math.radians', 'math.radians', (['pyramid_y_angles[1]'], {}), '(pyramid_y_angles[1])\n', (502, 523), False, 'import math\n'), ((166, 181), 'math.radians', 'math.radians', (['a'], {}), '(a)\n', (178, 181), False, 'import math\n'), ((220, 235), 'math.radians', 'math.radians', (['a'], {}), '(a)\n', (232, 235), False, 'import math\n')]
|
# vim:set ts=4 sw=4 et:
'''
Config
======
'''
import re
from .action_mapper import Action
from .checks_list import Checks
from .exceptions import ConfigurationException
class Config(object):
"""The :class:`Config` class is responsible for storing application groups
and policies read from the datastore.
It has some handy functions to extract values from the configuration.
It can respond to questions such as:
* "Which are the groups for a user?"
* "Which policies user belong to?"
* "Which tests are enabled for a user?"
:var groups: The groups.
:vartype groups: dict or None
:var policies: The policies.
:vartype policies: dict or None
:param groups: The groups.
:type groups: dict or None
:param policies: The policies.
:type policies: dict or None
"""
#: The loaded policies
policies = None
#: The loaded groups
groups = None
def __init__(self, groups=None, policies=None):
self.update(groups, policies)
def update(self, groups=None, policies=None):
"""Update the stored configuration with the provided values.
:param groups: The groups.
:type groups: dict or None
:param policies: The policies.
:type policies: dict or None
"""
if groups:
if self.groups:
self.groups.update(groups)
else:
self.groups = groups
if policies:
self.policies = policies
def get_rules(self, payload):
"""Return the rules for a payload.
:param str payload: The current payload.
:return: The rules concerned by the payload.
:rtype: list
"""
username = payload.user
action = Action(method=payload.method, query=payload.uri)
hostname = payload.get_host()
for rule in self.policies:
if not self._match_host(hostname, rule["hosts"]):
continue
if "policies" not in rule:
return self._default_rule(rule)
policies = self._get_policy_by_member(username, rule["policies"])
if policies is None:
return self._default_rule(rule)
rules = self._match_rules(action, policies)
if not rules:
return self._default_rule(rule)
return rules
@staticmethod
def _default_rule(rule):
"""Construct a default rule
:param dict rule: The current parsed rule
        :return: A :class:`~docker_leash.checks_list.Checks` containing only the default rule
:rtype: :class:`~docker_leash.checks_list.Checks_list.Checks`
"""
checks = Checks()
checks.add(rule["default"])
return checks
@staticmethod
def _match_host(hostname, host_rules):
"""Validate if a hostname match hosts regex list
:param str hostname: The hostname
:param list host_rules: List of hosts regex
        :return: True if the hostname matches the host rules
:rtype: bool
:raises ConfigurationException: if the host rules are invalid.
"""
match = False
for hosts_reg in host_rules:
mode = hosts_reg[0]
regex = hosts_reg[1:]
if mode == '+':
if re.match(regex, hostname):
match = True
continue
elif mode == '-':
if re.match(regex, hostname):
match = False
continue
else:
raise ConfigurationException(
"'hosts' regex (%s) is missing '+' or '-'" % hosts_reg
)
return match
def _get_policy_by_member(self, username, policies):
"""Extract the policies for a user name.
Return the concerned policies:
            * If the user matches a group
* If the user is None, and "members" contains "Anonymous"
* Else return None
:param str username: The username
:param dict policies: The policies to filter
:return: The policies for username
:rtype: None or dict
"""
for policy in policies:
for group in policy["members"]:
if group in self.groups:
if username in self.groups[group] \
or "*" in self.groups[group] \
or (username is None and "Anonymous" in self.groups[group]):
return policy["rules"]
return None
@staticmethod
def _match_rules(action, actions):
"""Extract the checks for an action.
        First match for exact comparison, then for the "parents" action name,
        and finally for the "any" keyword.
:param docker_leash.action_mapper.Action action: The current action
:param dict actions: The actions from the policies
:return: The filtered actions list
:rtype: `~docker_leash.checks_list.Checks`
"""
assert isinstance(action, Action), 'expected Action, got {!r}'.format(action)
checks = Checks()
action_name = action.name
parent_action = action.namespace_name
# Look for "normal" Actions
if action_name in actions:
for check, args in actions[action_name].iteritems():
checks.add({check: args})
# Look for "parents" Actions
elif parent_action in actions:
for check, args in actions[parent_action].iteritems():
checks.add({check: args})
# Look for "any" Actions
elif "any" in actions:
for check, args in actions["any"].iteritems():
checks.add({check: args})
return checks
|
[
"re.match"
] |
[((3322, 3347), 're.match', 're.match', (['regex', 'hostname'], {}), '(regex, hostname)\n', (3330, 3347), False, 'import re\n'), ((3460, 3485), 're.match', 're.match', (['regex', 'hostname'], {}), '(regex, hostname)\n', (3468, 3485), False, 'import re\n')]
|
# Generated by Django 2.0.2 on 2018-02-17 12:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('printapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='historybayar',
name='bayar_now',
field=models.DecimalField(decimal_places=2, max_digits=14, verbose_name='pembayaran sekarang'),
),
migrations.AlterField(
model_name='kontrak',
name='nhari_har',
field=models.IntegerField(default=0, verbose_name='masa pemeliharaan (hari)'),
),
migrations.AlterField(
model_name='kontrak',
name='tgl_due',
field=models.DateField(default=datetime.date.today, verbose_name='tgl jatuh tempo'),
),
migrations.AlterField(
model_name='termin',
name='nth_termin',
field=models.IntegerField(default=1, verbose_name='termin ke-'),
),
]
|
[
"django.db.models.DecimalField",
"django.db.models.DateField",
"django.db.models.IntegerField"
] |
[((351, 444), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(14)', 'verbose_name': '"""pembayaran sekarang"""'}), "(decimal_places=2, max_digits=14, verbose_name=\n 'pembayaran sekarang')\n", (370, 444), False, 'from django.db import migrations, models\n'), ((565, 636), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""masa pemeliharaan (hari)"""'}), "(default=0, verbose_name='masa pemeliharaan (hari)')\n", (584, 636), False, 'from django.db import migrations, models\n'), ((760, 837), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'datetime.date.today', 'verbose_name': '"""tgl jatuh tempo"""'}), "(default=datetime.date.today, verbose_name='tgl jatuh tempo')\n", (776, 837), False, 'from django.db import migrations, models\n'), ((963, 1020), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""termin ke-"""'}), "(default=1, verbose_name='termin ke-')\n", (982, 1020), False, 'from django.db import migrations, models\n')]
|
# ---------------------------------------------------------------------
# crm.supplierprofile application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.crm.models.supplierprofile import SupplierProfile
from noc.core.translation import ugettext as _
class SupplierProfileApplication(ExtDocApplication):
"""
SupplierProfile application
"""
title = _("Supplier Profile")
menu = [_("Setup"), _("Supplier Profiles")]
model = SupplierProfile
query_fields = ["name__icontains", "description__icontains"]
def field_row_class(self, o):
return o.style.css_class_name if o.style else ""
|
[
"noc.core.translation.ugettext"
] |
[((615, 636), 'noc.core.translation.ugettext', '_', (['"""Supplier Profile"""'], {}), "('Supplier Profile')\n", (616, 636), True, 'from noc.core.translation import ugettext as _\n'), ((649, 659), 'noc.core.translation.ugettext', '_', (['"""Setup"""'], {}), "('Setup')\n", (650, 659), True, 'from noc.core.translation import ugettext as _\n'), ((661, 683), 'noc.core.translation.ugettext', '_', (['"""Supplier Profiles"""'], {}), "('Supplier Profiles')\n", (662, 683), True, 'from noc.core.translation import ugettext as _\n')]
|
from random import shuffle
NUM_DIGIT = 3
MAX_GUESSES = 10
def main(): # main game
print(
f"""
Bagels, a detective logic game.
By <NAME>
    I am thinking of a {NUM_DIGIT}-digit number with no repeated digits.
    Try to guess what it is. Here are some clues:
    When I say:    That means:
      Pico         One digit is correct but in the wrong position.
      Fermi        One digit is correct and in the right position.
      Bagels       No digit is correct.
For example, if the secret number was 248 and your guess was 843, the clues would be Fermi Pico
"""
)
while True: # main game loop
secret_num = get_secret_num()
print("I have though of a number")
print(f"You have {MAX_GUESSES} guesses to get it.")
num_guesses = 1
while num_guesses <= MAX_GUESSES:
guess = ""
# keep looping until they enter a valid guess:
while len(guess) != NUM_DIGIT or not guess.isdecimal():
print(f"Guess {num_guesses}")
guess = input("> ")
clues = get_clues(guess, secret_num)
print(clues)
num_guesses += 1
if guess == secret_num:
break
if num_guesses > MAX_GUESSES:
print("You ran out of guesses.")
print(f"The nswer was {secret_num}")
break
print("Do you want to play again? (yes or no)")
if not input("> ").lower().startswith("y"):
break
print("Thanks for playing")
def get_secret_num():
""" returns a string made up of {NUM_DIGITS} uniqe random digits """
numbers = list("0123456789") # create a list of digits 0 - 9
shuffle(numbers) # shuffle them into random order
""" get the first {NUM_DIGITS} digits in the list for the secret number """
secret_num = ""
for i in range(NUM_DIGIT):
secret_num += str(numbers[i])
return secret_num
def get_clues(guess, secret_num):
""" returns a string with the pico, fermi, bagels clues for a guess and secret number pair """
if guess == secret_num:
return "You got it!"
clues = []
for i in range(len(guess)):
if guess[i] == secret_num[i]:
# a correct digit is in the correct place
clues.append("Fermi")
elif guess[i] in secret_num:
# a correct digit is in the incorrect place
clues.append("Pico")
if len(clues) == 0:
return "Bagels" # there are no correct digit at all
else:
# sort the clues into alphabetical order so their original order does not give information away
clues.sort()
return " ".join(clues)
if __name__ == "__main__":
main()
|
[
"random.shuffle"
] |
[((1816, 1832), 'random.shuffle', 'shuffle', (['numbers'], {}), '(numbers)\n', (1823, 1832), False, 'from random import shuffle\n')]
|
#!/usr/bin/env python
import os
import argparse
import sqlite3
from glob import glob
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--gtex-models-dir', type=str, required=True)
parser.add_argument('--variants-file-with-gtex-id', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
all_models = glob(os.path.join(args.gtex_models_dir, '*.db'))
assert len(all_models) == 49, len(all_models)
all_variants_ids = set()
for m in all_models:
print(f'Processing {m}')
with sqlite3.connect(m) as conn:
df = pd.read_sql('select varID from weights', conn)['varID']
all_variants_ids.update(set(df.values))
print(f'Read {len(all_variants_ids)} unique variants in GTEx models')
print(f'Reading {args.variants_file_with_gtex_id}')
variants_gtexid = pd.read_csv(args.variants_file_with_gtex_id, sep='\t', usecols=['panel_variant_id'], squeeze=True).dropna()
variants_gtexid = set(variants_gtexid.values)
print(f' Read {len(variants_gtexid)} variants')
print('Merging GTEx and other variants')
merged_variants = variants_gtexid.intersection(all_variants_ids)
print(f'Final number of merged variants: {len(merged_variants)}')
print(f'Coverage of GTEx variants: {(len(merged_variants) / len(all_variants_ids)) * 100:.2f}%')
print(f'Writing to {args.output_file}')
pd.DataFrame({'rsid': list(merged_variants)}).to_csv(args.output_file, index=False)
|
[
"sqlite3.connect",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"pandas.read_sql"
] |
[((117, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (140, 142), False, 'import argparse\n'), ((394, 436), 'os.path.join', 'os.path.join', (['args.gtex_models_dir', '"""*.db"""'], {}), "(args.gtex_models_dir, '*.db')\n", (406, 436), False, 'import os\n'), ((571, 589), 'sqlite3.connect', 'sqlite3.connect', (['m'], {}), '(m)\n', (586, 589), False, 'import sqlite3\n'), ((858, 961), 'pandas.read_csv', 'pd.read_csv', (['args.variants_file_with_gtex_id'], {'sep': '"""\t"""', 'usecols': "['panel_variant_id']", 'squeeze': '(True)'}), "(args.variants_file_with_gtex_id, sep='\\t', usecols=[\n 'panel_variant_id'], squeeze=True)\n", (869, 961), True, 'import pandas as pd\n'), ((612, 658), 'pandas.read_sql', 'pd.read_sql', (['"""select varID from weights"""', 'conn'], {}), "('select varID from weights', conn)\n", (623, 658), True, 'import pandas as pd\n')]
|
import os
import tensorflow as tf
from util import masked_softmax
class PolicyNetwork(object):
""" Policy Function approximator. """
def __init__(self, input_size, output_size, learning_rate=0.001, summaries_dir=None, scope="policy_estimator"):
with tf.variable_scope(scope):
# Writes Tensorboard summaries to disk
self.summary_writer = None
if summaries_dir:
summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.action = tf.placeholder(dtype=tf.int32, name="action")
self.target = tf.placeholder(dtype=tf.float64, name="target")
self.mask = tf.placeholder(dtype=tf.float64, shape=[1, output_size], name="mask")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=len(env.state),
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
# self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))
self.action_probs = tf.squeeze(masked_softmax(self.output_layer, self.mask))
self.picked_action_prob = tf.gather(self.action_probs, self.action)
# Loss and train op
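            # REINFORCE-style loss: -log pi(a|s) scaled by the supplied target (return / advantage)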
self.loss = -tf.log(self.picked_action_prob) * self.target
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, mask, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.action_probs, {self.state: state.reshape(1, -1),
self.mask: mask.reshape(1, -1)})
def update(self, state, target, action, mask, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target,
self.action: action, self.mask: mask.reshape(1, -1)}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class ValueNetwork(object):
""" Value Function approximator. """
def __init__(self, input_size, output_size=1, learning_rate=0.01, scope="value_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.target = tf.placeholder(dtype=tf.float64, name="target")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=input_size,
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1)})
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
class ObjectAwareRewardNetwork(object):
""" Object-aware Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.object = tf.placeholder(shape=[], dtype=tf.int32, name="person_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
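            # the question (action) and person (object) indices are one-hot encoded and
            # appended to the state vector before the fully-connected layer below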
object_vec = tf.one_hot(self.object, input_size, dtype=tf.float64)
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
concat_vec = tf.concat([object_vec, action_vec], 0)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(concat_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, object, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action, self.object: object})
def update(self, state, action, object, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.object: object, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class RewardNetwork(object):
""" Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(action_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action})
def update(self, state, action, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
|
[
"tensorflow.one_hot",
"os.path.exists",
"tensorflow.variable_scope",
"os.makedirs",
"tensorflow.squared_difference",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.get_default_session",
"tensorflow.log",
"tensorflow.global_variables",
"util.masked_softmax",
"tensorflow.concat",
"tensorflow.train.get_global_step",
"tensorflow.gather",
"tensorflow.expand_dims",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.FileWriter",
"tensorflow.squeeze"
] |
[((269, 293), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (286, 293), True, 'import tensorflow as tf\n'), ((699, 768), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'shape': '[1, input_size]', 'name': '"""state"""'}), "(dtype=tf.float64, shape=[1, input_size], name='state')\n", (713, 768), True, 'import tensorflow as tf\n'), ((795, 840), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'name': '"""action"""'}), "(dtype=tf.int32, name='action')\n", (809, 840), True, 'import tensorflow as tf\n'), ((867, 914), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'name': '"""target"""'}), "(dtype=tf.float64, name='target')\n", (881, 914), True, 'import tensorflow as tf\n'), ((939, 1008), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'shape': '[1, output_size]', 'name': '"""mask"""'}), "(dtype=tf.float64, shape=[1, output_size], name='mask')\n", (953, 1008), True, 'import tensorflow as tf\n'), ((1289, 1391), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'self.state', 'num_outputs': 'output_size', 'activation_fn': 'None'}), '(inputs=self.state, num_outputs=\n output_size, activation_fn=None)\n', (1322, 1391), True, 'import tensorflow as tf\n'), ((1643, 1684), 'tensorflow.gather', 'tf.gather', (['self.action_probs', 'self.action'], {}), '(self.action_probs, self.action)\n', (1652, 1684), True, 'import tensorflow as tf\n'), ((1819, 1870), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1841, 1870), True, 'import tensorflow as tf\n'), ((2062, 2086), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2084, 2086), True, 'import tensorflow as tf\n'), ((2328, 2352), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2350, 2352), True, 'import tensorflow as tf\n'), ((2659, 2683), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2681, 2683), True, 'import tensorflow as tf\n'), ((2720, 2741), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2739, 2741), True, 'import tensorflow as tf\n'), ((2989, 3013), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (3006, 3013), True, 'import tensorflow as tf\n'), ((3040, 3109), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'shape': '[1, input_size]', 'name': '"""state"""'}), "(dtype=tf.float64, shape=[1, input_size], name='state')\n", (3054, 3109), True, 'import tensorflow as tf\n'), ((3136, 3183), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'name': '"""target"""'}), "(dtype=tf.float64, name='target')\n", (3150, 3183), True, 'import tensorflow as tf\n'), ((3457, 3559), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'self.state', 'num_outputs': 'output_size', 'activation_fn': 'None'}), '(inputs=self.state, num_outputs=\n output_size, activation_fn=None)\n', (3490, 3559), True, 'import tensorflow as tf\n'), ((3639, 3668), 'tensorflow.squeeze', 'tf.squeeze', (['self.output_layer'], {}), '(self.output_layer)\n', (3649, 3668), True, 'import tensorflow as tf\n'), ((3693, 3748), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.value_estimate', 'self.target'], {}), '(self.value_estimate, self.target)\n', (3714, 3748), True, 'import tensorflow as 
tf\n'), ((3779, 3830), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3801, 3830), True, 'import tensorflow as tf\n'), ((4016, 4040), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (4038, 4040), True, 'import tensorflow as tf\n'), ((4194, 4218), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (4216, 4218), True, 'import tensorflow as tf\n'), ((4599, 4623), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (4616, 4623), True, 'import tensorflow as tf\n'), ((4650, 4719), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, input_size]', 'dtype': 'tf.float64', 'name': '"""state"""'}), "(shape=[1, input_size], dtype=tf.float64, name='state')\n", (4664, 4719), True, 'import tensorflow as tf\n'), ((4746, 4807), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[]', 'dtype': 'tf.int32', 'name': '"""question_idx"""'}), "(shape=[], dtype=tf.int32, name='question_idx')\n", (4760, 4807), True, 'import tensorflow as tf\n'), ((4834, 4893), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[]', 'dtype': 'tf.int32', 'name': '"""person_idx"""'}), "(shape=[], dtype=tf.int32, name='person_idx')\n", (4848, 4893), True, 'import tensorflow as tf\n'), ((4920, 4967), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'name': '"""target"""'}), "(dtype=tf.float64, name='target')\n", (4934, 4967), True, 'import tensorflow as tf\n'), ((4994, 5047), 'tensorflow.one_hot', 'tf.one_hot', (['self.object', 'input_size'], {'dtype': 'tf.float64'}), '(self.object, input_size, dtype=tf.float64)\n', (5004, 5047), True, 'import tensorflow as tf\n'), ((5073, 5126), 'tensorflow.one_hot', 'tf.one_hot', (['self.action', 'action_num'], {'dtype': 'tf.float64'}), '(self.action, action_num, dtype=tf.float64)\n', (5083, 5126), True, 'import tensorflow as tf\n'), ((5152, 5190), 'tensorflow.concat', 'tf.concat', (['[object_vec, action_vec]', '(0)'], {}), '([object_vec, action_vec], 0)\n', (5161, 5190), True, 'import tensorflow as tf\n'), ((5462, 5491), 'tensorflow.squeeze', 'tf.squeeze', (['self.output_layer'], {}), '(self.output_layer)\n', (5472, 5491), True, 'import tensorflow as tf\n'), ((5516, 5571), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.value_estimate', 'self.target'], {}), '(self.value_estimate, self.target)\n', (5537, 5571), True, 'import tensorflow as tf\n'), ((5602, 5653), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5624, 5653), True, 'import tensorflow as tf\n'), ((5855, 5879), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (5877, 5879), True, 'import tensorflow as tf\n'), ((6091, 6115), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (6113, 6115), True, 'import tensorflow as tf\n'), ((6370, 6394), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (6392, 6394), True, 'import tensorflow as tf\n'), ((6431, 6452), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6450, 6452), True, 'import tensorflow as tf\n'), ((6713, 6737), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6730, 6737), True, 'import tensorflow as tf\n'), ((6764, 6833), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, input_size]', 'dtype': 'tf.float64', 
'name': '"""state"""'}), "(shape=[1, input_size], dtype=tf.float64, name='state')\n", (6778, 6833), True, 'import tensorflow as tf\n'), ((6860, 6921), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[]', 'dtype': 'tf.int32', 'name': '"""question_idx"""'}), "(shape=[], dtype=tf.int32, name='question_idx')\n", (6874, 6921), True, 'import tensorflow as tf\n'), ((6948, 6995), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float64', 'name': '"""target"""'}), "(dtype=tf.float64, name='target')\n", (6962, 6995), True, 'import tensorflow as tf\n'), ((7022, 7075), 'tensorflow.one_hot', 'tf.one_hot', (['self.action', 'action_num'], {'dtype': 'tf.float64'}), '(self.action, action_num, dtype=tf.float64)\n', (7032, 7075), True, 'import tensorflow as tf\n'), ((7346, 7375), 'tensorflow.squeeze', 'tf.squeeze', (['self.output_layer'], {}), '(self.output_layer)\n', (7356, 7375), True, 'import tensorflow as tf\n'), ((7400, 7455), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.value_estimate', 'self.target'], {}), '(self.value_estimate, self.target)\n', (7421, 7455), True, 'import tensorflow as tf\n'), ((7486, 7537), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7508, 7537), True, 'import tensorflow as tf\n'), ((7731, 7755), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7753, 7755), True, 'import tensorflow as tf\n'), ((7938, 7962), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7960, 7962), True, 'import tensorflow as tf\n'), ((8196, 8220), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8218, 8220), True, 'import tensorflow as tf\n'), ((8257, 8278), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8276, 8278), True, 'import tensorflow as tf\n'), ((638, 672), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_dir'], {}), '(summary_dir)\n', (659, 672), True, 'import tensorflow as tf\n'), ((1559, 1603), 'util.masked_softmax', 'masked_softmax', (['self.output_layer', 'self.mask'], {}), '(self.output_layer, self.mask)\n', (1573, 1603), False, 'from util import masked_softmax\n'), ((526, 553), 'os.path.exists', 'os.path.exists', (['summary_dir'], {}), '(summary_dir)\n', (540, 553), False, 'import os\n'), ((575, 599), 'os.makedirs', 'os.makedirs', (['summary_dir'], {}), '(summary_dir)\n', (586, 599), False, 'import os\n'), ((1743, 1774), 'tensorflow.log', 'tf.log', (['self.picked_action_prob'], {}), '(self.picked_action_prob)\n', (1749, 1774), True, 'import tensorflow as tf\n'), ((1963, 1989), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (1987, 1989), True, 'import tensorflow as tf\n'), ((3923, 3949), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (3947, 3949), True, 'import tensorflow as tf\n'), ((5746, 5772), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (5770, 5772), True, 'import tensorflow as tf\n'), ((7630, 7656), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (7654, 7656), True, 'import tensorflow as tf\n'), ((5305, 5334), 'tensorflow.expand_dims', 'tf.expand_dims', (['concat_vec', '(0)'], {}), '(concat_vec, 0)\n', (5319, 5334), True, 'import tensorflow as tf\n'), ((7189, 7218), 'tensorflow.expand_dims', 'tf.expand_dims', (['action_vec', '(0)'], {}), '(action_vec, 0)\n', (7203, 7218), True, 
'import tensorflow as tf\n')]
|
from setuptools import setup, find_packages
setup(name='getDB',
version='0.0.4',
description="This module can be used to download HMDB and KEGG database.",
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jaspershen/getDB',
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=['requests', 'pandas', 'bs4', 'numpy'],
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
)
|
[
"setuptools.find_packages"
] |
[((360, 375), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (373, 375), False, 'from setuptools import setup, find_packages\n')]
|
import tarfile
import click
import requests
from odc.io.tar import tar_mode, add_txt_file
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from urllib.parse import urlparse
from thredds_crawler.crawl import Crawl
def download(url):
parsed_uri = urlparse(url)
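    # strip the scheme ('http://' or 'https://') so the local file path mirrors the remote layout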
target_filename = url[len(parsed_uri.scheme + '://'):]
return requests.get(url).content, target_filename
@click.command('thredds-to-tar')
@click.option('--thredds_catalogue', '-c', type=str, required=True, help="The THREDDS catalogue endpoint")
@click.option('--skips', '-s', type=str, multiple=True,
help="Pattern to ignore when THREDDS crawling")
@click.option('--select', '-t', type=str, required=True,
help="Target file pattern to match for yaml")
@click.option('--workers', '-w', type=int, default=4, help="Number of thredds crawler workers to use")
@click.option('--outfile', type=str, default="metadata.tar.gz", help="Sets the output file name")
def cli(thredds_catalogue,
skips,
select,
workers,
outfile):
""" Download Metadata from THREDDS server to tarball
Example:
\b
Download files in directory that match `*yaml` and store them as a tar
> thredds-to-tar -c "http://dapds00.nci.org.au/thredds/catalog/if87/2018-11-29/"
-t ".*ARD-METADATA.yaml" -s '.*NBAR.*' -s '.*SUPPLEMENTARY.*'
-s '.*NBART.*' -s '.*/QA/.*' -w 8 --outfile 2018-11-29.tar.gz
"""
user_skips = Crawl.SKIPS
for skip in skips:
user_skips = user_skips+[skip]
print("Searching {thredds_catalogue} for matching files".format(thredds_catalogue=thredds_catalogue))
results = Crawl(thredds_catalogue + '/catalog.xml', select=[select], skip=user_skips, workers=workers).datasets
print("Found {0} metadata files".format(str(len(results))))
urls = [service['url'] for dataset in results
for service in dataset.services
if service['service'].lower() == 'httpserver']
# use a threadpool to download from thredds
pool = ThreadPool(workers)
yamls = pool.map(partial(download), urls)
pool.close()
pool.join()
# jam it all in a tar
tar_opts = dict(name=outfile, mode='w' + tar_mode(gzip=True, xz=True, is_pipe=False))
with tarfile.open(**tar_opts) as tar:
for yaml in yamls:
add_txt_file(tar=tar, content=yaml[0], fname=yaml[1])
print("Done!")
if __name__ == '__main__':
cli()
|
[
"odc.io.tar.tar_mode",
"tarfile.open",
"odc.io.tar.add_txt_file",
"urllib.parse.urlparse",
"click.option",
"requests.get",
"functools.partial",
"click.command",
"thredds_crawler.crawl.Crawl",
"multiprocessing.dummy.Pool"
] |
[((417, 448), 'click.command', 'click.command', (['"""thredds-to-tar"""'], {}), "('thredds-to-tar')\n", (430, 448), False, 'import click\n'), ((450, 560), 'click.option', 'click.option', (['"""--thredds_catalogue"""', '"""-c"""'], {'type': 'str', 'required': '(True)', 'help': '"""The THREDDS catalogue endpoint"""'}), "('--thredds_catalogue', '-c', type=str, required=True, help=\n 'The THREDDS catalogue endpoint')\n", (462, 560), False, 'import click\n'), ((557, 664), 'click.option', 'click.option', (['"""--skips"""', '"""-s"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Pattern to ignore when THREDDS crawling"""'}), "('--skips', '-s', type=str, multiple=True, help=\n 'Pattern to ignore when THREDDS crawling')\n", (569, 664), False, 'import click\n'), ((675, 781), 'click.option', 'click.option', (['"""--select"""', '"""-t"""'], {'type': 'str', 'required': '(True)', 'help': '"""Target file pattern to match for yaml"""'}), "('--select', '-t', type=str, required=True, help=\n 'Target file pattern to match for yaml')\n", (687, 781), False, 'import click\n'), ((792, 898), 'click.option', 'click.option', (['"""--workers"""', '"""-w"""'], {'type': 'int', 'default': '(4)', 'help': '"""Number of thredds crawler workers to use"""'}), "('--workers', '-w', type=int, default=4, help=\n 'Number of thredds crawler workers to use')\n", (804, 898), False, 'import click\n'), ((895, 996), 'click.option', 'click.option', (['"""--outfile"""'], {'type': 'str', 'default': '"""metadata.tar.gz"""', 'help': '"""Sets the output file name"""'}), "('--outfile', type=str, default='metadata.tar.gz', help=\n 'Sets the output file name')\n", (907, 996), False, 'import click\n'), ((286, 299), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (294, 299), False, 'from urllib.parse import urlparse\n'), ((2078, 2097), 'multiprocessing.dummy.Pool', 'ThreadPool', (['workers'], {}), '(workers)\n', (2088, 2097), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((1697, 1793), 'thredds_crawler.crawl.Crawl', 'Crawl', (["(thredds_catalogue + '/catalog.xml')"], {'select': '[select]', 'skip': 'user_skips', 'workers': 'workers'}), "(thredds_catalogue + '/catalog.xml', select=[select], skip=user_skips,\n workers=workers)\n", (1702, 1793), False, 'from thredds_crawler.crawl import Crawl\n'), ((2119, 2136), 'functools.partial', 'partial', (['download'], {}), '(download)\n', (2126, 2136), False, 'from functools import partial\n'), ((2303, 2327), 'tarfile.open', 'tarfile.open', ([], {}), '(**tar_opts)\n', (2315, 2327), False, 'import tarfile\n'), ((371, 388), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (383, 388), False, 'import requests\n'), ((2375, 2428), 'odc.io.tar.add_txt_file', 'add_txt_file', ([], {'tar': 'tar', 'content': 'yaml[0]', 'fname': 'yaml[1]'}), '(tar=tar, content=yaml[0], fname=yaml[1])\n', (2387, 2428), False, 'from odc.io.tar import tar_mode, add_txt_file\n'), ((2249, 2292), 'odc.io.tar.tar_mode', 'tar_mode', ([], {'gzip': '(True)', 'xz': '(True)', 'is_pipe': '(False)'}), '(gzip=True, xz=True, is_pipe=False)\n', (2257, 2292), False, 'from odc.io.tar import tar_mode, add_txt_file\n')]
|
from sklearn.linear_model import Ridge
class MachineLearning():
def __init__(self):
self.model = None
def train_model(self, X,y):
lr = Ridge(alpha=0.5)
lr.fit(X,y)
print(lr)
self.model = lr
def predict(self, X):
preds = self.model.predict(X)
return preds
|
[
"sklearn.linear_model.Ridge"
] |
[((162, 178), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (167, 178), False, 'from sklearn.linear_model import Ridge\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME> (<EMAIL>) 11/03/2020
@description: PyDash Project
The ConnectionHandler is a Singleton class implementation.
The class is responsible for retrieving segments from the web server.
It also implements a traffic shaping approach.
"""
from base.simple_module import SimpleModule
from base.message import Message, MessageKind, SSMessage
from base.configuration_parser import ConfigurationParser
from player.parser import *
import http.client
import time
from scipy.stats import expon
from base.timer import Timer
import seaborn as sns
import matplotlib.pyplot as plt
class ConnectionHandler(SimpleModule):
def __init__(self, id):
SimpleModule.__init__(self, id)
self.initial_time = 0
self.qi = []
# for traffic shaping
config_parser = ConfigurationParser.get_instance()
self.traffic_shaping_interval = int(config_parser.get_parameter('traffic_shaping_profile_interval'))
self.traffic_shaping_seed = int(config_parser.get_parameter('traffic_shaping_seed'))
self.traffic_shaping_values = []
        # mark the current traffic shaping interval
self.current_traffic_shaping_interval = 0
self.traffic_shaping_sequence = []
# traffic shaping sequence position
self.tss_position = 0
# traffic shaping values position
self.tsv_position = 0
token = config_parser.get_parameter('traffic_shaping_profile_sequence')
for i in range(len(token)):
if token[i] == 'L':
self.traffic_shaping_sequence.append(0)
elif token[i] == 'M':
self.traffic_shaping_sequence.append(1)
elif token[i] == 'H':
self.traffic_shaping_sequence.append(2)
self.timer = Timer.get_instance()
def get_traffic_shaping_positions(self):
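        # advance the profile (sequence) and sample (value) positions whenever a new shaping interval has started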
current_tsi = self.timer.get_current_time() // self.traffic_shaping_interval
if current_tsi > self.current_traffic_shaping_interval:
self.current_traffic_shaping_interval = current_tsi
self.tss_position = (self.tss_position + 1) % len(self.traffic_shaping_sequence)
self.tsv_position = (self.tsv_position + 1) % len(self.traffic_shaping_values[0])
return (self.tss_position, self.tsv_position)
def initialize(self):
# self.send_down(Message(MessageKind.SEGMENT_REQUEST, '<NAME>'))
pass
def bandwidth_limitation(self, package_size=0):
if package_size == 0:
return
tsp = self.get_traffic_shaping_positions()
target_throughput = self.traffic_shaping_values[self.traffic_shaping_sequence[tsp[0]]][tsp[1]]
print(f'Execution Time {self.timer.get_current_time()} > target throughput: {target_throughput} - profile: ({self.traffic_shaping_sequence[tsp[0]]}, {tsp[1]})')
rtt = time.perf_counter() - self.initial_time
throughput = package_size / rtt
        # we did not exceed the target throughput, so no limitation is needed
if target_throughput >= throughput:
return
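        # wait just long enough so that package_size / (rtt + waiting_time) matches the target throughput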
waiting_time = (package_size - (target_throughput * rtt)) / target_throughput
time.sleep(waiting_time)
def finalization(self):
pass
def handle_xml_request(self, msg):
if not 'http://' in msg.get_payload():
            raise ValueError('url_mpd parameter should start with http://')
self.initial_time = time.perf_counter()
url_tokens = msg.get_payload().split('/')[2:]
port = '80'
host_name = url_tokens[0]
path_name = '/' + '/'.join(url_tokens[1:])
mdp_file = ''
try:
connection = http.client.HTTPConnection(host_name, port)
connection.request('GET', path_name)
mdp_file = connection.getresponse().read().decode()
connection.close()
except Exception as err:
print('> Houston, we have a problem!')
            print(f'> trying to connect to: {msg.get_payload()}')
print(err)
exit(-1)
msg = Message(MessageKind.XML_RESPONSE, mdp_file)
msg.add_bit_length(8 * len(mdp_file))
parsed_mpd = parse_mpd(msg.get_payload())
self.qi = parsed_mpd.get_qi()
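        # pre-draw exponential throughput samples used by the three traffic shaping
        # profiles (low / medium / high), anchored at bitrates taken from the parsed MPD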
increase_factor = 1
low = round(self.qi[len(self.qi) - 1] * increase_factor)
medium = round(self.qi[(len(self.qi) // 2) - 1] * increase_factor)
high = round(self.qi[0] * increase_factor)
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=low, size=1000, random_state=self.traffic_shaping_seed))
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=medium, size=1000, random_state=self.traffic_shaping_seed))
self.traffic_shaping_values.append(
expon.rvs(scale=1, loc=high, size=1000, random_state=self.traffic_shaping_seed))
self.send_up(msg)
def handle_segment_size_request(self, msg):
port = '80'
host_name = msg.get_host_name()
path_name = msg.get_url()
ss_file = ''
self.initial_time = time.perf_counter()
print(f'Execution Time {self.timer.get_current_time()} > selected QI: {self.qi.index(msg.get_quality_id())}')
try:
connection = http.client.HTTPConnection(host_name, port)
connection.request('GET', path_name)
ss_file = connection.getresponse().read()
connection.close()
except Exception as err:
print('> Houston, we have a problem!')
            print(f'> trying to connect to: {msg.get_payload()}')
print(err)
exit(-1)
msg.set_kind(MessageKind.SEGMENT_RESPONSE)
decoded = False
try:
ss_file = ss_file.decode()
except UnicodeDecodeError:
            # if it wasn't possible to decode(), the payload is a binary segment (ss)
msg.add_bit_length(8 * len(ss_file))
self.bandwidth_limitation(msg.get_bit_length())
decoded = True
if not decoded and '404 Not Found' in ss_file:
msg.set_found(False)
self.send_up(msg)
def handle_segment_size_response(self, msg):
pass
def handle_xml_response(self, msg):
pass
|
[
"base.configuration_parser.ConfigurationParser.get_instance",
"time.perf_counter",
"time.sleep",
"base.simple_module.SimpleModule.__init__",
"base.timer.Timer.get_instance",
"base.message.Message",
"scipy.stats.expon.rvs"
] |
[((678, 709), 'base.simple_module.SimpleModule.__init__', 'SimpleModule.__init__', (['self', 'id'], {}), '(self, id)\n', (699, 709), False, 'from base.simple_module import SimpleModule\n'), ((816, 850), 'base.configuration_parser.ConfigurationParser.get_instance', 'ConfigurationParser.get_instance', ([], {}), '()\n', (848, 850), False, 'from base.configuration_parser import ConfigurationParser\n'), ((1795, 1815), 'base.timer.Timer.get_instance', 'Timer.get_instance', ([], {}), '()\n', (1813, 1815), False, 'from base.timer import Timer\n'), ((3153, 3177), 'time.sleep', 'time.sleep', (['waiting_time'], {}), '(waiting_time)\n', (3163, 3177), False, 'import time\n'), ((3414, 3433), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3431, 3433), False, 'import time\n'), ((4053, 4096), 'base.message.Message', 'Message', (['MessageKind.XML_RESPONSE', 'mdp_file'], {}), '(MessageKind.XML_RESPONSE, mdp_file)\n', (4060, 4096), False, 'from base.message import Message, MessageKind, SSMessage\n'), ((5084, 5103), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5101, 5103), False, 'import time\n'), ((2871, 2890), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2888, 2890), False, 'import time\n'), ((4509, 4587), 'scipy.stats.expon.rvs', 'expon.rvs', ([], {'scale': '(1)', 'loc': 'low', 'size': '(1000)', 'random_state': 'self.traffic_shaping_seed'}), '(scale=1, loc=low, size=1000, random_state=self.traffic_shaping_seed)\n', (4518, 4587), False, 'from scipy.stats import expon\n'), ((4645, 4731), 'scipy.stats.expon.rvs', 'expon.rvs', ([], {'scale': '(1)', 'loc': 'medium', 'size': '(1000)', 'random_state': 'self.traffic_shaping_seed'}), '(scale=1, loc=medium, size=1000, random_state=self.\n traffic_shaping_seed)\n', (4654, 4731), False, 'from scipy.stats import expon\n'), ((4784, 4863), 'scipy.stats.expon.rvs', 'expon.rvs', ([], {'scale': '(1)', 'loc': 'high', 'size': '(1000)', 'random_state': 'self.traffic_shaping_seed'}), '(scale=1, loc=high, size=1000, random_state=self.traffic_shaping_seed)\n', (4793, 4863), False, 'from scipy.stats import expon\n')]
|
import re
import yaml
import logging
logger = logging.getLogger(__name__)
from pylatexenc.macrospec import MacroSpec, ParsedMacroArgs, MacroStandardArgsParser
from pylatexenc import latexwalker
from latexpp.macro_subst_helper import MacroSubstHelper
from latexpp.fix import BaseFix
# parse entropy macros etc.
_qitobjdefs = yaml.safe_load(r"""
stdset:
HH:
type: Hbase
Hzero:
type: Hbase
sub: '\mathrm{max},0'
Hmin:
type: Hbase
sub: '\mathrm{min}'
Hmaxf:
type: Hbase
sub: '\mathrm{max}'
Hfn:
type: Hfnbase
Dmax:
type: Dbase
sub: '\mathrm{max}'
Dminz:
type: Dbase
sub: '0'
Dminf:
type: Dbase
sub: '\mathrm{min}'
Dr:
type: Dbase
sub: '\mathrm{Rob}'
DHyp:
type: Dbase
sub: '\mathrm{H}'
Dhyp:
type: Dbase
sub: '\mathrm{h}'
DCoh:
type: DCohbase
DCohx:
type: DCohbase
DD:
type: DD
""")
baseqitobjs = yaml.safe_load("""
IdentProc:
type: IdentProc
ee:
type: ee
""")
_fixed_repl = {
'DSym': lambda self: self.DSym,
'HSym': lambda self: self.HSym,
}
class ExpandQitObjects(BaseFix):
r"""
Expand the definitions for the "QIT Objects" that are defined via the
{phfqit} package.
If applied along with :py:class:`latexpp.fixes.pkg.phfqit.ExpandMacros`, the
dependency on package {phfqit} should be removed.
Arguments:
    - `qitobjs`: a dictionary of custom "QIT Objects" to expand (an illustrative
      example is given at the end of this argument list). The dictionary
      has the structure ``{macroname: qitobjspec, ...}``, where:
- `macroname` is the name of the macro representing this QIT object (no
leading backslash);
- `qitobjspec` is a dictionary with the following structure::
{
'type': <type>,
'sym': <sym>
<...>
}
      The `<type>` is a string that must be one of the following QIT object
      types: 'Hbase', 'Hfnbase', 'DD', 'Dbase', 'Dalpha', 'DCohbase', 'IdentProc', 'ee'.
This determines on one hand how the arguments to the macro are parsed
and on the other hand the template latex code that will serve as a
replacement for the QIT object invocation.
The `<sym>` is any string that will be used to override the default
symbol for this qit object type. The 'sym' key can be left out to use
the default symbol for the qit object.
Depending on `<type>`, you can specify further keys that specify how the
qit object is rendered (specified alongside `type: <type>` above, where
`<...>` stands):
- `<type>='Hbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the entropy object. This can be any
LaTeX code.
- `<type>='Hfnbase'`: You may further specify ``'sub': <sub>`` and
``'sup': <sup>`` which specifies the subscript and superscript to add
to the entropy object. Both can be any LaTeX code.
- `<type>='Dbase'`: You may further specify ``'sub': <sub>`` which
specifies the subscript to add to the relative entropy object. This
can be any LaTeX code. You can also specify 'default_epsilon' to give
a default value of the epsilon argument (any LaTeX code).
- `<type>='Dalpha'`: You can also specify 'default_alpha' and
'default_epsilon' to give a default value for these arguments (any
LaTeX code).
- `<type>='DD'`: There are no further keys you can specify.
- `<type>='DCohbase'`: There are no further keys you can specify.
- `<type>='IdentProc'`: There are no further keys you can specify.
- `<type>='ee'`: There are no further keys you can specify.
- `qitobjdef`: a list of built-in QIT object sets to use, designated by
builtin set name. Currently only the set named "stdset" is available,
i.e., you may use ``qitobjdef=[]`` (don't use built-in QIT objects) or
``qitobjdef=['stdset']`` (use built-in QIT objects).
- `HSym`: the default symbol to use for entropy-like QIT objects. Defaults
to 'H'
- `DSym`: the default symbol to use for relative-entropy-like QIT objects.
Defaults to 'D'
- `DCSym`: the default symbol to use for coherent-relative-entropy-like QIT
objects. Defaults to '\\hat{D}'
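
    For illustration, a hypothetical custom entropy-like object (the macro name
    ``Halpha``, its symbol and subscript are chosen here only as an example)
    could be declared as::

        ExpandQitObjects(qitobjs={
            'Halpha': {'type': 'Hfnbase', 'sym': 'H', 'sub': r'\alpha'},
        })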
"""
def __init__(self, qitobjs=dict(), qitobjdef=['stdset'],
HSym='H', DSym='D', DCSym=r'\hat{D}'):
super().__init__()
self.qitobjs = dict(baseqitobjs)
for qitobjname in qitobjdef:
self.qitobjs.update(_qitobjdefs[qitobjname])
self.qitobjs.update(qitobjs)
self.HSym = HSym
self.DSym = DSym
self.DCSym = DCSym
def specs(self, **kwargs):
return dict(
macros= (
MacroSpec(mname, args_parser=PhfQitObjectArgsParser(self.qitargspec(m['type'])))
for mname, m in self.qitobjs.items()
)
)
def qitargspec(self, t):
return {
"IdentProc": "`[[{",
"ee": "^",
"Hbase": "`[[{[",
"Hfnbase": "`(",
"DD": "_^`{{",
"Dbase": "[`{{",
"Dalpha": "[[`{{",
"DCohbase": "[`{{{{{",
}.get(t)
def fix_node(self, n, **kwargs):
if n.isNodeType(latexwalker.LatexMacroNode) and n.macroname in _fixed_repl:
return _fixed_repl[n.macroname](self)
if not n.isNodeType(latexwalker.LatexMacroNode) or n.macroname not in self.qitobjs:
return None
m = self.qitobjs[n.macroname]
fixs = self.fix_qitobj(m, n)
#logger.debug(" --> %r", fixs)
return fixs
def fix_qitobj(self, m, n):
#logger.debug("fix_qitobj: m=%r, n=%r", m, n)
if m['type'] == 'IdentProc':
nsizespec, nsysA, nsysB, narg = n.nodeargd.argnlist
sym = m.get('sym', r'\mathrm{id}')
subscript = ''
A, B = '', ''
if nsysA is not None:
A = self.preprocess_contents_latex(nsysA)
if nsysB is not None:
B = self.preprocess_contents_latex(nsysB)
if A:
if B:
subscript = A + r'\to ' + B
else:
subscript = A
text = '{' + sym + '}'
if subscript:
text += '_{' + subscript + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'ee':
narg, = n.nodeargd.argnlist
sym = m.get('sym', r'e')
return '{'+sym+'}^{' + self.preprocess_contents_latex(narg) + '}'
if m['type'] == 'Hbase':
nsizespec, nstate, nepsilon, ntargetsys, ncondsys = n.nodeargd.argnlist
sym = m.get('sym', self.HSym)
sub = m.get('sub', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od
text += self.preprocess_contents_latex(ntargetsys)
if ncondsys is not None:
text += r'\,' + md + r'\,' + self.preprocess_contents_latex(ncondsys)
text += cd
if nstate is not None:
text += r'_{' + self.preprocess_contents_latex(nstate) + '}'
return text
if m['type'] == 'Hfnbase':
nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Hfnbase':
nsub, nsup, nsizespec, narg = n.nodeargd.argnlist
sub = m.get('sub', None)
sup = m.get('sup', None)
sym = m.get('sym', self.HSym)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if sup:
text += '^{' + sup + '}'
nargcontents = self.preprocess_contents_latex(narg)
if nargcontents:
(od, md, cd) = _delims(nsizespec, '(', '|', ')')
text += od + nargcontents + cd
return text
if m['type'] == 'Dbase':
nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sub = m.get('sub', None)
sym = m.get('sym', self.DSym)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if sub:
text += '_{' + sub + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'Dalpha':
nalpha, nepsilon, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
default_alpha = m.get('default_alpha', None)
default_epsilon = m.get('default_epsilon', None)
text = '{' + sym + '}'
if nalpha is not None:
text += '_{' + self.preprocess_contents_latex(nalpha) + '}'
elif default_alpha:
text += '_{' + default_alpha + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
elif default_epsilon:
text += '^{' + default_epsilon + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DD':
nsub, nsup, nsizespec, nstate, nrel = n.nodeargd.argnlist
sym = m.get('sym', self.DSym)
text = '{' + sym + '}'
if nsub is not None:
text += '_{' + self.preprocess_contents_latex(nsub) + '}'
if nsup is not None:
text += '^{' + self.preprocess_contents_latex(nsup) + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
nstatecontents = self.preprocess_contents_latex(nstate)
nrelcontents = self.preprocess_contents_latex(nrel)
if nstatecontents or nrelcontents:
text += od + nstatecontents + r'\,' + md + r'\,' \
+ nrelcontents + cd
return text
if m['type'] == 'DCohbase':
nepsilon, nsizespec, nstate, nX, nXp, nGX, nGXp = n.nodeargd.argnlist
sym = m.get('sym', self.DCSym)
process_arg_subscripts = m.get('process_arg_subscripts', False)
text = '{' + sym + '}'
tX = self.preprocess_contents_latex(nX)
tXp = self.preprocess_contents_latex(nXp)
if tX and tXp:
text += '_{' + tX + r'\to ' + tXp + '}'
elif tX:
text += '_{' + tX + '}'
elif tXp:
text += '_{' + tXp + '}'
if nepsilon is not None:
text += '^{' + self.preprocess_contents_latex(nepsilon) + '}'
(od, md, cd) = _delims(nsizespec, '(', r'\Vert', ')')
if nstate.isNodeType(latexwalker.LatexGroupNode) \
and len(nstate.nodelist) \
and nstate.nodelist[0].isNodeType(latexwalker.LatexCharsNode) \
and nstate.nodelist[0].chars.lstrip().startswith('*'):
# remove '*'
statelatex = self.preprocess_contents_latex(nstate).lstrip(' \t*')
else:
if process_arg_subscripts:
statelatex = self.preprocess_contents_latex(nstate) + '_{' \
+ tX + r'\to ' + tXp + '}'
else:
statelatex = self.preprocess_contents_latex(nstate) + '_{' + tXp \
+ 'R_{' + tX + '}}'
text += od + statelatex + r'\,' + md + r'\,' + \
self.preprocess_contents_latex(nGX) + r',\,' \
+ self.preprocess_contents_latex(nGXp) + cd
return text
raise ValueError("Unknown phfqit macro type: {!r}".format(m))
def _delims(sizenode, opendelim, middelim, closedelim):
if sizenode is None:
return (opendelim, middelim, closedelim)
if sizenode.isNodeType(latexwalker.LatexGroupNode):
assert( len(sizenode.nodelist) == 1 )
sizenode = sizenode.nodelist[0]
if sizenode.isNodeType(latexwalker.LatexCharsNode) and sizenode.chars == '*':
return (r'\mathopen{}\left'+opendelim,
r'\mathclose{}\middle'+middelim+r'\mathopen{}',
r'\right'+closedelim+r'\mathclose{}')
if sizenode.isNodeType(latexwalker.LatexMacroNode):
mname = sizenode.macroname
return (r'\mathopen{}'+'\\'+mname+'l '+opendelim, # \bigl(
r'\mathopen{}'+'\\'+mname+' '+middelim, # \big|
r'\mathopen{}'+'\\'+mname+'r '+closedelim) # \bigr)
raise ValueError("unexpected optional sizing node : "+repr(sizenode))
def _delimtype(sizenode):
if sizenode is None:
return None
if sizenode.isNodeType(latexwalker.LatexGroupNode):
assert( len(sizenode.nodelist) == 1 )
sizenode = sizenode.nodelist[0]
if sizenode.isNodeType(latexwalker.LatexCharsNode) and sizenode.chars == '*':
return '*'
if sizenode.isNodeType(latexwalker.LatexMacroNode):
return '\\'+sizenode.macroname
mathtools_delims_macros = {
'abs': (r'\lvert', r'\rvert'),
'norm': (r'\lVert', r'\rVert'),
'avg': (r'\langle', r'\rangle'),
'ket': (r'\lvert', r'{%(1)s}', r'\rangle'),
'bra': (r'\langle', r'{%(1)s}', r'\rvert'),
'braket': (r'\langle', r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert\phfqitKetsBarSpace{%(2)s}',
r'\rangle'),
'ketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(2)s}',
r'\rvert'),
'proj': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(1)s}',
r'\rvert'),
'matrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(3)s}',
r'\rangle'),
'dmatrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(1)s}',
r'\rangle'),
'innerprod': (r'\langle',
r'{%(1)s}%(phfqitBeforeCommaSpace)s,%(phfqitAfterCommaSpace)s{%(2)s}',
r'\rangle'),
'oket': (r'\lvert', r'{%(1)s}', r'\rrangle'),
'obra': (r'\llangle', r'{%(1)s}', r'\rvert'),
'obraket': (r'\llangle', r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}',
r'\rrangle'),
'oketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(2)s}',
r'\rvert'),
'oproj': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(1)s}',
r'\rvert'),
'omatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(3)s}',
r'\rrangle'),
'odmatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(1)s}',
r'\rrangle'),
'intervalc': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
'intervalo': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervalco': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervaloc': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
}
def gate(x):
return r'\ifmmode\textsc{\lowercase{'+x+r'}}\else{\rmfamily\textsc{\lowercase{'+x+r'}}}\fi'
simple_substitution_macros = {
r'Hs': r'\mathscr{H}',
r'Ident': r'\mathds{1}',
# bits and gates
r'bit': {'qitargspec': '{', 'repl': r'\texttt{%(1)s}'},
r'bitstring': {'qitargspec': '{', 'repl': r'\ensuremath{\underline{\overline{\texttt{%(1)s}}}}'},
r'gate': {'qitargspec': '{',
'repl': gate("%(1)s") },
r'AND': gate('And'),
r'XOR': gate('Xor'),
r'CNOT': gate('C-Not'),
r'NOT': gate('Not'),
r'NOOP': gate('No-Op'),
# math groups
'uu': dict(qitargspec='(', repl=r'\mathrm{u}({%(1)s})'),
'UU': dict(qitargspec='(', repl=r'\mathrm{U}({%(1)s})'),
'su': dict(qitargspec='(', repl=r'\mathrm{su}({%(1)s})'),
'SU': dict(qitargspec='(', repl=r'\mathrm{SU}({%(1)s})'),
'so': dict(qitargspec='(', repl=r'\mathrm{so}({%(1)s})'),
'SO': dict(qitargspec='(', repl=r'\mathrm{SO}({%(1)s})'),
#'sl': dict(qitargspec='(', repl=r'\mathrm{sl}({%(1)s})'), # not in phfqit -- why? should add it there
#'SL': dict(qitargspec='(', repl=r'\mathrm{SL}({%(1)s})'),
'GL': dict(qitargspec='(', repl=r'\mathrm{GL}({%(1)s})'),
'SN': dict(qitargspec='(', repl=r'\mathrm{S}_{%(1)s}'),
}
math_operators = {
'tr': 'tr',
'supp': 'supp',
'rank': 'rank',
'linspan': 'span',
'spec': 'spec',
'diag': 'diag',
'Re': 'Re',
'Im': 'Im',
'poly': 'poly',
}
rx_hspace = re.compile(r'\\hspace\*?\{[^}]+\}')
def _delempties(d):
delkeys = [k for k, v in d.items() if v is None]
for k in delkeys:
del d[k]
class ExpandMacros(BaseFix):
r"""
Expand various macros defined by the {phfqit} package.
If applied along with :py:class:`latexpp.fixes.pkg.phfqit.ExpandQitObjects`,
the dependency on package {phfqit} should be removed.
Arguments:
- `subst`: a dictionary of substitutions to perform. The dictionary keys
are macro names without leading backslash, and values are dictionaries of
the form ``{'qitargspec': <qitargspec>, 'repl': <repl>}``. This has a
similar syntax to the :py:class:`latexpp.fixes.macro_subst.Subst` fix
class, but argument parsing allows an extended syntax. Instead of
specifying an `'argspec': <argspec>`, you specify `'qitargspec':
<qitargspec>` which provides argument parsing extensions to the usual
`argspec`.
Each character in `<qitargspec>` is one of:
- '*', '[', '{' represent the same kind of arguments as for 'argspec' in
:py:class:`latexpp.fixes.macro_subst.Subst`;
- '(' represents a mandatory argument in parentheses;
- '`' represents an optional argument introduced by ```<token or group>``;
- '_' represents an optional argument introduced by ``_<token or group>``;
- or '^' which represents an optional argument introduced by ``^<token or
group>``.
As for :py:class:`latexpp.fixes.macro_subst.Subst`, arguments are
available in the replacement string `<repl>` via the syntax ``%(n)s``
where `n` is the argument number.
A default set of substitutions are provided according to the macros
defined in the {phfqit} package; arguments here override the defaults.
      You can disable individual default substitutions by providing the value
`None` (`null` in the YAML file) for the given macro name in the `subst`
dictionary.
- `ops`: a dictionary of "operator names" to substitute for. This is a
dictionary ``{<opname>: <opstring>, ...}`` where `<opname>` is the macro
name of the operator without leading backslash (e.g., ``tr`` for "trace"),
and `<opstring>` is the replacement LaTeX string that will be formatted as
an operator name. See `math_operator_fmt=` for how operators are
formatted.
A default set of operator names are provided according to the macros
defined in the {phfqit} package; arguments here override the defaults.
You can disable individual default operator names by providing the value
`None` (`null` in the YAML file) for the given operator name in the `ops`
dictionary.
- `math_operator_fmt`: The template string to use to format an operator. By
default, we use `\\operatorname{...}` to format the operator. The
template should contain the string `%(opname)s` which will be replaced by
the actual operator name. The default value is
``\operatorname{%(opname)s}``; if you prefer to use ``\mbox`` for
operators, you could set this to ``\mbox{%(opname)s}``.
- `delims`: A dictionary specifying macros that format delimited expressions
      (such as `\\abs`, `\\ket`, `\\norm`, etc.; an illustrative example is
      given at the end of this docstring).  These macros take an optional
star (which indicates that the delimiters should be latex-dynamically
sized with ``\left`` and ``\right``), or an optional sizing macro in
square braces (such as ``\norm[\big]{...}``). After the optional star and
optional argument, the macro must take a fixed number of mandatory
arguments (e.g., one for ``\norm`` but two for ``\ketbra`` and three for
``\matrixel``).
The `delims` argument is a dictionary ``{<delim-macro-name>: <delim-spec>,
...}`` where `<delim-macro-name>` is the name of the macro without leading
backslash (e.g., 'ket' or 'abs'). The `<delim-spec>` is either:
- `<delim-spec>=(<left-delim>, <right-delim>)`, i.e., a two-item tuple or
list specifying the left and right delimiter. The macro must take a
single mandatory argument, which will be typeset between the two
delimiters. One must be able to size the delimiters using sizing
commands such as ``\big`` or ``\left``/``\right``.
- `<delim-spec>=(<left-delim>, <contents-repl>, <right-delim>)`, i.e., a
three-item tuple or list. The `<left-delim>` and `<right-delim>` are as
above. The `<contents-repl>` specifies how to format the contents
between the two delimiters, and should contain replacement strings of
the form ``%(n)s`` that expand into the `n`-th mandatory argument of the
macro. The number of mandatory arguments that the macro accepts is
inferred by inspecting the replacement string and looking for the
highest `n` in these replacement placeholders. Furthermore, you can use
the replacement placeholder ``%(delimsize)s``, which expands to the
relevant sizing command (e.g., ``\big``, ``\middle`` to match
``\left``/``\right``, or nothing if no sizing options are given) and
which can be placed immediately before a delimiter.
- `subst_use_hspace`: In all the above substitutions (including delimiters),
there are some custom sizing corrections in the form of ``\hspace*{XXex}``
that adjust the spacing between the different symbols in the expansion of
those macros. By default, they are kept in the replacement latex code so
that the document looks the same when compiled. If instead, you would
like simple substitutions without these fine-tuning spacing commands, set
`subst_use_hspace=False`.
"""
def __init__(self, *,
subst=None, ops=None, delims=None,
math_operator_fmt=r'\operatorname{%(opname)s}',
subst_use_hspace=True,
subst_space=None,
):
super().__init__()
if subst is None:
subst = {}
if ops is None:
ops = {}
if delims is None:
delims = {}
the_simple_substitution_macros = {}
the_simple_substitution_macros.update(simple_substitution_macros)
the_simple_substitution_macros.update(subst)
# remove any items which have a None value (used to indicate a default
# key should be removed from the YAML config)
the_math_operators = {}
the_math_operators.update(math_operators)
the_math_operators.update(ops)
the_simple_substitution_macros.update(**{
opname: math_operator_fmt%dict(opname=opv)
for opname, opv in the_math_operators.items()
})
# delimiter macros --> substitution rules
self.mathtools_delims_macros = dict(mathtools_delims_macros)
self.mathtools_delims_macros.update(delims)
_delempties(self.mathtools_delims_macros)
def delim_cfg(delimtuple):
if len(delimtuple) == 2:
return dict(qitargspec='`*[{',
repl=r'%(open_delim)s{%(1)s}%(close_delim)s')
numargs = max( int(m.group(1)) for m in re.finditer(r'\%\((\d)\)s', delimtuple[1]) )
return dict(qitargspec='`*[' + '{'*numargs,
repl='%(open_delim)s' + delimtuple[1] + '%(close_delim)s')
the_simple_substitution_macros.update(**{
mname: delim_cfg(delimtuple)
for mname, delimtuple in self.mathtools_delims_macros.items()
})
_delempties(the_simple_substitution_macros)
self.subst_space = dict(
phfqitKetsBarSpace=r'\mkern 1.5mu\relax ',
phfqitKetsRLAngleSpace=r'\mkern -1.8mu\relax ',
phfqitOKetsBarSpace=r'\mkern 1.5mu\relax ',
phfqitOKetsRLAngleSpace=r'\mkern -1.8mu\relax ',
phfqitKetsBeforeCommaSpace=r'',
phfqitKetsAfterCommaSpace=r'\mkern 1.5mu\relax ',
)
if subst_space is not None:
self.subst_space.update(subst_space)
# remove \hspace...'s if we don't want them.
if not subst_use_hspace:
self.subst_space = {k: '' for k in self.subst_space.keys()}
self.substitution_helper = MacroSubstHelper(
macros=the_simple_substitution_macros,
argspecfldname='qitargspec',
args_parser_class=PhfQitObjectArgsParser,
)
def specs(self, **kwargs):
# get specs from substitution helper
return dict(**self.substitution_helper.get_specs())
def fix_node(self, n, **kwargs):
# we treat all via the substitution helper
c = self.substitution_helper.get_node_cfg(n)
if c is not None:
# got a substitution. Check if it is a delimiter, which warrants
# further processing
if n.isNodeType(latexwalker.LatexMacroNode) and \
n.macroname in self.mathtools_delims_macros:
#
# it's a delimiter macro!
#
# check for `backtick argument after checking for * and/or [,
# because the latter have precedence
delimtype = None
if n.nodeargd.argnlist[1] is not None:
# with star
delimtype = '*'
elif n.nodeargd.argnlist[2] is not None \
and n.nodeargd.argnlist[2].nodelist:
delimtype = '\\'+n.nodeargd.argnlist[2].nodelist[0].macroname
elif n.nodeargd.argnlist[0] is not None:
# we have a backtick size
delimtype = _delimtype(n.nodeargd.argnlist[0])
if delimtype is None:
delims_pc = ('%s', '%s')
delimsize = ''
elif delimtype == '*':
# with star
delims_pc = (r'\mathopen{}\left%s', r'\right%s\mathclose{}')
delimsize = r'\middle'
else:
sizemacro = delimtype
delimsize = sizemacro+r' '
delims_pc = (sizemacro+r'l %s', sizemacro+r'r %s')
# get delim specification for this macro
delimchars = list(self.mathtools_delims_macros[n.macroname])
if len(delimchars) == 3:
# replacement string is already stored in substitution helper
delimchars = [delimchars[0], delimchars[2]]
# ensure we protect bare delimiter macros with a trailing space
for j in (0, 1):
if re.match(r'^\\[a-zA-Z]+$', delimchars[j]): # bare macro, protect with space
delimchars[j] = delimchars[j] + ' '
context = dict(open_delim=delims_pc[0]%delimchars[0],
delimsize=delimsize,
close_delim=delims_pc[1]%delimchars[1],
**self.subst_space)
return self.substitution_helper.eval_subst(
c,
n,
node_contents_latex=self.preprocess_contents_latex,
argoffset=3,
context=context
)
return self.substitution_helper.eval_subst(
c,
n,
node_contents_latex=self.preprocess_contents_latex
)
return None
# qitargspec: extension of argspec with:
# *, [, { -- as in latexwalker
# ` -- optional size arg
# ( -- mandatory arg in (...)
# _ -- optional arg (subscript) that is marked by '_', e.g. \DD_{min}{...}{...}
# ^ -- optional arg (superscript) that is marked by '^', e.g. \DD^{\epsilon}{...}{...}
def qitargspec_to_argspec(qitargspec):
return "".join( x if x in ('*', '[', '{') else '[' for x in qitargspec )
class PhfQitObjectParsedArgs(ParsedMacroArgs):
def __init__(self, qitargspec, argnlist, **kwargs):
self.qitargspec = qitargspec
argspec = qitargspec_to_argspec(self.qitargspec)
super().__init__(argspec=argspec,
argnlist=argnlist,
**kwargs)
def __repr__(self):
return "{}(qitargspec={!r}, argnlist={!r})".format(
self.__class__.__name__, self.qitargspec, self.argnlist
)
def args_to_latex(self, recomposer):
return "".join(self._arg_to_latex(at, an, recomposer=recomposer)
for at, an in zip(self.qitargspec, self.argnlist))
def _arg_to_latex(self, argt, argn, recomposer):
if argn is None:
return ''
if argt == '{':
return recomposer.node_to_latex(argn)
elif argt == '[':
return recomposer.node_to_latex(argn)
elif argt == '*':
return recomposer.node_to_latex(argn)
elif argt == '`':
return '`' + recomposer.node_to_latex(argn)
elif argt == '(':
return recomposer.node_to_latex(argn)
elif argt in ('_', '^'):
return argt + recomposer.node_to_latex(argn)
raise RuntimeError("Invalid argt={!r} (argn={!r})".format(argt, argn))
class PhfQitObjectArgsParser(MacroStandardArgsParser):
def __init__(self, qitargspec):
self.qitargspec = qitargspec
argspec = qitargspec_to_argspec(self.qitargspec)
super().__init__(argspec=argspec)
def parse_args(self, w, pos, parsing_state=None):
if parsing_state is None:
parsing_state = w.make_parsing_state()
argnlist = []
p = pos
for argt in self.qitargspec:
#
# copied from MacroStandardArgsParser
#
if argt == '{':
(node, np, nl) = w.get_latex_expression(p, strict_braces=False,
parsing_state=parsing_state)
p = np + nl
argnlist.append(node)
elif argt == '[':
if self.optional_arg_no_space and w.s[p].isspace():
# don't try to read optional arg, we don't allow space
argnlist.append(None)
continue
optarginfotuple = w.get_latex_maybe_optional_arg(p, parsing_state=parsing_state)
if optarginfotuple is None:
argnlist.append(None)
continue
(node, np, nl) = optarginfotuple
p = np + nl
argnlist.append(node)
elif argt == '*':
# possible star.
tok = w.get_token(p)
if tok.tok == 'char' and tok.arg == '*':
# has star
node = w.make_node(latexwalker.LatexCharsNode,
parsing_state=parsing_state,
chars='*', pos=tok.pos, len=tok.len)
argnlist.append(node)
p = tok.pos + 1
else:
argnlist.append(None)
elif argt == '`':
# optional size arg introduced by "`"
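                # (in the source this looks like e.g. \norm`\big{x} or \norm`*{x},
                #  judging from the two branches handled below; examples are illustrative)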
tok = w.get_token(p)
if tok.tok in ('char', 'specials') and \
(tok.arg == '`' or getattr(tok.arg, 'specials_chars', None) == '`'):
# we have an optional size arg
p = tok.pos+1
tok = w.get_token(p)
# check for star
if tok.tok == 'char' and tok.arg == '*':
# has star
thenode = w.make_node(latexwalker.LatexCharsNode,
parsing_state=parsing_state,
chars='*', pos=tok.pos, len=tok.len)
argnlist.append(thenode)
p = tok.pos + 1
elif tok.tok == 'macro':
thenode = w.make_node(latexwalker.LatexMacroNode,
parsing_state=parsing_state,
macroname=tok.arg,
nodeargd=None,
pos=tok.pos, len=tok.len)
argnlist.append(thenode)
p = tok.pos+tok.len
else:
raise latexwalker.LatexWalkerParseError(
msg="Expected '*' or macro after `",
s=w.s,
pos=p
)
else:
# optional size arg not present
argnlist.append(None)
elif argt == '(':
(argnode, ppos, plen) = w.get_latex_braced_group(p, brace_type='(',
parsing_state=parsing_state)
argnlist.append( argnode )
p = ppos+plen
elif argt in ('_', '^'):
# optional argument introduced by "_" or "^"
tok = w.get_token(p)
# check for intro char "_"/"^"
if tok.tok == 'char' and tok.arg == argt:
# has this argument, read expression:
#optpos = tok.pos
p = tok.pos+tok.len
(node, np, nl) = w.get_latex_expression(p, strict_braces=False,
parsing_state=parsing_state)
p = np + nl
argnlist.append( node )
# argnlist.append(
# w.make_node(
# latexwalker.LatexGroupNode,
# parsing_state=parsing_state,
# nodelist=[ node ],
# delimiters=(argt, ''),
# pos=optpos,
# len=np+nl-optpos
# )
# )
else:
argnlist.append(None)
else:
raise LatexWalkerError(
"Unknown macro argument kind for macro: {!r}".format(argt)
)
parsed = PhfQitObjectParsedArgs(
qitargspec=self.qitargspec,
argnlist=argnlist,
)
return (parsed, pos, p-pos)
|
[
"logging.getLogger",
"re.compile",
"pylatexenc.latexwalker.LatexWalkerParseError",
"re.match",
"yaml.safe_load",
"re.finditer",
"latexpp.macro_subst_helper.MacroSubstHelper"
] |
[((47, 74), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (64, 74), False, 'import logging\n'), ((332, 927), 'yaml.safe_load', 'yaml.safe_load', (['"""\nstdset:\n HH:\n type: Hbase\n Hzero:\n type: Hbase\n sub: \'\\\\mathrm{max},0\'\n Hmin:\n type: Hbase\n sub: \'\\\\mathrm{min}\'\n Hmaxf:\n type: Hbase\n sub: \'\\\\mathrm{max}\'\n\n Hfn:\n type: Hfnbase\n\n Dmax:\n type: Dbase\n sub: \'\\\\mathrm{max}\'\n Dminz:\n type: Dbase\n sub: \'0\'\n Dminf:\n type: Dbase\n sub: \'\\\\mathrm{min}\'\n Dr:\n type: Dbase\n sub: \'\\\\mathrm{Rob}\'\n DHyp:\n type: Dbase\n sub: \'\\\\mathrm{H}\'\n Dhyp:\n type: Dbase\n sub: \'\\\\mathrm{h}\'\n\n DCoh:\n type: DCohbase\n DCohx:\n type: DCohbase\n\n DD:\n type: DD\n"""'], {}), '(\n """\nstdset:\n HH:\n type: Hbase\n Hzero:\n type: Hbase\n sub: \'\\\\mathrm{max},0\'\n Hmin:\n type: Hbase\n sub: \'\\\\mathrm{min}\'\n Hmaxf:\n type: Hbase\n sub: \'\\\\mathrm{max}\'\n\n Hfn:\n type: Hfnbase\n\n Dmax:\n type: Dbase\n sub: \'\\\\mathrm{max}\'\n Dminz:\n type: Dbase\n sub: \'0\'\n Dminf:\n type: Dbase\n sub: \'\\\\mathrm{min}\'\n Dr:\n type: Dbase\n sub: \'\\\\mathrm{Rob}\'\n DHyp:\n type: Dbase\n sub: \'\\\\mathrm{H}\'\n Dhyp:\n type: Dbase\n sub: \'\\\\mathrm{h}\'\n\n DCoh:\n type: DCohbase\n DCohx:\n type: DCohbase\n\n DD:\n type: DD\n"""\n )\n', (346, 927), False, 'import yaml\n'), ((927, 994), 'yaml.safe_load', 'yaml.safe_load', (['"""\nIdentProc:\n type: IdentProc\nee:\n type: ee\n"""'], {}), '("""\nIdentProc:\n type: IdentProc\nee:\n type: ee\n""")\n', (941, 994), False, 'import yaml\n'), ((18922, 18961), 're.compile', 're.compile', (['"""\\\\\\\\hspace\\\\*?\\\\{[^}]+\\\\}"""'], {}), "('\\\\\\\\hspace\\\\*?\\\\{[^}]+\\\\}')\n", (18932, 18961), False, 'import re\n'), ((27212, 27343), 'latexpp.macro_subst_helper.MacroSubstHelper', 'MacroSubstHelper', ([], {'macros': 'the_simple_substitution_macros', 'argspecfldname': '"""qitargspec"""', 'args_parser_class': 'PhfQitObjectArgsParser'}), "(macros=the_simple_substitution_macros, argspecfldname=\n 'qitargspec', args_parser_class=PhfQitObjectArgsParser)\n", (27228, 27343), False, 'from latexpp.macro_subst_helper import MacroSubstHelper\n'), ((29618, 29660), 're.match', 're.match', (['"""^\\\\\\\\[a-zA-Z]+$"""', 'delimchars[j]'], {}), "('^\\\\\\\\[a-zA-Z]+$', delimchars[j])\n", (29626, 29660), False, 'import re\n'), ((26135, 26180), 're.finditer', 're.finditer', (['"""\\\\%\\\\((\\\\d)\\\\)s"""', 'delimtuple[1]'], {}), "('\\\\%\\\\((\\\\d)\\\\)s', delimtuple[1])\n", (26146, 26180), False, 'import re\n'), ((35571, 35660), 'pylatexenc.latexwalker.LatexWalkerParseError', 'latexwalker.LatexWalkerParseError', ([], {'msg': '"""Expected \'*\' or macro after `"""', 's': 'w.s', 'pos': 'p'}), '(msg="Expected \'*\' or macro after `", s=w.\n s, pos=p)\n', (35604, 35660), False, 'from pylatexenc import latexwalker\n')]
|
from builder.laikago_task_bullet import LaikagoTaskBullet
from builder.laikago_task import InitPose
import math
import numpy as np
ABDUCTION_P_GAIN = 220.0
ABDUCTION_D_GAIN = 0.3
HIP_P_GAIN = 220.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 220.0
KNEE_D_GAIN = 2.0
class LaikagoStandImitationBulletBase(LaikagoTaskBullet):
def __init__(self,
reward_mode='without_shaping',
run_mode='train'):
super(LaikagoStandImitationBulletBase, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
init_pose=InitPose.LIE)
self.imitation_action = np.array([-10, 30, -75,
10, 30, -75,
-10, 50, -75,
10, 50, -75]) * np.pi / 180
self._kp = [ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN]
self._kd = [ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN]
self._torque_limits = np.ones(12) * 40
class LaikagoStandImitationBullet0(LaikagoStandImitationBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping',):
super(LaikagoStandImitationBullet0, self).__init__(run_mode=run_mode,
reward_mode=reward_mode)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=30) or
self.done_p_bullet(threshold=30) or
self.done_y_bullet(threshold=30) or
self.done_height_bullet(threshold=0.25) or
self.done_region_bullet(threshold=3) or
self.done_toe_contact_long(threshold=30) or
self.done_toe_distance(threshold=0.2))
def cal_phi_function(self):
pos = np.array(self._env.get_history_angle()[0])
vel = np.array(self._env.get_history_velocity()[0])
target_pos = self.imitation_action
target_vel = np.zeros(12)
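        # PD control law using the gains above: tau = -Kp*(q - q_target) - Kd*(qd - qd_target);
        # the value returned below is larger when little torque is needed to hold the target pose.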
motor_torques = -1 * (self._kp * (pos - target_pos)) - self._kd * (vel - target_vel)
return 10 / np.sum(np.abs(motor_torques))
def update_reward(self):
if self.is_healthy:
self.add_reward(1, 1)
|
[
"numpy.array",
"numpy.zeros",
"numpy.abs",
"numpy.ones"
] |
[((2373, 2385), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2381, 2385), True, 'import numpy as np\n'), ((1383, 1394), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1390, 1394), True, 'import numpy as np\n'), ((707, 771), 'numpy.array', 'np.array', (['[-10, 30, -75, 10, 30, -75, -10, 50, -75, 10, 50, -75]'], {}), '([-10, 30, -75, 10, 30, -75, -10, 50, -75, 10, 50, -75])\n', (715, 771), True, 'import numpy as np\n'), ((2506, 2527), 'numpy.abs', 'np.abs', (['motor_torques'], {}), '(motor_torques)\n', (2512, 2527), True, 'import numpy as np\n')]
|
from logging import Logger
from logging import getLogger
from pytrek.settings.BaseSubSetting import BaseSubSetting
from pytrek.settings.SettingsCommon import SettingsCommon
from pytrek.settings.SettingsCommon import SettingsNameValues
class LimitsSettings(BaseSubSetting):
LIMITS_SECTION: str = 'Limits'
MAXIMUM_STARS: str = 'maximum_stars'
MINIMUM_STAR_BASES: str = 'minimum_star_bases'
MAXIMUM_STAR_BASES: str = 'maximum_star_bases'
MAXIMUM_PLANETS: str = 'maximum_planets'
DEFAULT_FULL_SHIELDS: str = 'default_full_shields'
LIMITS_SETTINGS: SettingsNameValues = SettingsNameValues({
MAXIMUM_STARS: '4',
MINIMUM_STAR_BASES: '2',
MAXIMUM_STAR_BASES: '5',
MAXIMUM_PLANETS: '10',
DEFAULT_FULL_SHIELDS: '2500'
})
"""
This is a singleton based on the inheritance hierarchy
"""
def init(self, *args, **kwds):
self.logger: Logger = getLogger(__name__)
BaseSubSetting.init(self, *args, **kwds)
self._settingsCommon: SettingsCommon = SettingsCommon(self._config)
def addMissingSettings(self):
self._settingsCommon.addMissingSettings(sectionName=LimitsSettings.LIMITS_SECTION, nameValues=LimitsSettings.LIMITS_SETTINGS)
@property
def maximumStars(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_STARS)
@property
def minimumStarBases(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MINIMUM_STAR_BASES)
@property
def maximumStarBases(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_STAR_BASES)
@property
def maximumPlanets(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.MAXIMUM_PLANETS)
@property
def defaultFullShields(self) -> int:
return self._config.getint(LimitsSettings.LIMITS_SECTION, LimitsSettings.DEFAULT_FULL_SHIELDS)
|
[
"logging.getLogger",
"pytrek.settings.SettingsCommon.SettingsNameValues",
"pytrek.settings.SettingsCommon.SettingsCommon",
"pytrek.settings.BaseSubSetting.BaseSubSetting.init"
] |
[((617, 768), 'pytrek.settings.SettingsCommon.SettingsNameValues', 'SettingsNameValues', (["{MAXIMUM_STARS: '4', MINIMUM_STAR_BASES: '2', MAXIMUM_STAR_BASES: '5',\n MAXIMUM_PLANETS: '10', DEFAULT_FULL_SHIELDS: '2500'}"], {}), "({MAXIMUM_STARS: '4', MINIMUM_STAR_BASES: '2',\n MAXIMUM_STAR_BASES: '5', MAXIMUM_PLANETS: '10', DEFAULT_FULL_SHIELDS:\n '2500'})\n", (635, 768), False, 'from pytrek.settings.SettingsCommon import SettingsNameValues\n'), ((965, 984), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (974, 984), False, 'from logging import getLogger\n'), ((994, 1034), 'pytrek.settings.BaseSubSetting.BaseSubSetting.init', 'BaseSubSetting.init', (['self', '*args'], {}), '(self, *args, **kwds)\n', (1013, 1034), False, 'from pytrek.settings.BaseSubSetting import BaseSubSetting\n'), ((1083, 1111), 'pytrek.settings.SettingsCommon.SettingsCommon', 'SettingsCommon', (['self._config'], {}), '(self._config)\n', (1097, 1111), False, 'from pytrek.settings.SettingsCommon import SettingsCommon\n')]
|
import unittest
from datetime import date
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve
import numpy as np
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2020, 6, 26)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = False
# set synthetic data
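# for a flat curve at rate r, discount factors are (1 + r)**-t while the spot,
# forward and par rates all equal r; the conversions tested below rely on this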
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
spots = (flatR * np.ones(5)).tolist()
yearFrac = np.arange(1, 6).tolist()
par = (flatR * np.ones(5)).tolist()
t = date(2021, 6, 30)  # using date(2021, 6, 26) instead would trigger an extrapolation warning msg
t1 = date(2022, 6, 26)
t2 = date(2023, 6, 26)
class testYieldCurveGetRate(unittest.TestCase):
def testDiscountCurve(self):
disC = discountCurve(alias_disC, referenceDate,
dayCount, compounding, allowExtrapolation)
disC.values = dF
disC.timeIndex = timeIndex
self.assertAlmostEqual(disC.getRate(t1, t2), (1 + flatR) ** -1) # almostEqual auto rounds to 7 decimals
def testForwardCurve(self):
forwardC = forwardCurve(alias_forC, referenceDate,
dayCount, compounding, allowExtrapolation)
forwardC.values = forwardRates
forwardC.timeIndex = timeIndex
self.assertAlmostEqual(forwardC.getRate(t, t1, t2), flatR)
def testSpot2Df(self):
self.assertCountEqual(np.round(yieldCurve.spot2Df(
spots, yearFrac, compounding), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.spot2Df(
dF, yearFrac, compounding, reverse=True), 8), np.round(spots, 8))
def testDf2Forward(self):
self.assertCountEqual(np.round(yieldCurve.dF2Forward(
dF, yearFrac), 8), np.round(forwardRates, 8))
def testForward2Spot(self):
self.assertCountEqual(np.round(yieldCurve.forward2Spot(
forwardRates, yearFrac, compounding), 8), np.round(spots, 8))
def testPar2Df(self):
self.assertCountEqual(
np.round(yieldCurve.par2Df(par, yearFrac), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.par2Df(
dF, yearFrac, reverse=True), 8), np.round(par, 8))
|
[
"numpy.ones",
"numpy.arange",
"numpy.round",
"irLib.helpers.yieldCurve.yieldCurve.dF2Forward",
"irLib.marketConvention.dayCount.ACT_ACT",
"irLib.helpers.yieldCurve.discountCurve",
"irLib.helpers.yieldCurve.forwardCurve",
"datetime.date",
"irLib.helpers.yieldCurve.yieldCurve.spot2Df",
"irLib.helpers.yieldCurve.yieldCurve.par2Df",
"irLib.helpers.yieldCurve.yieldCurve.forward2Spot",
"irLib.marketConvention.compounding.annually_k_Spot"
] |
[((311, 328), 'datetime.date', 'date', (['(2020)', '(6)', '(26)'], {}), '(2020, 6, 26)\n', (315, 328), False, 'from datetime import date\n'), ((340, 349), 'irLib.marketConvention.dayCount.ACT_ACT', 'ACT_ACT', ([], {}), '()\n', (347, 349), False, 'from irLib.marketConvention.dayCount import ACT_ACT\n'), ((364, 381), 'irLib.marketConvention.compounding.annually_k_Spot', 'annually_k_Spot', ([], {}), '()\n', (379, 381), False, 'from irLib.marketConvention.compounding import annually_k_Spot\n'), ((680, 697), 'datetime.date', 'date', (['(2021)', '(6)', '(30)'], {}), '(2021, 6, 30)\n', (684, 697), False, 'from datetime import date\n'), ((766, 783), 'datetime.date', 'date', (['(2022)', '(6)', '(26)'], {}), '(2022, 6, 26)\n', (770, 783), False, 'from datetime import date\n'), ((789, 806), 'datetime.date', 'date', (['(2023)', '(6)', '(26)'], {}), '(2023, 6, 26)\n', (793, 806), False, 'from datetime import date\n'), ((614, 629), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (623, 629), True, 'import numpy as np\n'), ((904, 991), 'irLib.helpers.yieldCurve.discountCurve', 'discountCurve', (['alias_disC', 'referenceDate', 'dayCount', 'compounding', 'allowExtrapolation'], {}), '(alias_disC, referenceDate, dayCount, compounding,\n allowExtrapolation)\n', (917, 991), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1241, 1327), 'irLib.helpers.yieldCurve.forwardCurve', 'forwardCurve', (['alias_forC', 'referenceDate', 'dayCount', 'compounding', 'allowExtrapolation'], {}), '(alias_forC, referenceDate, dayCount, compounding,\n allowExtrapolation)\n', (1253, 1327), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((544, 554), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (551, 554), True, 'import numpy as np\n'), ((582, 592), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (589, 592), True, 'import numpy as np\n'), ((654, 664), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (661, 664), True, 'import numpy as np\n'), ((1635, 1650), 'numpy.round', 'np.round', (['dF', '(8)'], {}), '(dF, 8)\n', (1643, 1650), True, 'import numpy as np\n'), ((1769, 1787), 'numpy.round', 'np.round', (['spots', '(8)'], {}), '(spots, 8)\n', (1777, 1787), True, 'import numpy as np\n'), ((1913, 1938), 'numpy.round', 'np.round', (['forwardRates', '(8)'], {}), '(forwardRates, 8)\n', (1921, 1938), True, 'import numpy as np\n'), ((2091, 2109), 'numpy.round', 'np.round', (['spots', '(8)'], {}), '(spots, 8)\n', (2099, 2109), True, 'import numpy as np\n'), ((2228, 2243), 'numpy.round', 'np.round', (['dF', '(8)'], {}), '(dF, 8)\n', (2236, 2243), True, 'import numpy as np\n'), ((2348, 2364), 'numpy.round', 'np.round', (['par', '(8)'], {}), '(par, 8)\n', (2356, 2364), True, 'import numpy as np\n'), ((494, 509), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (503, 509), True, 'import numpy as np\n'), ((1568, 1616), 'irLib.helpers.yieldCurve.yieldCurve.spot2Df', 'yieldCurve.spot2Df', (['spots', 'yearFrac', 'compounding'], {}), '(spots, yearFrac, compounding)\n', (1586, 1616), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1691, 1750), 'irLib.helpers.yieldCurve.yieldCurve.spot2Df', 'yieldCurve.spot2Df', (['dF', 'yearFrac', 'compounding'], {'reverse': '(True)'}), '(dF, yearFrac, compounding, reverse=True)\n', (1709, 1750), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1859, 1894), 'irLib.helpers.yieldCurve.yieldCurve.dF2Forward', 
'yieldCurve.dF2Forward', (['dF', 'yearFrac'], {}), '(dF, yearFrac)\n', (1880, 1894), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2012, 2072), 'irLib.helpers.yieldCurve.yieldCurve.forward2Spot', 'yieldCurve.forward2Spot', (['forwardRates', 'yearFrac', 'compounding'], {}), '(forwardRates, yearFrac, compounding)\n', (2035, 2072), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2190, 2222), 'irLib.helpers.yieldCurve.yieldCurve.par2Df', 'yieldCurve.par2Df', (['par', 'yearFrac'], {}), '(par, yearFrac)\n', (2207, 2222), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2284, 2329), 'irLib.helpers.yieldCurve.yieldCurve.par2Df', 'yieldCurve.par2Df', (['dF', 'yearFrac'], {'reverse': '(True)'}), '(dF, yearFrac, reverse=True)\n', (2301, 2329), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n')]
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from setuptools import setup, dist
import wheel
import os
# required to generate a platlib folder needed by audittools
from setuptools.command.install import install
# for generating a wheel version from git tag
from setuptools_scm import get_version
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
if self.distribution.has_ext_modules():
self.install_lib = self.install_platlib
# force setuptools to recognize that this is
# actually a binary distribution
class BinaryDistribution(dist.Distribution):
def is_pure(self):
return False
def has_ext_modules(foo):
return True
# This gets the version from the most recent git tag, potentially concatenating
# a commit hash at the end.
current_version = get_version(
root = "..",
relative_to = __file__,
fallback_version='0.0.0-dev0'
)
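# e.g. this typically yields something like "1.4.0" on a tagged commit, or
# "1.4.1.dev3+g1a2b3c4" between tags (illustrative values only)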
optix_version = os.environ.get("OPTIX_VERSION", None)
if optix_version:
current_version = current_version + "." + optix_version
print(current_version)
setup(
# This package is called nvisii
name='nvisii',
install_requires = ['numpy>=1.19.5'],
packages = ['nvisii', "nvisii.importers"], # include the package "nvisii"
# make sure the shared library is included
package_data = {'': ("*.dll", "*.pyd", "*.so")},
include_package_data=True,
description='',
# See class BinaryDistribution that was defined earlier
distclass=BinaryDistribution,
version = current_version,
author='<NAME>',
author_email='',
maintainer='',
maintainer_email='',
python_requires = ">=3.6",
cmdclass={'install': InstallPlatlib},
)
|
[
"setuptools_scm.get_version",
"os.environ.get",
"setuptools.command.install.install.finalize_options",
"setuptools.setup"
] |
[((984, 1059), 'setuptools_scm.get_version', 'get_version', ([], {'root': '""".."""', 'relative_to': '__file__', 'fallback_version': '"""0.0.0-dev0"""'}), "(root='..', relative_to=__file__, fallback_version='0.0.0-dev0')\n", (995, 1059), False, 'from setuptools_scm import get_version\n'), ((1096, 1133), 'os.environ.get', 'os.environ.get', (['"""OPTIX_VERSION"""', 'None'], {}), "('OPTIX_VERSION', None)\n", (1110, 1133), False, 'import os\n'), ((1237, 1633), 'setuptools.setup', 'setup', ([], {'name': '"""nvisii"""', 'install_requires': "['numpy>=1.19.5']", 'packages': "['nvisii', 'nvisii.importers']", 'package_data': "{'': ('*.dll', '*.pyd', '*.so')}", 'include_package_data': '(True)', 'description': '""""""', 'distclass': 'BinaryDistribution', 'version': 'current_version', 'author': '"""<NAME>"""', 'author_email': '""""""', 'maintainer': '""""""', 'maintainer_email': '""""""', 'python_requires': '""">=3.6"""', 'cmdclass': "{'install': InstallPlatlib}"}), "(name='nvisii', install_requires=['numpy>=1.19.5'], packages=['nvisii',\n 'nvisii.importers'], package_data={'': ('*.dll', '*.pyd', '*.so')},\n include_package_data=True, description='', distclass=BinaryDistribution,\n version=current_version, author='<NAME>', author_email='', maintainer=\n '', maintainer_email='', python_requires='>=3.6', cmdclass={'install':\n InstallPlatlib})\n", (1242, 1633), False, 'from setuptools import setup, dist\n'), ((507, 537), 'setuptools.command.install.install.finalize_options', 'install.finalize_options', (['self'], {}), '(self)\n', (531, 537), False, 'from setuptools.command.install import install\n')]
|
from marshmallow import fields, Schema
from .provision import ProvisionActionSchema
class InstanceSchema(Schema):
type = fields.String(required=True)
image_id = fields.String(required=True)
availability_zone = fields.String(required=True)
ebs_optimized = fields.Boolean()
iam_fleet_role = fields.String(required=True)
class Meta:
ordered = True
class AuthSchema(Schema):
key_pair_name = fields.String(required=True)
identity_file = fields.String(required=True)
user = fields.String(required=True)
group = fields.String(required=True)
class Meta:
ordered = True
class NetworkSchema(Schema):
security_group_id = fields.String(required=True)
subnet_id = fields.String()
class Meta:
ordered = True
class ComputeAwsSchema(Schema):
provider = fields.String(required=True)
instance = fields.Nested(InstanceSchema, required=True)
auth = fields.Nested(AuthSchema, required=True)
network = fields.Nested(NetworkSchema, required=True)
provision_actions = fields.Nested(ProvisionActionSchema, many=True)
class Meta:
ordered = True
|
[
"marshmallow.fields.Boolean",
"marshmallow.fields.Nested",
"marshmallow.fields.String"
] |
[((125, 153), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (138, 153), False, 'from marshmallow import fields, Schema\n'), ((166, 194), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (179, 194), False, 'from marshmallow import fields, Schema\n'), ((216, 244), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (229, 244), False, 'from marshmallow import fields, Schema\n'), ((262, 278), 'marshmallow.fields.Boolean', 'fields.Boolean', ([], {}), '()\n', (276, 278), False, 'from marshmallow import fields, Schema\n'), ((297, 325), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (310, 325), False, 'from marshmallow import fields, Schema\n'), ((402, 430), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (415, 430), False, 'from marshmallow import fields, Schema\n'), ((448, 476), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (461, 476), False, 'from marshmallow import fields, Schema\n'), ((485, 513), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (498, 513), False, 'from marshmallow import fields, Schema\n'), ((523, 551), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (536, 551), False, 'from marshmallow import fields, Schema\n'), ((635, 663), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (648, 663), False, 'from marshmallow import fields, Schema\n'), ((677, 692), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (690, 692), False, 'from marshmallow import fields, Schema\n'), ((770, 798), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (783, 798), False, 'from marshmallow import fields, Schema\n'), ((811, 855), 'marshmallow.fields.Nested', 'fields.Nested', (['InstanceSchema'], {'required': '(True)'}), '(InstanceSchema, required=True)\n', (824, 855), False, 'from marshmallow import fields, Schema\n'), ((864, 904), 'marshmallow.fields.Nested', 'fields.Nested', (['AuthSchema'], {'required': '(True)'}), '(AuthSchema, required=True)\n', (877, 904), False, 'from marshmallow import fields, Schema\n'), ((916, 959), 'marshmallow.fields.Nested', 'fields.Nested', (['NetworkSchema'], {'required': '(True)'}), '(NetworkSchema, required=True)\n', (929, 959), False, 'from marshmallow import fields, Schema\n'), ((981, 1028), 'marshmallow.fields.Nested', 'fields.Nested', (['ProvisionActionSchema'], {'many': '(True)'}), '(ProvisionActionSchema, many=True)\n', (994, 1028), False, 'from marshmallow import fields, Schema\n')]
|
__all__ = [
"Dataset",
"forgiving_true",
"load_config",
"log",
"make_tdtax_taxonomy",
"plot_gaia_density",
"plot_gaia_hr",
"plot_light_curve_data",
"plot_periods",
]
from astropy.io import fits
import datetime
import json
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tqdm.auto import tqdm
from typing import Mapping, Optional, Union
import yaml
def load_config(config_path: Union[str, pathlib.Path]):
"""
Load config and secrets
"""
with open(config_path) as config_yaml:
config = yaml.load(config_yaml, Loader=yaml.FullLoader)
return config
def time_stamp():
"""
:return: UTC time as a formatted string
"""
return datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")
def log(message: str):
print(f"{time_stamp()}: {message}")
def forgiving_true(expression):
return True if expression in ("t", "True", "true", "1", 1, True) else False
def make_tdtax_taxonomy(taxonomy: Mapping):
"""Recursively convert taxonomy definition from config["taxonomy"]
into tdtax-parsable dictionary
:param taxonomy: config["taxonomy"] section
:return:
"""
tdtax_taxonomy = dict()
if taxonomy["class"] not in ("tds", "phenomenological", "ontological"):
tdtax_taxonomy["name"] = f"{taxonomy['class']}: {taxonomy['name']}"
else:
tdtax_taxonomy["name"] = taxonomy["name"]
if "subclasses" in taxonomy:
tdtax_taxonomy["children"] = []
for cls in taxonomy["subclasses"]:
tdtax_taxonomy["children"].append(make_tdtax_taxonomy(cls))
return tdtax_taxonomy
def plot_light_curve_data(
light_curve_data: pd.DataFrame,
period: Optional[float] = None,
title: Optional[str] = None,
save: Optional[str] = None,
):
"""Plot and save to file light curve data
:param light_curve_data:
:param period: float [days] if set, a phase-folded light curve will be displayed
:param title: plot title
:param save: path to save the plot
:return:
"""
plt.close("all")
# Official start of ZTF MSIP survey, March 17, 2018
jd_start = 2458194.5
colors = {
1: "#28a745",
2: "#dc3545",
3: "#00415a",
"default": "#f3dc11",
}
mask_good_data = light_curve_data["catflags"] == 0
df = light_curve_data.loc[mask_good_data]
if period is not None:
fig = plt.figure(figsize=(16, 9), dpi=200)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
fig = plt.figure(figsize=(16, 5), dpi=200)
ax1 = fig.add_subplot(111)
if title is not None:
fig.suptitle(title, fontsize=24)
# plot different ZTF bands/filters
for band in df["filter"].unique():
mask_filter = df["filter"] == band
ax1.errorbar(
df.loc[mask_filter, "hjd"] - jd_start,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
if period is not None:
for n in [0, -1]:
ax2.errorbar(
(df.loc[mask_filter, "hjd"] - jd_start) / period % 1 + n,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
# invert y axes since we are displaying magnitudes
ax1.invert_yaxis()
if period is not None:
ax2.invert_yaxis()
ax1.set_xlabel("Time")
ax1.grid(lw=0.3)
if period is not None:
ax2.set_xlabel(f"phase [period={period:4.4g} days]")
ax2.set_xlim(-1, 1)
ax2.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_periods(
features: pd.DataFrame,
limits: Optional[list] = None,
loglimits: Optional[bool] = False,
number_of_bins: Optional[int] = 20,
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot a histogram of periods for the sample"""
    # use TeX text rendering for the plot labels
plt.rc("text", usetex=True)
# make figure
fig, ax = plt.subplots(figsize=(6, 6))
if title is not None:
fig.suptitle(title, fontsize=24)
if limits is not None:
if loglimits:
edges = np.logspace(
np.log10(limits[0]), np.log10(limits[1]), number_of_bins
)
else:
edges = np.linspace(limits[0], limits[1], number_of_bins)
else:
if loglimits:
edges = np.linspace(
np.log10(0.9 * np.min(features["period"])),
np.log10(1.1 * np.max(features["period"])),
number_of_bins,
)
else:
edges = np.linspace(
0.9 * np.min(features["period"]),
1.1 * np.max(features["period"]),
number_of_bins,
)
hist, bin_edges = np.histogram(features["period"], bins=edges)
hist = hist / np.sum(hist)
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
ax.plot(bins, hist, linestyle="-", drawstyle="steps")
ax.set_xlabel("Period [day]")
ax.set_ylabel("Probability Density Function")
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if loglimits:
ax.set_xscale("log")
ax.set_xlim([0.9 * bins[0], 1.1 * bins[-1]])
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_hr(
gaia_data: pd.DataFrame,
path_gaia_hr_histogram: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the Gaia HR diagram with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load background histogram
histogram = np.loadtxt(path_gaia_hr_histogram)
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
x_edges = np.arange(-0.681896, 5.04454978, 0.02848978)
y_edges = np.arange(-2.90934, 16.5665952, 0.0968952)
ax.pcolormesh(x_edges, y_edges, histogram.T, antialiased=False)
ax.set_xlim(x_edges[0], x_edges[-1])
ax.set_ylim(y_edges[0], y_edges[-1])
ax.invert_yaxis()
ax.set_xlabel(r"$G_{BP} - G_{RP}$")
ax.set_ylabel(r"$M_G$")
# plot sample data
ax.errorbar(
gaia_data["BP-RP"],
gaia_data["M"],
gaia_data["M"] - gaia_data["Ml"],
marker=".",
color="#e68a00",
alpha=0.75,
ls="",
lw=0.5,
)
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_density(
positions: pd.DataFrame,
path_gaia_density: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the RA/DEC Gaia density plot with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
    # use TeX text rendering for the plot labels
plt.rc("text", usetex=True)
# load the data
hdulist = fits.open(path_gaia_density)
hist = hdulist[1].data["srcdens"][np.argsort(hdulist[1].data["hpx8"])]
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
# background setup
coordsys = ["C", "C"]
nest = True
# colormap
cm = plt.cm.get_cmap("viridis") # colorscale
cm.set_under("w")
cm.set_bad("w")
# plot the data in healpy
norm = "log"
hp.mollview(
hist,
norm=norm,
unit="Stars per sq. arcmin.",
cbar=False,
nest=nest,
title="",
coord=coordsys,
notext=True,
cmap=cm,
flip="astro",
nlocs=4,
min=0.1,
max=300,
)
ax = plt.gca()
image = ax.get_images()[0]
cbar = fig.colorbar(
image,
ax=ax,
ticks=[0.1, 1, 10, 100],
fraction=0.15,
pad=0.05,
location="bottom",
)
cbar.set_label("Stars per sq. arcmin.", size=12)
cbar.ax.tick_params(labelsize=12)
ax.tick_params(axis="both", which="major", labelsize=24)
# borders
lw = 3
pi = np.pi
dtor = pi / 180.0
theta = np.arange(0, 181) * dtor
hp.projplot(theta, theta * 0 - pi, "-k", lw=lw, direct=True)
hp.projplot(theta, theta * 0 + 0.9999 * pi, "-k", lw=lw, direct=True)
phi = np.arange(-180, 180) * dtor
hp.projplot(phi * 0 + 1.0e-10, phi, "-k", lw=lw, direct=True)
hp.projplot(phi * 0 + pi - 1.0e-10, phi, "-k", lw=lw, direct=True)
# ZTF
theta = np.arange(0.0, 360, 0.036)
phi = -30.0 * np.ones_like(theta)
hp.projplot(theta, phi, "k--", coord=["C"], lonlat=True, lw=2)
hp.projtext(170.0, -24.0, r"ZTF Limit", lonlat=True)
theta = np.arange(0.0, 360, 0.036)
# galaxy
for gallat in [15, 0, -15]:
phi = gallat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["G"], lonlat=True, lw=2)
# ecliptic
for ecllat in [0, -30, 30]:
phi = ecllat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["E"], lonlat=True, lw=2, ls=":")
# graticule
hp.graticule(ls="-", alpha=0.1, lw=0.5)
# labels
for lat in [60, 30, 0, -30, -60]:
hp.projtext(360.0, lat, str(lat), lonlat=True)
for lon in [0, 60, 120, 240, 300]:
hp.projtext(lon, 0.0, str(lon), lonlat=True)
# NWES
plt.text(0.0, 0.5, r"E", ha="right", transform=ax.transAxes, weight="bold")
plt.text(1.0, 0.5, r"W", ha="left", transform=ax.transAxes, weight="bold")
plt.text(
0.5,
0.992,
r"N",
va="bottom",
ha="center",
transform=ax.transAxes,
weight="bold",
)
plt.text(
0.5, 0.0, r"S", va="top", ha="center", transform=ax.transAxes, weight="bold"
)
color = "k"
lw = 10
alpha = 0.75
for pos in positions:
hp.projplot(
pos[0],
pos[1],
color=color,
markersize=5,
marker="o",
coord=coordsys,
lonlat=True,
lw=lw,
alpha=alpha,
zorder=10,
)
if save is not None:
fig.tight_layout()
plt.savefig(save)
""" Datasets """
class Dataset(object):
def __init__(
self,
tag: str,
path_dataset: str,
features: tuple,
verbose: bool = False,
**kwargs,
):
"""Load csv file with the dataset containing both data and labels
As of 20210317, it is produced by labels*.ipynb - this will likely change in a future PR
:param tag:
:param path_dataset:
:param features:
:param verbose:
"""
self.verbose = verbose
self.tag = tag
self.features = features
self.target = None
if self.verbose:
log(f"Loading {path_dataset}...")
nrows = kwargs.get("nrows", None)
self.df_ds = pd.read_csv(path_dataset, nrows=nrows)
if self.verbose:
log(self.df_ds[list(features)].describe())
self.df_ds = self.df_ds.replace([np.inf, -np.inf, np.nan], 0.0)
dmdt = []
if self.verbose:
print("Moving dmdt's to a dedicated numpy array...")
iterator = tqdm(self.df_ds.itertuples(), total=len(self.df_ds))
else:
iterator = self.df_ds.itertuples()
for i in iterator:
data = np.array(json.loads(self.df_ds["dmdt"][i.Index]))
if len(data.shape) == 0:
dmdt.append(np.zeros((26, 26)))
else:
dmdt.append(data)
self.dmdt = np.array(dmdt)
self.dmdt = np.expand_dims(self.dmdt, axis=-1)
# drop in df_ds:
self.df_ds.drop(columns="dmdt")
@staticmethod
def threshold(a, t: float = 0.5):
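        # binarize the soft labels: entries strictly above t become 1, everything else stays 0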
b = np.zeros_like(a)
b[np.array(a) > t] = 1
return b
def make(
self,
target_label: str = "variable",
threshold: float = 0.5,
balance: Optional[float] = None,
weight_per_class: bool = True,
scale_features: str = "min_max",
test_size: float = 0.1,
val_size: float = 0.1,
random_state: int = 42,
feature_stats: Optional[dict] = None,
batch_size: int = 256,
shuffle_buffer_size: int = 256,
epochs: int = 300,
**kwargs,
):
"""Make datasets for target_label
:param target_label: corresponds to training.classes.<label> in config
:param threshold: our labels are floats [0, 0.25, 0.5, 0.75, 1]
:param balance: balance ratio for the prevalent class. if null - use all available data
:param weight_per_class:
:param scale_features: min_max | median_std
:param test_size:
:param val_size:
:param random_state: set this for reproducibility
:param feature_stats: feature_stats to use to standardize features.
if None, stats are computed from the data, taking balance into account
:param batch_size
:param shuffle_buffer_size
:param epochs
:return:
"""
# Note: Dataset.from_tensor_slices method requires the target variable to be of the int type.
# TODO: see what to do about it when trying label smoothing in the future.
target = np.asarray(
list(map(int, self.threshold(self.df_ds[target_label].values, t=threshold)))
)
self.target = np.expand_dims(target, axis=1)
neg, pos = np.bincount(target.flatten())
total = neg + pos
if self.verbose:
log(
f"Examples:\n Total: {total}\n Positive: {pos} ({100 * pos / total:.2f}% of total)\n"
)
w_pos = np.rint(self.df_ds[target_label].values) == 1
index_pos = self.df_ds.loc[w_pos].index
if target_label == "variable":
# 'variable' is a special case: there is an explicit 'non-variable' label:
w_neg = (
np.asarray(
list(
map(
int,
self.threshold(
self.df_ds["non-variable"].values, t=threshold
),
)
)
)
== 1
)
else:
w_neg = ~w_pos
index_neg = self.df_ds.loc[w_neg].index
# balance positive and negative examples?
index_dropped = None
if balance:
underrepresented = min(np.sum(w_pos), np.sum(w_neg))
overrepresented = max(np.sum(w_pos), np.sum(w_neg))
sample_size = int(min(overrepresented, underrepresented * balance))
if neg > pos:
index_neg = (
self.df_ds.loc[w_neg].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_neg].index) - set(index_neg))
].index
else:
index_pos = (
self.df_ds.loc[w_pos].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_pos].index) - set(index_pos))
].index
if self.verbose:
log(
"Number of examples to use in training:"
f"\n Positive: {len(index_pos)}\n Negative: {len(index_neg)}\n"
)
ds_indexes = index_pos.to_list() + index_neg.to_list()
# Train/validation/test split (we will use an 81% / 9% / 10% data split by default):
train_indexes, test_indexes = train_test_split(
ds_indexes, shuffle=True, test_size=test_size, random_state=random_state
)
train_indexes, val_indexes = train_test_split(
train_indexes, shuffle=True, test_size=val_size, random_state=random_state
)
# Normalize features (dmdt's are already L2-normalized) (?using only the training samples?).
# Obviously, the same norms will have to be applied at the testing and serving stages.
# load/compute feature norms:
if feature_stats is None:
feature_stats = {
feature: {
"min": np.min(self.df_ds.loc[ds_indexes, feature]),
"max": np.max(self.df_ds.loc[ds_indexes, feature]),
"median": np.median(self.df_ds.loc[ds_indexes, feature]),
"mean": np.mean(self.df_ds.loc[ds_indexes, feature]),
"std": np.std(self.df_ds.loc[ds_indexes, feature]),
}
for feature in self.features
}
if self.verbose:
print("Computed feature stats:\n", feature_stats)
# scale features
for feature in self.features:
stats = feature_stats.get(feature)
if (stats is not None) and (stats["std"] != 0):
if scale_features == "median_std":
self.df_ds[feature] = (
self.df_ds[feature] - stats["median"]
) / stats["std"]
elif scale_features == "min_max":
self.df_ds[feature] = (self.df_ds[feature] - stats["min"]) / (
stats["max"] - stats["min"]
)
# norms = {
# feature: np.linalg.norm(self.df_ds.loc[ds_indexes, feature])
# for feature in self.features
# }
# for feature, norm in norms.items():
# if np.isnan(norm) or norm == 0.0:
# norms[feature] = 1.0
# if self.verbose:
# print('Computed feature norms:\n', norms)
#
# for feature, norm in norms.items():
# self.df_ds[feature] /= norm
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[train_indexes, self.features].values,
"dmdt": self.dmdt[train_indexes],
},
target[train_indexes],
)
)
val_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[val_indexes, self.features].values,
"dmdt": self.dmdt[val_indexes],
},
target[val_indexes],
)
)
test_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[test_indexes, self.features].values,
"dmdt": self.dmdt[test_indexes],
},
target[test_indexes],
)
)
dropped_samples = (
tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[index_dropped, self.features].values,
"dmdt": self.dmdt[index_dropped],
},
target[index_dropped],
)
)
if balance
else None
)
# Shuffle and batch the datasets:
train_dataset = (
train_dataset.shuffle(shuffle_buffer_size).batch(batch_size).repeat(epochs)
)
val_dataset = val_dataset.batch(batch_size).repeat(epochs)
test_dataset = test_dataset.batch(batch_size)
dropped_samples = dropped_samples.batch(batch_size) if balance else None
datasets = {
"train": train_dataset,
"val": val_dataset,
"test": test_dataset,
"dropped_samples": dropped_samples,
}
indexes = {
"train": np.array(train_indexes),
"val": np.array(val_indexes),
"test": np.array(test_indexes),
"dropped_samples": np.array(index_dropped.to_list())
if index_dropped is not None
else None,
}
# How many steps per epoch?
steps_per_epoch_train = len(train_indexes) // batch_size - 1
steps_per_epoch_val = len(val_indexes) // batch_size - 1
steps_per_epoch_test = len(test_indexes) // batch_size - 1
steps_per_epoch = {
"train": steps_per_epoch_train,
"val": steps_per_epoch_val,
"test": steps_per_epoch_test,
}
if self.verbose:
print(f"Steps per epoch: {steps_per_epoch}")
# Weight training data depending on the number of samples?
# Very useful for imbalanced classification, especially in the cases with a small number of examples.
if weight_per_class:
# weight data class depending on number of examples?
# num_training_examples_per_class = np.array([len(target) - np.sum(target), np.sum(target)])
num_training_examples_per_class = np.array([len(index_neg), len(index_pos)])
assert (
0 not in num_training_examples_per_class
), "found class without any examples!"
# fewer examples -- larger weight
weights = (1 / num_training_examples_per_class) / np.linalg.norm(
(1 / num_training_examples_per_class)
)
normalized_weight = weights / np.max(weights)
class_weight = {i: w for i, w in enumerate(normalized_weight)}
else:
# working with binary classifiers only
class_weight = {i: 1 for i in range(2)}
return datasets, indexes, steps_per_epoch, class_weight
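# --- Illustrative usage sketch (hypothetical file/feature names and model; not part of the original module) ---
# ds = Dataset(tag="vnv", path_dataset="dataset.csv", features=("period", "amplitude"), verbose=True)
# datasets, indexes, steps_per_epoch, class_weight = ds.make(
#     target_label="variable", balance=1.1, batch_size=64, epochs=100
# )
# model.fit(datasets["train"], validation_data=datasets["val"],
#           steps_per_epoch=steps_per_epoch["train"],
#           validation_steps=steps_per_epoch["val"],
#           class_weight=class_weight, epochs=100)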
|
[
"numpy.log10",
"pandas.read_csv",
"healpy.mollview",
"yaml.load",
"numpy.argsort",
"numpy.array",
"astropy.io.fits.open",
"numpy.linalg.norm",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"healpy.projplot",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.rint",
"numpy.min",
"healpy.graticule",
"healpy.projtext",
"json.loads",
"matplotlib.pyplot.savefig",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.gca",
"numpy.std",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.text",
"numpy.ones_like",
"numpy.median",
"datetime.datetime.utcnow",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.expand_dims",
"numpy.loadtxt",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((2182, 2198), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2191, 2198), True, 'import matplotlib.pyplot as plt\n'), ((4337, 4364), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4343, 4364), True, 'import matplotlib.pyplot as plt\n'), ((4398, 4426), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4410, 4426), True, 'import matplotlib.pyplot as plt\n'), ((5194, 5238), 'numpy.histogram', 'np.histogram', (["features['period']"], {'bins': 'edges'}), "(features['period'], bins=edges)\n", (5206, 5238), True, 'import numpy as np\n'), ((6135, 6162), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (6141, 6162), True, 'import matplotlib.pyplot as plt\n'), ((6212, 6246), 'numpy.loadtxt', 'np.loadtxt', (['path_gaia_hr_histogram'], {}), '(path_gaia_hr_histogram)\n', (6222, 6246), True, 'import numpy as np\n'), ((6280, 6317), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)', 'dpi': '(200)'}), '(figsize=(6, 6), dpi=200)\n', (6292, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6444), 'numpy.arange', 'np.arange', (['(-0.681896)', '(5.04454978)', '(0.02848978)'], {}), '(-0.681896, 5.04454978, 0.02848978)\n', (6409, 6444), True, 'import numpy as np\n'), ((6459, 6501), 'numpy.arange', 'np.arange', (['(-2.90934)', '(16.5665952)', '(0.0968952)'], {}), '(-2.90934, 16.5665952, 0.0968952)\n', (6468, 6501), True, 'import numpy as np\n'), ((7566, 7593), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (7572, 7593), True, 'import matplotlib.pyplot as plt\n'), ((7629, 7657), 'astropy.io.fits.open', 'fits.open', (['path_gaia_density'], {}), '(path_gaia_density)\n', (7638, 7657), False, 'from astropy.io import fits\n'), ((7766, 7803), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)', 'dpi': '(200)'}), '(figsize=(6, 6), dpi=200)\n', (7778, 7803), True, 'import matplotlib.pyplot as plt\n'), ((7962, 7988), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (7977, 7988), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8276), 'healpy.mollview', 'hp.mollview', (['hist'], {'norm': 'norm', 'unit': '"""Stars per sq. arcmin."""', 'cbar': '(False)', 'nest': 'nest', 'title': '""""""', 'coord': 'coordsys', 'notext': '(True)', 'cmap': 'cm', 'flip': '"""astro"""', 'nlocs': '(4)', 'min': '(0.1)', 'max': '(300)'}), "(hist, norm=norm, unit='Stars per sq. 
arcmin.', cbar=False, nest\n =nest, title='', coord=coordsys, notext=True, cmap=cm, flip='astro',\n nlocs=4, min=0.1, max=300)\n", (8108, 8276), True, 'import healpy as hp\n'), ((8388, 8397), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8395, 8397), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8908), 'healpy.projplot', 'hp.projplot', (['theta', '(theta * 0 - pi)', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(theta, theta * 0 - pi, '-k', lw=lw, direct=True)\n", (8859, 8908), True, 'import healpy as hp\n'), ((8913, 8982), 'healpy.projplot', 'hp.projplot', (['theta', '(theta * 0 + 0.9999 * pi)', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(theta, theta * 0 + 0.9999 * pi, '-k', lw=lw, direct=True)\n", (8924, 8982), True, 'import healpy as hp\n'), ((9025, 9084), 'healpy.projplot', 'hp.projplot', (['(phi * 0 + 1e-10)', 'phi', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(phi * 0 + 1e-10, phi, '-k', lw=lw, direct=True)\n", (9036, 9084), True, 'import healpy as hp\n'), ((9091, 9155), 'healpy.projplot', 'hp.projplot', (['(phi * 0 + pi - 1e-10)', 'phi', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(phi * 0 + pi - 1e-10, phi, '-k', lw=lw, direct=True)\n", (9102, 9155), True, 'import healpy as hp\n'), ((9181, 9207), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(0.036)'], {}), '(0.0, 360, 0.036)\n', (9190, 9207), True, 'import numpy as np\n'), ((9250, 9312), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""k--"""'], {'coord': "['C']", 'lonlat': '(True)', 'lw': '(2)'}), "(theta, phi, 'k--', coord=['C'], lonlat=True, lw=2)\n", (9261, 9312), True, 'import healpy as hp\n'), ((9317, 9368), 'healpy.projtext', 'hp.projtext', (['(170.0)', '(-24.0)', '"""ZTF Limit"""'], {'lonlat': '(True)'}), "(170.0, -24.0, 'ZTF Limit', lonlat=True)\n", (9328, 9368), True, 'import healpy as hp\n'), ((9383, 9409), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(0.036)'], {}), '(0.0, 360, 0.036)\n', (9392, 9409), True, 'import numpy as np\n'), ((9759, 9798), 'healpy.graticule', 'hp.graticule', ([], {'ls': '"""-"""', 'alpha': '(0.1)', 'lw': '(0.5)'}), "(ls='-', alpha=0.1, lw=0.5)\n", (9771, 9798), True, 'import healpy as hp\n'), ((10014, 10088), 'matplotlib.pyplot.text', 'plt.text', (['(0.0)', '(0.5)', '"""E"""'], {'ha': '"""right"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.0, 0.5, 'E', ha='right', transform=ax.transAxes, weight='bold')\n", (10022, 10088), True, 'import matplotlib.pyplot as plt\n'), ((10094, 10167), 'matplotlib.pyplot.text', 'plt.text', (['(1.0)', '(0.5)', '"""W"""'], {'ha': '"""left"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(1.0, 0.5, 'W', ha='left', transform=ax.transAxes, weight='bold')\n", (10102, 10167), True, 'import matplotlib.pyplot as plt\n'), ((10173, 10267), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.992)', '"""N"""'], {'va': '"""bottom"""', 'ha': '"""center"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.5, 0.992, 'N', va='bottom', ha='center', transform=ax.transAxes,\n weight='bold')\n", (10181, 10267), True, 'import matplotlib.pyplot as plt\n'), ((10332, 10421), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.0)', '"""S"""'], {'va': '"""top"""', 'ha': '"""center"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.5, 0.0, 'S', va='top', ha='center', transform=ax.transAxes,\n weight='bold')\n", (10340, 10421), True, 'import matplotlib.pyplot as plt\n'), ((687, 733), 'yaml.load', 'yaml.load', (['config_yaml'], {'Loader': 'yaml.FullLoader'}), '(config_yaml, 
Loader=yaml.FullLoader)\n', (696, 733), False, 'import yaml\n'), ((2543, 2579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)', 'dpi': '(200)'}), '(figsize=(16, 9), dpi=200)\n', (2553, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)', 'dpi': '(200)'}), '(figsize=(16, 5), dpi=200)\n', (2684, 2710), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3960), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (3954, 3960), True, 'import matplotlib.pyplot as plt\n'), ((5257, 5269), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (5263, 5269), True, 'import numpy as np\n'), ((5725, 5742), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (5736, 5742), True, 'import matplotlib.pyplot as plt\n'), ((7147, 7164), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (7158, 7164), True, 'import matplotlib.pyplot as plt\n'), ((7696, 7731), 'numpy.argsort', 'np.argsort', (["hdulist[1].data['hpx8']"], {}), "(hdulist[1].data['hpx8'])\n", (7706, 7731), True, 'import numpy as np\n'), ((8819, 8836), 'numpy.arange', 'np.arange', (['(0)', '(181)'], {}), '(0, 181)\n', (8828, 8836), True, 'import numpy as np\n'), ((8993, 9013), 'numpy.arange', 'np.arange', (['(-180)', '(180)'], {}), '(-180, 180)\n', (9002, 9013), True, 'import numpy as np\n'), ((9226, 9245), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9238, 9245), True, 'import numpy as np\n'), ((9507, 9568), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""w-"""'], {'coord': "['G']", 'lonlat': '(True)', 'lw': '(2)'}), "(theta, phi, 'w-', coord=['G'], lonlat=True, lw=2)\n", (9518, 9568), True, 'import healpy as hp\n'), ((9668, 9737), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""w-"""'], {'coord': "['E']", 'lonlat': '(True)', 'lw': '(2)', 'ls': '""":"""'}), "(theta, phi, 'w-', coord=['E'], lonlat=True, lw=2, ls=':')\n", (9679, 9737), True, 'import healpy as hp\n'), ((10514, 10645), 'healpy.projplot', 'hp.projplot', (['pos[0]', 'pos[1]'], {'color': 'color', 'markersize': '(5)', 'marker': '"""o"""', 'coord': 'coordsys', 'lonlat': '(True)', 'lw': 'lw', 'alpha': 'alpha', 'zorder': '(10)'}), "(pos[0], pos[1], color=color, markersize=5, marker='o', coord=\n coordsys, lonlat=True, lw=lw, alpha=alpha, zorder=10)\n", (10525, 10645), True, 'import healpy as hp\n'), ((10833, 10850), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (10844, 10850), True, 'import matplotlib.pyplot as plt\n'), ((11586, 11624), 'pandas.read_csv', 'pd.read_csv', (['path_dataset'], {'nrows': 'nrows'}), '(path_dataset, nrows=nrows)\n', (11597, 11624), True, 'import pandas as pd\n'), ((12278, 12292), 'numpy.array', 'np.array', (['dmdt'], {}), '(dmdt)\n', (12286, 12292), True, 'import numpy as np\n'), ((12313, 12347), 'numpy.expand_dims', 'np.expand_dims', (['self.dmdt'], {'axis': '(-1)'}), '(self.dmdt, axis=-1)\n', (12327, 12347), True, 'import numpy as np\n'), ((12483, 12499), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (12496, 12499), True, 'import numpy as np\n'), ((14145, 14175), 'numpy.expand_dims', 'np.expand_dims', (['target'], {'axis': '(1)'}), '(target, axis=1)\n', (14159, 14175), True, 'import numpy as np\n'), ((16440, 16534), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ds_indexes'], {'shuffle': '(True)', 'test_size': 'test_size', 'random_state': 'random_state'}), '(ds_indexes, shuffle=True, test_size=test_size,\n 
random_state=random_state)\n', (16456, 16534), False, 'from sklearn.model_selection import train_test_split\n'), ((16590, 16686), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_indexes'], {'shuffle': '(True)', 'test_size': 'val_size', 'random_state': 'random_state'}), '(train_indexes, shuffle=True, test_size=val_size,\n random_state=random_state)\n', (16606, 16686), False, 'from sklearn.model_selection import train_test_split\n'), ((18633, 18802), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[train_indexes, self.features].values, 'dmdt':\n self.dmdt[train_indexes]}, target[train_indexes])"], {}), "(({'features': self.df_ds.loc[\n train_indexes, self.features].values, 'dmdt': self.dmdt[train_indexes]},\n target[train_indexes]))\n", (18667, 18802), True, 'import tensorflow as tf\n'), ((18944, 19107), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[val_indexes, self.features].values, 'dmdt':\n self.dmdt[val_indexes]}, target[val_indexes])"], {}), "(({'features': self.df_ds.loc[val_indexes,\n self.features].values, 'dmdt': self.dmdt[val_indexes]}, target[\n val_indexes]))\n", (18978, 19107), True, 'import tensorflow as tf\n'), ((19250, 19416), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[test_indexes, self.features].values, 'dmdt':\n self.dmdt[test_indexes]}, target[test_indexes])"], {}), "(({'features': self.df_ds.loc[\n test_indexes, self.features].values, 'dmdt': self.dmdt[test_indexes]},\n target[test_indexes]))\n", (19284, 19416), True, 'import tensorflow as tf\n'), ((845, 871), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (869, 871), False, 'import datetime\n'), ((4698, 4747), 'numpy.linspace', 'np.linspace', (['limits[0]', 'limits[1]', 'number_of_bins'], {}), '(limits[0], limits[1], number_of_bins)\n', (4709, 4747), True, 'import numpy as np\n'), ((9479, 9498), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9491, 9498), True, 'import numpy as np\n'), ((9640, 9659), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9652, 9659), True, 'import numpy as np\n'), ((14429, 14469), 'numpy.rint', 'np.rint', (['self.df_ds[target_label].values'], {}), '(self.df_ds[target_label].values)\n', (14436, 14469), True, 'import numpy as np\n'), ((19576, 19745), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[index_dropped, self.features].values, 'dmdt':\n self.dmdt[index_dropped]}, target[index_dropped])"], {}), "(({'features': self.df_ds.loc[\n index_dropped, self.features].values, 'dmdt': self.dmdt[index_dropped]},\n target[index_dropped]))\n", (19610, 19745), True, 'import tensorflow as tf\n'), ((20547, 20570), 'numpy.array', 'np.array', (['train_indexes'], {}), '(train_indexes)\n', (20555, 20570), True, 'import numpy as np\n'), ((20591, 20612), 'numpy.array', 'np.array', (['val_indexes'], {}), '(val_indexes)\n', (20599, 20612), True, 'import numpy as np\n'), ((20634, 20656), 'numpy.array', 'np.array', (['test_indexes'], {}), '(test_indexes)\n', (20642, 20656), True, 'import numpy as np\n'), ((4593, 4612), 'numpy.log10', 'np.log10', (['limits[0]'], {}), '(limits[0])\n', (4601, 4612), True, 'import numpy as np\n'), ((4614, 4633), 'numpy.log10', 'np.log10', (['limits[1]'], {}), '(limits[1])\n', (4622, 4633), True, 'import numpy as np\n'), 
((12079, 12118), 'json.loads', 'json.loads', (["self.df_ds['dmdt'][i.Index]"], {}), "(self.df_ds['dmdt'][i.Index])\n", (12089, 12118), False, 'import json\n'), ((12510, 12521), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12518, 12521), True, 'import numpy as np\n'), ((15266, 15279), 'numpy.sum', 'np.sum', (['w_pos'], {}), '(w_pos)\n', (15272, 15279), True, 'import numpy as np\n'), ((15281, 15294), 'numpy.sum', 'np.sum', (['w_neg'], {}), '(w_neg)\n', (15287, 15294), True, 'import numpy as np\n'), ((15330, 15343), 'numpy.sum', 'np.sum', (['w_pos'], {}), '(w_pos)\n', (15336, 15343), True, 'import numpy as np\n'), ((15345, 15358), 'numpy.sum', 'np.sum', (['w_neg'], {}), '(w_neg)\n', (15351, 15358), True, 'import numpy as np\n'), ((21989, 22040), 'numpy.linalg.norm', 'np.linalg.norm', (['(1 / num_training_examples_per_class)'], {}), '(1 / num_training_examples_per_class)\n', (22003, 22040), True, 'import numpy as np\n'), ((22115, 22130), 'numpy.max', 'np.max', (['weights'], {}), '(weights)\n', (22121, 22130), True, 'import numpy as np\n'), ((5048, 5074), 'numpy.min', 'np.min', (["features['period']"], {}), "(features['period'])\n", (5054, 5074), True, 'import numpy as np\n'), ((5098, 5124), 'numpy.max', 'np.max', (["features['period']"], {}), "(features['period'])\n", (5104, 5124), True, 'import numpy as np\n'), ((12185, 12203), 'numpy.zeros', 'np.zeros', (['(26, 26)'], {}), '((26, 26))\n', (12193, 12203), True, 'import numpy as np\n'), ((17059, 17102), 'numpy.min', 'np.min', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17065, 17102), True, 'import numpy as np\n'), ((17131, 17174), 'numpy.max', 'np.max', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17137, 17174), True, 'import numpy as np\n'), ((17206, 17252), 'numpy.median', 'np.median', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17215, 17252), True, 'import numpy as np\n'), ((17282, 17326), 'numpy.mean', 'np.mean', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17289, 17326), True, 'import numpy as np\n'), ((17355, 17398), 'numpy.std', 'np.std', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17361, 17398), True, 'import numpy as np\n'), ((4844, 4870), 'numpy.min', 'np.min', (["features['period']"], {}), "(features['period'])\n", (4850, 4870), True, 'import numpy as np\n'), ((4904, 4930), 'numpy.max', 'np.max', (["features['period']"], {}), "(features['period'])\n", (4910, 4930), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 18:00:53 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index','NDX Index','CDAX Index' ,'DAX Index',
'ASX Index','UKX Index', 'TPX Index','NKY Index', 'SHCOMP Index' ,
'SZCOMP Index','XUTUM Index','XU100 Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '20040101'
firstday = '19990101'
today = date.today().strftime('%Y%m%d')
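# Pull the daily 'PE RATIO' history for all indices from Bloomberg
# (pdblp's bdh() issues a historical data request against the local terminal session).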
pe_ratio = con.bdh(index_tickers, 'PE RATIO', firstday, today)
pe_ratio_int = pe_ratio.interpolate(method='linear')
pe_ratio_int_w = pe_ratio_int.groupby(pd.Grouper(freq='W')).last()
#pe_ratio_last = pe_ratio_int_w[pe_ratio_int_w.index>=start]
#
#pe_ratio_last.columns = [i[0] for i in pe_ratio_last.columns]
#pe_ratio_last= pe_ratio_last[index_tickers]
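# Smooth the weekly P/E series with a 500-week (~10 year) rolling mean,
# emitting values only once at least 100 weekly observations are available.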
pe_ratio_smoothed = pe_ratio_int_w.rolling(500, min_periods=100).mean()
var_no='15'
pe_ratio_smoothed_last = pe_ratio_smoothed[pe_ratio_smoothed.index>=start]
pe_ratio_smoothed_last.columns = [i[0] for i in pe_ratio_smoothed_last.columns]
pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
pe_ratio_smoothed_last.columns = [var_no+'_'+i for i in pe_ratio_smoothed_last.columns]
# pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
#pe_ratio_smoothed_last.columns = ['15_US_NY','15_US_SPX','15_US_CCMP', '15_DE','15_UK','15_JP','15_CH_SH','15_CH_SZ', '15_TR','15_MX','15_BR','15_RU','15_SA']
pe_ratio_smoothed_last.to_excel('C:/Users/sb0538/Desktop/15022020/excels/15_peratiosmoothed.xlsx')
|
[
"pandas.Grouper",
"pdblp.BCon",
"datetime.date.today"
] |
[((219, 267), 'pdblp.BCon', 'pdblp.BCon', ([], {'debug': '(False)', 'port': '(8194)', 'timeout': '(5000)'}), '(debug=False, port=8194, timeout=5000)\n', (229, 267), False, 'import pdblp\n'), ((673, 685), 'datetime.date.today', 'date.today', ([], {}), '()\n', (683, 685), False, 'from datetime import date\n'), ((870, 890), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""W"""'}), "(freq='W')\n", (880, 890), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
import logging
import re
import subprocess
import sys
from typing import Dict
logger = logging.getLogger("py2ts.generate_service_registry")
logging.basicConfig(level=logging.INFO)
class RipgrepError(Exception):
pass
def camel_to_snake(name: str) -> str:
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
def get_service_registry_code(class_module_map: Dict[str, str]) -> str:
"""Return generated code for service registry."""
imports = []
services = []
for service_name, path in class_module_map.items():
imports.append(f"from {path} import {service_name}")
services.append(
f"{camel_to_snake(service_name)}: {service_name} = {service_name}()"
)
imports_code = "\n".join(imports)
services_code = "\n ".join(sorted(services))
return f"""
# Generated code. DO NOT EDIT!
from dataclasses import dataclass
{imports_code}
@dataclass
class ServiceRegistry:
{services_code}
service_registry = ServiceRegistry()
"""
def get_class_module_map() -> Dict[str, str]:
class_module_map = {}
result = subprocess.run(
f"rg '^(class \\w+Service)[\\(:]' -t py -o -r '$1'",
shell=True,
capture_output=True,
)
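    # With -o and -r '$1', ripgrep prints one "<path>:class <Name>Service" line for
    # every line-initial "class ...Service" definition found in Python files.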
# Command successful
if result.returncode == 0:
        # E.g., ['smartcat/services.py:class TrainingDataSetService', 'smartcat/services.py:class SmartCatService']
outputs = result.stdout.decode("utf-8").strip().split("\n")
logger.info(f"Output of rg:{outputs}")
for output in outputs:
            # E.g., smartcat/services.py:class SmartCatService
file_path, class_name = output.split(":class ")
module = file_path.split(".py")[0].replace("/", ".")
assert class_name not in class_module_map, f"Found duplicate {class_name}"
class_module_map[class_name] = module
elif result.returncode >= 1:
# resultcode of 1 means no matches were found
raise RipgrepError(
f"Got code: {result.returncode} with message {result.stderr!r}"
)
return class_module_map
if __name__ == "__main__":
try:
code = get_service_registry_code(get_class_module_map())
print(code)
except RipgrepError as e:
logger.error(e)
sys.exit(1)
|
[
"logging.getLogger",
"logging.basicConfig",
"subprocess.run",
"sys.exit",
"re.sub"
] |
[((110, 162), 'logging.getLogger', 'logging.getLogger', (['"""py2ts.generate_service_registry"""'], {}), "('py2ts.generate_service_registry')\n", (127, 162), False, 'import logging\n'), ((163, 202), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (182, 202), False, 'import logging\n'), ((296, 339), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'name'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', name)\n", (302, 339), False, 'import re\n'), ((1172, 1277), 'subprocess.run', 'subprocess.run', (['f"""rg \'^(class \\\\w+Service)[\\\\(:]\' -t py -o -r \'$1\'"""'], {'shell': '(True)', 'capture_output': '(True)'}), '(f"rg \'^(class \\\\w+Service)[\\\\(:]\' -t py -o -r \'$1\'", shell=\n True, capture_output=True)\n', (1186, 1277), False, 'import subprocess\n'), ((350, 394), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 'name'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', name)\n", (356, 394), False, 'import re\n'), ((2366, 2377), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2374, 2377), False, 'import sys\n')]
|
import locale
import pytest
from covid.utils import fmt
class TestUtilityFunctions:
def test_format_functions_en_US(self):
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
return pytest.skip()
assert fmt(0.10) == "0.1"
assert fmt(0.12) == "0.12"
assert fmt(0.01) == "0.01"
assert fmt(0.012) == "0.012"
assert fmt(0.0123) == "0.012"
assert fmt(0.00123) == "1.23e-03"
assert fmt(0.0012) == "1.2e-03"
assert fmt(1.2341) == "1.23"
assert fmt(12.341) == "12.34"
assert fmt(123.41) == "123.4"
assert fmt(1234) == "1,234"
assert fmt(1234.5) == "1,234"
assert fmt(42_123.1) == "42,123"
assert fmt(42_123) == "42,123"
assert fmt(1_000_000) == "1M"
assert fmt(10_000_000) == "10M"
assert fmt(12_000_000) == "12M"
assert fmt(12_300_000) == "12.3M"
assert fmt(12_340_000) == "12.34M"
assert fmt(12_341_000) == "12.34M"
assert fmt(-12_341_000) == "-12.34M"
assert fmt(123_456_000) == "123.5M"
assert fmt(1_234_567_000) == "1.23B"
def test_format_functions_pt_BR(self):
try:
locale.setlocale(locale.LC_ALL, "pt_BR.UTF-8")
except locale.Error:
return pytest.skip()
assert fmt(0.10) == "0,1"
assert fmt(0.12) == "0,12"
assert fmt(0.01) == "0,01"
assert fmt(0.012) == "0,012"
assert fmt(0.0123) == "0,012"
assert fmt(0.00123) == "1,23e-03"
assert fmt(0.0012) == "1,2e-03"
assert fmt(1.2341) == "1,23"
assert fmt(12.341) == "12,34"
assert fmt(123.41) == "123,4"
assert fmt(1234) == "1.234"
assert fmt(1234.5) == "1.234"
assert fmt(42_123.1) == "42.123"
assert fmt(42_123) == "42.123"
assert fmt(1_000_000) == "1M"
assert fmt(10_000_000) == "10M"
assert fmt(12_000_000) == "12M"
assert fmt(12_300_000) == "12,3M"
assert fmt(12_340_000) == "12,34M"
assert fmt(12_341_000) == "12,34M"
assert fmt(-12_341_000) == "-12,34M"
assert fmt(123_456_000) == "123,5M"
assert fmt(1_234_567_000) == "1,23B"
|
[
"pytest.skip",
"covid.utils.fmt",
"locale.setlocale"
] |
[((156, 202), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US.UTF-8"""'], {}), "(locale.LC_ALL, 'en_US.UTF-8')\n", (172, 202), False, 'import locale\n'), ((281, 289), 'covid.utils.fmt', 'fmt', (['(0.1)'], {}), '(0.1)\n', (284, 289), False, 'from covid.utils import fmt\n'), ((315, 324), 'covid.utils.fmt', 'fmt', (['(0.12)'], {}), '(0.12)\n', (318, 324), False, 'from covid.utils import fmt\n'), ((350, 359), 'covid.utils.fmt', 'fmt', (['(0.01)'], {}), '(0.01)\n', (353, 359), False, 'from covid.utils import fmt\n'), ((385, 395), 'covid.utils.fmt', 'fmt', (['(0.012)'], {}), '(0.012)\n', (388, 395), False, 'from covid.utils import fmt\n'), ((422, 433), 'covid.utils.fmt', 'fmt', (['(0.0123)'], {}), '(0.0123)\n', (425, 433), False, 'from covid.utils import fmt\n'), ((460, 472), 'covid.utils.fmt', 'fmt', (['(0.00123)'], {}), '(0.00123)\n', (463, 472), False, 'from covid.utils import fmt\n'), ((502, 513), 'covid.utils.fmt', 'fmt', (['(0.0012)'], {}), '(0.0012)\n', (505, 513), False, 'from covid.utils import fmt\n'), ((542, 553), 'covid.utils.fmt', 'fmt', (['(1.2341)'], {}), '(1.2341)\n', (545, 553), False, 'from covid.utils import fmt\n'), ((579, 590), 'covid.utils.fmt', 'fmt', (['(12.341)'], {}), '(12.341)\n', (582, 590), False, 'from covid.utils import fmt\n'), ((617, 628), 'covid.utils.fmt', 'fmt', (['(123.41)'], {}), '(123.41)\n', (620, 628), False, 'from covid.utils import fmt\n'), ((655, 664), 'covid.utils.fmt', 'fmt', (['(1234)'], {}), '(1234)\n', (658, 664), False, 'from covid.utils import fmt\n'), ((691, 702), 'covid.utils.fmt', 'fmt', (['(1234.5)'], {}), '(1234.5)\n', (694, 702), False, 'from covid.utils import fmt\n'), ((729, 741), 'covid.utils.fmt', 'fmt', (['(42123.1)'], {}), '(42123.1)\n', (732, 741), False, 'from covid.utils import fmt\n'), ((770, 780), 'covid.utils.fmt', 'fmt', (['(42123)'], {}), '(42123)\n', (773, 780), False, 'from covid.utils import fmt\n'), ((809, 821), 'covid.utils.fmt', 'fmt', (['(1000000)'], {}), '(1000000)\n', (812, 821), False, 'from covid.utils import fmt\n'), ((847, 860), 'covid.utils.fmt', 'fmt', (['(10000000)'], {}), '(10000000)\n', (850, 860), False, 'from covid.utils import fmt\n'), ((887, 900), 'covid.utils.fmt', 'fmt', (['(12000000)'], {}), '(12000000)\n', (890, 900), False, 'from covid.utils import fmt\n'), ((927, 940), 'covid.utils.fmt', 'fmt', (['(12300000)'], {}), '(12300000)\n', (930, 940), False, 'from covid.utils import fmt\n'), ((969, 982), 'covid.utils.fmt', 'fmt', (['(12340000)'], {}), '(12340000)\n', (972, 982), False, 'from covid.utils import fmt\n'), ((1012, 1025), 'covid.utils.fmt', 'fmt', (['(12341000)'], {}), '(12341000)\n', (1015, 1025), False, 'from covid.utils import fmt\n'), ((1055, 1069), 'covid.utils.fmt', 'fmt', (['(-12341000)'], {}), '(-12341000)\n', (1058, 1069), False, 'from covid.utils import fmt\n'), ((1100, 1114), 'covid.utils.fmt', 'fmt', (['(123456000)'], {}), '(123456000)\n', (1103, 1114), False, 'from covid.utils import fmt\n'), ((1144, 1159), 'covid.utils.fmt', 'fmt', (['(1234567000)'], {}), '(1234567000)\n', (1147, 1159), False, 'from covid.utils import fmt\n'), ((1243, 1289), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""pt_BR.UTF-8"""'], {}), "(locale.LC_ALL, 'pt_BR.UTF-8')\n", (1259, 1289), False, 'import locale\n'), ((1368, 1376), 'covid.utils.fmt', 'fmt', (['(0.1)'], {}), '(0.1)\n', (1371, 1376), False, 'from covid.utils import fmt\n'), ((1402, 1411), 'covid.utils.fmt', 'fmt', (['(0.12)'], {}), '(0.12)\n', (1405, 1411), False, 'from covid.utils import fmt\n'), ((1437, 
1446), 'covid.utils.fmt', 'fmt', (['(0.01)'], {}), '(0.01)\n', (1440, 1446), False, 'from covid.utils import fmt\n'), ((1472, 1482), 'covid.utils.fmt', 'fmt', (['(0.012)'], {}), '(0.012)\n', (1475, 1482), False, 'from covid.utils import fmt\n'), ((1509, 1520), 'covid.utils.fmt', 'fmt', (['(0.0123)'], {}), '(0.0123)\n', (1512, 1520), False, 'from covid.utils import fmt\n'), ((1547, 1559), 'covid.utils.fmt', 'fmt', (['(0.00123)'], {}), '(0.00123)\n', (1550, 1559), False, 'from covid.utils import fmt\n'), ((1589, 1600), 'covid.utils.fmt', 'fmt', (['(0.0012)'], {}), '(0.0012)\n', (1592, 1600), False, 'from covid.utils import fmt\n'), ((1629, 1640), 'covid.utils.fmt', 'fmt', (['(1.2341)'], {}), '(1.2341)\n', (1632, 1640), False, 'from covid.utils import fmt\n'), ((1666, 1677), 'covid.utils.fmt', 'fmt', (['(12.341)'], {}), '(12.341)\n', (1669, 1677), False, 'from covid.utils import fmt\n'), ((1704, 1715), 'covid.utils.fmt', 'fmt', (['(123.41)'], {}), '(123.41)\n', (1707, 1715), False, 'from covid.utils import fmt\n'), ((1742, 1751), 'covid.utils.fmt', 'fmt', (['(1234)'], {}), '(1234)\n', (1745, 1751), False, 'from covid.utils import fmt\n'), ((1778, 1789), 'covid.utils.fmt', 'fmt', (['(1234.5)'], {}), '(1234.5)\n', (1781, 1789), False, 'from covid.utils import fmt\n'), ((1816, 1828), 'covid.utils.fmt', 'fmt', (['(42123.1)'], {}), '(42123.1)\n', (1819, 1828), False, 'from covid.utils import fmt\n'), ((1857, 1867), 'covid.utils.fmt', 'fmt', (['(42123)'], {}), '(42123)\n', (1860, 1867), False, 'from covid.utils import fmt\n'), ((1896, 1908), 'covid.utils.fmt', 'fmt', (['(1000000)'], {}), '(1000000)\n', (1899, 1908), False, 'from covid.utils import fmt\n'), ((1934, 1947), 'covid.utils.fmt', 'fmt', (['(10000000)'], {}), '(10000000)\n', (1937, 1947), False, 'from covid.utils import fmt\n'), ((1974, 1987), 'covid.utils.fmt', 'fmt', (['(12000000)'], {}), '(12000000)\n', (1977, 1987), False, 'from covid.utils import fmt\n'), ((2014, 2027), 'covid.utils.fmt', 'fmt', (['(12300000)'], {}), '(12300000)\n', (2017, 2027), False, 'from covid.utils import fmt\n'), ((2056, 2069), 'covid.utils.fmt', 'fmt', (['(12340000)'], {}), '(12340000)\n', (2059, 2069), False, 'from covid.utils import fmt\n'), ((2099, 2112), 'covid.utils.fmt', 'fmt', (['(12341000)'], {}), '(12341000)\n', (2102, 2112), False, 'from covid.utils import fmt\n'), ((2142, 2156), 'covid.utils.fmt', 'fmt', (['(-12341000)'], {}), '(-12341000)\n', (2145, 2156), False, 'from covid.utils import fmt\n'), ((2187, 2201), 'covid.utils.fmt', 'fmt', (['(123456000)'], {}), '(123456000)\n', (2190, 2201), False, 'from covid.utils import fmt\n'), ((2231, 2246), 'covid.utils.fmt', 'fmt', (['(1234567000)'], {}), '(1234567000)\n', (2234, 2246), False, 'from covid.utils import fmt\n'), ((251, 264), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (262, 264), False, 'import pytest\n'), ((1338, 1351), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1349, 1351), False, 'import pytest\n')]
|
#!/usr/bin/env python
from anti_instagram.AntiInstagram import AntiInstagram
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, FSMState)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.timekeeper import TimeKeeper
import cv2
import rospy
import threading
import time
from line_detector.line_detector_plot import color_segment, drawLines
import numpy as np
class LineDetectorNode(object):
def __init__(self):
self.node_name = rospy.get_name()
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
self.active = True
self.stats = Stats()
# Only be verbose every 10 cycles
self.intermittent_interval = 100
self.intermittent_counter = 0
# color correction
self.ai = AntiInstagram()
# these will be added if it becomes verbose
self.pub_edge = None
self.pub_colorSegment = None
self.detector = None
self.verbose = None
self.updateParams(None)
# Publishers
self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
# FSM
self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
self.sub_fsm_mode = rospy.Subscriber("~fsm_mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
old_verbose = self.verbose
self.verbose = rospy.get_param('~verbose', True)
# self.loginfo('verbose = %r' % self.verbose)
if self.verbose != old_verbose:
self.loginfo('Verbose is now %r' % self.verbose)
self.image_size = rospy.get_param('~img_size')
self.top_cutoff = rospy.get_param('~top_cutoff')
if self.detector is None:
c = rospy.get_param('~detector')
assert isinstance(c, list) and len(c) == 2, c
# if str(self.detector_config) != str(c):
self.loginfo('new detector config: %s' % str(c))
self.detector = instantiate(c[0], c[1])
# self.detector_config = c
if self.verbose and self.pub_edge is None:
self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)
#FSM
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
#FSM
def cbMode(self, mode_msg):
self.fsm_state = mode_msg.state # String of current FSM state
def cbImage(self, image_msg):
self.stats.received()
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
# Returns rightaway
def cbTransform(self, transform_msg):
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
self.stats.processed()
if self.intermittent_log_now():
self.intermittent_log(self.stats.info())
self.stats.reset()
tk = TimeKeeper(image_msg)
self.intermittent_counter += 1
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
tk.completed('decoded')
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
tk.completed('resized')
# apply color correction: AntiInstagram
image_cv_corr = self.ai.applyTransform(image_cv)
image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
tk.completed('corrected')
# Set the image to be detected
self.detector.setImage(image_cv_corr)
# Detect lines and normals
white = self.detector.detectLines('white')
yellow = self.detector.detectLines('yellow')
red = self.detector.detectLines('red')
tk.completed('detected')
# SegmentList constructor
segmentList = SegmentList()
segmentList.header.stamp = image_msg.header.stamp
# Convert to normalized pixel coordinates, and add segments to segmentList
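        # arr_cutoff shifts the y coordinates back into the full resized-image frame
        # (the detector worked on the cropped image); arr_ratio rescales x/y to [0, 1].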
arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
if len(white.lines) > 0:
lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
if len(yellow.lines) > 0:
lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
if len(red.lines) > 0:
lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
len(yellow.lines), len(red.lines)))
tk.completed('prepared')
# Publish segmentList
self.pub_lines.publish(segmentList)
tk.completed('--pub_lines--')
# VISUALIZATION only below
if self.verbose:
# Draw lines and normals
image_with_lines = np.copy(image_cv_corr)
drawLines(image_with_lines, white.lines, (0, 0, 0))
drawLines(image_with_lines, yellow.lines, (255, 0, 0))
drawLines(image_with_lines, red.lines, (0, 255, 0))
tk.completed('drawn')
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
tk.completed('pub_image')
# if self.verbose:
colorSegment = color_segment(white.area, red.area, yellow.area)
edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
self.pub_edge.publish(edge_msg_out)
self.pub_colorSegment.publish(colorSegment_msg_out)
tk.completed('pub_edge/pub_segment')
self.intermittent_log(tk.getall())
def onShutdown(self):
self.loginfo("Shutdown.")
def toSegmentMsg(self, lines, normals, color):
segmentMsgList = []
for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
segment = Segment()
segment.color = color
segment.pixels_normalized[0].x = x1
segment.pixels_normalized[0].y = y1
segment.pixels_normalized[1].x = x2
segment.pixels_normalized[1].y = y2
segment.normal.x = norm_x
segment.normal.y = norm_y
segmentMsgList.append(segment)
return segmentMsgList
class Stats():
def __init__(self):
self.nresets = 0
self.reset()
def reset(self):
self.nresets += 1
self.t0 = time.time()
self.nreceived = 0
self.nskipped = 0
self.nprocessed = 0
def received(self):
if self.nreceived == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node received first image.')
self.nreceived += 1
def skipped(self):
self.nskipped += 1
def processed(self):
if self.nprocessed == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node processing first image.')
self.nprocessed += 1
def info(self):
delta = time.time() - self.t0
if self.nreceived:
skipped_perc = (100.0 * self.nskipped / self.nreceived)
else:
skipped_perc = 0
def fps(x):
return '%.1f fps' % (x / delta)
m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
(delta, self.nreceived, fps(self.nreceived),
self.nprocessed, fps(self.nprocessed),
self.nskipped, fps(self.nskipped), skipped_perc))
return m
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
|
[
"duckietown_utils.jpg.image_cv_from_jpg",
"cv2.convertScaleAbs",
"line_detector.line_detector_plot.drawLines",
"numpy.hstack",
"rospy.init_node",
"duckietown_msgs.msg.Segment",
"numpy.array",
"line_detector.timekeeper.TimeKeeper",
"duckietown_msgs.msg.SegmentList",
"anti_instagram.AntiInstagram.AntiInstagram",
"threading.Lock",
"rospy.Duration.from_sec",
"cv_bridge.CvBridge",
"rospy.spin",
"rospy.Subscriber",
"rospy.get_param",
"duckietown_utils.instantiate_utils.instantiate",
"rospy.get_name",
"cv2.resize",
"rospy.Publisher",
"time.time",
"rospy.loginfo",
"rospy.on_shutdown",
"numpy.copy",
"line_detector.line_detector_plot.color_segment",
"threading.Thread"
] |
[((10551, 10600), 'rospy.init_node', 'rospy.init_node', (['"""line_detector"""'], {'anonymous': '(False)'}), "('line_detector', anonymous=False)\n", (10566, 10600), False, 'import rospy\n'), ((10648, 10696), 'rospy.on_shutdown', 'rospy.on_shutdown', (['line_detector_node.onShutdown'], {}), '(line_detector_node.onShutdown)\n', (10665, 10696), False, 'import rospy\n'), ((10701, 10713), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (10711, 10713), False, 'import rospy\n'), ((752, 768), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (766, 768), False, 'import rospy\n'), ((820, 836), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (834, 836), False, 'import threading\n'), ((907, 917), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (915, 917), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1144, 1159), 'anti_instagram.AntiInstagram.AntiInstagram', 'AntiInstagram', ([], {}), '()\n', (1157, 1159), False, 'from anti_instagram.AntiInstagram import AntiInstagram\n'), ((1428, 1487), 'rospy.Publisher', 'rospy.Publisher', (['"""~segment_list"""', 'SegmentList'], {'queue_size': '(1)'}), "('~segment_list', SegmentList, queue_size=1)\n", (1443, 1487), False, 'import rospy\n'), ((1513, 1570), 'rospy.Publisher', 'rospy.Publisher', (['"""~image_with_lines"""', 'Image'], {'queue_size': '(1)'}), "('~image_with_lines', Image, queue_size=1)\n", (1528, 1570), False, 'import rospy\n'), ((1626, 1697), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~image"""', 'CompressedImage', 'self.cbImage'], {'queue_size': '(1)'}), "('~image', CompressedImage, self.cbImage, queue_size=1)\n", (1642, 1697), False, 'import rospy\n'), ((1727, 1817), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~transform"""', 'AntiInstagramTransform', 'self.cbTransform'], {'queue_size': '(1)'}), "('~transform', AntiInstagramTransform, self.cbTransform,\n queue_size=1)\n", (1743, 1817), False, 'import rospy\n'), ((1854, 1923), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~switch"""', 'BoolStamped', 'self.cbSwitch'], {'queue_size': '(1)'}), "('~switch', BoolStamped, self.cbSwitch, queue_size=1)\n", (1870, 1923), False, 'import rospy\n'), ((1952, 2018), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~fsm_mode"""', 'FSMState', 'self.cbMode'], {'queue_size': '(1)'}), "('~fsm_mode', FSMState, self.cbMode, queue_size=1)\n", (1968, 2018), False, 'import rospy\n'), ((2028, 2115), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Initialized (verbose = %s).' % (self.node_name, self.verbose))"], {}), "('[%s] Initialized (verbose = %s).' 
% (self.node_name, self.\n verbose))\n", (2041, 2115), False, 'import rospy\n'), ((2276, 2309), 'rospy.get_param', 'rospy.get_param', (['"""~verbose"""', '(True)'], {}), "('~verbose', True)\n", (2291, 2309), False, 'import rospy\n'), ((2492, 2520), 'rospy.get_param', 'rospy.get_param', (['"""~img_size"""'], {}), "('~img_size')\n", (2507, 2520), False, 'import rospy\n'), ((2547, 2577), 'rospy.get_param', 'rospy.get_param', (['"""~top_cutoff"""'], {}), "('~top_cutoff')\n", (2562, 2577), False, 'import rospy\n'), ((3535, 3596), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.processImage', 'args': '(image_msg,)'}), '(target=self.processImage, args=(image_msg,))\n', (3551, 3596), False, 'import threading\n'), ((3904, 3950), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] %s' % (self.node_name, s))"], {}), "('[%s] %s' % (self.node_name, s))\n", (3917, 3950), False, 'import rospy\n'), ((4789, 4810), 'line_detector.timekeeper.TimeKeeper', 'TimeKeeper', (['image_msg'], {}), '(image_msg)\n', (4799, 4810), False, 'from line_detector.timekeeper import TimeKeeper\n'), ((5729, 5763), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['image_cv_corr'], {}), '(image_cv_corr)\n', (5748, 5763), False, 'import cv2\n'), ((6169, 6182), 'duckietown_msgs.msg.SegmentList', 'SegmentList', ([], {}), '()\n', (6180, 6182), False, 'from duckietown_msgs.msg import AntiInstagramTransform, BoolStamped, Segment, SegmentList, Vector2D, FSMState\n'), ((6354, 6404), 'numpy.array', 'np.array', (['(0, self.top_cutoff, 0, self.top_cutoff)'], {}), '((0, self.top_cutoff, 0, self.top_cutoff))\n', (6362, 6404), True, 'import numpy as np\n'), ((6425, 6544), 'numpy.array', 'np.array', (['(1.0 / self.image_size[1], 1.0 / self.image_size[0], 1.0 / self.image_size[\n 1], 1.0 / self.image_size[0])'], {}), '((1.0 / self.image_size[1], 1.0 / self.image_size[0], 1.0 / self.\n image_size[1], 1.0 / self.image_size[0]))\n', (6433, 6544), True, 'import numpy as np\n'), ((8856, 8883), 'numpy.hstack', 'np.hstack', (['(lines, normals)'], {}), '((lines, normals))\n', (8865, 8883), True, 'import numpy as np\n'), ((9457, 9468), 'time.time', 'time.time', ([], {}), '()\n', (9466, 9468), False, 'import time\n'), ((2131, 2159), 'rospy.Duration.from_sec', 'rospy.Duration.from_sec', (['(2.0)'], {}), '(2.0)\n', (2154, 2159), False, 'import rospy\n'), ((2629, 2657), 'rospy.get_param', 'rospy.get_param', (['"""~detector"""'], {}), "('~detector')\n", (2644, 2657), False, 'import rospy\n'), ((2865, 2888), 'duckietown_utils.instantiate_utils.instantiate', 'instantiate', (['c[0]', 'c[1]'], {}), '(c[0], c[1])\n', (2876, 2888), False, 'from duckietown_utils.instantiate_utils import instantiate\n'), ((3008, 3053), 'rospy.Publisher', 'rospy.Publisher', (['"""~edge"""', 'Image'], {'queue_size': '(1)'}), "('~edge', Image, queue_size=1)\n", (3023, 3053), False, 'import rospy\n'), ((3090, 3143), 'rospy.Publisher', 'rospy.Publisher', (['"""~colorSegment"""', 'Image'], {'queue_size': '(1)'}), "('~colorSegment', Image, queue_size=1)\n", (3105, 3143), False, 'import rospy\n'), ((4947, 4980), 'duckietown_utils.jpg.image_cv_from_jpg', 'image_cv_from_jpg', (['image_msg.data'], {}), '(image_msg.data)\n', (4964, 4980), False, 'from duckietown_utils.jpg import image_cv_from_jpg\n'), ((5385, 5484), 'cv2.resize', 'cv2.resize', (['image_cv', '(self.image_size[1], self.image_size[0])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(image_cv, (self.image_size[1], self.image_size[0]),\n interpolation=cv2.INTER_NEAREST)\n', (5395, 5484), False, 'import cv2\n'), ((7643, 7665), 
'numpy.copy', 'np.copy', (['image_cv_corr'], {}), '(image_cv_corr)\n', (7650, 7665), True, 'import numpy as np\n'), ((7678, 7729), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'white.lines', '(0, 0, 0)'], {}), '(image_with_lines, white.lines, (0, 0, 0))\n', (7687, 7729), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((7742, 7796), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'yellow.lines', '(255, 0, 0)'], {}), '(image_with_lines, yellow.lines, (255, 0, 0))\n', (7751, 7796), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((7809, 7860), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'red.lines', '(0, 255, 0)'], {}), '(image_with_lines, red.lines, (0, 255, 0))\n', (7818, 7860), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((8228, 8276), 'line_detector.line_detector_plot.color_segment', 'color_segment', (['white.area', 'red.area', 'yellow.area'], {}), '(white.area, red.area, yellow.area)\n', (8241, 8276), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((8906, 8915), 'duckietown_msgs.msg.Segment', 'Segment', ([], {}), '()\n', (8913, 8915), False, 'from duckietown_msgs.msg import AntiInstagramTransform, BoolStamped, Segment, SegmentList, Vector2D, FSMState\n'), ((9641, 9698), 'rospy.loginfo', 'rospy.loginfo', (['"""line_detector_node received first image."""'], {}), "('line_detector_node received first image.')\n", (9654, 9698), False, 'import rospy\n'), ((9871, 9930), 'rospy.loginfo', 'rospy.loginfo', (['"""line_detector_node processing first image."""'], {}), "('line_detector_node processing first image.')\n", (9884, 9930), False, 'import rospy\n'), ((9998, 10009), 'time.time', 'time.time', ([], {}), '()\n', (10007, 10009), False, 'import time\n')]
|
from django.urls import re_path
from xr_embeds.views import geojson_view, embed_html_view
app_name = "embeds"
urlpatterns = [
re_path(r"^(\d+)/html/$", embed_html_view, name="embed_html"),
re_path(
r"^geojson/(?P<model_slug>\w+)/(?P<query_slug>\w+)/$",
geojson_view,
name="geojson_view",
),
]
|
[
"django.urls.re_path"
] |
[((133, 194), 'django.urls.re_path', 're_path', (['"""^(\\\\d+)/html/$"""', 'embed_html_view'], {'name': '"""embed_html"""'}), "('^(\\\\d+)/html/$', embed_html_view, name='embed_html')\n", (140, 194), False, 'from django.urls import re_path\n'), ((200, 302), 'django.urls.re_path', 're_path', (['"""^geojson/(?P<model_slug>\\\\w+)/(?P<query_slug>\\\\w+)/$"""', 'geojson_view'], {'name': '"""geojson_view"""'}), "('^geojson/(?P<model_slug>\\\\w+)/(?P<query_slug>\\\\w+)/$',\n geojson_view, name='geojson_view')\n", (207, 302), False, 'from django.urls import re_path\n')]
|
from django.db import models
from django.core import signing
class PasswordMixin(object):
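    # Note: django.core.signing signs and serializes the value; signing.loads() can
    # recover the original plaintext, so this is reversible rather than a one-way hash.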
password_encrypted = models.CharField(max_length=128, null=False, blank=False)
@property
def password(self):
return signing.loads(self.password_encrypted)
@password.setter
def password(self, value):
self.password_encrypted = signing.dumps(value)
|
[
"django.core.signing.loads",
"django.db.models.CharField",
"django.core.signing.dumps"
] |
[((118, 175), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'null': '(False)', 'blank': '(False)'}), '(max_length=128, null=False, blank=False)\n', (134, 175), False, 'from django.db import models\n'), ((230, 268), 'django.core.signing.loads', 'signing.loads', (['self.password_encrypted'], {}), '(self.password_encrypted)\n', (243, 268), False, 'from django.core import signing\n'), ((356, 376), 'django.core.signing.dumps', 'signing.dumps', (['value'], {}), '(value)\n', (369, 376), False, 'from django.core import signing\n')]
|
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from publications import list_import_formats, get_publications_importer
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = '<NAME> <<EMAIL>>'
__docformat__ = 'epytext'
from django.contrib import admin
from django import forms
import publications.models
from publications.models import Publication, PublicationType, Group, Authorship, Person, Metadata, Import
from publications.fields import PeopleField
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
def merge_people_by_family_name(modeladmin, request, queryset):
groups = publications.models.group_people_by_family_name(list(queryset))
groups = filter(lambda x : len(x) > 2, [group for fn, group in groups.items()])
if not len(groups):
messages.info(request, "Nothing to merge")
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
return render_to_response('admin/publications/person/merge.html', {
'groups': groups
}, context_instance=RequestContext(request))
def merge_people(modeladmin, request, queryset):
return render_to_response('admin/publications/person/merge.html', {
'groups': [list(queryset)]
}, context_instance=RequestContext(request))
class PublicationForm(forms.ModelForm):
class Meta:
model = Publication
fields = '__all__'
people_authorship = PeopleField(label="People", max_length=1024, help_text = 'List of authors separated by semicolon. Both first-name last-name and last-name, first name forms can be used. Example: <NAME>; <NAME>; <NAME>.')
latitude = forms.FloatField(required=False)
def __init__(self, *args, **kwargs):
super(PublicationForm, self).__init__(*args, **kwargs)
if hasattr(self, 'instance'):
instance = self.instance
self.initial['people_authorship'] = instance.people_as_string()
def save(self, commit=True):
model = super(PublicationForm, self).save(commit=False)
model.set_people = self.cleaned_data['people_authorship']
if commit:
model.save()
return model
class MetadataInline(admin.TabularInline):
model = Metadata
class AuthorshipInline(admin.TabularInline):
model = Authorship
class PublicationAdmin(admin.ModelAdmin):
radio_fields = {"publication_type": admin.HORIZONTAL}
raw_id_fields = ["people"]
list_display = ('publication_type', 'first_author', 'title', 'year', 'within')
list_display_links = ('title',)
search_fields = ('title', 'within', 'people', 'tags', 'year')
fieldsets = (
("Basic information", {'fields':
('publication_type', 'title', 'people_authorship', 'abstract', 'note')}),
("Publishing information", {'fields':
('year', 'month', 'within', 'publisher', 'volume', 'number', 'pages')}),
("Resources", {'fields':
('url', 'code', 'file', 'doi')}),
("Categoritzation", {'fields':
('tags', 'public', 'groups')}),
)
inlines = [MetadataInline]
form = PublicationForm
def import_publications(self, request):
if request.method == 'POST':
# container for error messages
errors = {"publications" : [], "importer" : []}
# check for errors
if not request.POST['publications']:
errors["publications"].append('This field is required.')
if not request.POST['importer']:
errors["importer"].append('This field is required.')
else:
importer = get_publications_importer(request.POST['importer'])
if importer:
publications = []
importer.import_from_string(request.POST['publications'], lambda x : publications.append(x), lambda x : errors["publications"].append(x))
for publication in publications:
i = Import(title = publication["title"], data = publication, source = importer.get_format_identifier())
i.save()
if not publications:
errors["publications"].append('No valid entries found.')
else:
errors["importer"].append('Not a registered importer.')
if errors["publications"] or errors["importer"]:
# some error occurred
return render_to_response(
'admin/publications/publication/import.html', {
'errors': errors,
'title': 'Import publications',
'importers' : list_import_formats(),
'request': request},
RequestContext(request))
else:
if len(publications) > 1:
msg = 'Successfully added ' + str(len(publications)) + ' publications to import queue.'
else:
msg = 'Successfully added publication to import queue.'
# show message
messages.info(request, msg)
# redirect to publication listing
return HttpResponseRedirect(reverse("admin:publications_publication_changelist"))
else:
return render_to_response(
'admin/publications/publication/import.html', {
'title': 'Import publications', 'importers' : list_import_formats(),
'request': request},
RequestContext(request))
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(PublicationAdmin, self).get_urls()
my_urls = patterns('',
url(
r'import',
self.admin_site.admin_view(self.import_publications),
name='import_publications',
),
)
return my_urls + urls
class GroupAdmin(admin.ModelAdmin):
list_display = ('identifier', 'title', 'public')
class PublicationTypeAdmin(admin.ModelAdmin):
list_display = ('identifier', 'title', 'description', 'weight')
class PersonAdmin(admin.ModelAdmin):
list_display = ('family_name', 'primary_name' , 'url', 'public', 'group')
list_display_links = ('primary_name', 'family_name',)
actions = [merge_people, merge_people_by_family_name]
def merge(self, request):
if request.method == 'POST':
if request.POST.has_key("_cancel"):
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
groups_count = int(request.POST.get("groups_count", 0))
groups = []
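      # Each submitted group arrives as "group<N>_set" (space-separated Person ids)
      # plus "group<N>", the id of the entry used as the pivot for the merge.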
for group_id in xrange(1, groups_count+1):
#TODO: more validation
group_entries = [ int(i.strip()) for i in request.POST.get("group%d_set" % group_id, "").strip().split(" ") ]
pivot_id = int(request.POST.get("group%d" % group_id, "-1"))
if pivot_id in group_entries and len(group_entries) > 1:
group = list(Person.objects.filter(id__in = group_entries))
pivot = filter(lambda x : x.id == pivot_id, group)[0]
publications.models.merge_people(group, pivot)
messages.info(request, "Merged %d people entries" % len(group))
elif len(group_entries) == 1:
continue
else:
groups.append(list(Person.objects.filter(id__in = group_entries)))
if len(groups) > 0:
return render_to_response('admin/publications/person/merge.html', {
'groups': groups
}, context_instance=RequestContext(request))
return HttpResponseRedirect(reverse("admin:publications_person_changelist"))
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(PersonAdmin, self).get_urls()
my_urls = patterns('',
url(
r'merge',
self.admin_site.admin_view(self.merge),
name='merge_people',
),
)
return my_urls + urls
admin.site.register(Publication, PublicationAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(PublicationType, PublicationTypeAdmin)
|
[
"publications.models.Person.objects.filter",
"django.forms.FloatField",
"django.contrib.admin.site.register",
"django.template.RequestContext",
"publications.list_import_formats",
"django.contrib.messages.info",
"django.core.urlresolvers.reverse",
"publications.fields.PeopleField",
"publications.get_publications_importer"
] |
[((7640, 7690), 'django.contrib.admin.site.register', 'admin.site.register', (['Publication', 'PublicationAdmin'], {}), '(Publication, PublicationAdmin)\n', (7659, 7690), False, 'from django.contrib import admin\n'), ((7691, 7729), 'django.contrib.admin.site.register', 'admin.site.register', (['Group', 'GroupAdmin'], {}), '(Group, GroupAdmin)\n', (7710, 7729), False, 'from django.contrib import admin\n'), ((7730, 7770), 'django.contrib.admin.site.register', 'admin.site.register', (['Person', 'PersonAdmin'], {}), '(Person, PersonAdmin)\n', (7749, 7770), False, 'from django.contrib import admin\n'), ((7771, 7829), 'django.contrib.admin.site.register', 'admin.site.register', (['PublicationType', 'PublicationTypeAdmin'], {}), '(PublicationType, PublicationTypeAdmin)\n', (7790, 7829), False, 'from django.contrib import admin\n'), ((1585, 1796), 'publications.fields.PeopleField', 'PeopleField', ([], {'label': '"""People"""', 'max_length': '(1024)', 'help_text': '"""List of authors separated by semicolon. Both first-name last-name and last-name, first name forms can be used. Example: <NAME>; <NAME>; <NAME>."""'}), "(label='People', max_length=1024, help_text=\n 'List of authors separated by semicolon. Both first-name last-name and last-name, first name forms can be used. Example: <NAME>; <NAME>; <NAME>.'\n )\n", (1596, 1796), False, 'from publications.fields import PeopleField\n'), ((1803, 1835), 'django.forms.FloatField', 'forms.FloatField', ([], {'required': '(False)'}), '(required=False)\n', (1819, 1835), False, 'from django import forms\n'), ((992, 1034), 'django.contrib.messages.info', 'messages.info', (['request', '"""Nothing to merge"""'], {}), "(request, 'Nothing to merge')\n", (1005, 1034), False, 'from django.contrib import messages\n'), ((1067, 1114), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:publications_person_changelist"""'], {}), "('admin:publications_person_changelist')\n", (1074, 1114), False, 'from django.core.urlresolvers import reverse\n'), ((1233, 1256), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1247, 1256), False, 'from django.template import RequestContext\n'), ((1435, 1458), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1449, 1458), False, 'from django.template import RequestContext\n'), ((7262, 7309), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:publications_person_changelist"""'], {}), "('admin:publications_person_changelist')\n", (7269, 7309), False, 'from django.core.urlresolvers import reverse\n'), ((3608, 3659), 'publications.get_publications_importer', 'get_publications_importer', (["request.POST['importer']"], {}), "(request.POST['importer'])\n", (3633, 3659), False, 'from publications import list_import_formats, get_publications_importer\n'), ((4846, 4873), 'django.contrib.messages.info', 'messages.info', (['request', 'msg'], {}), '(request, msg)\n', (4859, 4873), False, 'from django.contrib import messages\n'), ((5224, 5247), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (5238, 5247), False, 'from django.template import RequestContext\n'), ((4565, 4588), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (4579, 4588), False, 'from django.template import RequestContext\n'), ((4953, 5005), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:publications_publication_changelist"""'], {}), "('admin:publications_publication_changelist')\n", (4960, 5005), False, 'from 
django.core.urlresolvers import reverse\n'), ((5162, 5183), 'publications.list_import_formats', 'list_import_formats', ([], {}), '()\n', (5181, 5183), False, 'from publications import list_import_formats, get_publications_importer\n'), ((6170, 6217), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:publications_person_changelist"""'], {}), "('admin:publications_person_changelist')\n", (6177, 6217), False, 'from django.core.urlresolvers import reverse\n'), ((4499, 4520), 'publications.list_import_formats', 'list_import_formats', ([], {}), '()\n', (4518, 4520), False, 'from publications import list_import_formats, get_publications_importer\n'), ((6654, 6697), 'publications.models.Person.objects.filter', 'Person.objects.filter', ([], {'id__in': 'group_entries'}), '(id__in=group_entries)\n', (6675, 6697), False, 'from publications.models import Publication, PublicationType, Group, Authorship, Person, Metadata, Import\n'), ((7204, 7227), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (7218, 7227), False, 'from django.template import RequestContext\n'), ((6996, 7039), 'publications.models.Person.objects.filter', 'Person.objects.filter', ([], {'id__in': 'group_entries'}), '(id__in=group_entries)\n', (7017, 7039), False, 'from publications.models import Publication, PublicationType, Group, Authorship, Person, Metadata, Import\n')]
|
import logging
from collections import defaultdict
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import get_host_visible_vars
from tests.common.utilities import wait_until
CONTAINER_CHECK_INTERVAL_SECS = 1
CONTAINER_RESTART_THRESHOLD_SECS = 180
logger = logging.getLogger(__name__)
def is_supervisor_node(inv_files, hostname):
"""Check if the current node is a supervisor node in case of multi-DUT.
    @param inv_files: List of inventory file paths. In tests,
                      you can get it from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
        Currently, we use 'card_type' in the inventory to make the decision. If 'card_type' for the node is defined in
        the inventory and it is 'supervisor', return True; otherwise return False. In the future, we may change this
        logic to derive it from the DUT if possible.
"""
dut_vars = get_host_visible_vars(inv_files, hostname)
if 'card_type' in dut_vars and dut_vars['card_type'] == 'supervisor':
return True
return False
def is_frontend_node(inv_files, hostname):
"""Check if the current node is a frontend node in case of multi-DUT.
    @param inv_files: List of inventory file paths. In tests,
                      you can get it from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
        True if it is not any other type of node. Currently, the only other supported node type is the 'supervisor'
        node. If we add more node types, they will need to be excluded from this method as well.
"""
return not is_supervisor_node(inv_files, hostname)
def is_container_running(duthost, container_name):
"""Decides whether the container is running or not
@param duthost: Host DUT.
@param container_name: Name of a container.
Returns:
        Boolean value. True means the container is running.
"""
running_containers = duthost.shell(r"docker ps -f 'status=running' --format \{\{.Names\}\}")['stdout_lines']
return container_name in running_containers
def check_container_state(duthost, container_name, should_be_running):
"""Determines whether a container is in the expected state (running/not running)
@param duthost: Host DUT.
@param container_name: Name of container.
@param should_be_running: Boolean value.
Returns:
This function will return True if the container was in the expected state.
Otherwise, it will return False.
"""
is_running = is_container_running(duthost, container_name)
return is_running == should_be_running
def is_hitting_start_limit(duthost, container_name):
"""Checks whether the container can not be restarted is due to start-limit-hit.
@param duthost: Host DUT.
@param ontainer_name: name of a container.
Returns:
If start limitation was hit, then this function will return True. Otherwise
it returns False.
"""
service_status = duthost.shell("sudo systemctl status {}.service | grep 'Active'".format(container_name))
for line in service_status["stdout_lines"]:
if "start-limit-hit" in line:
return True
return False
def clear_failed_flag_and_restart(duthost, container_name):
"""Clears the failed flag of a container and restart it.
@param duthost: Host DUT.
@param container_name: name of a container.
Returns:
None
"""
logger.info("{} hits start limit and clear reset-failed flag".format(container_name))
duthost.shell("sudo systemctl reset-failed {}.service".format(container_name))
duthost.shell("sudo systemctl start {}.service".format(container_name))
restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
CONTAINER_CHECK_INTERVAL_SECS,
check_container_state, duthost, container_name, True)
pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name))
def get_group_program_info(duthost, container_name, group_name):
"""Gets program names, running status and their pids by analyzing the command
output of "docker exec <container_name> supervisorctl status". Program name
at here represents a program which is part of group <group_name>
Args:
duthost: Hostname of DUT.
container_name: A string shows container name.
program_name: A string shows process name.
Returns:
A dictionary where keys are the program names and values are their running
status and pids.
"""
group_program_info = defaultdict(list)
program_name = None
program_status = None
program_pid = None
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(group_name) != -1:
program_name = program_info.split()[0].split(':')[1].strip()
program_status = program_info.split()[1].strip()
if program_status in ["EXITED", "STOPPED", "STARTING"]:
program_pid = -1
else:
program_pid = int(program_info.split()[3].strip(','))
group_program_info[program_name].append(program_status)
group_program_info[program_name].append(program_pid)
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return group_program_info
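# Illustrative sketch (an assumption, not part of the original helpers): for a
# supervisorctl line such as "bgp:bgpd  RUNNING  pid 67, uptime 1:00:22",
# get_group_program_info(duthost, "bgp", "bgp") would yield {'bgpd': ['RUNNING', 67]},
# while a stopped program maps to [<status>, -1]. Typical iteration:
#
#   bgp_info = get_group_program_info(duthost, "bgp", "bgp")
#   for name, (status, pid) in bgp_info.items():
#       logger.info("%s is %s (pid %s)", name, status, pid)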
def get_program_info(duthost, container_name, program_name):
"""Gets program running status and its pid by analyzing the command
output of "docker exec <container_name> supervisorctl status"
Args:
duthost: Hostname of DUT.
container_name: A string shows container name.
program_name: A string shows process name.
Return:
Program running status and its pid.
"""
program_status = None
program_pid = -1
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(program_name) != -1:
program_status = program_info.split()[1].strip()
if program_status == "RUNNING":
program_pid = int(program_info.split()[3].strip(','))
break
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return program_status, program_pid
def get_disabled_container_list(duthost):
"""Gets the container/service names which are disabled.
Args:
duthost: Host DUT.
Return:
        A list of the names of disabled containers/services.
"""
disabled_containers = []
container_status, succeeded = duthost.get_feature_status()
pytest_assert(succeeded, "Failed to get status ('enabled'|'disabled') of containers. Exiting...")
for container_name, status in container_status.items():
if "disabled" in status:
disabled_containers.append(container_name)
return disabled_containers
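# Illustrative sketch of how these helpers are typically composed in a test case
# (the container name "swss" is an assumption, not taken from this module):
#
#   if "swss" not in get_disabled_container_list(duthost):
#       duthost.shell("sudo systemctl restart swss.service")
#       restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
#                              CONTAINER_CHECK_INTERVAL_SECS,
#                              check_container_state, duthost, "swss", True)
#       pytest_assert(restarted, "Container 'swss' did not come back into the running state")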
|
[
"logging.getLogger",
"tests.common.utilities.get_host_visible_vars",
"tests.common.utilities.wait_until",
"tests.common.helpers.assertions.pytest_assert"
] |
[((260, 287), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'import logging\n'), ((947, 989), 'tests.common.utilities.get_host_visible_vars', 'get_host_visible_vars', (['inv_files', 'hostname'], {}), '(inv_files, hostname)\n', (968, 989), False, 'from tests.common.utilities import get_host_visible_vars\n'), ((3766, 3899), 'tests.common.utilities.wait_until', 'wait_until', (['CONTAINER_RESTART_THRESHOLD_SECS', 'CONTAINER_CHECK_INTERVAL_SECS', 'check_container_state', 'duthost', 'container_name', '(True)'], {}), '(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS,\n check_container_state, duthost, container_name, True)\n', (3776, 3899), False, 'from tests.common.utilities import wait_until\n'), ((7100, 7201), 'tests.common.helpers.assertions.pytest_assert', 'pytest_assert', (['succeeded', '"""Failed to get status (\'enabled\'|\'disabled\') of containers. Exiting..."""'], {}), '(succeeded,\n "Failed to get status (\'enabled\'|\'disabled\') of containers. Exiting...")\n', (7113, 7201), False, 'from tests.common.helpers.assertions import pytest_assert\n')]
|
from rest_framework import serializers
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from profiles.serializers import UserRowSerializer
from .models import Comment
User = get_user_model()
# Factory that builds a CommentCreateSerializer bound to a content object
# (model_type / id), an optional parent comment and the creating user.
def create_comment_serializer(model_type='outfit', id=None, parent_id=None, user=None):
class CommentCreateSerializer(serializers.ModelSerializer):
class Meta:
model=Comment
fields = [
'id',
# 'user',
# 'content_type',
# 'object_id',
'content',
'created_at',
'parent',
]
def __init__(self, *args, **kwargs):
self.model_type = model_type
self.id = id
self.parent_obj = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() ==1:
self.parent_obj = parent_qs.first()
return super(CommentCreateSerializer, self).__init__(*args, **kwargs)
def validate(self, data):
model_type = self.model_type # coming from __init__
model_qs = ContentType.objects.filter(model=model_type)
if not model_qs.exists() or model_qs.count() != 1:
raise serializers.ValidationError("This is not a valid content type")
SomeModel = model_qs.first().model_class()
obj_qs = SomeModel.objects.filter(id=self.id)
if not obj_qs.exists() or obj_qs.count() !=1:
raise serializers.ValidationError("This is not a id for this content type")
return data
def create(self, validated_data):
content = validated_data.get("content")
if user:
main_user = user
else:
main_user = User.objects.all().first()
model_type = self.model_type
id = self.id
parent_obj = self.parent_obj
comment = Comment.objects.create_by_model_type(
model_type=model_type,
id=id,
                    user=main_user,  # main_user itself?
content=content,
parent_obj=parent_obj,
)
return comment
return CommentCreateSerializer
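# Illustrative usage sketch (an assumption, not part of the original module): a DRF
# view would typically build the serializer class per request, for example
#
#   SerializerClass = create_comment_serializer(
#       model_type='outfit', id=obj_id, parent_id=parent_id, user=request.user)
#   serializer = SerializerClass(data=request.data)
#   if serializer.is_valid(raise_exception=True):
#       serializer.save()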
class CommentSerializer(serializers.ModelSerializer):
reply_count = serializers.SerializerMethodField()
is_owner = serializers.SerializerMethodField()
child = serializers.SerializerMethodField()
user = UserRowSerializer(read_only=True)
class Meta:
model = Comment
fields = (
'id',
'user',
# 'content_type',
# 'object_id',
'content',
'created_at',
# 'parent',
'reply_count',
'is_owner',
'child',
)
def get_reply_count(self, obj):
if obj.is_parent:
return obj.children().count()
return 0
def get_is_owner(self, obj):
if obj.user:
return obj.user == self.context['request'].user
return False
def get_child(self, obj):
if obj.is_parent:
if obj.children():
return CommentChildSerializer(obj.children().first(), context=self.context).data
return None
return None
# class CommentsOnPostSerializer(serializers.ModelSerializer):
# reply_count = serializers.SerializerMethodField()
# user = UserRowSerializer(read_only=True)
#
# class Meta:
# model = Comment
# fields = (
# 'id',
# 'user',
# 'content',
# 'created_at',
# 'reply_count',
# )
#
# def get_reply_count(self, obj):
# if obj.is_parent:
# return obj.children().count()
# return 0
class CommentChildSerializer(serializers.ModelSerializer):
user = UserRowSerializer(read_only=True)
is_owner = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = (
'id',
'user',
'content',
'created_at',
'is_owner',
)
def get_is_owner(self, obj):
        if obj.user:
return obj.user == self.context['request'].user
return False
class CommentDetailSerializer(serializers.ModelSerializer):
replies = serializers.SerializerMethodField()
is_owner = serializers.SerializerMethodField()
user = UserRowSerializer(read_only=True)
class Meta:
model = Comment
fields = (
'id',
'user',
'content',
'created_at',
'replies',
'is_owner',
)
read_only_fields = (
)
def get_replies(self, obj):
if obj.is_parent:
return CommentChildSerializer(obj.children(), many=True, context=self.context).data
return None
def get_is_owner(self, obj):
        if obj.user:
return obj.user == self.context['request'].user
return False
class CommentEditSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = (
'id',
'content',
'created_at',
)
|
[
"profiles.serializers.UserRowSerializer",
"django.contrib.auth.get_user_model",
"django.contrib.contenttypes.models.ContentType.objects.filter",
"rest_framework.serializers.SerializerMethodField",
"rest_framework.serializers.ValidationError"
] |
[((234, 250), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (248, 250), False, 'from django.contrib.auth import get_user_model\n'), ((2470, 2505), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2503, 2505), False, 'from rest_framework import serializers\n'), ((2521, 2556), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2554, 2556), False, 'from rest_framework import serializers\n'), ((2569, 2604), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2602, 2604), False, 'from rest_framework import serializers\n'), ((2616, 2649), 'profiles.serializers.UserRowSerializer', 'UserRowSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (2633, 2649), False, 'from profiles.serializers import UserRowSerializer\n'), ((4020, 4053), 'profiles.serializers.UserRowSerializer', 'UserRowSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (4037, 4053), False, 'from profiles.serializers import UserRowSerializer\n'), ((4069, 4104), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (4102, 4104), False, 'from rest_framework import serializers\n'), ((4498, 4533), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (4531, 4533), False, 'from rest_framework import serializers\n'), ((4549, 4584), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (4582, 4584), False, 'from rest_framework import serializers\n'), ((4596, 4629), 'profiles.serializers.UserRowSerializer', 'UserRowSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (4613, 4629), False, 'from profiles.serializers import UserRowSerializer\n'), ((1262, 1306), 'django.contrib.contenttypes.models.ContentType.objects.filter', 'ContentType.objects.filter', ([], {'model': 'model_type'}), '(model=model_type)\n', (1288, 1306), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1392, 1455), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""This is not a valid content type"""'], {}), "('This is not a valid content type')\n", (1419, 1455), False, 'from rest_framework import serializers\n'), ((1650, 1719), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""This is not a id for this content type"""'], {}), "('This is not a id for this content type')\n", (1677, 1719), False, 'from rest_framework import serializers\n')]
|
"""Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import math
import operator
import os
import sys
import tempfile
import subprocess
import pybedtools
import numpy as np
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import bedutils, effects, ploidy, population, vcfutils
from bcbio.structural import annotate, shared, plot
def run(items, background=None):
"""Detect copy number variations from batched set of samples using CNVkit.
"""
if not background: background = []
return _cnvkit_by_type(items, background)
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "cnvkit"))
def _cnvkit_by_type(items, background):
"""Dispatch to specific CNVkit functionality based on input type.
"""
if len(items + background) == 1:
return _run_cnvkit_single(items[0])
elif vcfutils.get_paired_phenotype(items[0]):
return _run_cnvkit_cancer(items, background)
else:
return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items, is_somatic=False):
"""Associate cnvkit output with individual items.
"""
assert len(ckouts) == len(items)
out = []
for ckout, data in zip(ckouts, items):
ckout = copy.deepcopy(ckout)
ckout["variantcaller"] = "cnvkit"
if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]):
ckout = _add_seg_to_output(ckout, data)
ckout = _add_gainloss_to_output(ckout, data)
ckout = _add_segmetrics_to_output(ckout, data)
ckout = _add_variantcalls_to_output(ckout, data, is_somatic)
# ckout = _add_coverage_bedgraph_to_output(ckout, data)
ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
if "svplots" in dd.get_tools_on(data):
ckout = _add_plots_to_output(ckout, data)
if "sv" not in data:
data["sv"] = []
data["sv"].append(ckout)
out.append(data)
return out
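# Illustrative sketch of the structure attached above (the keys come from this
# module, the paths are hypothetical):
#
#   data["sv"][-1] == {"variantcaller": "cnvkit",
#                      "cnr": ".../raw/Sample1-batch1.cnr",
#                      "cns": ".../raw/Sample1-batch1.cns",
#                      "back_cnn": ".../raw/flat_background.cnn",
#                      ...}  # plus seg/gainloss/segmetrics/variant-call outputs when present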
def _run_cnvkit_single(data, background=None):
"""Process a single input file with BAM or uniform background.
"""
if not background:
background = []
ckouts = _run_cnvkit_shared([data], background)
if not ckouts:
return [data]
else:
assert len(ckouts) == 1
return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
tumor_ready, normal_ready = _match_batches(paired.tumor_data, normal_data[0] if normal_data else None)
ckouts = _run_cnvkit_shared([tumor_ready], [normal_ready] if normal_ready else [])
if not ckouts:
return items
assert len(ckouts) == 1
tumor_data = _associate_cnvkit_out(ckouts, [paired.tumor_data], is_somatic=True)
return tumor_data + normal_data
def _match_batches(tumor, normal):
"""Fix batch names for shared tumor/normals to ensure matching
"""
def _get_batch(x):
b = dd.get_batch(x)
return [b] if not isinstance(b, (list, tuple)) else b
if normal:
tumor = copy.deepcopy(tumor)
normal = copy.deepcopy(normal)
cur_batch = list(set(_get_batch(tumor)) & set(_get_batch(normal)))
assert len(cur_batch) == 1, "No batch overlap: %s and %s" % (_get_batch(tumor), _get_batch(normal))
cur_batch = cur_batch[0]
tumor["metadata"]["batch"] = cur_batch
normal["metadata"]["batch"] = cur_batch
return tumor, normal
def _run_cnvkit_population(items, background):
"""Run CNVkit on a population of samples.
Tries to calculate background based on case/controls, otherwise
uses samples from the same batch as background.
"""
if background and len(background) > 0:
inputs = items
else:
inputs, background = shared.find_case_control(items)
# if we have case/control organized background or a single sample
if len(inputs) == 1 or len(background) > 0:
ckouts = _run_cnvkit_shared(inputs, background)
return _associate_cnvkit_out(ckouts, inputs) + background
# otherwise run each sample with the others in the batch as background
else:
out = []
for cur_input in items:
background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)]
ckouts = _run_cnvkit_shared([cur_input], background)
out.extend(_associate_cnvkit_out(ckouts, [cur_input]))
return out
def _get_cmd(script_name="cnvkit.py"):
return os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script_name)
def _prep_cmd(cmd, tx_out_file):
"""Wrap CNVkit commands ensuring we use local temporary directories.
"""
cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd
return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
def _bam_to_outbase(bam_file, work_dir, data):
"""Convert an input BAM file into CNVkit expected output.
Handles previous non-batch cases to avoid re-calculating,
returning both new and old values:
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0]
base = os.path.join(work_dir, out_base)
return "%s-%s" % (base, batch), base
def _run_cnvkit_shared(inputs, backgrounds):
"""Shared functionality to run CNVkit, parallelizing over multiple BAM files.
"""
work_dir = _sv_workdir(inputs[0])
raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % (background_name))
ckouts = []
for cur_input in inputs:
cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
if utils.file_exists(out_base_old + ".cns"):
out_base = out_base_old
ckouts.append({"cnr": "%s.cnr" % out_base,
"cns": "%s.cns" % out_base,
"back_cnn": background_cnn})
if not utils.file_exists(ckouts[0]["cns"]):
cov_interval = dd.get_coverage_interval(inputs[0])
raw_target_bed, access_bed = _get_target_access_files(cov_interval, inputs[0], work_dir)
# bail out if we ended up with no regions
if not utils.file_exists(raw_target_bed):
return {}
raw_target_bed = annotate.add_genes(raw_target_bed, inputs[0])
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval,
raw_work_dir, inputs[0])
samples_to_run = zip(["background"] * len(backgrounds), backgrounds) + \
zip(["evaluate"] * len(inputs), inputs)
raw_coverage_cnns = [_cnvkit_coverage(cdata, bed, itype) for itype, cdata in samples_to_run
for bed in [target_bed, antitarget_bed]]
coverage_cnns = reduce(operator.add,
[_cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, inputs + backgrounds)
for cnns in tz.groupby("bam", raw_coverage_cnns).values()])
background_cnn = _cnvkit_background(_select_background_cnns(coverage_cnns),
background_cnn, target_bed, antitarget_bed, inputs[0])
fixed_cnrs = run_multicore(_cnvkit_fix,
[(cnns, background_cnn, inputs + backgrounds) for cnns in
tz.groupby("bam", [x for x in coverage_cnns
if x["itype"] == "evaluate"]).values()],
inputs[0]["config"], parallel)
[_cnvkit_segment(cnr, cov_interval, data) for cnr, data in fixed_cnrs]
return ckouts
def _cna_has_values(fname):
with open(fname) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return True
return False
def _cnvkit_segment(cnr_file, cov_interval, data):
"""Perform segmentation and copy number calling on normalized inputs
"""
out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
if not _cna_has_values(cnr_file):
with open(tx_out_file, "w") as out_handle:
out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n")
else:
cmd = [_get_cmd(), "segment", "-p", str(dd.get_cores(data)),
"-o", tx_out_file, cnr_file]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome":
cmd += ["-v", small_vrn_files[0]]
if cov_interval == "genome":
cmd += ["--threshold", "0.00001"]
# preferentially use conda installed Rscript
export_cmd = ("%s && export TMPDIR=%s && "
% (utils.get_R_exports(), os.path.dirname(tx_out_file)))
do.run(export_cmd + " ".join(cmd), "CNVkit segment")
return out_file
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items):
"""Estimate noise of a sample using a flat background.
Only used for panel/targeted data due to memory issues with whole genome
samples.
"""
if cov_interval == "genome":
return cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0]
background_file = _cnvkit_background([], background_file, target_bed, antitarget_bed, items[0])
cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground")
cns_file = _cnvkit_segment(cnr_file, cov_interval, data)
metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0]
if not utils.file_exists(metrics_file):
with file_transaction(data, metrics_file) as tx_metrics_file:
cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file]
do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics")
metrics = _read_metrics_file(metrics_file)
out = []
for cnn in cnns:
cnn["metrics"] = metrics
out.append(cnn)
return out
def _read_metrics_file(in_file):
with open(in_file) as in_handle:
header = in_handle.next().strip().split("\t")[1:]
vals = map(float, in_handle.next().strip().split("\t")[1:])
return dict(zip(header, vals))
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_fix(cnns, background_cnn, items):
"""Normalize samples, correcting sources of bias.
"""
return [_cnvkit_fix_base(cnns, background_cnn, items)]
def _cnvkit_fix_base(cnns, background_cnn, items, ext=""):
assert len(cnns) == 2, "Expected target and antitarget CNNs: %s" % cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
antitarget_cnn = [x["file"] for x in cnns if x["cnntype"] == "antitarget"][0]
data = [x for x in items if dd.get_sample_name(x) == cnns[0]["sample"]][0]
common_prefix = os.path.commonprefix([target_cnn, antitarget_cnn])
if common_prefix.endswith("."):
common_prefix = common_prefix[:-1]
out_file = "%s%s.cnr" % (common_prefix, ext)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "fix", "-o", tx_out_file, target_cnn, antitarget_cnn, background_cnn]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit fix")
return out_file, data
def _select_background_cnns(cnns):
"""Select cnns to use for background calculations.
Uses background samples in cohort, and will remove CNNs with high
on target variability. Uses (number of segments * biweight midvariance) as metric
for variability with higher numbers being more unreliable.
"""
min_for_variability_analysis = 20
pct_keep = 0.10
b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
if len(b_cnns) >= min_for_variability_analysis:
b_cnns_w_metrics = []
for b_cnn in b_cnns:
unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"]
b_cnns_w_metrics.append((unreliability, b_cnn))
b_cnns_w_metrics.sort()
to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2)
b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
return [x["file"] for x in b_cnns]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
"""Calculate background reference, handling flat case with no normal sample.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
if len(background_cnns) == 0:
cmd += ["-t", target_bed, "-a", antitarget_bed]
else:
cmd += background_cnns
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background")
return out_file
def _cnvkit_coverage(data, bed_file, input_type):
"""Calculate coverage in a BED file for CNVkit.
"""
bam_file = dd.get_align_bam(data)
work_dir = utils.safe_makedir(os.path.join(_sv_workdir(data), "raw"))
exts = {".target.bed": ("target", "targetcoverage.cnn"),
".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
cnntype = None
for orig, (cur_cnntype, ext) in exts.items():
if bed_file.endswith(orig):
cnntype = cur_cnntype
break
if cnntype is None:
assert bed_file.endswith(".bed"), "Unexpected BED file extension for coverage %s" % bed_file
cnntype = ""
base, base_old = _bam_to_outbase(bam_file, work_dir, data)
out_file = "%s.%s" % (base, ext)
out_file_old = "%s.%s" % (base_old, ext)
# back compatible with previous runs to avoid re-calculating
if utils.file_exists(out_file_old):
out_file = out_file_old
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "coverage", "-p", str(dd.get_cores(data)), bam_file, bed_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage")
return {"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
"sample": dd.get_sample_name(data)}
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Create target and antitarget regions from target and access files.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
basename = os.path.splitext(os.path.basename(raw_target_bed))[0]
target_bed = os.path.join(work_dir, "%s-%s.target.bed" % (basename, batch))
# back compatible with previous runs to avoid re-calculating
target_bed_old = os.path.join(work_dir, "%s.target.bed" % basename)
if utils.file_exists(target_bed_old):
target_bed = target_bed_old
if not utils.file_exists(target_bed):
with file_transaction(data, target_bed) as tx_out_file:
cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("target"):
cmd += ["--avg-size", str(bin_estimates["target"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
antitarget_bed = os.path.join(work_dir, "%s-%s.antitarget.bed" % (basename, batch))
antitarget_bed_old = os.path.join(work_dir, "%s.antitarget.bed" % basename)
# back compatible with previous runs to avoid re-calculating
if os.path.exists(antitarget_bed_old):
antitarget_bed = antitarget_bed_old
if not os.path.exists(antitarget_bed):
with file_transaction(data, antitarget_bed) as tx_out_file:
cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("antitarget"):
cmd += ["--avg-size", str(bin_estimates["antitarget"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
return target_bed, antitarget_bed
def _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Estimate good coverage bin sizes for target regions based on coverage.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-%s-bin_estimate.txt" % (
os.path.splitext(os.path.basename(raw_target_bed))[0], batch))
method_map = {"genome": "wgs", "regional": "hybrid", "amplicon": "amplicon"}
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd("coverage_bin_size.py"), dd.get_align_bam(data),
"-m", method_map[cov_interval], "-t", raw_target_bed,
"-g", access_bed]
cmd = " ".join(cmd) + " > " + tx_out_file
try:
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage bin estimation", log_error=False)
except subprocess.CalledProcessError:
logger.info("Bin size estimate failed, using default values")
with open(tx_out_file, "w") as out_handle:
out_handle.write("Bin size estimate failed, using default values")
avg_bin_sizes = {}
estimate_map = {"On-target": "target", "Off-target": "antitarget",
"Genome": "target", "Targets (sampling)": "target"}
range_map = {("genome", "target"): (500, 1000),
("regional", "target"): (50, 267), ("regional", "antitarget"): (20000, 200000),
("amplicon", "target"): (50, 267)}
with open(out_file) as in_handle:
for line in in_handle:
if line.startswith(tuple(estimate_map.keys())):
name, depth, bin_size = line.strip().split("\t")
name = estimate_map[name.replace(":", "").strip()]
try:
bin_size = int(bin_size)
except ValueError:
bin_size = None
if bin_size and bin_size > 0:
cur_min, cur_max = range_map[(cov_interval, name)]
avg_bin_sizes[name] = max(min(bin_size, cur_max), cur_min)
return avg_bin_sizes
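# Worked example of the clamping above (illustrative numbers): for a "regional"
# run with an estimated on-target bin size of 500, the allowed range is (50, 267),
# so avg_bin_sizes["target"] = max(min(500, 267), 50) == 267.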
def _get_target_access_files(cov_interval, data, work_dir):
"""Retrieve target and access files based on the type of data to process.
pick targets, anti-targets and access files based on analysis type
http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
"""
base_regions = shared.get_base_cnv_regions(data, work_dir)
target_bed = bedutils.sort_merge(base_regions, data, out_dir=work_dir)
if cov_interval == "amplicon":
return target_bed, target_bed
elif cov_interval == "genome":
return target_bed, target_bed
else:
access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
return target_bed, access_file
def _add_seg_to_output(out, data):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg", "-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
cnr_file = out["cnr"]
bedgraph_file = cnr_file + ".bedgraph"
if not utils.file_exists(bedgraph_file):
with file_transaction(data, bedgraph_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bedgraph format")
out["cnr_bedgraph"] = bedgraph_file
bed_file = cnr_file + ".bed"
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bed format")
out["cnr_bed"] = bed_file
return out
def _compatible_small_variants(data):
"""Retrieve small variant (SNP, indel) VCFs compatible with CNVkit.
"""
supported = set(["vardict", "freebayes", "gatk-haplotype", "mutect2", "vardict"])
out = []
for v in data.get("variants", []):
vrn_file = v.get("vrn_file")
if vrn_file and v.get("variantcaller") in supported:
base, ext = utils.splitext_plus(os.path.basename(vrn_file))
if vcfutils.get_paired_phenotype(data):
out.append(vrn_file)
else:
sample_vrn_file = os.path.join(dd.get_work_dir(data), v["variantcaller"],
"%s-%s%s" % (base, dd.get_sample_name(data), ext))
sample_vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_vrn_file,
data["config"])
out.append(sample_vrn_file)
return out
def _add_variantcalls_to_output(out, data, is_somatic=False):
"""Call ploidy and convert into VCF and BED representations.
"""
call_file = "%s-call%s" % os.path.splitext(out["cns"])
gender = population.get_gender(data)
if not utils.file_exists(call_file):
with file_transaction(data, call_file) as tx_call_file:
filters = ["--filter", "cn"]
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \
filters + \
["--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_call_file, out["cns"]]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]):
cmd += ["-v", small_vrn_files[0]]
if not is_somatic:
cmd += ["-m", "clonal"]
if gender and gender.lower() != "unknown":
cmd += ["--gender", gender]
if gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit call ploidy")
calls = {}
for outformat in ["bed", "vcf"]:
out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
calls[outformat] = out_file
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
outformat, "--sample-id", dd.get_sample_name(data),
"--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_out_file, call_file]
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit export %s" % outformat)
out["call_file"] = call_file
out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
out["vrn_file"] = effects_vcf or calls["vcf"]
return out
def _add_segmetrics_to_output(out, data):
"""Add metrics for measuring reliability of CNV estimates.
"""
out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics",
"--ci", "--pi",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
# Use less fine grained bootstrapping intervals for whole genome runs
if dd.get_coverage_interval(data) == "genome":
cmd += ["--alpha", "0.1", "--bootstrap", "50"]
else:
cmd += ["--alpha", "0.01", "--bootstrap", "500"]
do.run(cmd, "CNVkit segmetrics")
out["segmetrics"] = out_file
return out
def _add_gainloss_to_output(out, data):
"""Add gainloss based on genes, helpful for identifying changes in smaller genes.
"""
out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit gainloss")
out["gainloss"] = out_file
return out
def _add_coverage_bedgraph_to_output(out, data):
"""Add BedGraph representation of coverage to the output
"""
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
def _add_plots_to_output(out, data):
"""Add CNVkit plots summarizing called copy number values.
"""
out["plot"] = {}
diagram_plot = _add_diagram_plot(out, data)
if diagram_plot:
out["plot"]["diagram"] = diagram_plot
scatter = _add_scatter_plot(out, data)
if scatter:
out["plot"]["scatter"] = scatter
scatter_global = _add_global_scatter_plot(out, data)
if scatter_global:
out["plot"]["scatter_global"] = scatter_global
return out
def _get_larger_chroms(ref_file):
"""Retrieve larger chromosomes, avoiding the smaller ones for plotting.
"""
from scipy.cluster.vq import kmeans, vq
all_sizes = []
for c in ref.file_contigs(ref_file):
all_sizes.append(float(c.size))
all_sizes.sort()
# separate out smaller chromosomes and haplotypes with kmeans
centroids, _ = kmeans(np.array(all_sizes), 2)
idx, _ = vq(np.array(all_sizes), centroids)
little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
little_sizes = [x[1] for x in little_sizes]
# create one more cluster with the smaller, removing the haplotypes
centroids2, _ = kmeans(np.array(little_sizes), 2)
idx2, _ = vq(np.array(little_sizes), centroids2)
little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
little_sizes2 = [x[1] for x in little_sizes2]
# get any chromosomes not in haplotype/random bin
thresh = max(little_sizes2)
larger_chroms = []
for c in ref.file_contigs(ref_file):
if c.size > thresh:
larger_chroms.append(c.name)
return larger_chroms
def _remove_haplotype_chroms(in_file, data):
"""Remove shorter haplotype chromosomes from cns/cnr files for plotting.
"""
larger_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("chromosome") or line.split()[0] in larger_chroms:
out_handle.write(line)
return out_file
def _add_global_scatter_plot(out, data):
out_file = "%s-scatter_global.pdf" % os.path.splitext(out["cnr"])[0]
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit global scatter plot")
return out_file
def _add_scatter_plot(out, data):
out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
priority_bed = dd.get_svprioritize(data)
if not priority_bed:
return None
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, "-l",
priority_bed, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit scatter plot")
return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
if _cnx_is_empty(cnr) or _cnx_is_empty(cns):
return None
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "diagram", "-s", cns,
"-o", tx_out_file, cnr]
gender = population.get_gender(data)
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit diagram plot")
return out_file
def _create_access_file(ref_file, out_dir, data):
"""Create genome access file for CNVlib to define available genomic regions.
XXX Can move to installation/upgrade process if too slow here.
"""
out_file = os.path.join(out_dir, "%s-access.bed" % os.path.splitext(os.path.basename(ref_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "access",
ref_file, "-s", "10000", "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Create CNVkit access file")
return out_file
# ## Theta support
def export_theta(ckout, data):
"""Provide updated set of data with export information for TheTA2 input.
"""
cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2")
ckout["theta_input"] = out_file
return ckout
|
[
"toolz.groupby",
"bcbio.structural.annotate.add_genes",
"bcbio.variation.bedutils.sort_merge",
"numpy.array",
"pybedtools.BedTool",
"bcbio.pipeline.datadict.get_cores",
"copy.deepcopy",
"bcbio.variation.vcfutils.get_paired_bams",
"bcbio.pipeline.datadict.get_align_bam",
"bcbio.pipeline.datadict.get_svprioritize",
"os.remove",
"os.path.exists",
"bcbio.pipeline.datadict.get_work_dir",
"bcbio.variation.effects.add_to_vcf",
"bcbio.structural.shared.get_base_cnv_regions",
"bcbio.pipeline.config_utils.get_program",
"bcbio.pipeline.datadict.get_coverage_interval",
"bcbio.utils.splitext_plus",
"bcbio.variation.population.get_gender",
"bcbio.variation.vcfutils.get_paired_phenotype",
"tempfile.NamedTemporaryFile",
"bcbio.pipeline.datadict.get_ref_file",
"bcbio.heterogeneity.chromhacks.bed_to_standardonly",
"bcbio.utils.file_exists",
"bcbio.provenance.do.run",
"os.path.splitext",
"bcbio.pipeline.datadict.get_sample_name",
"os.path.dirname",
"bcbio.distributed.transaction.file_transaction",
"bcbio.pipeline.datadict.get_batch",
"bcbio.pipeline.datadict.get_tools_on",
"bcbio.utils.file_uptodate",
"os.path.join",
"bcbio.utils.get_R_exports",
"os.path.realpath",
"bcbio.structural.shared.find_case_control",
"os.path.commonprefix",
"os.path.basename",
"bcbio.variation.ploidy.get_ploidy",
"bcbio.log.logger.info",
"bcbio.bam.ref.file_contigs"
] |
[((2981, 3045), 'bcbio.variation.vcfutils.get_paired_bams', 'vcfutils.get_paired_bams', (["[x['align_bam'] for x in items]", 'items'], {}), "([x['align_bam'] for x in items], items)\n", (3005, 3045), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((5903, 5935), 'os.path.join', 'os.path.join', (['work_dir', 'out_base'], {}), '(work_dir, out_base)\n', (5915, 5935), False, 'import os\n'), ((6325, 6390), 'os.path.join', 'os.path.join', (['raw_work_dir', "('%s_background.cnn' % background_name)"], {}), "(raw_work_dir, '%s_background.cnn' % background_name)\n", (6337, 6390), False, 'import os\n'), ((12266, 12316), 'os.path.commonprefix', 'os.path.commonprefix', (['[target_cnn, antitarget_cnn]'], {}), '([target_cnn, antitarget_cnn])\n', (12286, 12316), False, 'import os\n'), ((14584, 14606), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (14600, 14606), True, 'from bcbio.pipeline import datadict as dd\n'), ((15335, 15366), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file_old'], {}), '(out_file_old)\n', (15352, 15366), False, 'from bcbio import utils\n'), ((16125, 16187), 'os.path.join', 'os.path.join', (['work_dir', "('%s-%s.target.bed' % (basename, batch))"], {}), "(work_dir, '%s-%s.target.bed' % (basename, batch))\n", (16137, 16187), False, 'import os\n'), ((16274, 16324), 'os.path.join', 'os.path.join', (['work_dir', "('%s.target.bed' % basename)"], {}), "(work_dir, '%s.target.bed' % basename)\n", (16286, 16324), False, 'import os\n'), ((16332, 16365), 'bcbio.utils.file_exists', 'utils.file_exists', (['target_bed_old'], {}), '(target_bed_old)\n', (16349, 16365), False, 'from bcbio import utils\n'), ((16910, 16976), 'os.path.join', 'os.path.join', (['work_dir', "('%s-%s.antitarget.bed' % (basename, batch))"], {}), "(work_dir, '%s-%s.antitarget.bed' % (basename, batch))\n", (16922, 16976), False, 'import os\n'), ((17002, 17056), 'os.path.join', 'os.path.join', (['work_dir', "('%s.antitarget.bed' % basename)"], {}), "(work_dir, '%s.antitarget.bed' % basename)\n", (17014, 17056), False, 'import os\n'), ((17129, 17163), 'os.path.exists', 'os.path.exists', (['antitarget_bed_old'], {}), '(antitarget_bed_old)\n', (17143, 17163), False, 'import os\n'), ((20232, 20275), 'bcbio.structural.shared.get_base_cnv_regions', 'shared.get_base_cnv_regions', (['data', 'work_dir'], {}), '(data, work_dir)\n', (20259, 20275), False, 'from bcbio.structural import annotate, shared, plot\n'), ((20293, 20350), 'bcbio.variation.bedutils.sort_merge', 'bedutils.sort_merge', (['base_regions', 'data'], {'out_dir': 'work_dir'}), '(base_regions, data, out_dir=work_dir)\n', (20312, 20350), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((23071, 23098), 'bcbio.variation.population.get_gender', 'population.get_gender', (['data'], {}), '(data)\n', (23092, 23098), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((24759, 24797), 'bcbio.structural.annotate.add_genes', 'annotate.add_genes', (["calls['bed']", 'data'], {}), "(calls['bed'], data)\n", (24777, 24797), False, 'from bcbio.structural import annotate, shared, plot\n'), ((24819, 24867), 'bcbio.variation.effects.add_to_vcf', 'effects.add_to_vcf', (["calls['vcf']", 'data', '"""snpeff"""'], {}), "(calls['vcf'], data, 'snpeff')\n", (24837, 24867), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((26544, 26571), 'bcbio.utils.file_exists', 'utils.file_exists', 
(['out_file'], {}), '(out_file)\n', (26561, 26571), False, 'from bcbio import utils\n'), ((26642, 26664), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (26658, 26664), True, 'from bcbio.pipeline import datadict as dd\n'), ((26680, 26732), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bedtools"""', "data['config']"], {}), "('bedtools', data['config'])\n", (26704, 26732), False, 'from bcbio.pipeline import config_utils\n'), ((26748, 26800), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""samtools"""', "data['config']"], {}), "('samtools', data['config'])\n", (26772, 26800), False, 'from bcbio.pipeline import config_utils\n'), ((28021, 28047), 'bcbio.bam.ref.file_contigs', 'ref.file_contigs', (['ref_file'], {}), '(ref_file)\n', (28037, 28047), False, 'from bcbio.bam import ref\n'), ((28844, 28870), 'bcbio.bam.ref.file_contigs', 'ref.file_contigs', (['ref_file'], {}), '(ref_file)\n', (28860, 28870), False, 'from bcbio.bam import ref\n'), ((29765, 29792), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (29782, 29792), False, 'from bcbio import utils\n'), ((30269, 30294), 'bcbio.pipeline.datadict.get_svprioritize', 'dd.get_svprioritize', (['data'], {}), '(data)\n', (30288, 30294), True, 'from bcbio.pipeline import datadict as dd\n'), ((30463, 30490), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (30480, 30490), False, 'from bcbio import utils\n'), ((32602, 32674), 'bcbio.heterogeneity.chromhacks.bed_to_standardonly', 'chromhacks.bed_to_standardonly', (["ckout['cns']", 'data'], {'headers': '"""chromosome"""'}), "(ckout['cns'], data, headers='chromosome')\n", (32632, 32674), False, 'from bcbio.heterogeneity import chromhacks\n'), ((32690, 32762), 'bcbio.heterogeneity.chromhacks.bed_to_standardonly', 'chromhacks.bed_to_standardonly', (["ckout['cnr']", 'data'], {'headers': '"""chromosome"""'}), "(ckout['cnr'], data, headers='chromosome')\n", (32720, 32762), False, 'from bcbio.heterogeneity import chromhacks\n'), ((1365, 1404), 'bcbio.variation.vcfutils.get_paired_phenotype', 'vcfutils.get_paired_phenotype', (['items[0]'], {}), '(items[0])\n', (1394, 1404), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((1758, 1778), 'copy.deepcopy', 'copy.deepcopy', (['ckout'], {}), '(ckout)\n', (1771, 1778), False, 'import copy\n'), ((3659, 3674), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['x'], {}), '(x)\n', (3671, 3674), True, 'from bcbio.pipeline import datadict as dd\n'), ((3768, 3788), 'copy.deepcopy', 'copy.deepcopy', (['tumor'], {}), '(tumor)\n', (3781, 3788), False, 'import copy\n'), ((3806, 3827), 'copy.deepcopy', 'copy.deepcopy', (['normal'], {}), '(normal)\n', (3819, 3827), False, 'import copy\n'), ((4492, 4523), 'bcbio.structural.shared.find_case_control', 'shared.find_case_control', (['items'], {}), '(items)\n', (4516, 4523), False, 'from bcbio.structural import annotate, shared, plot\n'), ((5768, 5786), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (5780, 5786), True, 'from bcbio.pipeline import datadict as dd\n'), ((5790, 5814), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (5808, 5814), True, 'from bcbio.pipeline import datadict as dd\n'), ((6189, 6218), 'os.path.join', 'os.path.join', (['work_dir', '"""raw"""'], {}), "(work_dir, 'raw')\n", (6201, 6218), False, 'import os\n'), ((6242, 6276), 
'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['backgrounds[0]'], {}), '(backgrounds[0])\n', (6260, 6276), True, 'from bcbio.pipeline import datadict as dd\n'), ((6647, 6687), 'bcbio.utils.file_exists', 'utils.file_exists', (["(out_base_old + '.cns')"], {}), "(out_base_old + '.cns')\n", (6664, 6687), False, 'from bcbio import utils\n'), ((6890, 6925), 'bcbio.utils.file_exists', 'utils.file_exists', (["ckouts[0]['cns']"], {}), "(ckouts[0]['cns'])\n", (6907, 6925), False, 'from bcbio import utils\n'), ((6950, 6985), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['inputs[0]'], {}), '(inputs[0])\n', (6974, 6985), True, 'from bcbio.pipeline import datadict as dd\n'), ((7230, 7275), 'bcbio.structural.annotate.add_genes', 'annotate.add_genes', (['raw_target_bed', 'inputs[0]'], {}), '(raw_target_bed, inputs[0])\n', (7248, 7275), False, 'from bcbio.structural import annotate, shared, plot\n'), ((9154, 9193), 'bcbio.utils.file_uptodate', 'utils.file_uptodate', (['out_file', 'cnr_file'], {}), '(out_file, cnr_file)\n', (9173, 9193), False, 'from bcbio import utils\n'), ((11014, 11045), 'bcbio.utils.file_exists', 'utils.file_exists', (['metrics_file'], {}), '(metrics_file)\n', (11031, 11045), False, 'from bcbio import utils\n'), ((12456, 12483), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (12473, 12483), False, 'from bcbio import utils\n'), ((14023, 14050), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (14040, 14050), False, 'from bcbio import utils\n'), ((15411, 15438), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (15428, 15438), False, 'from bcbio import utils\n'), ((15792, 15816), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (15810, 15816), True, 'from bcbio.pipeline import datadict as dd\n'), ((15992, 16010), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (16004, 16010), True, 'from bcbio.pipeline import datadict as dd\n'), ((16014, 16038), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (16032, 16038), True, 'from bcbio.pipeline import datadict as dd\n'), ((16414, 16443), 'bcbio.utils.file_exists', 'utils.file_exists', (['target_bed'], {}), '(target_bed)\n', (16431, 16443), False, 'from bcbio import utils\n'), ((17220, 17250), 'os.path.exists', 'os.path.exists', (['antitarget_bed'], {}), '(antitarget_bed)\n', (17234, 17250), False, 'import os\n'), ((17949, 17967), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (17961, 17967), True, 'from bcbio.pipeline import datadict as dd\n'), ((17971, 17995), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (17989, 17995), True, 'from bcbio.pipeline import datadict as dd\n'), ((18226, 18250), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (18240, 18250), False, 'import os\n'), ((20824, 20851), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (20841, 20851), False, 'from bcbio import utils\n'), ((21280, 21312), 'bcbio.utils.file_exists', 'utils.file_exists', (['bedgraph_file'], {}), '(bedgraph_file)\n', (21297, 21312), False, 'from bcbio import utils\n'), ((21616, 21643), 'bcbio.utils.file_exists', 'utils.file_exists', (['bed_file'], {}), '(bed_file)\n', (21633, 21643), False, 'from bcbio import utils\n'), ((23029, 23057), 
'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (23045, 23057), False, 'import os\n'), ((23110, 23138), 'bcbio.utils.file_exists', 'utils.file_exists', (['call_file'], {}), '(call_file)\n', (23127, 23138), False, 'from bcbio import utils\n'), ((25127, 25154), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (25144, 25154), False, 'from bcbio import utils\n'), ((26007, 26034), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (26024, 26034), False, 'from bcbio import utils\n'), ((26842, 26898), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".bed"""', 'delete': '(False)'}), "(suffix='.bed', delete=False)\n", (26869, 26898), False, 'import tempfile\n'), ((26913, 26945), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (26929, 26945), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((27215, 27256), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit bedGraph conversion"""'], {}), "(cmd, 'CNVkit bedGraph conversion')\n", (27221, 27256), False, 'from bcbio.provenance import do\n'), ((27265, 27284), 'os.remove', 'os.remove', (['bed_file'], {}), '(bed_file)\n', (27274, 27284), False, 'import os\n'), ((28202, 28221), 'numpy.array', 'np.array', (['all_sizes'], {}), '(all_sizes)\n', (28210, 28221), True, 'import numpy as np\n'), ((28242, 28261), 'numpy.array', 'np.array', (['all_sizes'], {}), '(all_sizes)\n', (28250, 28261), True, 'import numpy as np\n'), ((28504, 28526), 'numpy.array', 'np.array', (['little_sizes'], {}), '(little_sizes)\n', (28512, 28526), True, 'import numpy as np\n'), ((28548, 28570), 'numpy.array', 'np.array', (['little_sizes'], {}), '(little_sizes)\n', (28556, 28570), True, 'import numpy as np\n'), ((29200, 29228), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['in_file'], {}), '(in_file)\n', (29219, 29228), False, 'from bcbio import utils\n'), ((29240, 29267), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (29257, 29267), False, 'from bcbio import utils\n'), ((29933, 29965), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (29949, 29965), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((30389, 30421), 'pybedtools.BedTool', 'pybedtools.BedTool', (['priority_bed'], {}), '(priority_bed)\n', (30407, 30421), False, 'import pybedtools\n'), ((30429, 30454), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (30444, 30454), False, 'import os\n'), ((30631, 30663), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (30647, 30663), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((31412, 31439), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (31429, 31439), False, 'from bcbio import utils\n'), ((32158, 32185), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (32175, 32185), False, 'from bcbio import utils\n'), ((32841, 32868), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (32858, 32868), False, 'from bcbio import utils\n'), ((1119, 1143), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (1137, 1143), True, 'from bcbio.pipeline import datadict as dd\n'), 
((1832, 1863), 'bcbio.utils.file_exists', 'utils.file_exists', (["ckout['cns']"], {}), "(ckout['cns'])\n", (1849, 1863), False, 'from bcbio import utils\n'), ((5232, 5264), 'os.path.realpath', 'os.path.realpath', (['sys.executable'], {}), '(sys.executable)\n', (5248, 5264), False, 'import os\n'), ((5501, 5529), 'os.path.dirname', 'os.path.dirname', (['tx_out_file'], {}), '(tx_out_file)\n', (5516, 5529), False, 'import os\n'), ((6578, 6605), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['cur_input'], {}), '(cur_input)\n', (6594, 6605), True, 'from bcbio.pipeline import datadict as dd\n'), ((7148, 7181), 'bcbio.utils.file_exists', 'utils.file_exists', (['raw_target_bed'], {}), '(raw_target_bed)\n', (7165, 7181), False, 'from bcbio import utils\n'), ((7322, 7345), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['inputs[0]'], {}), '(inputs[0])\n', (7334, 7345), True, 'from bcbio.pipeline import datadict as dd\n'), ((9113, 9139), 'os.path.splitext', 'os.path.splitext', (['cnr_file'], {}), '(cnr_file)\n', (9129, 9139), False, 'import os\n'), ((9208, 9240), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (9224, 9240), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((10647, 10678), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['target_cnn'], {}), '(target_cnn)\n', (10666, 10678), False, 'from bcbio import utils\n'), ((10968, 10999), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['target_cnn'], {}), '(target_cnn)\n', (10987, 10999), False, 'from bcbio import utils\n'), ((11060, 11096), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'metrics_file'], {}), '(data, metrics_file)\n', (11076, 11096), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((12498, 12530), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (12514, 12530), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((14065, 14097), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (14081, 14097), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((15453, 15485), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (15469, 15485), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((16071, 16103), 'os.path.basename', 'os.path.basename', (['raw_target_bed'], {}), '(raw_target_bed)\n', (16087, 16103), False, 'import os\n'), ((16458, 16492), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'target_bed'], {}), '(data, target_bed)\n', (16474, 16492), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((17265, 17303), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'antitarget_bed'], {}), '(data, antitarget_bed)\n', (17281, 17303), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((18265, 18297), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (18281, 18297), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((20781, 20809), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (20797, 20809), False, 'import os\n'), ((20866, 20898), 
'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (20882, 20898), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((21073, 21105), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit export seg"""'], {}), "(cmd, 'CNVkit export seg')\n", (21079, 21105), False, 'from bcbio.provenance import do\n'), ((21327, 21364), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'bedgraph_file'], {}), '(data, bedgraph_file)\n', (21343, 21364), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((21658, 21690), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'bed_file'], {}), '(data, bed_file)\n', (21674, 21690), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((22341, 22376), 'bcbio.variation.vcfutils.get_paired_phenotype', 'vcfutils.get_paired_phenotype', (['data'], {}), '(data)\n', (22370, 22376), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((23153, 23186), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'call_file'], {}), '(data, call_file)\n', (23169, 23186), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((23951, 23984), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit call ploidy"""'], {}), "(cmd, 'CNVkit call ploidy')\n", (23957, 23984), False, 'from bcbio.provenance import do\n'), ((24161, 24185), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (24175, 24185), False, 'import os\n'), ((25084, 25112), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (25100, 25112), False, 'import os\n'), ((25169, 25201), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (25185, 25201), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((25713, 25745), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit segmetrics"""'], {}), "(cmd, 'CNVkit segmetrics')\n", (25719, 25745), False, 'from bcbio.provenance import do\n'), ((25964, 25992), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (25980, 25992), False, 'import os\n'), ((26049, 26081), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (26065, 26081), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((26269, 26299), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit gainloss"""'], {}), "(cmd, 'CNVkit gainloss')\n", (26275, 26299), False, 'from bcbio.provenance import do\n'), ((26505, 26533), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (26521, 26533), False, 'import os\n'), ((29140, 29161), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (29155, 29161), True, 'from bcbio.pipeline import datadict as dd\n'), ((29282, 29314), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (29298, 29314), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((29726, 29754), 'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (29742, 29754), False, 'import os\n'), ((30218, 30246), 'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (30234, 30246), False, 'import os\n'), ((31194, 31222), 
'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (31210, 31222), False, 'import os\n'), ((31454, 31486), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (31470, 31486), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((31620, 31647), 'bcbio.variation.population.get_gender', 'population.get_gender', (['data'], {}), '(data)\n', (31641, 31647), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((32200, 32232), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (32216, 32232), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((32797, 32826), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['cns_file'], {}), '(cns_file)\n', (32816, 32826), False, 'from bcbio import utils\n'), ((32883, 32915), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (32899, 32915), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((2305, 2326), 'bcbio.pipeline.datadict.get_tools_on', 'dd.get_tools_on', (['data'], {}), '(data)\n', (2320, 2326), True, 'from bcbio.pipeline import datadict as dd\n'), ((3085, 3106), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['x'], {}), '(x)\n', (3103, 3106), True, 'from bcbio.pipeline import datadict as dd\n'), ((14164, 14185), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (14179, 14185), True, 'from bcbio.pipeline import datadict as dd\n'), ((18367, 18389), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (18383, 18389), True, 'from bcbio.pipeline import datadict as dd\n'), ((20549, 20570), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (20564, 20570), True, 'from bcbio.pipeline import datadict as dd\n'), ((22298, 22324), 'os.path.basename', 'os.path.basename', (['vrn_file'], {}), '(vrn_file)\n', (22314, 22324), False, 'import os\n'), ((24204, 24236), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (24220, 24236), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((24661, 24704), 'bcbio.provenance.do.run', 'do.run', (['cmd', "('CNVkit export %s' % outformat)"], {}), "(cmd, 'CNVkit export %s' % outformat)\n", (24667, 24704), False, 'from bcbio.provenance import do\n'), ((25511, 25541), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (25535, 25541), True, 'from bcbio.pipeline import datadict as dd\n'), ((12199, 12220), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['x'], {}), '(x)\n', (12217, 12220), True, 'from bcbio.pipeline import datadict as dd\n'), ((15555, 15573), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['data'], {}), '(data)\n', (15567, 15573), True, 'from bcbio.pipeline import datadict as dd\n'), ((18741, 18802), 'bcbio.log.logger.info', 'logger.info', (['"""Bin size estimate failed, using default values"""'], {}), "('Bin size estimate failed, using default values')\n", (18752, 18802), False, 'from bcbio.log import logger\n'), ((20947, 20978), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (20962, 20978), False, 'import os\n'), ((22480, 22501), 
'bcbio.pipeline.datadict.get_work_dir', 'dd.get_work_dir', (['data'], {}), '(data)\n', (22495, 22501), True, 'from bcbio.pipeline import datadict as dd\n'), ((22688, 22712), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (22706, 22712), True, 'from bcbio.pipeline import datadict as dd\n'), ((24067, 24094), 'os.path.splitext', 'os.path.splitext', (['call_file'], {}), '(call_file)\n', (24083, 24094), False, 'import os\n'), ((24395, 24419), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (24413, 24419), True, 'from bcbio.pipeline import datadict as dd\n'), ((25250, 25281), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (25265, 25281), False, 'import os\n'), ((26130, 26161), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (26145, 26161), False, 'import os\n'), ((32115, 32141), 'os.path.basename', 'os.path.basename', (['ref_file'], {}), '(ref_file)\n', (32131, 32141), False, 'import os\n'), ((4945, 4966), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['d'], {}), '(d)\n', (4963, 4966), True, 'from bcbio.pipeline import datadict as dd\n'), ((4970, 4999), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['cur_input'], {}), '(cur_input)\n', (4988, 4999), True, 'from bcbio.pipeline import datadict as dd\n'), ((5847, 5873), 'os.path.basename', 'os.path.basename', (['bam_file'], {}), '(bam_file)\n', (5863, 5873), False, 'import os\n'), ((9544, 9562), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['data'], {}), '(data)\n', (9556, 9562), True, 'from bcbio.pipeline import datadict as dd\n'), ((10094, 10115), 'bcbio.utils.get_R_exports', 'utils.get_R_exports', ([], {}), '()\n', (10113, 10115), False, 'from bcbio import utils\n'), ((10117, 10145), 'os.path.dirname', 'os.path.dirname', (['tx_out_file'], {}), '(tx_out_file)\n', (10132, 10145), False, 'import os\n'), ((18088, 18120), 'os.path.basename', 'os.path.basename', (['raw_target_bed'], {}), '(raw_target_bed)\n', (18104, 18120), False, 'import os\n'), ((23402, 23427), 'bcbio.variation.ploidy.get_ploidy', 'ploidy.get_ploidy', (['[data]'], {}), '([data])\n', (23419, 23427), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((24289, 24320), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (24304, 24320), False, 'import os\n'), ((24460, 24485), 'bcbio.variation.ploidy.get_ploidy', 'ploidy.get_ploidy', (['[data]'], {}), '([data])\n', (24477, 24485), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((8064, 8100), 'toolz.groupby', 'tz.groupby', (['"""bam"""', 'raw_coverage_cnns'], {}), "('bam', raw_coverage_cnns)\n", (8074, 8100), True, 'import toolz as tz\n'), ((8472, 8545), 'toolz.groupby', 'tz.groupby', (['"""bam"""', "[x for x in coverage_cnns if x['itype'] == 'evaluate']"], {}), "('bam', [x for x in coverage_cnns if x['itype'] == 'evaluate'])\n", (8482, 8545), True, 'import toolz as tz\n'), ((22589, 22613), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (22607, 22613), True, 'from bcbio.pipeline import datadict as dd\n'), ((23277, 23308), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (23292, 23308), False, 'import os\n')]
|
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse
from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService
from ..base.soap import ExchangeServiceSOAP
from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType
from ..compat import BASESTRING_TYPES
from . import soap_request
from lxml import etree
from copy import deepcopy
from datetime import date
import warnings
log = logging.getLogger("pyexchange")
class Exchange2010Service(ExchangeServiceSOAP):
def calendar(self, id="calendar"):
return Exchange2010CalendarService(service=self, calendar_id=id)
def mail(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def contacts(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def folder(self):
return Exchange2010FolderService(service=self)
def _send_soap_request(self, body, headers=None, retries=2, timeout=30, encoding="utf-8"):
headers = {
"Accept": "text/xml",
"Content-type": "text/xml; charset=%s " % encoding
}
return super(Exchange2010Service, self)._send_soap_request(body, headers=headers, retries=retries, timeout=timeout, encoding=encoding)
def _check_for_errors(self, xml_tree):
super(Exchange2010Service, self)._check_for_errors(xml_tree)
self._check_for_exchange_fault(xml_tree)
def _check_for_exchange_fault(self, xml_tree):
# If the request succeeded, we should see a <m:ResponseCode>NoError</m:ResponseCode>
# somewhere in the response. if we don't (a) see the tag or (b) it doesn't say "NoError"
# then flip out
response_codes = xml_tree.xpath(u'//m:ResponseCode', namespaces=soap_request.NAMESPACES)
if not response_codes:
raise FailedExchangeException(u"Exchange server did not return a status response", None)
# The full (massive) list of possible return responses is here.
# http://msdn.microsoft.com/en-us/library/aa580757(v=exchg.140).aspx
for code in response_codes:
if code.text == u"ErrorChangeKeyRequiredForWriteOperations":
# change key is missing or stale. we can fix that, so throw a special error
raise ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorItemNotFound":
# exchange_invite_key wasn't found on the server
raise ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorIrresolvableConflict":
# tried to update an item with an old change key
raise ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorInternalServerTransientError":
# temporary internal server error. throw a special error so we can retry
raise ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange":
# just means some or all of the requested instances are out of range
pass
elif code.text != u"NoError":
raise FailedExchangeException(u"Exchange Fault (%s) from Exchange server" % code.text)
class Exchange2010CalendarService(BaseExchangeCalendarService):
def event(self, id=None, **kwargs):
return Exchange2010CalendarEvent(service=self.service, id=id, **kwargs)
def get_event(self, id):
return Exchange2010CalendarEvent(service=self.service, id=id)
def new_event(self, **properties):
return Exchange2010CalendarEvent(service=self.service, calendar_id=self.calendar_id, **properties)
def list_events(self, start=None, end=None, details=False, delegate_for=None):
return Exchange2010CalendarEventList(service=self.service, calendar_id=self.calendar_id, start=start, end=end, details=details, delegate_for=delegate_for)
class Exchange2010CalendarEventList(object):
"""
Creates and stores a list of Exchange2010CalendarEvent items in the "self.events" variable.
"""
def __init__(self, service=None, calendar_id=u'calendar', start=None, end=None, details=False, delegate_for=None):
self.service = service
self.count = 0
self.start = start
self.end = end
self.events = list()
self.event_ids = list()
self.details = details
self.delegate_for = delegate_for
# This request uses a Calendar-specific query between two dates.
body = soap_request.get_calendar_items(format=u'AllProperties', calendar_id=calendar_id, start=self.start, end=self.end, delegate_for=self.delegate_for)
response_xml = self.service.send(body)
self._parse_response_for_all_events(response_xml)
# Populate the event ID list, for convenience reasons.
for event in self.events:
self.event_ids.append(event._id)
# If we have requested all the details, basically repeat the previous 3 steps,
# but instead of start/stop, we have a list of ID fields.
if self.details:
log.debug(u'Received request for all details, retrieving now!')
self.load_all_details()
return
def _parse_response_for_all_events(self, response):
"""
This function will retrieve *most* of the event data, excluding Organizer & Attendee details
"""
items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if not items:
items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if items:
self.count = len(items)
log.debug(u'Found %s items' % self.count)
for item in items:
self._add_event(xml=soap_request.M.Items(deepcopy(item)))
else:
log.debug(u'No calendar items found with search parameters.')
return self
def _add_event(self, xml=None):
log.debug(u'Adding new event to all events list.')
event = Exchange2010CalendarEvent(service=self.service, xml=xml)
log.debug(u'Subject of new event is %s' % event.subject)
self.events.append(event)
return self
def load_all_details(self):
"""
This function will execute all the event lookups for known events.
This is intended for use when you want to have a completely populated event entry, including
Organizer & Attendee details.
"""
log.debug(u"Loading all details")
if self.count > 0:
# Now, empty out the events to prevent duplicates!
del(self.events[:])
# Send the SOAP request with the list of exchange ID values.
log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
response_xml = self.service.send(body)
# Re-parse the results for all the details!
self._parse_response_for_all_events(response_xml)
return self
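# Hedged usage sketch (comment only, not part of the original module): requesting
# details=True triggers the two-pass lookup implemented above (FindItem for the date
# range, then GetItem for the collected IDs). The service object and datetimes are
# assumed to exist; see list_events() on Exchange2010CalendarService.
#
#   events = service.calendar().list_events(start=week_start, end=week_end, details=True)
#   for event in events.events:
#       print(event.subject, event.start)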
class Exchange2010CalendarEvent(BaseExchangeCalendarEvent):
def _init_from_service(self, id):
log.debug(u'Creating new Exchange2010CalendarEvent object from ID')
body = soap_request.get_item(exchange_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_event(response_xml)
self._update_properties(properties)
self._id = id
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def _init_from_xml(self, xml=None):
log.debug(u'Creating new Exchange2010CalendarEvent object from XML')
properties = self._parse_response_for_get_event(xml)
self._update_properties(properties)
self._id, self._change_key = self._parse_id_and_change_key_from_response(xml)
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def as_json(self):
raise NotImplementedError
def validate(self):
if self.recurrence is not None:
if not (isinstance(self.recurrence_end_date, date)):
raise ValueError('recurrence_end_date must be of type date')
elif (self.recurrence_end_date < self.start.date()):
raise ValueError('recurrence_end_date must be after start')
if self.recurrence == u'daily':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 999):
raise ValueError('recurrence_interval must be an int in the range from 1 to 999')
elif self.recurrence == u'weekly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
if self.recurrence_days is None:
raise ValueError('recurrence_days is required')
for day in self.recurrence_days.split(' '):
if day not in self.WEEKLY_DAYS:
raise ValueError('recurrence_days received unknown value: %s' % day)
elif self.recurrence == u'monthly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
elif self.recurrence == u'yearly':
pass # everything is pulled from start
else:
raise ValueError('recurrence received unknown value: %s' % self.recurrence)
super(Exchange2010CalendarEvent, self).validate()
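# Hedged sketch of the recurrence fields checked by validate() above (attribute names
# are taken from the checks themselves; the day names assume the base class
# WEEKLY_DAYS values; start/end are placeholders):
#
#   event = service.calendar().new_event(subject=u"Standup", start=start, end=end)
#   event.recurrence = u'weekly'
#   event.recurrence_interval = 1                   # 1..99 for weekly
#   event.recurrence_days = u'Monday Wednesday'     # space-separated day names
#   event.recurrence_end_date = date(2020, 12, 31)  # must be a date after start
#   event.validate()                                # raises ValueError on any violation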
def create(self):
"""
Creates an event in Exchange. ::
event = service.calendar().new_event(
subject=u"80s Movie Night",
location = u"My house",
)
event.create()
Invitations to attendees are sent out immediately.
"""
self.validate()
body = soap_request.new_event(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def resend_invitations(self):
"""
Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite.
"""
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self
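# Hedged example of the notification behaviour documented above; the value must be one
# of VALID_UPDATE_OPERATION_TYPES and the event id is a placeholder:
#
#   event = service.calendar().get_event(id='KEY HERE')
#   event.location = u'New location'
#   event.update(calendar_item_update_operation_type=u'SendOnlyToChanged')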
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Calendar ID to where you want to move the event to.
Moves an event to a different folder (calendar). ::
event = service.calendar().get_event(id='KEY HERE')
event.move_to(folder_id='NEW CALENDAR KEY HERE')
"""
if not folder_id:
raise TypeError(u"You can't move an event to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move an event that hasn't been created yet.")
self.refresh_change_key()
response_xml = self.service.send(soap_request.move_event(self, folder_id))
new_id, new_change_key = self._parse_id_and_change_key_from_response(response_xml)
if not new_id:
raise ValueError(u"MoveItem returned success but requested item not moved")
self._id = new_id
self._change_key = new_change_key
self.calendar_id = folder_id
return self
def get_master(self):
"""
get_master()
:raises InvalidEventType: When this method is called on an event that is not an Occurrence type.
This will return the master event to the occurrence.
**Examples**::
event = service.calendar().get_event(id='<event_id>')
print event.type # If it prints out 'Occurrence' then that means we could get the master.
master = event.get_master()
print master.type # Will print out 'RecurringMaster'.
"""
if self.type != 'Occurrence':
raise InvalidEventType("get_master method can only be called on a 'Occurrence' event type")
body = soap_request.get_master(exchange_id=self._id, format=u"AllProperties")
response_xml = self.service.send(body)
return Exchange2010CalendarEvent(service=self.service, xml=response_xml)
def get_occurrence(self, instance_index):
"""
get_occurrence(instance_index)
:param iterable instance_index: This should be tuple or list of integers which correspond to occurrences.
:raises TypeError: When instance_index is not an iterable of ints.
:raises InvalidEventType: When this method is called on an event that is not a RecurringMaster type.
This will return a list of occurrence events.
**Examples**::
master = service.calendar().get_event(id='<event_id>')
# The following will return the first 20 occurrences in the recurrence.
# If there are not 20 occurrences, it will only return what it finds.
occurrences = master.get_occurrence(range(1,21))
for occurrence in occurrences:
print occurrence.start
"""
if not all([isinstance(i, int) for i in instance_index]):
raise TypeError("instance_index must be an interable of type int")
if self.type != 'RecurringMaster':
raise InvalidEventType("get_occurrance method can only be called on a 'RecurringMaster' event type")
body = soap_request.get_occurrence(exchange_id=self._id, instance_index=instance_index, format=u"AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def conflicting_events(self):
"""
conflicting_events()
This will return a list of conflicting events.
**Example**::
event = service.calendar().get_event(id='<event_id>')
for conflict in event.conflicting_events():
print conflict.subject
"""
if not self.conflicting_event_ids:
return []
body = soap_request.get_item(exchange_id=self.conflicting_event_ids, format="AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def refresh_change_key(self):
body = soap_request.get_item(exchange_id=self._id, format=u"IdOnly")
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//m:Items/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_response_for_get_event(self, response):
result = self._parse_event_properties(response)
organizer_properties = self._parse_event_organizer(response)
if organizer_properties is not None:
if 'email' not in organizer_properties:
organizer_properties['email'] = None
result[u'organizer'] = ExchangeEventOrganizer(**organizer_properties)
attendee_properties = self._parse_event_attendees(response)
result[u'_attendees'] = self._build_resource_dictionary([ExchangeEventResponse(**attendee) for attendee in attendee_properties])
resource_properties = self._parse_event_resources(response)
result[u'_resources'] = self._build_resource_dictionary([ExchangeEventResponse(**resource) for resource in resource_properties])
result['_conflicting_event_ids'] = self._parse_event_conflicts(response)
return result
def _parse_event_properties(self, response):
property_map = {
u'subject': {
u'xpath': u'//m:Items/t:CalendarItem/t:Subject',
},
u'location':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Location',
},
u'availability':
{
u'xpath': u'//m:Items/t:CalendarItem/t:LegacyFreeBusyStatus',
},
u'start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Start',
u'cast': u'datetime',
},
u'end':
{
u'xpath': u'//m:Items/t:CalendarItem/t:End',
u'cast': u'datetime',
},
u'html_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="HTML"]',
},
u'text_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="Text"]',
},
u'_type':
{
u'xpath': u'//m:Items/t:CalendarItem/t:CalendarItemType',
},
u'reminder_minutes_before_start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:ReminderMinutesBeforeStart',
u'cast': u'int',
},
u'is_all_day':
{
u'xpath': u'//m:Items/t:CalendarItem/t:IsAllDayEvent',
u'cast': u'bool',
},
u'recurrence_end_date':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:EndDateRecurrence/t:EndDate',
u'cast': u'date_only_naive',
},
u'recurrence_interval':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/*/t:Interval',
u'cast': u'int',
},
u'recurrence_days':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:WeeklyRecurrence/t:DaysOfWeek',
},
}
result = self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
try:
recurrence_node = response.xpath(u'//m:Items/t:CalendarItem/t:Recurrence', namespaces=soap_request.NAMESPACES)[0]
except IndexError:
recurrence_node = None
if recurrence_node is not None:
if recurrence_node.find('t:DailyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'daily'
elif recurrence_node.find('t:WeeklyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'weekly'
elif recurrence_node.find('t:AbsoluteMonthlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'monthly'
elif recurrence_node.find('t:AbsoluteYearlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'yearly'
return result
def _parse_event_organizer(self, response):
organizer = response.xpath(u'//m:Items/t:CalendarItem/t:Organizer/t:Mailbox', namespaces=soap_request.NAMESPACES)
property_map = {
u'name':
{
u'xpath': u't:Name'
},
u'email':
{
u'xpath': u't:EmailAddress'
},
}
if organizer:
return self.service._xpath_to_dict(element=organizer[0], property_map=property_map, namespace_map=soap_request.NAMESPACES)
else:
return None
def _parse_event_resources(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
resources = response.xpath(u'//m:Items/t:CalendarItem/t:Resources/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in resources:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_attendees(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
required_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:RequiredAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in required_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
optional_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:OptionalAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in optional_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = False
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_conflicts(self, response):
conflicting_ids = response.xpath(u'//m:Items/t:CalendarItem/t:ConflictingMeetings/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
return [id_element.get(u"Id") for id_element in conflicting_ids]
class Exchange2010FolderService(BaseExchangeFolderService):
def folder(self, id=None, **kwargs):
return Exchange2010Folder(service=self.service, id=id, **kwargs)
def get_folder(self, id):
"""
:param str id: The Exchange ID of the folder to retrieve from the Exchange store.
Retrieves the folder specified by the id, from the Exchange store.
**Examples**::
folder = service.folder().get_folder(id)
"""
return Exchange2010Folder(service=self.service, id=id)
def new_folder(self, **properties):
"""
new_folder(display_name=display_name, folder_type=folder_type, parent_id=parent_id)
:param str display_name: The display name given to the new folder.
:param str folder_type: The type of folder to create. Possible values are 'Folder',
'CalendarFolder', 'ContactsFolder', 'SearchFolder', 'TasksFolder'.
:param str parent_id: The parent folder where the new folder will be created.
Creates a new folder with the given properties. Not saved until you call the create() method.
**Examples**::
folder = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
folder.create()
"""
return Exchange2010Folder(service=self.service, **properties)
def find_folder(self, parent_id):
"""
find_folder(parent_id)
:param str parent_id: The parent folder to list.
This method will return a list of sub-folders to a given parent folder.
**Examples**::
# Iterate through folders within the default 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
print(folder.display_name)
# Delete all folders within the 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
folder.delete()
"""
body = soap_request.find_folder(parent_id=parent_id, format=u'AllProperties')
response_xml = self.service.send(body)
return self._parse_response_for_find_folder(response_xml)
def _parse_response_for_find_folder(self, response):
result = []
folders = response.xpath(u'//t:Folders/t:*', namespaces=soap_request.NAMESPACES)
for folder in folders:
result.append(
Exchange2010Folder(
service=self.service,
xml=etree.fromstring(etree.tostring(folder)) # Might be a better way to do this
)
)
return result
class Exchange2010Folder(BaseExchangeFolder):
def _init_from_service(self, id):
body = soap_request.get_folder(folder_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_folder(response_xml)
self._update_properties(properties)
return self
def _init_from_xml(self, xml):
properties = self._parse_response_for_get_folder(xml)
self._update_properties(properties)
return self
def create(self):
"""
Creates a folder in Exchange. ::
calendar = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
calendar.create()
"""
self.validate()
body = soap_request.new_folder(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def delete(self):
"""
Deletes a folder from the Exchange store. ::
folder = service.folder().get_folder(id)
print("Deleting folder: %s" % folder.display_name)
folder.delete()
"""
if not self.id:
raise TypeError(u"You can't delete a folder that hasn't been created yet.")
body = soap_request.delete_folder(self)
response_xml = self.service.send(body) # noqa
# TODO: verify deletion
self._id = None
self._change_key = None
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Folder ID of what will be the new parent folder, of this folder.
Move folder to a different location, specified by folder_id::
folder = service.folder().get_folder(id)
folder.move_to(folder_id="ID of new location's folder")
"""
if not folder_id:
raise TypeError(u"You can't move to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move a folder that hasn't been created yet.")
response_xml = self.service.send(soap_request.move_folder(self, folder_id)) # noqa
result_id, result_key = self._parse_id_and_change_key_from_response(response_xml)
if self.id != result_id:
raise ValueError(u"MoveFolder returned success but requested folder not moved")
self.parent_id = folder_id
return self
def _parse_response_for_get_folder(self, response):
FOLDER_PATH = u'//t:Folder | //t:CalendarFolder | //t:ContactsFolder | //t:SearchFolder | //t:TasksFolder'
path = response.xpath(FOLDER_PATH, namespaces=soap_request.NAMESPACES)[0]
result = self._parse_folder_properties(path)
return result
def _parse_folder_properties(self, response):
property_map = {
u'display_name': {u'xpath': u't:DisplayName'},
}
self._id, self._change_key = self._parse_id_and_change_key_from_response(response)
self._parent_id = self._parse_parent_id_and_change_key_from_response(response)[0]
self.folder_type = etree.QName(response).localname
return self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:FolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_parent_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:ParentFolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
|
[
"logging.getLogger",
"lxml.etree.QName",
"copy.deepcopy",
"warnings.warn",
"lxml.etree.tostring"
] |
[((1118, 1149), 'logging.getLogger', 'logging.getLogger', (['"""pyexchange"""'], {}), "('pyexchange')\n", (1135, 1149), False, 'import logging\n'), ((12203, 12358), 'warnings.warn', 'warnings.warn', (['"""The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead."""', 'DeprecationWarning'], {}), "(\n 'The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.'\n , DeprecationWarning)\n", (12216, 12358), False, 'import warnings\n'), ((31061, 31082), 'lxml.etree.QName', 'etree.QName', (['response'], {}), '(response)\n', (31072, 31082), False, 'from lxml import etree\n'), ((17171, 17185), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (17179, 17185), False, 'from copy import deepcopy\n'), ((17963, 17977), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (17971, 17977), False, 'from copy import deepcopy\n'), ((6418, 6432), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (6426, 6432), False, 'from copy import deepcopy\n'), ((27910, 27932), 'lxml.etree.tostring', 'etree.tostring', (['folder'], {}), '(folder)\n', (27924, 27932), False, 'from lxml import etree\n')]
|
import tensorflow as tf
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
import numpy as np
import h5py
model = InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
graph = tf.get_default_graph()
def pil2array(pillow_img):
return np.array(pillow_img.getdata(), np.float32).reshape(pillow_img.size[1], pillow_img.size[0], 3)
def predict_pil(pillow_img):
img_array = pil2array(pillow_img)
return predict_nparray(img_array)
def predict_nparray(img_as_array):
global graph
img_batch_as_array = np.expand_dims(img_as_array, axis=0)
img_batch_as_array = preprocess_input(img_batch_as_array)
with graph.as_default():
preds = model.predict(img_batch_as_array)
decoded_preds = decode_predictions(preds, top=3)[0]
predictions = [{'label': label, 'descr': description, 'prob': probability} for label,description, probability in decoded_preds]
return predictions
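# Minimal usage sketch, not part of the original module. Assumes a local image file
# "example.jpg"; InceptionV3 with include_top=True expects 299x299 RGB input, which
# load_img resizes to.
if __name__ == "__main__":
    pil_img = image.load_img("example.jpg", target_size=(299, 299))
    for pred in predict_pil(pil_img):
        # Each prediction is a dict with the ImageNet id, class name and probability.
        print("%s (%s): %.3f" % (pred['label'], pred['descr'], pred['prob']))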
|
[
"keras.applications.inception_v3.preprocess_input",
"keras.applications.inception_v3.decode_predictions",
"numpy.expand_dims",
"keras.applications.inception_v3.InceptionV3",
"tensorflow.get_default_graph"
] |
[((197, 287), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(True)', 'weights': '"""imagenet"""', 'input_tensor': 'None', 'input_shape': 'None'}), "(include_top=True, weights='imagenet', input_tensor=None,\n input_shape=None)\n", (208, 287), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n'), ((292, 314), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (312, 314), True, 'import tensorflow as tf\n'), ((633, 669), 'numpy.expand_dims', 'np.expand_dims', (['img_as_array'], {'axis': '(0)'}), '(img_as_array, axis=0)\n', (647, 669), True, 'import numpy as np\n'), ((695, 731), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['img_batch_as_array'], {}), '(img_batch_as_array)\n', (711, 731), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n'), ((833, 865), 'keras.applications.inception_v3.decode_predictions', 'decode_predictions', (['preds'], {'top': '(3)'}), '(preds, top=3)\n', (851, 865), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time : 2020-04-13 21:19
# @Author : <NAME>
# @EMail : <EMAIL>
import nltk
import os
import json
def sentence_split(line):
sents = nltk.tokenize.sent_tokenize(line)
rnt = [sent.split() for sent in sents]
return rnt
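# Minimal usage sketch, not part of the original file. Assumes nltk's 'punkt' tokenizer
# data has already been downloaded (nltk.download('punkt')).
if __name__ == "__main__":
    # Returns one token list per sentence, e.g.
    # [['This', 'is', 'one', 'sentence.'], ['Here', 'is', 'another.']]
    print(sentence_split("This is one sentence. Here is another."))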
|
[
"nltk.tokenize.sent_tokenize"
] |
[((205, 238), 'nltk.tokenize.sent_tokenize', 'nltk.tokenize.sent_tokenize', (['line'], {}), '(line)\n', (232, 238), False, 'import nltk\n')]
|
import numpy as np
from typing import Any, Iterable, Tuple
from .ext import EnvSpec
from .parallel import ParallelEnv
from ..prelude import Action, Array, State
from ..utils.rms import RunningMeanStd
class ParallelEnvWrapper(ParallelEnv[Action, State]):
def __init__(self, penv: ParallelEnv) -> None:
self.penv = penv
def close(self) -> None:
self.penv.close()
def reset(self) -> Array[State]:
return self.penv.reset()
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
return self.penv.step(actions)
def seed(self, seeds: Iterable[int]) -> None:
self.penv.seed(seeds)
@property
def num_envs(self) -> int:
return self.penv.num_envs
@property
def spec(self) -> EnvSpec:
return self.penv.spec
def extract(self, states: Iterable[State]) -> Array:
return self.penv.extract(states)
class FrameStackParallel(ParallelEnvWrapper):
"""Parallel version of atari_wrappers.FrameStack
"""
def __init__(self, penv: ParallelEnv, nstack: int = 4, dtype: type = np.float32) -> None:
super().__init__(penv)
idx = 0
shape = self.penv.state_dim
for dim in shape:
if dim == 1:
idx += 1
else:
break
self.shape = (nstack, *self.penv.state_dim[idx:])
self.obs = np.zeros((self.num_envs, *self.shape), dtype=dtype)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array, Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
self.obs = np.roll(self.obs, shift=-1, axis=1)
for i, _ in filter(lambda t: t[1], enumerate(done)):
self.obs[i] = 0.0
self.obs[:, -1] = self.extract(state).squeeze()
return (self.obs, reward, done, info)
def reset(self) -> Array[State]:
self.obs.fill(0)
state = self.penv.reset()
self.obs[:, -1] = self.extract(state).squeeze()
return self.obs
@property
def state_dim(self) -> Tuple[int, ...]:
return self.shape
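# Hedged shape sketch (comment only): with nstack=4 and a base state_dim of (1, 84, 84),
# the leading singleton dim is skipped and the stacked buffer is (num_envs, 4, 84, 84).
#
#   stacked = FrameStackParallel(penv, nstack=4)
#   obs = stacked.reset()                          # shape (num_envs, 4, 84, 84)
#   obs, rew, done, info = stacked.step(actions)   # newest frame is obs[:, -1]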
class NormalizeObs(ParallelEnvWrapper[Action, Array[float]]):
def __init__(self, penv: ParallelEnv, obs_clip: float = 10.) -> None:
super().__init__(penv)
self.obs_clip = obs_clip
self._rms = RunningMeanStd(shape=self.state_dim)
self.training_mode = True
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[Array[float]], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
return self._filter_obs(state), reward, done, info
def _filter_obs(self, obs: Array[Array[float]]) -> Array[Array[float]]:
if self.training_mode:
self._rms.update(obs) # type: ignore
obs = np.clip((obs - self._rms.mean) / self._rms.std(), -self.obs_clip, self.obs_clip)
return obs
def reset(self) -> Array[Array[float]]:
obs = self.penv.reset()
return self._filter_obs(obs)
class NormalizeReward(ParallelEnvWrapper[Action, State]):
def __init__(self, penv: ParallelEnv, reward_clip: float = 10., gamma: float = 0.99) -> None:
super().__init__(penv)
self.reward_clip = reward_clip
self.gamma = gamma
self._rms = RunningMeanStd(shape=())
self.ret = np.zeros(self.num_envs)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
self.ret = self.ret * self.gamma + reward
self._rms.update(self.ret)
reward = np.clip(reward / self._rms.std(), -self.reward_clip, self.reward_clip)
self.ret[done] = 0.0
return state, reward, done, info
def reset(self) -> Array[State]:
self.ret = np.zeros(self.num_envs)
return self.penv.reset()
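# Hedged composition sketch (comment only; base_penv and actions are assumed to exist):
#
#   penv = NormalizeReward(NormalizeObs(base_penv))   # e.g. continuous-control setup
#   penv = FrameStackParallel(base_penv, nstack=4)    # e.g. Atari-style setup
#   obs, reward, done, info = penv.step(actions)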
|
[
"numpy.zeros",
"numpy.roll"
] |
[((1453, 1504), 'numpy.zeros', 'np.zeros', (['(self.num_envs, *self.shape)'], {'dtype': 'dtype'}), '((self.num_envs, *self.shape), dtype=dtype)\n', (1461, 1504), True, 'import numpy as np\n'), ((1717, 1752), 'numpy.roll', 'np.roll', (['self.obs'], {'shift': '(-1)', 'axis': '(1)'}), '(self.obs, shift=-1, axis=1)\n', (1724, 1752), True, 'import numpy as np\n'), ((3472, 3495), 'numpy.zeros', 'np.zeros', (['self.num_envs'], {}), '(self.num_envs)\n', (3480, 3495), True, 'import numpy as np\n'), ((3996, 4019), 'numpy.zeros', 'np.zeros', (['self.num_envs'], {}), '(self.num_envs)\n', (4004, 4019), True, 'import numpy as np\n')]
|
'''
This file contains method for generating calibration related plots, eg. reliability plots.
References:
[1] <NAME>, <NAME>, <NAME>, and <NAME>. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
from torch.nn import functional as F
from scipy.interpolate import make_interp_spline
plt.rcParams.update({'font.size': 20})
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
for i in range(num_bins):
bin_dict[i][COUNT] = 0
bin_dict[i][CONF] = 0
bin_dict[i][ACC] = 0
bin_dict[i][BIN_ACC] = 0
bin_dict[i][BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
bin_dict = {}
for i in range(num_bins):
bin_dict[i] = {}
_bin_initializer(bin_dict, num_bins)
num_test_samples = len(confs)
for i in range(0, num_test_samples):
confidence = confs[i]
prediction = preds[i]
label = labels[i]
binn = int(math.ceil(((num_bins * confidence) - 1)))
bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
(1 if (label == prediction) else 0)
for binn in range(0, num_bins):
if (bin_dict[binn][COUNT] == 0):
bin_dict[binn][BIN_ACC] = 0
bin_dict[binn][BIN_CONF] = 0
else:
bin_dict[binn][BIN_ACC] = float(
bin_dict[binn][ACC]) / bin_dict[binn][COUNT]
bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / \
float(bin_dict[binn][COUNT])
return bin_dict
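# Sketch of the returned structure (comment only, illustrative numbers): bin_dict maps a
# bin index to its running sums and per-bin averages, e.g.
#   bin_dict[9] == {'count': 12, 'conf': 11.3, 'acc': 10, 'bin_acc': 0.83, 'bin_conf': 0.94}
# where 'bin_acc' is acc / count and 'bin_conf' is conf / count, as used by the plots below.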
def reliability_plot(confs, preds, labels, save_plots_loc, dataset, model, trained_loss, num_bins=15, scaling_related='before', save=False):
'''
Method to draw a reliability plot from a model's predictions and confidences.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
y = []
for i in range(num_bins):
y.append(bin_dict[i][BIN_ACC])
plt.figure(figsize=(10, 8))  # width: 10, height: 8
plt.bar(bns, bns, align='edge', width=0.05, color='pink', label='Expected')
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Actual')
plt.ylabel('Accuracy')
plt.xlabel('Confidence')
plt.legend()
if save:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'reliability_plot_{}_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.show()
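# Hedged usage sketch (comment only): confs, preds and labels are equal-length 1-D
# sequences; with save=True the '<dataset>_<model>' subfolder of save_plots_loc must
# already exist.
#
#   reliability_plot(confs, preds, labels, save_plots_loc='plots', dataset='cifar10',
#                    model='resnet50', trained_loss='cross_entropy',
#                    num_bins=15, scaling_related='before', save=True)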
def bin_strength_plot(confs, preds, labels, num_bins=15):
'''
Method to draw a plot for the number of samples in each confidence bin.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
num_samples = len(labels)
y = []
for i in range(num_bins):
n = (bin_dict[i][COUNT] / float(num_samples)) * 100
y.append(n)
plt.figure(figsize=(10, 8))  # width: 10, height: 8
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Percentage samples')
plt.ylabel('Percentage of samples')
plt.xlabel('Confidence')
plt.show()
def pos_neg_ece_bins_plot(bins_vec, bins_ece_over, bins_ece_under, bins_ece_over_after, bins_ece_under_after, save_plots_loc, dataset, model, trained_loss,
acc_check=False, scaling_related='before', const_temp=False):
plt.figure(figsize=(10, 8))
plt.scatter(bins_vec, bins_ece_over.cpu(), s=70)
plt.scatter(bins_vec, bins_ece_under.cpu(), s=70)
#plt.scatter(bins_vec, bins_ece_over_after.cpu())
#plt.scatter(bins_vec, bins_ece_under_after.cpu())
plt.xlabel('bins', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=18)
#plt.legend(('over-confidence classes', 'under-confidence classes', 'over-confidence classes after scaling', 'under-confidence classes after scaling'), fontsize=10)
plt.legend(('over-confidence classes', 'under-confidence classes'), fontsize=22)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_under_ece_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def pos_neg_ece_plot(acc, csece_pos, csece_neg, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False):
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece_pos.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'pos_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece_neg.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'neg_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def ece_acc_plot(acc, csece, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False, unc=False):
plt.figure(figsize=(10, 8))
plt.scatter(acc, csece.cpu(), s=70)
plt.xlabel('accuracy', fontsize=26)
plt.xticks(fontsize=18)
plt.ylabel('ECE', fontsize=26)
plt.yticks(fontsize=16)
#plt.ylim(0, 0.01)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
if acc_check:
if unc:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'uncalibrated_ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=100)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
if unc:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'uncalibrated_ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_acc_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
plt.close()
def ece_iters_plot(scaled_model, save_plots_loc, dataset, model, trained_loss, init_temp, acc_check=False):
plt.figure()
plt.plot(range(scaled_model.iters + 1), scaled_model.ece_list)
plt.plot(range(scaled_model.iters + 1), scaled_model.ece*torch.ones((scaled_model.iters + 1)))
plt.legend(('class-based temp scaling', 'single temp scaling'), fontsize=10)
plt.xlabel('iterations', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('ECE', fontsize=10)
plt.yticks(fontsize=10)
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_{}_{}_{}_acc.pdf'.format(init_temp, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_{}_{}_{}.pdf'.format(init_temp, dataset, model, trained_loss)), dpi=40)
plt.close()
def temp_acc_plot(acc, temp, single_temp, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False):
plt.figure()
plt.scatter(acc, temp.cpu(), label='Class-based temperature')
plt.plot(acc, single_temp*torch.ones(len(acc)), color='red', label='Single temperature')
plt.xlabel('accuracy', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('Temperature', fontsize=10)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
if const_temp:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}_const_temp.pdf'.format(dataset, model, trained_loss)), dpi=40)
else:
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}_acc.pdf'.format(dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_acc_after_scaling_{}_{}_{}.pdf'.format(dataset, model, trained_loss)), dpi=40)
def diff_ece_plot(acc, csece1, csece2, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_type='class_based'):
plt.figure()
plt.scatter(acc, (csece1 - csece2).cpu())
plt.xlabel('accuracy', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('ECE difference', fontsize=10)
plt.yticks(fontsize=10)
plt.axhline(y=0, color='r')
if acc_check:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'diff_{}_ece_acc_after_scaling_{}_{}_{}_acc.pdf'.format(scaling_type, dataset, model, trained_loss)), dpi=40)
else:
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'diff_{}_ece_acc_after_scaling_{}_{}_{}.pdf'.format(scaling_type, dataset, model, trained_loss)), dpi=40)
def bins_over_conf_plot(bins, diff, save_plots_loc, dataset, model, trained_loss, scaling_related='before'):
plt.figure()
plt.plot(bins, diff)
plt.xlabel('bins', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('confidence - accuracy', fontsize=10)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_conf_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
def temp_bins_plot(single_T, bins_T, bin_boundaries, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False, divide='reg_divide', ds='val', version=1, cross_validate='ECE', y_name='Temperature'):
bin_boundaries = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
bin_lowers = bin_boundaries[:-1]
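    # Note: the bin_boundaries argument is overwritten above, so the x-axis of this plot is
    # the bin index rather than the confidence boundaries that were passed in.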
plt.figure()
for i in range(bins_T.shape[1]):
#bin_lowers = bin_boundaries[i][:-1]
#x_new = np.linspace(1, bins_T.shape[0], 300)
#a_BSpline = make_interp_spline(bin_lowers, bins_T[:, i].cpu())
#y_new = a_BSpline(x_new)
plt.plot(bin_lowers, bins_T[:, i].cpu(), label='Iteration #{}'.format(i + 1))
#plt.plot(x_new, y_new, label='CBT ({})'.format(cross_validate))
#plt.plot(x_new, y_new, label='Iteration #{}'.format(i + 1))
#plt.plot(bin_lowers, torch.ones(bins_T.shape[0])*single_T, label='Single temperature')
#plt.plot(x_new, torch.ones(len(y_new)) * single_T, label='TS'.format(cross_validate))
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(y_name, fontsize=16)
plt.yticks(fontsize=10)
# plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds, cross_validate)), dpi=40)
def ece_bin_plot(ece_bin, single_ece_bin, origin_ece_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
origin_ece_bin = [i * 100 for i in origin_ece_bin]
single_ece_bin = [i * 100 for i in single_ece_bin]
ece_bin = [i * 100 for i in ece_bin]
plt.plot(range(len(ece_bin)), origin_ece_bin, label='ECE before scaling')
plt.plot(range(len(ece_bin)), single_ece_bin, label='ECE after single temp scaling')
plt.plot(range(len(ece_bin)), ece_bin, label='ECE after per bin temp scaling')
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('ECE(%)', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model),
'ece_bins_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(dataset, model, trained_loss, version,
divide, ds)), dpi=40)
def logits_diff_bin_plot(logits_diff_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
plt.plot(range(len(logits_diff_bin)), logits_diff_bin)
plt.xlabel('Bins', fontsize=10)
plt.xticks(fontsize=10)
plt.ylabel('Logits difference', fontsize=10)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model),
'logits_diff_bins_{}_{}_{}_ver_{}_{}_{}.pdf'.format(dataset, model, trained_loss, version,
divide, ds)), dpi=40)
def temp_bins_plot2(single_T, single_T2, bins_T, bins_T2, bin_boundaries, bin_boundaries2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1, y_name='Temperature'):
bin_boundaries = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
bin_lowers = bin_boundaries[:-1]
plt.figure()
for i in range(bins_T.shape[1]):
#bin_lowers = bin_boundaries[i][:-1]
#bin_lowers2 = bin_boundaries2[i][:-1]
# x_new = np.linspace(1, bins_T.shape[0], 300)
# a_BSpline = make_interp_spline(bin_lowers, bins_T[:, i].cpu())
# a_BSpline2 = make_interp_spline(bin_lowers, bins_T2[:, i].cpu())
# y_new = a_BSpline(x_new)
# y_new2 = a_BSpline2(x_new)
plt.plot(bin_lowers, bins_T[:, i].cpu(), label='Weights')
plt.plot(bin_lowers, (1 / bins_T2[:, i]).cpu(), label=r'$1/Temperatures$')
# plt.plot(x_new, y_new, label='CBT ResNet-152')
# plt.plot(x_new, y_new2, label='CBT DenseNet-161')
#plt.plot(x_new, y_new, label='Iteration #{}'.format(i))
#plt.plot(bin_lowers, torch.ones(bins_T.shape[0])*single_T, label='Single temperature')
# plt.plot(x_new, torch.ones(len(y_new)) * single_T, label='TS ResNet-152')
# plt.plot(x_new, torch.ones(len(y_new2)) * single_T2, label='TS DenseNet-161')
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(y_name, fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds)), dpi=40)
def exp_value(confidences, diff):
numerator = (-1 + torch.sqrt(1 + 4 * (1 - confidences) / confidences)) / 2
denominator = (-1 + torch.sqrt(1 + 4 * (1 - (confidences - diff)) / (confidences - diff))) / 2
return numerator, denominator
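# Editor's note on exp_value() above (a reading of the code, not taken from the original
# file): if the three class logits are evenly spaced, the class-probability ratios form a
# geometric series 1 : x : x**2 with p1 * (1 + x + x**2) = 1. Solving the quadratic at
# confidence c gives x = (-1 + sqrt(1 + 4 * (1 - c) / c)) / 2, and the temperature that
# lowers the confidence from c to c - diff is log(x_c) / log(x_{c - diff}), which is what
# plot_temp_different_bins() below plots against the confidence.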
def plot_temp_different_bins(save_plots_loc):
confidences = torch.linspace(0.61, 1, 40)
#optim_temps = torch.log((1 - confidences) / confidences) / torch.log((1 - (confidences - 0.1)) / (confidences - 0.1))
numerator1, denominator1 = exp_value(confidences, 0.1)
numerator2, denominator2 = exp_value(confidences, 0.05)
numerator3, denominator3 = exp_value(confidences, 0.03)
#numerator4, denominator4 = exp_value(confidences, 0.2)
optim_temps1 = torch.log(numerator1) / torch.log(denominator1)
optim_temps2 = torch.log(numerator2) / torch.log(denominator2)
optim_temps3 = torch.log(numerator3) / torch.log(denominator3)
#optim_temps4 = torch.log(numerator4) / torch.log(denominator4)
plt.figure()
#plt.plot(confidences, optim_temps4, label='\u03B5=0.2')
plt.plot(confidences, optim_temps1, label='\u03B5=0.1')
plt.plot(confidences, optim_temps2, label='\u03B5=0.05')
plt.plot(confidences, optim_temps3, label='\u03B5=0.03')
plt.xlabel('Confidence', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Temperature', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, 'temp_movements_between_bins_3_classes.pdf'), dpi=40)
def ece_iters_plot2(single_ece, single_ece2, ece_list1, ece_list2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
if len(ece_list1) < len(ece_list2):
ece_list1 = ece_list1 + (len(ece_list2) - len(ece_list1)) * [ece_list1[-1]]
elif len(ece_list1) > len(ece_list2):
ece_list2 = ece_list2 + (len(ece_list1) - len(ece_list2)) * [ece_list2[-1]]
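    # the padding above extends the shorter ECE curve with its last value so that both
    # models can be plotted over the same number of iterations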
ece_list1 = [i * 100 for i in ece_list1]
ece_list2 = [i * 100 for i in ece_list2]
plt.figure()
plt.plot(range(len(ece_list1)), ece_list1, label='CBT ResNet-152')
plt.plot(range(len(ece_list2)), ece_list2, label='CBT DenseNet-161')
plt.plot(range(len(ece_list1)), torch.ones(len(ece_list1)) * single_ece, label='TS ResNet-152')
plt.plot(range(len(ece_list2)), torch.ones(len(ece_list2)) * single_ece2, label='TS DenseNet-161')
plt.xlabel('Iterations', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('ECE(%)', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=14)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'ece_iters_{}_iters_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(len(ece_list1) - 1, dataset, model, trained_loss, version, divide, ds)), dpi=40)
def plot_trajectory(save_plots_loc): # For probabilities [0.6, 0.3, 0.1]
weights = torch.linspace(0, 1, 100).unsqueeze(-1)
temperatures = torch.linspace(1, 100, 10000).unsqueeze(-1)
starting_point = torch.tensor([0.6, 0.3]).unsqueeze(0)
starting_logits = torch.tensor([math.log(0.6), math.log(0.3), math.log(0.1)])
# starting_logits = torch.tensor([2.2, 1.525, 0.5])
ts_points = [F.softmax(starting_logits / temperature, dim=0) for temperature in temperatures]
ts_points = torch.stack(ts_points)
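    # ts_points traces the temperature-scaling path of the 3-class prediction as T sweeps
    # from 1 to 100, drifting towards the uniform distribution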
n_classes = starting_point.shape[1] + 1
ws_points = torch.matmul(weights, (1 / n_classes) * torch.ones(starting_point.shape)) + torch.matmul(1 - weights, starting_point)
ws_points_full = torch.cat((ws_points, (1 - torch.sum(ws_points, 1)).unsqueeze(-1)), 1)
weights_ent = -torch.sum(ws_points_full * torch.log2(ws_points_full), 1)
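    # softmaxes_100 below is a hard-coded 100-class softmax output (presumably from a
    # CIFAR-100-style model), used to trace entropy against confidence under weight scaling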
softmaxes_100 = torch.tensor([8.4042679500e-13, 1.4278050742e-08, 3.9925965312e-11, 7.8529644267e-14,
1.1687384394e-10, 9.7083494401e-14, 7.9007286824e-13, 1.1496912363e-13,
5.3773496073e-12, 7.6878958755e-10, 8.9035365747e-09, 5.3947623278e-12,
2.4426896617e-10, 2.2383541201e-11, 1.2707822294e-10, 2.1816673468e-10,
5.0172353387e-15, 1.6286461112e-12, 5.1560413925e-12, 8.6647043707e-12,
1.8531972623e-09, 2.7630087107e-10, 7.1155463308e-16, 3.7386840152e-11,
5.1252758981e-11, 3.1181262433e-11, 2.6755674298e-06, 9.9959415197e-01,
1.9884007635e-11, 1.1077156523e-04, 1.7637266647e-11, 2.2995503279e-09,
7.3481587606e-06, 1.2129663940e-09, 3.2103027479e-05, 5.2368401282e-11,
2.3453745612e-09, 2.9135565488e-11, 2.9145277771e-12, 3.5043259961e-11,
9.6558103581e-14, 1.9227650583e-09, 1.5236486206e-07, 4.5127812598e-09,
8.7795990112e-05, 3.4632095776e-05, 3.3900747098e-08, 5.3773188159e-12,
4.9334299666e-13, 4.7792599739e-11, 9.7179556069e-12, 2.9196653486e-05,
1.2558685400e-15, 1.9376671101e-10, 2.1402189916e-12, 1.7672345792e-12,
4.2892519397e-11, 8.4134947273e-12, 1.5762311595e-11, 2.2964830992e-12,
1.1481499413e-14, 4.4955605211e-11, 2.6382507290e-11, 1.0882557433e-07,
3.2325153665e-10, 1.4755903444e-10, 2.8219235976e-11, 1.1946493714e-06,
5.6229808136e-12, 4.9992823214e-09, 1.2134488726e-11, 2.2948927203e-09,
1.0463446776e-09, 2.0963939562e-07, 1.3484322992e-08, 1.1520114862e-09,
1.9648471489e-13, 6.5380464775e-07, 2.2771805561e-06, 6.8640011210e-12,
2.4578919692e-05, 2.0577129952e-13, 2.1242145684e-13, 2.3415527872e-13,
4.5339165755e-10, 4.0936140522e-07, 9.8099343132e-16, 9.6455538001e-11,
4.4561368484e-11, 4.3079886880e-10, 1.0865559563e-09, 7.0311572927e-05,
6.6880915140e-14, 4.8056293167e-08, 3.0499626199e-16, 5.0754581093e-11,
4.9211958293e-12, 9.5986638371e-07, 1.9191167766e-08, 1.8387422074e-07]).unsqueeze(0)
ws_points2 = torch.matmul(weights, (1 / n_classes) * torch.ones(softmaxes_100.shape)) + torch.matmul(1 - weights, softmaxes_100)
weights_ent2 = -torch.sum(ws_points2 * torch.log2(ws_points2), 1)
plt.figure()
plt.plot(ws_points[:, 0], ws_points[:, 1], label='Weight Scaling')
plt.plot(ts_points[:, 0], ts_points[:, 1], label='Temperature Scaling')
plt.xlabel(r'$p_1$', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel(r'$p_2$', fontsize=16)
plt.yticks(fontsize=10)
plt.legend(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'trajectories.pdf'), dpi=40)
plt.close()
plt.figure()
plt.plot(ws_points[:, 0], weights_ent)
plt.xlabel(r'$p_1$', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Entropy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'entropy.pdf'), dpi=40)
plt.figure()
plt.plot(ws_points2.max(1)[0], weights_ent2)
plt.xlabel('Confidence', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Entropy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, 'entropy_100.pdf'), dpi=40)
def conf_acc_diff_plot(conf_acc_diff, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
plt.figure()
plt.plot(range(len(conf_acc_diff)), conf_acc_diff)
plt.xlabel('Bins', fontsize=16)
plt.xticks(fontsize=10)
plt.ylabel('Confidence - Accuracy', fontsize=16)
plt.yticks(fontsize=10)
plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'conf_acc_diff_bins_{}_{}_{}_{}_ver_{}_{}_{}.pdf'.format(len(conf_acc_diff), dataset, model, trained_loss, version, divide, ds)), dpi=40)
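# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# The shapes, paths and names below are assumptions chosen only to show how
# temp_bins_plot() might be driven; the real caller is not included here.
# if __name__ == '__main__':
#     example_bins_T = torch.rand(15, 3) + 0.5            # 15 bins, 3 CBT iterations
#     example_boundaries = torch.linspace(0, 1, 16)
#     temp_bins_plot(1.5, example_bins_T, example_boundaries, './plots',
#                    'cifar100', 'resnet152', 'cross_entropy')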
|
[
"matplotlib.pyplot.ylabel",
"torch.sqrt",
"math.log",
"torch.log2",
"torch.sum",
"torch.nn.functional.softmax",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.yticks",
"torch.matmul",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"torch.log",
"math.ceil",
"torch.stack",
"os.path.join",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"torch.tensor",
"torch.linspace",
"torch.ones"
] |
[((422, 460), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (441, 460), True, 'import matplotlib.pyplot as plt\n'), ((2301, 2328), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2311, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2430), 'matplotlib.pyplot.bar', 'plt.bar', (['bns', 'bns'], {'align': '"""edge"""', 'width': '(0.05)', 'color': '"""pink"""', 'label': '"""Expected"""'}), "(bns, bns, align='edge', width=0.05, color='pink', label='Expected')\n", (2362, 2430), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2522), 'matplotlib.pyplot.bar', 'plt.bar', (['bns', 'y'], {'align': '"""edge"""', 'width': '(0.05)', 'color': '"""blue"""', 'alpha': '(0.5)', 'label': '"""Actual"""'}), "(bns, y, align='edge', width=0.05, color='blue', alpha=0.5, label=\n 'Actual')\n", (2442, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2534, 2556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2544, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2585), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence"""'], {}), "('Confidence')\n", (2571, 2585), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2602), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2600, 2602), True, 'import matplotlib.pyplot as plt\n'), ((3253, 3280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3263, 3280), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3406), 'matplotlib.pyplot.bar', 'plt.bar', (['bns', 'y'], {'align': '"""edge"""', 'width': '(0.05)', 'color': '"""blue"""', 'alpha': '(0.5)', 'label': '"""Percentage samples"""'}), "(bns, y, align='edge', width=0.05, color='blue', alpha=0.5, label=\n 'Percentage samples')\n", (3314, 3406), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3453), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of samples"""'], {}), "('Percentage of samples')\n", (3428, 3453), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3482), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence"""'], {}), "('Confidence')\n", (3468, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3487, 3497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3495, 3497), True, 'import matplotlib.pyplot as plt\n'), ((3751, 3778), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3761, 3778), True, 'import matplotlib.pyplot as plt\n'), ((3999, 4030), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""bins"""'], {'fontsize': '(26)'}), "('bins', fontsize=26)\n", (4009, 4030), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4058), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4045, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE"""'], {'fontsize': '(26)'}), "('ECE', fontsize=26)\n", (4073, 4093), True, 'import matplotlib.pyplot as plt\n'), ((4098, 4121), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4108, 4121), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4380), 'matplotlib.pyplot.legend', 'plt.legend', (["('over-confidence classes', 'under-confidence classes')"], {'fontsize': '(22)'}), "(('over-confidence classes', 'under-confidence classes'), fontsize=22\n )\n", (4305, 4380), True, 'import matplotlib.pyplot as plt\n'), ((5015, 
5026), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5024, 5026), True, 'import matplotlib.pyplot as plt\n'), ((5188, 5215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (5198, 5215), True, 'import matplotlib.pyplot as plt\n'), ((5264, 5299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""accuracy"""'], {'fontsize': '(26)'}), "('accuracy', fontsize=26)\n", (5274, 5299), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5327), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (5314, 5327), True, 'import matplotlib.pyplot as plt\n'), ((5332, 5362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE"""'], {'fontsize': '(26)'}), "('ECE', fontsize=26)\n", (5342, 5362), True, 'import matplotlib.pyplot as plt\n'), ((5367, 5390), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (5377, 5390), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5412), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (5403, 5412), True, 'import matplotlib.pyplot as plt\n'), ((6028, 6039), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6037, 6039), True, 'import matplotlib.pyplot as plt\n'), ((6045, 6072), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (6055, 6072), True, 'import matplotlib.pyplot as plt\n'), ((6121, 6156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""accuracy"""'], {'fontsize': '(26)'}), "('accuracy', fontsize=26)\n", (6131, 6156), True, 'import matplotlib.pyplot as plt\n'), ((6161, 6184), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (6171, 6184), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6219), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE"""'], {'fontsize': '(26)'}), "('ECE', fontsize=26)\n", (6199, 6219), True, 'import matplotlib.pyplot as plt\n'), ((6224, 6247), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (6234, 6247), True, 'import matplotlib.pyplot as plt\n'), ((6252, 6269), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (6260, 6269), True, 'import matplotlib.pyplot as plt\n'), ((6885, 6896), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6894, 6896), True, 'import matplotlib.pyplot as plt\n'), ((7054, 7081), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (7064, 7081), True, 'import matplotlib.pyplot as plt\n'), ((7126, 7161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""accuracy"""'], {'fontsize': '(26)'}), "('accuracy', fontsize=26)\n", (7136, 7161), True, 'import matplotlib.pyplot as plt\n'), ((7166, 7189), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (7176, 7189), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE"""'], {'fontsize': '(26)'}), "('ECE', fontsize=26)\n", (7204, 7224), True, 'import matplotlib.pyplot as plt\n'), ((7229, 7252), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (7239, 7252), True, 'import matplotlib.pyplot as plt\n'), ((8394, 8405), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8403, 8405), True, 'import matplotlib.pyplot as plt\n'), ((8523, 8535), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8533, 8535), True, 'import matplotlib.pyplot as plt\n'), ((8706, 8782), 
'matplotlib.pyplot.legend', 'plt.legend', (["('class-based temp scaling', 'single temp scaling')"], {'fontsize': '(10)'}), "(('class-based temp scaling', 'single temp scaling'), fontsize=10)\n", (8716, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8787, 8824), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {'fontsize': '(10)'}), "('iterations', fontsize=10)\n", (8797, 8824), True, 'import matplotlib.pyplot as plt\n'), ((8829, 8852), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (8839, 8852), True, 'import matplotlib.pyplot as plt\n'), ((8857, 8887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE"""'], {'fontsize': '(10)'}), "('ECE', fontsize=10)\n", (8867, 8887), True, 'import matplotlib.pyplot as plt\n'), ((8892, 8915), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (8902, 8915), True, 'import matplotlib.pyplot as plt\n'), ((9286, 9297), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9295, 9297), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9443), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9441, 9443), True, 'import matplotlib.pyplot as plt\n'), ((9607, 9642), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""accuracy"""'], {'fontsize': '(10)'}), "('accuracy', fontsize=10)\n", (9617, 9642), True, 'import matplotlib.pyplot as plt\n'), ((9647, 9670), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (9657, 9670), True, 'import matplotlib.pyplot as plt\n'), ((9675, 9713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature"""'], {'fontsize': '(10)'}), "('Temperature', fontsize=10)\n", (9685, 9713), True, 'import matplotlib.pyplot as plt\n'), ((9718, 9741), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (9728, 9741), True, 'import matplotlib.pyplot as plt\n'), ((9746, 9769), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (9756, 9769), True, 'import matplotlib.pyplot as plt\n'), ((10493, 10505), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10503, 10505), True, 'import matplotlib.pyplot as plt\n'), ((10556, 10591), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""accuracy"""'], {'fontsize': '(10)'}), "('accuracy', fontsize=10)\n", (10566, 10591), True, 'import matplotlib.pyplot as plt\n'), ((10596, 10619), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (10606, 10619), True, 'import matplotlib.pyplot as plt\n'), ((10624, 10665), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE difference"""'], {'fontsize': '(10)'}), "('ECE difference', fontsize=10)\n", (10634, 10665), True, 'import matplotlib.pyplot as plt\n'), ((10670, 10693), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (10680, 10693), True, 'import matplotlib.pyplot as plt\n'), ((10698, 10725), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""r"""'}), "(y=0, color='r')\n", (10709, 10725), True, 'import matplotlib.pyplot as plt\n'), ((11247, 11259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11257, 11259), True, 'import matplotlib.pyplot as plt\n'), ((11264, 11284), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', 'diff'], {}), '(bins, diff)\n', (11272, 11284), True, 'import matplotlib.pyplot as plt\n'), ((11289, 11320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""bins"""'], {'fontsize': '(10)'}), "('bins', 
fontsize=10)\n", (11299, 11320), True, 'import matplotlib.pyplot as plt\n'), ((11325, 11348), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (11335, 11348), True, 'import matplotlib.pyplot as plt\n'), ((11353, 11401), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""confidence - accuracy"""'], {'fontsize': '(10)'}), "('confidence - accuracy', fontsize=10)\n", (11363, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11406, 11429), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (11416, 11429), True, 'import matplotlib.pyplot as plt\n'), ((11856, 11911), 'torch.linspace', 'torch.linspace', (['(0)', 'bins_T.shape[0]', '(bins_T.shape[0] + 1)'], {}), '(0, bins_T.shape[0], bins_T.shape[0] + 1)\n', (11870, 11911), False, 'import torch\n'), ((11953, 11965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11963, 11965), True, 'import matplotlib.pyplot as plt\n'), ((12623, 12654), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {'fontsize': '(16)'}), "('Bins', fontsize=16)\n", (12633, 12654), True, 'import matplotlib.pyplot as plt\n'), ((12659, 12682), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (12669, 12682), True, 'import matplotlib.pyplot as plt\n'), ((12687, 12718), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_name'], {'fontsize': '(16)'}), '(y_name, fontsize=16)\n', (12697, 12718), True, 'import matplotlib.pyplot as plt\n'), ((12723, 12746), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (12733, 12746), True, 'import matplotlib.pyplot as plt\n'), ((13165, 13177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13175, 13177), True, 'import matplotlib.pyplot as plt\n'), ((13583, 13614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {'fontsize': '(16)'}), "('Bins', fontsize=16)\n", (13593, 13614), True, 'import matplotlib.pyplot as plt\n'), ((13619, 13642), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (13629, 13642), True, 'import matplotlib.pyplot as plt\n'), ((13647, 13680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE(%)"""'], {'fontsize': '(16)'}), "('ECE(%)', fontsize=16)\n", (13657, 13680), True, 'import matplotlib.pyplot as plt\n'), ((13685, 13708), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (13695, 13708), True, 'import matplotlib.pyplot as plt\n'), ((13713, 13736), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (13723, 13736), True, 'import matplotlib.pyplot as plt\n'), ((14180, 14192), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14190, 14192), True, 'import matplotlib.pyplot as plt\n'), ((14256, 14287), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {'fontsize': '(10)'}), "('Bins', fontsize=10)\n", (14266, 14287), True, 'import matplotlib.pyplot as plt\n'), ((14292, 14315), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (14302, 14315), True, 'import matplotlib.pyplot as plt\n'), ((14320, 14364), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Logits difference"""'], {'fontsize': '(10)'}), "('Logits difference', fontsize=10)\n", (14330, 14364), True, 'import matplotlib.pyplot as plt\n'), ((14369, 14392), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (14379, 14392), True, 'import matplotlib.pyplot as plt\n'), ((14924, 
14979), 'torch.linspace', 'torch.linspace', (['(0)', 'bins_T.shape[0]', '(bins_T.shape[0] + 1)'], {}), '(0, bins_T.shape[0], bins_T.shape[0] + 1)\n', (14938, 14979), False, 'import torch\n'), ((15021, 15033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15031, 15033), True, 'import matplotlib.pyplot as plt\n'), ((16029, 16060), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {'fontsize': '(16)'}), "('Bins', fontsize=16)\n", (16039, 16060), True, 'import matplotlib.pyplot as plt\n'), ((16065, 16088), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (16075, 16088), True, 'import matplotlib.pyplot as plt\n'), ((16093, 16124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_name'], {'fontsize': '(16)'}), '(y_name, fontsize=16)\n', (16103, 16124), True, 'import matplotlib.pyplot as plt\n'), ((16129, 16152), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (16139, 16152), True, 'import matplotlib.pyplot as plt\n'), ((16157, 16180), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (16167, 16180), True, 'import matplotlib.pyplot as plt\n'), ((16723, 16750), 'torch.linspace', 'torch.linspace', (['(0.61)', '(1)', '(40)'], {}), '(0.61, 1, 40)\n', (16737, 16750), False, 'import torch\n'), ((17386, 17398), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17396, 17398), True, 'import matplotlib.pyplot as plt\n'), ((17464, 17514), 'matplotlib.pyplot.plot', 'plt.plot', (['confidences', 'optim_temps1'], {'label': '"""ε=0.1"""'}), "(confidences, optim_temps1, label='ε=0.1')\n", (17472, 17514), True, 'import matplotlib.pyplot as plt\n'), ((17524, 17575), 'matplotlib.pyplot.plot', 'plt.plot', (['confidences', 'optim_temps2'], {'label': '"""ε=0.05"""'}), "(confidences, optim_temps2, label='ε=0.05')\n", (17532, 17575), True, 'import matplotlib.pyplot as plt\n'), ((17585, 17636), 'matplotlib.pyplot.plot', 'plt.plot', (['confidences', 'optim_temps3'], {'label': '"""ε=0.03"""'}), "(confidences, optim_temps3, label='ε=0.03')\n", (17593, 17636), True, 'import matplotlib.pyplot as plt\n'), ((17646, 17683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence"""'], {'fontsize': '(16)'}), "('Confidence', fontsize=16)\n", (17656, 17683), True, 'import matplotlib.pyplot as plt\n'), ((17688, 17711), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (17698, 17711), True, 'import matplotlib.pyplot as plt\n'), ((17716, 17754), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature"""'], {'fontsize': '(16)'}), "('Temperature', fontsize=16)\n", (17726, 17754), True, 'import matplotlib.pyplot as plt\n'), ((17759, 17782), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (17769, 17782), True, 'import matplotlib.pyplot as plt\n'), ((17787, 17810), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (17797, 17810), True, 'import matplotlib.pyplot as plt\n'), ((18416, 18428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18426, 18428), True, 'import matplotlib.pyplot as plt\n'), ((18780, 18817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {'fontsize': '(16)'}), "('Iterations', fontsize=16)\n", (18790, 18817), True, 'import matplotlib.pyplot as plt\n'), ((18822, 18845), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (18832, 18845), True, 'import matplotlib.pyplot as plt\n'), 
((18850, 18883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECE(%)"""'], {'fontsize': '(16)'}), "('ECE(%)', fontsize=16)\n", (18860, 18883), True, 'import matplotlib.pyplot as plt\n'), ((18888, 18911), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (18898, 18911), True, 'import matplotlib.pyplot as plt\n'), ((18916, 18939), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (18926, 18939), True, 'import matplotlib.pyplot as plt\n'), ((19666, 19688), 'torch.stack', 'torch.stack', (['ts_points'], {}), '(ts_points)\n', (19677, 19688), False, 'import torch\n'), ((22283, 22295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22293, 22295), True, 'import matplotlib.pyplot as plt\n'), ((22300, 22366), 'matplotlib.pyplot.plot', 'plt.plot', (['ws_points[:, 0]', 'ws_points[:, 1]'], {'label': '"""Weight Scaling"""'}), "(ws_points[:, 0], ws_points[:, 1], label='Weight Scaling')\n", (22308, 22366), True, 'import matplotlib.pyplot as plt\n'), ((22371, 22442), 'matplotlib.pyplot.plot', 'plt.plot', (['ts_points[:, 0]', 'ts_points[:, 1]'], {'label': '"""Temperature Scaling"""'}), "(ts_points[:, 0], ts_points[:, 1], label='Temperature Scaling')\n", (22379, 22442), True, 'import matplotlib.pyplot as plt\n'), ((22447, 22479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p_1$"""'], {'fontsize': '(16)'}), "('$p_1$', fontsize=16)\n", (22457, 22479), True, 'import matplotlib.pyplot as plt\n'), ((22485, 22508), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (22495, 22508), True, 'import matplotlib.pyplot as plt\n'), ((22513, 22545), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p_2$"""'], {'fontsize': '(16)'}), "('$p_2$', fontsize=16)\n", (22523, 22545), True, 'import matplotlib.pyplot as plt\n'), ((22551, 22574), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (22561, 22574), True, 'import matplotlib.pyplot as plt\n'), ((22579, 22602), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (22589, 22602), True, 'import matplotlib.pyplot as plt\n'), ((22681, 22692), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22690, 22692), True, 'import matplotlib.pyplot as plt\n'), ((22702, 22714), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22712, 22714), True, 'import matplotlib.pyplot as plt\n'), ((22719, 22757), 'matplotlib.pyplot.plot', 'plt.plot', (['ws_points[:, 0]', 'weights_ent'], {}), '(ws_points[:, 0], weights_ent)\n', (22727, 22757), True, 'import matplotlib.pyplot as plt\n'), ((22762, 22794), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p_1$"""'], {'fontsize': '(16)'}), "('$p_1$', fontsize=16)\n", (22772, 22794), True, 'import matplotlib.pyplot as plt\n'), ((22800, 22823), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (22810, 22823), True, 'import matplotlib.pyplot as plt\n'), ((22828, 22862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entropy"""'], {'fontsize': '(16)'}), "('Entropy', fontsize=16)\n", (22838, 22862), True, 'import matplotlib.pyplot as plt\n'), ((22867, 22890), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (22877, 22890), True, 'import matplotlib.pyplot as plt\n'), ((22969, 22981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22979, 22981), True, 'import matplotlib.pyplot as plt\n'), ((23035, 23072), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Confidence"""'], {'fontsize': '(16)'}), "('Confidence', fontsize=16)\n", (23045, 23072), True, 'import matplotlib.pyplot as plt\n'), ((23077, 23100), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (23087, 23100), True, 'import matplotlib.pyplot as plt\n'), ((23105, 23139), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entropy"""'], {'fontsize': '(16)'}), "('Entropy', fontsize=16)\n", (23115, 23139), True, 'import matplotlib.pyplot as plt\n'), ((23144, 23167), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (23154, 23167), True, 'import matplotlib.pyplot as plt\n'), ((23373, 23385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23383, 23385), True, 'import matplotlib.pyplot as plt\n'), ((23445, 23476), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {'fontsize': '(16)'}), "('Bins', fontsize=16)\n", (23455, 23476), True, 'import matplotlib.pyplot as plt\n'), ((23481, 23504), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (23491, 23504), True, 'import matplotlib.pyplot as plt\n'), ((23509, 23557), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Confidence - Accuracy"""'], {'fontsize': '(16)'}), "('Confidence - Accuracy', fontsize=16)\n", (23519, 23557), True, 'import matplotlib.pyplot as plt\n'), ((23562, 23585), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (23572, 23585), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2822, 2824), True, 'import matplotlib.pyplot as plt\n'), ((17132, 17153), 'torch.log', 'torch.log', (['numerator1'], {}), '(numerator1)\n', (17141, 17153), False, 'import torch\n'), ((17156, 17179), 'torch.log', 'torch.log', (['denominator1'], {}), '(denominator1)\n', (17165, 17179), False, 'import torch\n'), ((17199, 17220), 'torch.log', 'torch.log', (['numerator2'], {}), '(numerator2)\n', (17208, 17220), False, 'import torch\n'), ((17223, 17246), 'torch.log', 'torch.log', (['denominator2'], {}), '(denominator2)\n', (17232, 17246), False, 'import torch\n'), ((17266, 17287), 'torch.log', 'torch.log', (['numerator3'], {}), '(numerator3)\n', (17275, 17287), False, 'import torch\n'), ((17290, 17313), 'torch.log', 'torch.log', (['denominator3'], {}), '(denominator3)\n', (17299, 17313), False, 'import torch\n'), ((17827, 17900), 'os.path.join', 'os.path.join', (['save_plots_loc', '"""temp_movements_between_bins_3_classes.pdf"""'], {}), "(save_plots_loc, 'temp_movements_between_bins_3_classes.pdf')\n", (17839, 17900), False, 'import os\n'), ((19569, 19616), 'torch.nn.functional.softmax', 'F.softmax', (['(starting_logits / temperature)'], {'dim': '(0)'}), '(starting_logits / temperature, dim=0)\n', (19578, 19616), True, 'from torch.nn import functional as F\n'), ((19825, 19866), 'torch.matmul', 'torch.matmul', (['(1 - weights)', 'starting_point'], {}), '(1 - weights, starting_point)\n', (19837, 19866), False, 'import torch\n'), ((22168, 22208), 'torch.matmul', 'torch.matmul', (['(1 - weights)', 'softmaxes_100'], {}), '(1 - weights, softmaxes_100)\n', (22180, 22208), False, 'import torch\n'), ((22619, 22667), 'os.path.join', 'os.path.join', (['save_plots_loc', '"""trajectories.pdf"""'], {}), "(save_plots_loc, 'trajectories.pdf')\n", (22631, 22667), False, 'import os\n'), ((22907, 22950), 'os.path.join', 'os.path.join', (['save_plots_loc', '"""entropy.pdf"""'], {}), "(save_plots_loc, 'entropy.pdf')\n", (22919, 22950), 
False, 'import os\n'), ((23184, 23231), 'os.path.join', 'os.path.join', (['save_plots_loc', '"""entropy_100.pdf"""'], {}), "(save_plots_loc, 'entropy_100.pdf')\n", (23196, 23231), False, 'import os\n'), ((1180, 1216), 'math.ceil', 'math.ceil', (['(num_bins * confidence - 1)'], {}), '(num_bins * confidence - 1)\n', (1189, 1216), False, 'import math\n'), ((8664, 8698), 'torch.ones', 'torch.ones', (['(scaled_model.iters + 1)'], {}), '(scaled_model.iters + 1)\n', (8674, 8698), False, 'import torch\n'), ((16459, 16510), 'torch.sqrt', 'torch.sqrt', (['(1 + 4 * (1 - confidences) / confidences)'], {}), '(1 + 4 * (1 - confidences) / confidences)\n', (16469, 16510), False, 'import torch\n'), ((16542, 16611), 'torch.sqrt', 'torch.sqrt', (['(1 + 4 * (1 - (confidences - diff)) / (confidences - diff))'], {}), '(1 + 4 * (1 - (confidences - diff)) / (confidences - diff))\n', (16552, 16611), False, 'import torch\n'), ((19252, 19277), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (19266, 19277), False, 'import torch\n'), ((19311, 19340), 'torch.linspace', 'torch.linspace', (['(1)', '(100)', '(10000)'], {}), '(1, 100, 10000)\n', (19325, 19340), False, 'import torch\n'), ((19376, 19400), 'torch.tensor', 'torch.tensor', (['[0.6, 0.3]'], {}), '([0.6, 0.3])\n', (19388, 19400), False, 'import torch\n'), ((19450, 19463), 'math.log', 'math.log', (['(0.6)'], {}), '(0.6)\n', (19458, 19463), False, 'import math\n'), ((19465, 19478), 'math.log', 'math.log', (['(0.3)'], {}), '(0.3)\n', (19473, 19478), False, 'import math\n'), ((19480, 19493), 'math.log', 'math.log', (['(0.1)'], {}), '(0.1)\n', (19488, 19493), False, 'import math\n'), ((20056, 21966), 'torch.tensor', 'torch.tensor', (['[8.40426795e-13, 1.4278050742e-08, 3.9925965312e-11, 7.8529644267e-14, \n 1.1687384394e-10, 9.7083494401e-14, 7.9007286824e-13, 1.1496912363e-13,\n 5.3773496073e-12, 7.6878958755e-10, 8.9035365747e-09, 5.3947623278e-12,\n 2.4426896617e-10, 2.2383541201e-11, 1.2707822294e-10, 2.1816673468e-10,\n 5.0172353387e-15, 1.6286461112e-12, 5.1560413925e-12, 8.6647043707e-12,\n 1.8531972623e-09, 2.7630087107e-10, 7.1155463308e-16, 3.7386840152e-11,\n 5.1252758981e-11, 3.1181262433e-11, 2.6755674298e-06, 0.99959415197, \n 1.9884007635e-11, 0.00011077156523, 1.7637266647e-11, 2.2995503279e-09,\n 7.3481587606e-06, 1.212966394e-09, 3.2103027479e-05, 5.2368401282e-11, \n 2.3453745612e-09, 2.9135565488e-11, 2.9145277771e-12, 3.5043259961e-11,\n 9.6558103581e-14, 1.9227650583e-09, 1.5236486206e-07, 4.5127812598e-09,\n 8.7795990112e-05, 3.4632095776e-05, 3.3900747098e-08, 5.3773188159e-12,\n 4.9334299666e-13, 4.7792599739e-11, 9.7179556069e-12, 2.9196653486e-05,\n 1.25586854e-15, 1.9376671101e-10, 2.1402189916e-12, 1.7672345792e-12, \n 4.2892519397e-11, 8.4134947273e-12, 1.5762311595e-11, 2.2964830992e-12,\n 1.1481499413e-14, 4.4955605211e-11, 2.638250729e-11, 1.0882557433e-07, \n 3.2325153665e-10, 1.4755903444e-10, 2.8219235976e-11, 1.1946493714e-06,\n 5.6229808136e-12, 4.9992823214e-09, 1.2134488726e-11, 2.2948927203e-09,\n 1.0463446776e-09, 2.0963939562e-07, 1.3484322992e-08, 1.1520114862e-09,\n 1.9648471489e-13, 6.5380464775e-07, 2.2771805561e-06, 6.864001121e-12, \n 2.4578919692e-05, 2.0577129952e-13, 2.1242145684e-13, 2.3415527872e-13,\n 4.5339165755e-10, 4.0936140522e-07, 9.8099343132e-16, 9.6455538001e-11,\n 4.4561368484e-11, 4.307988688e-10, 1.0865559563e-09, 7.0311572927e-05, \n 6.688091514e-14, 4.8056293167e-08, 3.0499626199e-16, 5.0754581093e-11, \n 4.9211958293e-12, 9.5986638371e-07, 1.9191167766e-08, 
1.8387422074e-07]'], {}), '([8.40426795e-13, 1.4278050742e-08, 3.9925965312e-11, \n 7.8529644267e-14, 1.1687384394e-10, 9.7083494401e-14, 7.9007286824e-13,\n 1.1496912363e-13, 5.3773496073e-12, 7.6878958755e-10, 8.9035365747e-09,\n 5.3947623278e-12, 2.4426896617e-10, 2.2383541201e-11, 1.2707822294e-10,\n 2.1816673468e-10, 5.0172353387e-15, 1.6286461112e-12, 5.1560413925e-12,\n 8.6647043707e-12, 1.8531972623e-09, 2.7630087107e-10, 7.1155463308e-16,\n 3.7386840152e-11, 5.1252758981e-11, 3.1181262433e-11, 2.6755674298e-06,\n 0.99959415197, 1.9884007635e-11, 0.00011077156523, 1.7637266647e-11, \n 2.2995503279e-09, 7.3481587606e-06, 1.212966394e-09, 3.2103027479e-05, \n 5.2368401282e-11, 2.3453745612e-09, 2.9135565488e-11, 2.9145277771e-12,\n 3.5043259961e-11, 9.6558103581e-14, 1.9227650583e-09, 1.5236486206e-07,\n 4.5127812598e-09, 8.7795990112e-05, 3.4632095776e-05, 3.3900747098e-08,\n 5.3773188159e-12, 4.9334299666e-13, 4.7792599739e-11, 9.7179556069e-12,\n 2.9196653486e-05, 1.25586854e-15, 1.9376671101e-10, 2.1402189916e-12, \n 1.7672345792e-12, 4.2892519397e-11, 8.4134947273e-12, 1.5762311595e-11,\n 2.2964830992e-12, 1.1481499413e-14, 4.4955605211e-11, 2.638250729e-11, \n 1.0882557433e-07, 3.2325153665e-10, 1.4755903444e-10, 2.8219235976e-11,\n 1.1946493714e-06, 5.6229808136e-12, 4.9992823214e-09, 1.2134488726e-11,\n 2.2948927203e-09, 1.0463446776e-09, 2.0963939562e-07, 1.3484322992e-08,\n 1.1520114862e-09, 1.9648471489e-13, 6.5380464775e-07, 2.2771805561e-06,\n 6.864001121e-12, 2.4578919692e-05, 2.0577129952e-13, 2.1242145684e-13, \n 2.3415527872e-13, 4.5339165755e-10, 4.0936140522e-07, 9.8099343132e-16,\n 9.6455538001e-11, 4.4561368484e-11, 4.307988688e-10, 1.0865559563e-09, \n 7.0311572927e-05, 6.688091514e-14, 4.8056293167e-08, 3.0499626199e-16, \n 5.0754581093e-11, 4.9211958293e-12, 9.5986638371e-07, 1.9191167766e-08,\n 1.8387422074e-07])\n', (20068, 21966), False, 'import torch\n'), ((19789, 19821), 'torch.ones', 'torch.ones', (['starting_point.shape'], {}), '(starting_point.shape)\n', (19799, 19821), False, 'import torch\n'), ((20005, 20031), 'torch.log2', 'torch.log2', (['ws_points_full'], {}), '(ws_points_full)\n', (20015, 20031), False, 'import torch\n'), ((22133, 22164), 'torch.ones', 'torch.ones', (['softmaxes_100.shape'], {}), '(softmaxes_100.shape)\n', (22143, 22164), False, 'import torch\n'), ((22252, 22274), 'torch.log2', 'torch.log2', (['ws_points2'], {}), '(ws_points2)\n', (22262, 22274), False, 'import torch\n'), ((19915, 19938), 'torch.sum', 'torch.sum', (['ws_points', '(1)'], {}), '(ws_points, 1)\n', (19924, 19938), False, 'import torch\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceConnectorResult',
'AwaitableGetServiceConnectorResult',
'get_service_connector',
]
@pulumi.output_type
class GetServiceConnectorResult:
"""
A collection of values returned by getServiceConnector.
"""
def __init__(__self__, compartment_id=None, defined_tags=None, description=None, display_name=None, freeform_tags=None, id=None, lifecyle_details=None, service_connector_id=None, source=None, state=None, system_tags=None, target=None, tasks=None, time_created=None, time_updated=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecyle_details and not isinstance(lifecyle_details, str):
raise TypeError("Expected argument 'lifecyle_details' to be a str")
pulumi.set(__self__, "lifecyle_details", lifecyle_details)
if service_connector_id and not isinstance(service_connector_id, str):
raise TypeError("Expected argument 'service_connector_id' to be a str")
pulumi.set(__self__, "service_connector_id", service_connector_id)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if system_tags and not isinstance(system_tags, dict):
raise TypeError("Expected argument 'system_tags' to be a dict")
pulumi.set(__self__, "system_tags", system_tags)
if target and not isinstance(target, dict):
raise TypeError("Expected argument 'target' to be a dict")
pulumi.set(__self__, "target", target)
if tasks and not isinstance(tasks, list):
raise TypeError("Expected argument 'tasks' to be a list")
pulumi.set(__self__, "tasks", tasks)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the resource. Avoid entering confidential information.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A user-friendly name. It does not have to be unique, and it is changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecyleDetails")
def lifecyle_details(self) -> str:
"""
A message describing the current state in more detail. For example, the message might provide actionable information for a resource in a `FAILED` state.
"""
return pulumi.get(self, "lifecyle_details")
@property
@pulumi.getter(name="serviceConnectorId")
def service_connector_id(self) -> str:
return pulumi.get(self, "service_connector_id")
@property
@pulumi.getter
def source(self) -> 'outputs.GetServiceConnectorSourceResult':
"""
An object that represents the source of the flow defined by the service connector. An example source is the VCNFlow logs within the NetworkLogs group. For more information about flows defined by service connectors, see [Service Connector Hub Overview](https://docs.cloud.oracle.com/iaas/Content/service-connector-hub/overview.htm).
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the service connector.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Mapping[str, Any]:
"""
The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
"""
return pulumi.get(self, "system_tags")
@property
@pulumi.getter
def target(self) -> 'outputs.GetServiceConnectorTargetResult':
"""
An object that represents the target of the flow defined by the service connector. An example target is a stream. For more information about flows defined by service connectors, see [Service Connector Hub Overview](https://docs.cloud.oracle.com/iaas/Content/service-connector-hub/overview.htm).
"""
return pulumi.get(self, "target")
@property
@pulumi.getter
def tasks(self) -> Sequence['outputs.GetServiceConnectorTaskResult']:
"""
The list of tasks.
"""
return pulumi.get(self, "tasks")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The date and time when the service connector was created. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The date and time when the service connector was updated. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_updated")
class AwaitableGetServiceConnectorResult(GetServiceConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceConnectorResult(
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
description=self.description,
display_name=self.display_name,
freeform_tags=self.freeform_tags,
id=self.id,
lifecyle_details=self.lifecyle_details,
service_connector_id=self.service_connector_id,
source=self.source,
state=self.state,
system_tags=self.system_tags,
target=self.target,
tasks=self.tasks,
time_created=self.time_created,
time_updated=self.time_updated)
def get_service_connector(service_connector_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceConnectorResult:
"""
This data source provides details about a specific Service Connector resource in Oracle Cloud Infrastructure Service Connector Hub service.
Gets the specified service connector's configuration information.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_service_connector = oci.sch.get_service_connector(service_connector_id=oci_sch_service_connector["test_service_connector"]["id"])
```
:param str service_connector_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
"""
__args__ = dict()
__args__['serviceConnectorId'] = service_connector_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:sch/getServiceConnector:getServiceConnector', __args__, opts=opts, typ=GetServiceConnectorResult).value
return AwaitableGetServiceConnectorResult(
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
description=__ret__.description,
display_name=__ret__.display_name,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
lifecyle_details=__ret__.lifecyle_details,
service_connector_id=__ret__.service_connector_id,
source=__ret__.source,
state=__ret__.state,
system_tags=__ret__.system_tags,
target=__ret__.target,
tasks=__ret__.tasks,
time_created=__ret__.time_created,
time_updated=__ret__.time_updated)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] |
[((3780, 3815), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""compartmentId"""'}), "(name='compartmentId')\n", (3793, 3815), False, 'import pulumi\n'), ((4085, 4118), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""definedTags"""'}), "(name='definedTags')\n", (4098, 4118), False, 'import pulumi\n'), ((4617, 4650), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""displayName"""'}), "(name='displayName')\n", (4630, 4650), False, 'import pulumi\n'), ((4902, 4936), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""freeformTags"""'}), "(name='freeformTags')\n", (4915, 4936), False, 'import pulumi\n'), ((5482, 5519), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""lifecyleDetails"""'}), "(name='lifecyleDetails')\n", (5495, 5519), False, 'import pulumi\n'), ((5816, 5856), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""serviceConnectorId"""'}), "(name='serviceConnectorId')\n", (5829, 5856), False, 'import pulumi\n'), ((6662, 6694), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""systemTags"""'}), "(name='systemTags')\n", (6675, 6694), False, 'import pulumi\n'), ((7852, 7885), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""timeCreated"""'}), "(name='timeCreated')\n", (7865, 7885), False, 'import pulumi\n'), ((8184, 8217), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""timeUpdated"""'}), "(name='timeUpdated')\n", (8197, 8217), False, 'import pulumi\n'), ((1046, 1100), 'pulumi.set', 'pulumi.set', (['__self__', '"""compartment_id"""', 'compartment_id'], {}), "(__self__, 'compartment_id', compartment_id)\n", (1056, 1100), False, 'import pulumi\n'), ((1250, 1300), 'pulumi.set', 'pulumi.set', (['__self__', '"""defined_tags"""', 'defined_tags'], {}), "(__self__, 'defined_tags', defined_tags)\n", (1260, 1300), False, 'import pulumi\n'), ((1445, 1493), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1455, 1493), False, 'import pulumi\n'), ((1641, 1691), 'pulumi.set', 'pulumi.set', (['__self__', '"""display_name"""', 'display_name'], {}), "(__self__, 'display_name', display_name)\n", (1651, 1691), False, 'import pulumi\n'), ((1844, 1896), 'pulumi.set', 'pulumi.set', (['__self__', '"""freeform_tags"""', 'freeform_tags'], {}), "(__self__, 'freeform_tags', freeform_tags)\n", (1854, 1896), False, 'import pulumi\n'), ((2014, 2044), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (2024, 2044), False, 'import pulumi\n'), ((2204, 2262), 'pulumi.set', 'pulumi.set', (['__self__', '"""lifecyle_details"""', 'lifecyle_details'], {}), "(__self__, 'lifecyle_details', lifecyle_details)\n", (2214, 2262), False, 'import pulumi\n'), ((2434, 2500), 'pulumi.set', 'pulumi.set', (['__self__', '"""service_connector_id"""', 'service_connector_id'], {}), "(__self__, 'service_connector_id', service_connector_id)\n", (2444, 2500), False, 'import pulumi\n'), ((2632, 2670), 'pulumi.set', 'pulumi.set', (['__self__', '"""source"""', 'source'], {}), "(__self__, 'source', source)\n", (2642, 2670), False, 'import pulumi\n'), ((2797, 2833), 'pulumi.set', 'pulumi.set', (['__self__', '"""state"""', 'state'], {}), "(__self__, 'state', state)\n", (2807, 2833), False, 'import pulumi\n'), ((2980, 3028), 'pulumi.set', 'pulumi.set', (['__self__', '"""system_tags"""', 'system_tags'], {}), "(__self__, 'system_tags', system_tags)\n", (2990, 3028), False, 'import pulumi\n'), ((3160, 3198), 'pulumi.set', 'pulumi.set', (['__self__', '"""target"""', 'target'], {}), "(__self__, 'target', target)\n", 
(3170, 3198), False, 'import pulumi\n'), ((3327, 3363), 'pulumi.set', 'pulumi.set', (['__self__', '"""tasks"""', 'tasks'], {}), "(__self__, 'tasks', tasks)\n", (3337, 3363), False, 'import pulumi\n'), ((3511, 3561), 'pulumi.set', 'pulumi.set', (['__self__', '"""time_created"""', 'time_created'], {}), "(__self__, 'time_created', time_created)\n", (3521, 3561), False, 'import pulumi\n'), ((3709, 3759), 'pulumi.set', 'pulumi.set', (['__self__', '"""time_updated"""', 'time_updated'], {}), "(__self__, 'time_updated', time_updated)\n", (3719, 3759), False, 'import pulumi\n'), ((4030, 4064), 'pulumi.get', 'pulumi.get', (['self', '"""compartment_id"""'], {}), "(self, 'compartment_id')\n", (4040, 4064), False, 'import pulumi\n'), ((4343, 4375), 'pulumi.get', 'pulumi.get', (['self', '"""defined_tags"""'], {}), "(self, 'defined_tags')\n", (4353, 4375), False, 'import pulumi\n'), ((4565, 4596), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (4575, 4596), False, 'import pulumi\n'), ((4849, 4881), 'pulumi.get', 'pulumi.get', (['self', '"""display_name"""'], {}), "(self, 'display_name')\n", (4859, 4881), False, 'import pulumi\n'), ((5185, 5218), 'pulumi.get', 'pulumi.get', (['self', '"""freeform_tags"""'], {}), "(self, 'freeform_tags')\n", (5195, 5218), False, 'import pulumi\n'), ((5439, 5461), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (5449, 5461), False, 'import pulumi\n'), ((5759, 5795), 'pulumi.get', 'pulumi.get', (['self', '"""lifecyle_details"""'], {}), "(self, 'lifecyle_details')\n", (5769, 5795), False, 'import pulumi\n'), ((5915, 5955), 'pulumi.get', 'pulumi.get', (['self', '"""service_connector_id"""'], {}), "(self, 'service_connector_id')\n", (5925, 5955), False, 'import pulumi\n'), ((6436, 6462), 'pulumi.get', 'pulumi.get', (['self', '"""source"""'], {}), "(self, 'source')\n", (6446, 6462), False, 'import pulumi\n'), ((6616, 6641), 'pulumi.get', 'pulumi.get', (['self', '"""state"""'], {}), "(self, 'state')\n", (6626, 6641), False, 'import pulumi\n'), ((7130, 7161), 'pulumi.get', 'pulumi.get', (['self', '"""system_tags"""'], {}), "(self, 'system_tags')\n", (7140, 7161), False, 'import pulumi\n'), ((7605, 7631), 'pulumi.get', 'pulumi.get', (['self', '"""target"""'], {}), "(self, 'target')\n", (7615, 7631), False, 'import pulumi\n'), ((7806, 7831), 'pulumi.get', 'pulumi.get', (['self', '"""tasks"""'], {}), "(self, 'tasks')\n", (7816, 7831), False, 'import pulumi\n'), ((8131, 8163), 'pulumi.get', 'pulumi.get', (['self', '"""time_created"""'], {}), "(self, 'time_created')\n", (8141, 8163), False, 'import pulumi\n'), ((8463, 8495), 'pulumi.get', 'pulumi.get', (['self', '"""time_updated"""'], {}), "(self, 'time_updated')\n", (8473, 8495), False, 'import pulumi\n'), ((10245, 10267), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (10265, 10267), False, 'import pulumi\n'), ((10359, 10487), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""oci:sch/getServiceConnector:getServiceConnector"""', '__args__'], {'opts': 'opts', 'typ': 'GetServiceConnectorResult'}), "('oci:sch/getServiceConnector:getServiceConnector',\n __args__, opts=opts, typ=GetServiceConnectorResult)\n", (10380, 10487), False, 'import pulumi\n')]
|
import imutils
import cv2
import numpy as np
import math
from math import sqrt
def find_robot_orientation(image):
robot = {}
robot['angle'] = []
robot['direction'] = []
robotLower = (139, 227, 196)
robotUpper = (255, 255, 255)
distances = []
# img = cv2.imread('all_color_terrain_with_robot.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, robotLower, robotUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
# find contours in thresholded image, then grab the largest
# one
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
c = max(cnts, key=cv2.contourArea)
M = cv2.moments(c)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
print(extBot, extLeft, extRight, extTop, (cx, cy))
# Take care of the extra point, because there are only 3 sides,
    # the distance max will be flawed if the far point is 2 points (i.e. bottom and right)
if abs(extLeft[0] - extRight[0]) < 10 and abs(extLeft[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extLeft[0] - extTop[0]) < 10 and abs(extLeft[1] - extTop[1]) < 10:
extTop = (cx, cy)
if abs(extLeft[0] - extBot[0]) < 10 and abs(extLeft[1] - extBot[1]) < 10:
extBot = (cx, cy)
if abs(extBot[0] - extRight[0]) < 10 and abs(extBot[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extTop[0] - extRight[0]) < 10 and abs(extTop[1] - extRight[1]) < 10:
extRight = (cx, cy)
# draw the outline of the object, then draw each of the
# extreme points, where the left-most is red, right-most
# is green, top-most is blue, and bottom-most is teal
cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
cv2.circle(image, (cx, cy), 7, (255, 0, 255), -1)
cv2.circle(image, extLeft, 6, (0, 0, 255), -1)
cv2.circle(image, extRight, 6, (0, 255, 0), -1)
cv2.circle(image, extTop, 6, (255, 0, 0), -1)
cv2.circle(image, extBot, 6, (255, 255, 0), -1)
# create list of extreme points
extreme_points = (extLeft, extRight, extTop, extBot)
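    # For each extreme point, sum its squared distances to all four extreme
    # points; the top of the isosceles triangle is the point farthest from the rest.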
for i in range(0, len(extreme_points)):
dist = sqrt((extreme_points[i][0] - extLeft[0]) ** 2 +
(extreme_points[i][1] - extLeft[1]) ** 2 +
(extreme_points[i][0] - extRight[0]) ** 2 +
(extreme_points[i][1] - extRight[1]) ** 2 +
(extreme_points[i][0] - extBot[0]) ** 2 +
(extreme_points[i][1] - extBot[1]) ** 2 +
(extreme_points[i][0] - extTop[0]) ** 2 +
(extreme_points[i][1] - extTop[1]) ** 2)
distances += [dist]
    index_max = np.argmax(distances)
print(distances)
    top_triangle = (extreme_points[index_max])
print(top_triangle)
center = (cx, cy)
# Create vector containing the top of the isosceles triangle
# and the center of the contour that was found
centerline_points = [center, top_triangle]
# draw a line through the triangle in the direction of the robot motion
rows, cols = image.shape[:2]
[vx, vy, x, y] = cv2.fitLine(np.float32(centerline_points), cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((cols - x) * vy / vx) + y)
cv2.line(image, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
# find the angle of the robot
rad = math.atan2(vx, vy)
angle = math.degrees(rad)
'''
# fix the angle such that the tip pointing up is 0deg,
# movement to the right of that is +deg
# movement to the left is -deg
# angle measurements are from -180:180
'''
if top_triangle[0] < center[0]:
angle = -angle
if top_triangle[0] > center[0]:
angle = 180 - angle
angle = round(angle)
print(angle)
cv2.putText(image, str(angle), (int(cx) - 50, int(cy) - 50), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 2,
cv2.LINE_AA)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
return angle, center
'''
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', img)
cv2.destroyAllWindows()
'''
|
[
"cv2.drawContours",
"cv2.inRange",
"cv2.erode",
"cv2.line",
"math.degrees",
"numpy.argmax",
"imutils.is_cv2",
"cv2.imshow",
"math.sqrt",
"cv2.circle",
"math.atan2",
"cv2.cvtColor",
"cv2.moments",
"cv2.dilate",
"cv2.waitKey",
"numpy.float32"
] |
[((336, 374), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (348, 374), False, 'import cv2\n'), ((386, 426), 'cv2.inRange', 'cv2.inRange', (['hsv', 'robotLower', 'robotUpper'], {}), '(hsv, robotLower, robotUpper)\n', (397, 426), False, 'import cv2\n'), ((438, 473), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (447, 473), False, 'import cv2\n'), ((485, 521), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (495, 521), False, 'import cv2\n'), ((899, 913), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (910, 913), False, 'import cv2\n'), ((2154, 2204), 'cv2.drawContours', 'cv2.drawContours', (['image', '[c]', '(-1)', '(0, 255, 255)', '(2)'], {}), '(image, [c], -1, (0, 255, 255), 2)\n', (2170, 2204), False, 'import cv2\n'), ((2209, 2258), 'cv2.circle', 'cv2.circle', (['image', '(cx, cy)', '(7)', '(255, 0, 255)', '(-1)'], {}), '(image, (cx, cy), 7, (255, 0, 255), -1)\n', (2219, 2258), False, 'import cv2\n'), ((2263, 2309), 'cv2.circle', 'cv2.circle', (['image', 'extLeft', '(6)', '(0, 0, 255)', '(-1)'], {}), '(image, extLeft, 6, (0, 0, 255), -1)\n', (2273, 2309), False, 'import cv2\n'), ((2314, 2361), 'cv2.circle', 'cv2.circle', (['image', 'extRight', '(6)', '(0, 255, 0)', '(-1)'], {}), '(image, extRight, 6, (0, 255, 0), -1)\n', (2324, 2361), False, 'import cv2\n'), ((2366, 2411), 'cv2.circle', 'cv2.circle', (['image', 'extTop', '(6)', '(255, 0, 0)', '(-1)'], {}), '(image, extTop, 6, (255, 0, 0), -1)\n', (2376, 2411), False, 'import cv2\n'), ((2416, 2463), 'cv2.circle', 'cv2.circle', (['image', 'extBot', '(6)', '(255, 255, 0)', '(-1)'], {}), '(image, extBot, 6, (255, 255, 0), -1)\n', (2426, 2463), False, 'import cv2\n'), ((3148, 3168), 'numpy.argmax', 'np.argmax', (['distances'], {}), '(distances)\n', (3157, 3168), True, 'import numpy as np\n'), ((3732, 3795), 'cv2.line', 'cv2.line', (['image', '(cols - 1, righty)', '(0, lefty)', '(0, 255, 0)', '(2)'], {}), '(image, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)\n', (3740, 3795), False, 'import cv2\n'), ((3840, 3858), 'math.atan2', 'math.atan2', (['vx', 'vy'], {}), '(vx, vy)\n', (3850, 3858), False, 'import math\n'), ((3871, 3888), 'math.degrees', 'math.degrees', (['rad'], {}), '(rad)\n', (3883, 3888), False, 'import math\n'), ((4428, 4454), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'image'], {}), "('Image', image)\n", (4438, 4454), False, 'import cv2\n'), ((4459, 4473), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4470, 4473), False, 'import cv2\n'), ((822, 838), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (836, 838), False, 'import imutils\n'), ((2617, 2985), 'math.sqrt', 'sqrt', (['((extreme_points[i][0] - extLeft[0]) ** 2 + (extreme_points[i][1] - extLeft\n [1]) ** 2 + (extreme_points[i][0] - extRight[0]) ** 2 + (extreme_points\n [i][1] - extRight[1]) ** 2 + (extreme_points[i][0] - extBot[0]) ** 2 + \n (extreme_points[i][1] - extBot[1]) ** 2 + (extreme_points[i][0] -\n extTop[0]) ** 2 + (extreme_points[i][1] - extTop[1]) ** 2)'], {}), '((extreme_points[i][0] - extLeft[0]) ** 2 + (extreme_points[i][1] -\n extLeft[1]) ** 2 + (extreme_points[i][0] - extRight[0]) ** 2 + (\n extreme_points[i][1] - extRight[1]) ** 2 + (extreme_points[i][0] -\n extBot[0]) ** 2 + (extreme_points[i][1] - extBot[1]) ** 2 + (\n extreme_points[i][0] - extTop[0]) ** 2 + (extreme_points[i][1] - extTop\n [1]) ** 2)\n', (2621, 2985), False, 'from math import sqrt\n'), ((3588, 3617), 
'numpy.float32', 'np.float32', (['centerline_points'], {}), '(centerline_points)\n', (3598, 3617), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ntp
short_description: Manage NTP configurations about an ESXi host
description:
- This module can be used to manage NTP configuration information about an ESXi host.
- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
version_added: '2.5'
author:
- <NAME> (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- NTP settings are applied to every ESXi host system in the given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- NTP settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
ntp_servers:
description:
- "IP or FQDN of NTP server/s."
- This accepts a list of NTP servers. For multiple servers, please look at the examples.
required: True
state:
    description:
    - "present: Add NTP server/s, if specified server/s are absent else do nothing."
- "absent: Remove NTP server/s, if specified server/s are present else do nothing."
default: present
choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set NTP setting for all ESXi Host in given Cluster
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ <PASSWORD> }}'
cluster_name: cluster_name
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Set NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ <PASSWORD> }}'
esxi_hostname: '{{ esxi_hostname }}'
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Remove NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ <PASSWORD> }}'
esxi_hostname: '{{ esxi_hostname }}'
state: absent
ntp_servers:
- bad.server.ntp.org
delegate_to: localhost
'''
RETURN = r'''#
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareNtpConfigManager(PyVmomi):
def __init__(self, module):
super(VmwareNtpConfigManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.ntp_servers = self.params.get('ntp_servers', list())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.results = {}
self.desired_state = module.params['state']
def update_ntp_servers(self, host, ntp_servers, operation='add'):
changed = False
host_date_time_manager = host.configManager.dateTimeSystem
if host_date_time_manager:
available_ntp_servers = host_date_time_manager.dateTimeInfo.ntpConfig.server
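            # drop any empty entries from the host's current NTP server list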
available_ntp_servers = list(filter(None, available_ntp_servers))
if operation == 'add':
available_ntp_servers = available_ntp_servers + ntp_servers
elif operation == 'delete':
for server in ntp_servers:
if server in available_ntp_servers:
available_ntp_servers.remove(server)
ntp_config_spec = vim.host.NtpConfig()
ntp_config_spec.server = available_ntp_servers
date_config_spec = vim.host.DateTimeConfig()
date_config_spec.ntpConfig = ntp_config_spec
try:
host_date_time_manager.UpdateDateTimeConfig(date_config_spec)
self.results[host.name]['after_change_ntp_servers'] = host_date_time_manager.dateTimeInfo.ntpConfig.server
changed = True
except vim.fault.HostConfigFault as e:
self.results[host.name]['error'] = to_native(e.msg)
except Exception as e:
self.results[host.name]['error'] = to_native(e)
return changed
def check_host_state(self):
change_list = []
changed = False
for host in self.hosts:
ntp_servers_to_change = self.check_ntp_servers(host=host)
self.results[host.name].update(dict(
ntp_servers_to_change=ntp_servers_to_change,
desired_state=self.desired_state,
)
)
if not ntp_servers_to_change:
change_list.append(False)
self.results[host.name]['current_state'] = self.desired_state
elif ntp_servers_to_change:
if self.desired_state == 'present':
changed = self.update_ntp_servers(host=host, ntp_servers=ntp_servers_to_change)
change_list.append(changed)
elif self.desired_state == 'absent':
changed = self.update_ntp_servers(host=host, ntp_servers=ntp_servers_to_change, operation='delete')
change_list.append(changed)
self.results[host.name]['current_state'] = self.desired_state
if any(change_list):
changed = True
self.module.exit_json(changed=changed, results=self.results)
def check_ntp_servers(self, host):
update_ntp_list = []
host_datetime_system = host.configManager.dateTimeSystem
if host_datetime_system:
ntp_servers = host_datetime_system.dateTimeInfo.ntpConfig.server
self.results[host.name] = dict(available_ntp_servers=ntp_servers)
for ntp_server in self.ntp_servers:
if self.desired_state == 'present' and ntp_server not in ntp_servers:
update_ntp_list.append(ntp_server)
if self.desired_state == 'absent' and ntp_server in ntp_servers:
update_ntp_list.append(ntp_server)
return update_ntp_list
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
ntp_servers=dict(type='list', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_host_ntp_config = VmwareNtpConfigManager(module)
vmware_host_ntp_config.check_host_state()
if __name__ == "__main__":
main()
|
[
"ansible.module_utils.basic.AnsibleModule",
"pyVmomi.vim.host.NtpConfig",
"pyVmomi.vim.host.DateTimeConfig",
"ansible.module_utils.vmware.vmware_argument_spec",
"ansible.module_utils._text.to_native"
] |
[((6750, 6772), 'ansible.module_utils.vmware.vmware_argument_spec', 'vmware_argument_spec', ([], {}), '()\n', (6770, 6772), False, 'from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi\n'), ((7066, 7165), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'argument_spec', 'required_one_of': "[['cluster_name', 'esxi_hostname']]"}), "(argument_spec=argument_spec, required_one_of=[['cluster_name',\n 'esxi_hostname']])\n", (7079, 7165), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((4153, 4173), 'pyVmomi.vim.host.NtpConfig', 'vim.host.NtpConfig', ([], {}), '()\n', (4171, 4173), False, 'from pyVmomi import vim\n'), ((4264, 4289), 'pyVmomi.vim.host.DateTimeConfig', 'vim.host.DateTimeConfig', ([], {}), '()\n', (4287, 4289), False, 'from pyVmomi import vim\n'), ((4698, 4714), 'ansible.module_utils._text.to_native', 'to_native', (['e.msg'], {}), '(e.msg)\n', (4707, 4714), False, 'from ansible.module_utils._text import to_native\n'), ((4801, 4813), 'ansible.module_utils._text.to_native', 'to_native', (['e'], {}), '(e)\n', (4810, 4813), False, 'from ansible.module_utils._text import to_native\n')]
|
import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
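    # pixel values in the .jf files run from 0 to 2, so dividing by 2 scales them to [0, 1]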
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
|
[
"os.path.exists",
"keras.datasets.mnist.load_data",
"numpy.array",
"numpy.concatenate",
"os.system"
] |
[((182, 199), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (197, 199), False, 'from keras.datasets import mnist\n'), ((209, 242), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (223, 242), True, 'import numpy as np\n'), ((251, 284), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (265, 284), True, 'import numpy as np\n'), ((1089, 1103), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1097, 1103), True, 'import numpy as np\n'), ((1332, 1346), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1340, 1346), True, 'import numpy as np\n'), ((1523, 1566), 'numpy.concatenate', 'np.concatenate', (['(labels_train, labels_test)'], {}), '((labels_train, labels_test))\n', (1537, 1566), True, 'import numpy as np\n'), ((463, 507), 'os.path.exists', 'os.path.exists', (["(data_path + '/usps_train.jf')"], {}), "(data_path + '/usps_train.jf')\n", (477, 507), False, 'import os\n'), ((804, 855), 'os.system', 'os.system', (["('gunzip %s/usps_train.jf.gz' % data_path)"], {}), "('gunzip %s/usps_train.jf.gz' % data_path)\n", (813, 855), False, 'import os\n'), ((864, 914), 'os.system', 'os.system', (["('gunzip %s/usps_test.jf.gz' % data_path)"], {}), "('gunzip %s/usps_test.jf.gz' % data_path)\n", (873, 914), False, 'import os\n'), ((522, 569), 'os.path.exists', 'os.path.exists', (["(data_path + '/usps_train.jf.gz')"], {}), "(data_path + '/usps_train.jf.gz')\n", (536, 569), False, 'import os\n'), ((581, 692), 'os.system', 'os.system', (["('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s'\n % data_path)"], {}), "(\n 'wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s'\n % data_path)\n", (590, 692), False, 'import os\n'), ((695, 805), 'os.system', 'os.system', (["('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s'\n % data_path)"], {}), "(\n 'wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s'\n % data_path)\n", (704, 805), False, 'import os\n'), ((1409, 1448), 'numpy.concatenate', 'np.concatenate', (['(data_train, data_test)'], {}), '((data_train, data_test))\n', (1423, 1448), True, 'import numpy as np\n')]
|
"""
This module defines a class called "balto_gui" that can be used to
create a graphical user interface (GUI) for downloading data from
OpenDAP servers from and into a Jupyter notebook. If used with Binder,
this GUI runs in a browser window and does not require the user to
install anything on their computer. However, this module should be
included in the same directory as the Jupyter notebook.
"""
#------------------------------------------------------------------------
#
# Copyright (C) 2020. <NAME>
#
#------------------------------------------------------------------------
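#------------------------------------------------------------------------
# Example usage (a minimal sketch, assuming this module is saved as
# balto_gui.py in the same directory as the Jupyter notebook):
#
#     from balto_gui import balto_gui
#     gui = balto_gui()
#     gui.show_gui()
#
#------------------------------------------------------------------------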
from ipyleaflet import Map, basemaps, FullScreenControl
from ipyleaflet import MeasureControl, Rectangle
## from ipyleaflet import ScaleControl # (doesn't work)
from traitlets import Tuple
## import ipyleaflet as ipyl
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, HTML
## from IPython.core.display import display
## from IPython.lib.display import display
import pydap.client # (for open_url, etc.)
import requests # (used by get_filenames() )
import json
import datetime # (used by get_duration() )
import copy
import numpy as np
import balto_plot as bp
#------------------------------------------------------------------------
#
# class balto_gui
# __init__()
# pix_str()
# show_gui()
# make_acc_gui()
# make_tab_gui()
# make_data_panel()
# reset_data_panel()
# make_map_panel()
# make_dates_panel()
# make_download_panel()
# make_prefs_panel()
# #--------------------------
# get_map_bounds()
# replace_map_bounds()
# replace_map_bounds2()
# update_map_bounds()
# zoom_out_to_new_bounds()
# --------------------------
# get_url_dir_filenames()
# update_filename_list()
# get_opendap_file_url()
# open_dataset()
# update_data_panel()
# --------------------------
# update_var_info()
# get_all_var_shortnames()
# get_all_var_longnames()
# get_all_var_units()
# --------------------------
# get_var_shortname()
# get_var_longname()
# get_var_units()
# get_var_shape()
# get_var_dimensions()
# get_var_dtype()
# get_var_attributes()
# get_var_time_attributes()
# -------------------------------
# update_datetime_panel()
# get_years_from_time_since()
# clear_datetime_notes()
# append_datetime_notes()
# list_to_string()
# -------------------------------
# pad_with_zeros()
# get_actual_time_units()
# get_time_delta_str()
# get_datetime_obj_from_str()
# get_datetime_obj_from_one_str()
# get_start_datetime_obj()
# get_end_datetime_obj()
# get_dt_from_datetime_str()
# split_datetime_str()
# split_date_str()
# split_time_str()
# get_datetime_from_time_since()
# get_time_since_from_datetime()
# get_month_difference()
# -------------------------------
# get_new_time_index_range()
# get_new_lat_index_range()
# get_new_lon_index_range()
# -------------------------------
# get_duration() ## not used yet
# ----------------------------
# get_download_format()
# clear_download_log()
# append_download_log()
# print_user_choices()
# download_data()
# show_grid()
# -------------------------------
# get_opendap_package() # (in prefs panel)
# ----------------------------
# get_abbreviated_var_name()
# get_possible_svo_names()
#
#------------------------------
# Example GES DISC opendap URL
#------------------------------
# https://gpm1.gesdisc.eosdis.nasa.gov/opendap/GPM_L3/GPM_3IMERGHHE.05/2014/091/
# 3B-HHR-E.MS.MRG.3IMERG.20140401-S000000-E002959.0000.V05B.HDF5.nc
# ?HQprecipitation[1999:2200][919:1049],lon[1999:2200],lat[919:1049]
#------------------------------------------------------------------------
class balto_gui:
#--------------------------------------------------------------------
def __init__(self):
self.version = '0.5'
self.user_var = None
self.default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.timeout_secs = 60 # (seconds)
#----------------------------------------------------------
# "full_box_width" = (label_width + widget_width)
# gui_width = left_label_width + mid_width + button_width
# The 2nd, label + widget box, is referred to as "next".
# (2 * half_widget_width) + left_label + next_label = 540
#----------------------------------------------------------
self.gui_width = 680
self.left_label_width = 120
self.next_label_width = 50
self.all_label_width = 170
self.full_box_width = 540
self.widget_width = (self.full_box_width - self.left_label_width)
# self.half_widget_width = (self.full_box_width - self.all_label_width)/2
# self.half_widget_width = 183
self.left_widget_width = 230
self.next_widget_width = 136
self.left_box_width = (self.left_label_width + self.left_widget_width)
self.next_box_width = (self.next_label_width + self.next_widget_width)
self.button_width = 70 # big enough for "Reset"
#-----------------------------------------------------
self.map_width = (self.gui_width - 40)
self.map_height = 230 # was 250
self.map_center_init = (20.0, 0)
self.add_fullscreen_control = True
self.add_scale_control = False # (doesn't work)
self.add_measure_control = True
#-----------------------------------------------------
self.gui_width_px = self.pix_str( self.gui_width )
self.map_width_px = self.pix_str( self.map_width )
self.map_height_px = self.pix_str( self.map_height )
#-----------------------------------------------------
self.date_width_px = '240px'
self.time_width_px = '180px'
self.hint_width_px = '120px'
#---------------------------------------------------
self.log_box_width_px = self.pix_str( self.full_box_width )
self.log_box_height_px = '200px'
#---------------------------------------------------
# These styles are used to control width of labels
# self.init_label_style is the initial default.
#---------------------------------------------------
llw_px = self.pix_str( self.left_label_width )
nlw_px = self.pix_str( self.next_label_width )
self.init_label_style = {'description_width': 'initial'}
self.left_label_style = {'description_width': llw_px}
self.next_label_style = {'description_width': nlw_px}
self.date_style = {'description_width': '70px'}
self.time_style = {'description_width': '70px'}
# __init__()
#--------------------------------------------------------------------
def pix_str(self, num):
return str(num) + 'px'
#--------------------------------------------------------------------
def show_gui(self, ACC_STYLE=False, SHOW_MAP=True):
#------------------------------------------------------
        # Encountered a case where a problem with ipyleaflet
        # (used for the map panel) prevented any part of the
        # GUI from being displayed.
        # The SHOW_MAP flag helps to test for this problem.
#------------------------------------------------------
#------------------------------------
# Create & display the complete GUI
#-----------------------------------
if (ACC_STYLE):
self.make_acc_gui()
else:
# Use the TAB style
self.make_tab_gui( SHOW_MAP=SHOW_MAP)
gui_output = widgets.Output()
display(self.gui, gui_output)
# show_gui()
#--------------------------------------------------------------------
def make_acc_gui(self):
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel()
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=None causes all cells to be collapsed
#-------------------------------------------------------
acc = widgets.Accordion( children=[p0, p1, p2, p3, p4],
selected_index=None,
layout=Layout(width=gui_width_px) )
acc.set_title(0, p0_title)
acc.set_title(1, p1_title)
acc.set_title(2, p2_title)
acc.set_title(3, p3_title)
acc.set_title(4, p4_title)
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 lines
head = widgets.HTML(value=f"<b><font size=4>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
# self.gui = widgets.VBox([pad, head, acc]) # (top padding
self.gui = widgets.VBox([head, acc]) # (no top padding)
# make_acc_gui()
#--------------------------------------------------------------------
def make_tab_gui(self, SHOW_MAP=True):
#---------------------------------------------------------
# If there is a problem with ipyleaflet, it can prevent
# any part of the GUI from being displayed. You can
# set SHOW_MAP=False to remove the map to test for this.
#---------------------------------------------------------
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel( SHOW_MAP=SHOW_MAP )
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=0 shows Browse Data panel
#-------------------------------------------------------
tab = widgets.Tab( children=[p0, p1, p2, p3, p4],
selected_index=0,
layout=Layout(width=gui_width_px) )
tab.set_title(0, p0_title)
tab.set_title(1, p1_title)
tab.set_title(2, p2_title)
tab.set_title(3, p3_title)
tab.set_title(4, p4_title)
#### tab.titles = [str(i) for i in range(len(children))]
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 lines
head = widgets.HTML(value=f"<b><font size=5>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
## self.gui = widgets.VBox([pad, head, acc])
self.gui = widgets.VBox([head, tab]) # (no padding above)
# make_tab_gui()
#--------------------------------------------------------------------
def get_padding(self, n, HORIZONTAL=True):
#-------------------------------
# Get some white space padding
#-------------------------------
if (HORIZONTAL):
#--------------------------------
# Use overloaded multiplication
#--------------------------------
## s = (' ' * n) # overloaded multiplication
s = "<p>" + (' ' * n) + "</p>"
pad = widgets.HTML( value=s )
else:
s = ("<br>" * n)
pad = widgets.HTML( value=s )
return pad
# get_padding()
#--------------------------------------------------------------------
def make_data_panel(self):
#-----------------------------------
# Browse data on an OpenDAP server
#-----------------------------------
left_style = self.left_label_style
next_style = self.next_label_style
full_width_px = self.pix_str( self.full_box_width )
left_width_px = self.pix_str( self.left_box_width )
next_width_px = self.pix_str( self.next_box_width )
btn_width_px = self.pix_str( self.button_width )
#---------------------------------------------------------
o1 = widgets.Text(description='OpenDAP URL Dir:',
value=self.default_url_dir,
disabled=False, style=left_style,
layout=Layout(width=full_width_px))
b1 = widgets.Button(description="Go", layout=Layout(width=btn_width_px))
o2 = widgets.Dropdown( description='Filename:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
#------------------------------------------------------------------
oL = widgets.Text(description='Long name:', style=left_style,
value='', layout=Layout(width=full_width_px) )
## o3 = widgets.Select( description='Variable:',
o3 = widgets.Dropdown( description='Variable:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=left_width_px) )
o4 = widgets.Text(description='Units:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o5 = widgets.Text(description='Dimensions:', style=left_style,
value='', layout=Layout(width=left_width_px) )
o6 = widgets.Text(description='Shape:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o7 = widgets.Text(description='Data type:', style=left_style,
value='', layout=Layout(width=full_width_px) )
o8 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
o9 = widgets.Text(description='Status:', style=left_style,
value='Ready.', layout=Layout(width=full_width_px) )
b2 = widgets.Button(description="Reset", layout=Layout(width=btn_width_px))
## pd = widgets.HTML((' ' * 1)) # for padding
#-------------------------------
# Arrange widgets in the panel
#-------------------------------
url_box = widgets.HBox([o1, b1]) # directory + Go button
stat_box = widgets.HBox([o9, b2]) # status + Reset button
name_box = widgets.VBox([o3, o5])
## pad_box = widgets.VBox([pd, pd])
unit_box = widgets.VBox([o4, o6])
mid_box = widgets.HBox([name_box, unit_box])
## mid_box = widgets.HBox([name_box, pad_box, unit_box])
panel = widgets.VBox([url_box, o2, oL, mid_box, o7, o8, stat_box])
self.data_url_dir = o1 # on an OpenDAP server
self.data_filename = o2
self.data_var_long_name = oL
self.data_var_name = o3 # short_name
self.data_var_units = o4
self.data_var_dims = o5
self.data_var_shape = o6
self.data_var_type = o7
self.data_var_atts = o8
self.data_status = o9
self.data_panel = panel
#-----------------
# Event handlers
#-----------------------------------------------------
# Note: NEED to set names='value' here. If names
# keyword is omitted, only works intermittently.
#------------------------------------------------------------
# "on_click" handler function is passed b1 as argument.
# "observe" handler function is passed "change", which
# is a dictionary, as argument. See Traitlet events.
#------------------------------------------------------------
b1.on_click( self.update_filename_list )
b2.on_click( self.reset_data_panel )
o2.observe( self.update_data_panel, names=['options','value'] )
o3.observe( self.update_var_info, names=['options', 'value'] )
## o3.observe( self.update_var_info, names='value' )
## o2.observe( self.update_data_panel, names='All' )
## o3.observe( self.update_var_info, names='All' )
#-------------------------------------------------------
# It turned out this wasn't an issue, but interesting.
#-------------------------------------------------------
# Note: Method functions have type "method" instead
# of "function" and therefore can't be passed
# directly to widget handlers like "on_click".
# But we can use the "__func__" attribute.
#-------------------------------------------------------
# b1.on_click( self.update_filename_list.__func__ )
# o2.observe( self.update_data_panel.__func__ )
# o3.observe( self.update_var_info.__func__, names='value' )
# make_data_panel()
#--------------------------------------------------------------------
def reset_data_panel(self, caller_obj=None, KEEP_DIR=False):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Reset" button beside the status box.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
if not(KEEP_DIR):
self.data_url_dir.value = self.default_url_dir
self.data_filename.options = ['']
self.data_var_name.options = [''] # short names
self.data_var_long_name.value = ''
self.data_var_units.value = ''
self.data_var_shape.value = ''
self.data_var_dims.value = ''
self.data_var_type.value = ''
self.data_var_atts.options = ['']
self.data_status.value = 'Ready.'
#------------------------------------------
self.download_log.value = ''
# reset_data_panel()
#--------------------------------------------------------------------
def make_map_panel(self, SHOW_MAP=True):
map_width_px = self.map_width_px
map_height_px = self.map_height_px
btn_width_px = self.pix_str( self.button_width )
#--------------------------------------------------
# bm_style = {'description_width': '70px'} # for top
bbox_style = {'description_width': '100px'}
bbox_width_px = '260px'
#---------------------------------------
# Create the map width with ipyleaflet
# Center lat 20 looks better than 0.
#---------------------------------------
map_center = self.map_center_init # (lat, lon)
m = Map(center=map_center, zoom=1,
layout=Layout(width=map_width_px, height=map_height_px))
#----------------------
# Add more controls ?
#----------------------
if (self.add_fullscreen_control):
m.add_control( FullScreenControl( position='topright' ) )
#---------------------------------------------------------
# Cannot be imported. (2020-05-18)
# if (self.add_scale_control):
# m.add_control(ScaleControl( position='bottomleft' ))
#---------------------------------------------------------
if (self.add_measure_control):
measure = MeasureControl( position='bottomright',
active_color = 'orange',
primary_length_unit = 'kilometers')
m.add_control(measure)
measure.completed_color = 'red'
## measure.add_length_unit('yards', 1.09361, 4)
## measure.secondary_length_unit = 'yards'
## measure.add_area_unit('sqyards', 1.19599, 4)
## measure.secondary_area_unit = 'sqyards'
#-----------------------------------------------------
# Does "step=0.01" restrict accuracy of selection ??
#-----------------------------------------------------
w1 = widgets.BoundedFloatText(
value=-180, step=0.01, min=-360, max=360.0,
description='West edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w2 = widgets.BoundedFloatText(
value=180, step=0.01, min=-360, max=360.0,
description='East edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w3 = widgets.BoundedFloatText(
value=90, min=-90, max=90.0, step=0.01,
# description='North latitude:',
description='North edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w4 = widgets.BoundedFloatText(
value=-90, min=-90, max=90.0, step=0.01,
# description='South latitude:',
description='South edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
pd = widgets.HTML((' ' * 2)) # for padding
b1 = widgets.Button(description="Update",
layout=Layout(width=btn_width_px))
b2 = widgets.Button(description="Reset",
layout=Layout(width=btn_width_px))
#---------------------
# Choose the basemap
#---------------------
options = self.get_basemap_list()
bm = widgets.Dropdown( description='Base map:',
options=options, value=options[0],
disabled=False, style=bbox_style,
layout=Layout(width='360px') )
#-----------------------------------
# Arrange the widgets in the panel
#-----------------------------------
lons = widgets.VBox([w1, w2])
lats = widgets.VBox([w3, w4])
pads = widgets.VBox([pd, pd])
btns = widgets.VBox([b1, b2])
bbox = widgets.HBox( [lons, lats, pads, btns])
#------------------------------------------------------
        # Encountered a case where a problem with ipyleaflet
        # (used for the map panel) prevented any part of the
        # GUI from being displayed.
        # The SHOW_MAP flag helps to test for this problem.
#------------------------------------------------------
if (SHOW_MAP):
panel = widgets.VBox( [m, bbox, bm] )
else:
panel = widgets.VBox( [bbox, bm] )
self.map_window = m
self.map_minlon = w1
self.map_maxlon = w2
self.map_maxlat = w3
self.map_minlat = w4
self.map_basemap = bm
self.map_panel = panel
## self.map_bounds = (-180, -90, 180, 90)
#-----------------
# Event handlers
#-----------------
bm.observe( self.change_base_map, names=['options','value'] )
m.on_interaction( self.replace_map_bounds )
m.observe( self.zoom_out_to_new_bounds, 'bounds' )
m.new_bounds = None # (used for "zoom to fit")
b1.on_click( self.update_map_bounds )
b2.on_click( self.reset_map_panel )
# make_map_panel()
#--------------------------------------------------------------------
def get_basemap_list(self):
basemap_list = [
'OpenStreetMap.Mapnik', 'OpenStreetMap.HOT', 'OpenTopoMap',
'Esri.WorldStreetMap', 'Esri.DeLorme', 'Esri.WorldTopoMap',
'Esri.WorldImagery', 'Esri.NatGeoWorldMap',
'NASAGIBS.ModisTerraTrueColorCR', 'NASAGIBS.ModisTerraBands367CR',
'NASAGIBS.ModisTerraBands721CR', 'NASAGIBS.ModisAquaTrueColorCR',
'NASAGIBS.ModisAquaBands721CR', 'NASAGIBS.ViirsTrueColorCR',
'NASAGIBS.ViirsEarthAtNight2012',
'Strava.All', 'Strava.Ride', 'Strava.Run', 'Strava.Water',
'Strava.Winter', 'Stamen.Terrain', 'Stamen.Toner',
'Stamen.Watercolor' ]
#---------------------------------
# 'HikeBike.HikeBike', 'MtbMap'
# 'OpenStreetMap.BlackAndWhite',
# 'OpenStreetMap.France',
#----------------------------------
return basemap_list
# get_basemap_list()
#--------------------------------------------------------------------
def change_base_map(self, caller_obj=None):
#--------------------------------------------------------
# Cannot directly change the basemap for some reason.
# self.map_window.basemap = basemaps.Esri.WorldStreetMap
# Need to call clear_layers(), then add_layer().
#---------------------------------------------------------
map_choice = self.map_basemap.value
self.map_window.clear_layers()
basemap_layer = eval( 'basemaps.' + map_choice )
self.map_window.add_layer( basemap_layer )
# For testing
# print('map_choice =', map_choice)
# print('Changed the basemap.')
# change_base_map()
#--------------------------------------------------------------------
def update_map_view(self, caller_obj=None):
pass
# update_map_view()
#--------------------------------------------------------------------
def reset_map_panel(self, caller_obj=None):
self.map_window.center = self.map_center_init
self.map_window.zoom = 1
self.map_minlon.value = '-225.0'
self.map_maxlon.value = '225.0'
self.map_minlat.value = '-51.6'
self.map_maxlat.value = '70.6'
# reset_map_panel()
#--------------------------------------------------------------------
def make_datetime_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
date_width_px = self.date_width_px
time_width_px = self.time_width_px
hint_width_px = self.hint_width_px
#-----------------------------------
date_style = self.date_style
time_style = self.time_style
d1 = widgets.DatePicker( description='Start Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d2 = widgets.DatePicker( description='End Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d3 = widgets.Text( description='Start Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d4 = widgets.Text( description='End Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d3.value = '00:00:00'
d4.value = '00:00:00'
#-------------------------------
# Add some padding on the left
#-------------------------------
## margin = '0px 0px 2px 10px' # top right bottom left
pp = widgets.HTML((' ' * 3)) # for padding
d5 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d6 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d7 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=date_style,
layout=Layout(width=full_box_width_px) )
# d8 = widgets.Text( description='Notes:',
# disabled=False, style=self.date_style,
# layout=Layout(width=full_box_width_px) )
d8 = widgets.Textarea( description='Notes:', value='',
disabled=False, style=self.date_style,
layout=Layout(width=full_box_width_px, height='140px'))
dates = widgets.VBox([d1, d2])
times = widgets.VBox([d3, d4])
hints = widgets.VBox([d5, d6])
pad = widgets.VBox([pp, pp])
top = widgets.HBox([dates, times, pad, hints])
panel = widgets.VBox([top, d7, d8])
## panel = widgets.VBox([top, pp, d7, d8])
self.datetime_start_date = d1
self.datetime_start_time = d3
self.datetime_end_date = d2
self.datetime_end_time = d4
self.datetime_attributes = d7
self.datetime_notes = d8
self.datetime_panel = panel
# make_datetime_panel()
#--------------------------------------------------------------------
def make_download_panel(self):
init_style = self.init_label_style
f1 = widgets.Dropdown( description='Download Format:',
options=['HDF', 'netCDF', 'netCDF4', 'ASCII'],
value='netCDF',
disabled=False, style=init_style)
pad = widgets.HTML(value=f"<p> </p>") # padding
b3 = widgets.Button(description="Download")
h3 = widgets.HBox([f1, pad, b3])
#-----------------------------------
# Could use this for info messages
#-----------------------------------
# status = widgets.Text(description=' Status:', style=self.style0,
# layout=Layout(width='380px') )
width_px = self.log_box_width_px
height_px = self.log_box_height_px
log = widgets.Textarea( description='', value='',
disabled=False, style=init_style,
layout=Layout(width=width_px, height=height_px))
## panel = widgets.VBox([h3, status, log])
panel = widgets.VBox([h3, log])
self.download_format = f1
self.download_button = b3
self.download_log = log
self.download_panel = panel
#-----------------
# Event handlers
#-----------------
b3.on_click( self.download_data )
# make_download_panel()
#--------------------------------------------------------------------
def make_prefs_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
left_style = self.left_label_style
w1 = widgets.Dropdown( description='OpenDAP package:',
options=['pydap', 'netcdf4'],
value='pydap',
disabled=False, style=left_style)
ts = self.timeout_secs
t1 = widgets.BoundedIntText( description='Timeout:',
value=ts, min=10, max=1000,
step=1, disabled=False,
style=left_style)
t2 = widgets.Label( ' (seconds)',
layout=Layout(width='80px') )
w2 = widgets.HBox([t1, t2])
note = 'Under construction; preferences will go here.'
w3 = widgets.Textarea( description='Notes:', value=note,
disabled=False, style=left_style,
layout=Layout(width=full_box_width_px, height='50px'))
panel = widgets.VBox([w1, w2, w3])
self.prefs_package = w1
self.prefs_timeout = t1
        self.prefs_notes = w3
self.prefs_panel = panel
# make_prefs_panel()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def get_map_bounds(self, FROM_MAP=True, style='sw_and_ne_corners'):
#-------------------------------------------------------
# Notes: ipyleaflet defines "bounds" as:
        #           [[minlat, minlon], [maxlat, maxlon]]
# matplotlib.imshow defines "extent" as:
# extent = [minlon, maxlon, minlat, maxlat]
#-------------------------------------------------------
# Return value is a list, not a tuple, but
# ok to use it like this:
# [minlon, minlat, maxlon, maxlat] = get_map_bounds().
#-------------------------------------------------------
if (FROM_MAP):
#------------------------------------
# Get the visible map bounds, after
# interaction such as pan or zoom
#------------------------------------
# bounds = self.map_window.bounds
# minlat = bounds[0][0]
# minlon = bounds[0][1]
# maxlat = bounds[1][0]
# maxlon = bounds[1][1]
#------------------------------------
# Is this more reliable ?
#------------------------------------
minlon = self.map_window.west
minlat = self.map_window.south
maxlon = self.map_window.east
maxlat = self.map_window.north
else:
#---------------------------------
# Get map bounds from text boxes
#---------------------------------
minlon = self.map_minlon.value
minlat = self.map_minlat.value
maxlon = self.map_maxlon.value
maxlat = self.map_maxlat.value
#------------------------------------------
# Return map bounds in different "styles"
#------------------------------------------
if (style == 'ipyleaflet'):
bounds = [[minlat, maxlat], [minlon, maxlon]]
elif (style == 'pyplot_imshow'):
bounds = [minlon, maxlon, minlat, maxlat]
        elif (style == 'sw_and_ne_corners'):
bounds = [minlon, minlat, maxlon, maxlat]
else:
bounds = [minlon, minlat, maxlon, maxlat]
return bounds
# get_map_bounds()
#--------------------------------------------------------------------
def replace_map_bounds(self, event, type=None, coordinates=None):
#-------------------------------------------
# Get visible map bounds after interaction
# Called by m.on_interaction().
# Don't need to process separate events?
#-------------------------------------------
[minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
#--------------------------------
# Save new values in text boxes
# Format with 8 decimal places.
#--------------------------------
self.map_minlon.value = "{:.8f}".format( minlon )
self.map_maxlon.value = "{:.8f}".format( maxlon )
self.map_maxlat.value = "{:.8f}".format( maxlat )
self.map_minlat.value = "{:.8f}".format( minlat )
# replace_map_bounds()
#--------------------------------------------------------------------
# def replace_map_bounds2(self, event, type=None, coordinates=None):
#
# # events: mouseup, mousedown, mousemove, mouseover,
# # mouseout, click, dblclick, preclick
# event = kwargs.get('type')
# # print('event = ', event)
# if (event == 'mouseup') or (event == 'mousemove') or \
# (event == 'click') or (event == 'dblclick'):
# w1.value = m.west
# w2.value = m.east
# w3.value = m.north
# w4.value = m.south
#
# # status.value = event
#
# # with output2:
# # print( event )
#
#--------------------------------------------------------------------
def update_map_bounds(self, caller_obj=None):
[bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
self.get_map_bounds( FROM_MAP = False )
bb_midlon = (bb_minlon + bb_maxlon) / 2
bb_midlat = (bb_minlat + bb_maxlat) / 2
bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
#----------------------------------------------------------
zoom = self.map_window.max_zoom # (usually 18)
self.map_window.center = bb_center
self.map_window.zoom = zoom
## print('max_zoom =', self.map_window.max_zoom)
## print('map_window.bounds =', self.map_window.bounds )
#------------------------------------
# Add "new_bounds" attribute to map
#------------------------------------
new_bounds = ((bb_minlat, bb_minlon), (bb_maxlat, bb_maxlon))
self.map_window.new_bounds = Tuple()
self.map_window.new_bounds = new_bounds
# update_map_bounds()
#--------------------------------------------------------------------
def zoom_out_to_new_bounds(self, change=None):
# change owner is the widget that triggers the handler
m = change.owner
#-----------------------------------------
# If not zoomed all the way out already,
# and we have a target bounding box
#-----------------------------------------
if (m.zoom > 1 and m.new_bounds):
b = m.new_bounds
n = change.new
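            #---------------------------------------------------------
            # Bounds are ordered ((minlat, minlon), (maxlat, maxlon));
            # keep zooming out until the visible bounds (n) contain
            # the target bounding box (b).
            #---------------------------------------------------------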
if (n[0][0] < b[0][0] and n[0][1] < b[0][1] and
n[1][0] > b[1][0] and n[1][1] > b[1][1]):
#---------------------------------------
# new_bounds are now within map window
# Show bounding box as a rectangle ?
# weight = line/stroke thickness
#---------------------------------------
# rectangle = Rectangle( bounds=b, fill=False, weight=4)
# ## fill_opacity=0.0, \ fill_color="#0033FF" )
# m.add_layer(rectangle)
#-----------------------
m.new_bounds = None # (remove target)
else:
# zoom out
m.zoom = m.zoom - 1
# zoom_out_to_new_bounds()
#--------------------------------------------------------------------
# def zoom_out_to_new_bounds_v0(self, caller_obj=None):
#
# [bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
# self.get_map_bounds( FROM_MAP = False )
# bb_midlon = (bb_minlon + bb_maxlon) / 2
# bb_midlat = (bb_minlat + bb_maxlat) / 2
# bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
# zoom = self.map_window.max_zoom # (usually 18)
# zoom = zoom - 1
# ## print('max_zoom =', self.map_window.max_zoom)
#
# self.map_window.center = bb_center
# self.map_window.zoom = zoom
# print('map_window.bounds =', self.map_window.bounds )
# # bounds is read-only
# ## self.map_window.bounds = ((bb_midlat,bb_midlon),(bb_midlat,bb_midlon))
# while (True):
# # time.sleep(0.5) ######
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# # zoom_out_to_new_bounds_v0
#--------------------------------------------------------------------
def get_url_dir_filenames(self):
#-----------------------------------------
# Construct a list of filenames that are
# available in the opendap url directory
#-----------------------------------------
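        #-------------------------------------------------------
        # Each data file appears in the catalog page on a line
        # containing a '"sameAs": "http://..."' link; strip the
        # '.html' suffix and keep the last path component.
        #-------------------------------------------------------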
r = requests.get( self.data_url_dir.value )
lines = r.text.splitlines()
# n_lines = len(lines)
filenames = list()
for line in lines:
if ('"sameAs": "http://' in line) and ('www' not in line):
line = line.replace('.html"', '')
parts = line.split("/")
filename = parts[-1]
filenames.append( filename )
return filenames
# get_url_dir_filenames()
#--------------------------------------------------------------------
def update_filename_list(self, caller_obj=None):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.data_status.value = 'Retrieving filenames in URL dir...'
filenames = self.get_url_dir_filenames()
if (len(filenames) == 0):
self.reset_data_panel( KEEP_DIR=True )
msg = 'Error: No data files found in URL dir.'
self.data_status.value = msg
return
#-----------------------------------
# Update filename list & selection
#-----------------------------------
self.data_filename.options = filenames
self.data_filename.value = filenames[0]
self.data_status.value = 'Ready.'
# update_filename_list()
#--------------------------------------------------------------------
def get_opendap_file_url(self):
directory = self.data_url_dir.value
if (directory[-1] != '/'):
directory += '/'
#------------------------------------
filename = self.data_filename.value
self.opendap_file_url = (directory + filename)
# get_opendap_file_url()
#--------------------------------------------------------------------
def open_dataset(self):
timeout = self.timeout_secs
opendap_url = self.opendap_file_url
dataset = pydap.client.open_url( opendap_url, timeout=timeout )
self.dataset = dataset
# open_dataset()
#--------------------------------------------------------------------
def update_data_panel(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
# print('type(change) =', type(change))
if (self.data_filename.value == ''):
## self.update_filename_list() # (try this?)
return
self.get_opendap_file_url()
self.open_dataset()
self.get_all_var_shortnames()
self.get_all_var_longnames()
self.get_all_var_units()
#------------------------------------------
# Create map between long and short names
#------------------------------------------
long_names = self.var_long_names
short_names = self.var_short_names
units_names = self.var_units_names
self.short_name_map = dict(zip(long_names, short_names ))
self.units_map = dict(zip(long_names, units_names ))
#-------------------------------------------
# Update variable list and selected value.
#-------------------------------------------
self.data_var_name.options = short_names
self.data_var_name.value = short_names[0]
#------------------------------------
# Show other info for this variable
#------------------------------------
self.update_var_info()
self.clear_download_log() #####
#-------------------------------------------
# Try to show map extent in map panel
#-------------------------------------------
#### self.update_map_panel()
#-------------------------------------------
# Try to show date range in datetime panel
#-------------------------------------------
self.update_datetime_panel() # clears notes, too
# update_data_panel()
#--------------------------------------------------------------------
def update_var_info(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
return
#-----------------------------------------------
# Maybe later wrap this block in "try, except"
#----------------------------------------------
# Note: short_name is selected from Dropdown.
# var = dataset[ short_name ]
#----------------------------------------------
long_name = self.get_var_longname( short_name )
units = self.get_var_units( short_name )
shape = self.get_var_shape( short_name )
dims = self.get_var_dimensions( short_name )
dtype = self.get_var_dtype( short_name )
atts = self.get_var_attributes( short_name )
#---------------------------------------------
self.data_var_long_name.value = long_name
self.data_var_units.value = units
self.data_var_shape.value = shape
self.data_var_dims.value = dims
self.data_var_type.value = dtype
self.data_var_atts.options = atts
# update_var_info()
#--------------------------------------------------------------------
def get_all_var_shortnames(self):
self.var_short_names = list( self.dataset.keys() )
# get_all_var_shortnames()
#--------------------------------------------------------------------
def get_all_var_longnames(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
long_names = list()
for name in self.var_short_names:
try:
                long_name = self.get_var_longname( name )
long_names.append( long_name )
except:
# Use short name if there is no long_name.
long_names.append( name )
# print('No long name found for:', name)
self.var_long_names = long_names
# get_all_var_longnames()
#--------------------------------------------------------------------
def get_all_var_units(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
units_names = list()
for name in self.var_short_names:
try:
units = self.get_var_units( name )
units_names.append( units )
except:
units_names.append( 'unknown' )
# print('No units name found for:', name)
self.var_units_names = units_names
# get_all_var_units()
#--------------------------------------------------------------------
def get_var_shortname(self):
short_name = self.data_var_name.value
if (short_name == ''):
pass
## print('Short name is not set.')
return short_name
# get_var_shortname()
#--------------------------------------------------------------------
def get_var_longname( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'long_name'):
return var.long_name
else:
return 'Long name not found.'
## return short_name
# get_var_longname()
#--------------------------------------------------------------------
def get_var_units( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'units'):
return var.units
else:
return 'unknown'
# get_var_units()
#--------------------------------------------------------------------
def get_var_shape( self, short_name ):
var = self.dataset[ short_name ]
return str(var.shape)
# get_var_shape()
#--------------------------------------------------------------------
def get_var_dimensions( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'dimensions'):
return str(var.dimensions)
else:
return 'No dimensions found.'
# get_var_dimensions()
#--------------------------------------------------------------------
def get_var_dtype( self, short_name ):
# The old Numeric single-character typecodes:
# ('f','d','h', 's','b','B','c','i','l'),
# corresponding to:
# ('f4','f8','i2','i2','i1','i1','S1','i4','i4'),
# are not yet supported.
type_map = {
'i1' : '1-byte signed integer',
'i2' : '2-byte signed integer',
'i4' : '4-byte signed integer',
'i8' : '8-byte signed integer',
'f4' : '4-byte floating point',
'f8' : '8-byte floating point',
'u1' : '1-byte unsigned integer',
'u2' : '2-byte unsigned integer',
'u4' : '4-byte unsigned integer',
'u8' : '8-byte unsigned integer' }
type_list = list( type_map.keys() )
var = self.dataset[ short_name ]
type_str = str( var.dtype )
#----------------------------------------
# The ">" & "<" indicate big and little
# endian byte order (i.e. MSB or LSB)
#----------------------------------------
endian = ''
if (type_str[0] == '>'):
type_str = type_str[1:]
endian = ' (big endian)'
## endian = ' (MSB)'
if (type_str[0] == '<'):
type_str = type_str[1:]
endian = ' (little endian)'
## endian = ' (LSB)'
#---------------------------------
if (type_str in type_list):
return type_map[ type_str ] + endian
elif (type_str[:2] == '|S'):
try:
num = int( type_str[2:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
elif (type_str[0] == 'S'):
try:
num = int( type_str[1:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
else:
return type_str
# get_var_dtype()
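    # Example (sketch): how a pydap/numpy dtype string maps to the text
    # shown in the GUI.  The dtype value is illustrative.
    #
    #   str( np.dtype('>i2') )    # -> '>i2'
    #   # The '>' is stripped and noted as big endian, then 'i2' is
    #   # looked up in type_map:  '2-byte signed integer (big endian)'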
#--------------------------------------------------------------------
def get_var_attributes( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in var.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( var.attributes ) #### use str()
else:
return 'No attributes found.'
# get_var_attributes()
#--------------------------------------------------------------------
def get_time_attributes( self):
        if (hasattr(self.dataset, 'time')):
            time = self.dataset.time
        elif (hasattr(self.dataset, 'TIME')):
            time = self.dataset.TIME
        else:
            return 'No time variable found.'
        if hasattr(time, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in time.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( time.attributes ) #### use str()
else:
return 'No time attributes found.'
# get_time_attributes()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def update_datetime_panel(self):
self.clear_datetime_notes() # erase notes
#-----------------------------------------
# Are there any times for this dataset ?
#-----------------------------------------
short_names = self.var_short_names # self.dataset.keys()
if ('time' in short_names):
self.time_obj = self.dataset.time
self.time_var = self.time_obj.data[:]
elif ('TIME' in short_names):
self.time_obj = self.dataset.TIME
self.time_var = self.time_obj.data[:]
else:
msg = 'Unable to find times for this dataset.'
self.append_datetime_notes( msg )
return
#-----------------------------------------
# Show all time attributes in a droplist
#-----------------------------------------
time_att_list = self.get_time_attributes()
if (time_att_list is not None):
self.datetime_attributes.options = time_att_list
#----------------------------------------------------
# Compute the min and max times; save as time_range
#----------------------------------------------------
min_time = self.time_var.min()
max_time = self.time_var.max()
self.time_range = [min_time, max_time]
msg = 'Time range for this dataset = '
msg += '(' + str(min_time) + ', ' + str(max_time) + ')'
self.append_datetime_notes( msg )
#------------------------------------------------
# Is there an attribute called "actual_range" ?
#------------------------------------------------
# if not(hasattr(self.time_obj, 'actual_range')):
# msg = 'Unable to find "actual range" for times.'
# self.datetime_notes.value = msg
# return
# else:
# self.time_range = self.time_obj.actual_range
#-----------------------------------------
# Is there an attribute called "units" ?
#-----------------------------------------
# The full string may be something like:
# hour since 0000-01-01 00:00:00
# Save both full string and just units.
#-----------------------------------------
if (hasattr(self.time_obj, 'units')):
self.time_units_str = self.time_obj.units
self.get_actual_time_units() # (set self.time_units)
else:
msg = 'Unable to find "units" for time.'
self.append_datetime_notes( msg )
return
#-------------------------------------------
# Is there an attribute called "delta_t" ?
# If so, assume it is in "datetime" form,
# such as 00-01-00 00:00:00" for 1 month.
#-------------------------------------------
HAS_DELTA_T = hasattr(self.time_obj, 'delta_t')
if (HAS_DELTA_T):
self.time_delta = self.time_obj.delta_t
else:
self.get_time_delta_str()
# For testing:
# print('In update_datetime_panel():' )
# print('self.time_delta =', self.time_delta )
# print('HAS_DELTA_T =', HAS_DELTA_T )
#---------------------------------------------------
# Are time units given as "time since" some date ?
#---------------------------------------------------
# Sample data has cases with:
# 'days since', 'hour since' (vs hours), 'seconds since'
#--------------------------------------------------------
# Already saved "time_units_str" AND "time_units" above.
# strip() removes leading and trailing whitespace
#--------------------------------------------------------
time_units_str = self.time_units_str
if ('since' not in time_units_str):
msg = 'Time units string has no "since" part.'
self.append_datetime_notes( msg )
return
#-------------------------------------
# Process the "origin" date and time
#-------------------------------------
parts = time_units_str.split('since')
odt = parts[1].strip()
self.origin_datetime_str = odt
(date_str, time_str) = self.split_datetime_str( odt )
if (date_str.startswith('0000')):
msg = 'Warning: "Since" year must be > 0, changing to 1.'
self.append_datetime_notes( msg )
date_str = date_str[:3] + '1' + date_str[4:]
self.origin_datetime_obj = self.get_datetime_obj_from_str( date_str, time_str)
#---------------------------------------------
# Now process time_since for start and end
#---------------------------------------------
time_since1 = self.time_range[0]
time_since2 = self.time_range[1]
start_datetime_obj = self.get_datetime_from_time_since(time_since1)
end_datetime_obj = self.get_datetime_from_time_since(time_since2)
start_datetime_str = str(start_datetime_obj)
end_datetime_str = str(end_datetime_obj)
(start_date, start_time) = self.split_datetime_str( start_datetime_str )
(end_date, end_time) = self.split_datetime_str( end_datetime_str )
#-------------------------------
# Save these also, as numbers.
#-------------------------------
self.start_year = start_datetime_obj.year
self.end_year = end_datetime_obj.year
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.start_year = y1
# self.end_year = y2
#-----------------------------------------------------------
# Be sure to set date values as date_obj, not datetime_obj
#-----------------------------------------------------------
self.datetime_start_date.value = start_datetime_obj.date()
self.datetime_end_date.value = end_datetime_obj.date()
self.datetime_start_time.value = start_time
self.datetime_end_time.value = end_time
#----------------------------------
# This also works, but more steps
#----------------------------------
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.datetime_start_date.value = datetime.date(y1, m1, d1)
# self.datetime_end_date.value = datetime.date(y2, m2, d2)
# update_datetime_panel()
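    # Example (sketch): how a CF-style "units since origin" string is
    # split into units and an origin datetime, as done above.  The
    # string is illustrative; datasets use 'days since', 'hour since',
    # 'seconds since', etc.
    #
    #   time_units_str = 'days since 1800-01-01 00:00:00'
    #   parts = time_units_str.split('since')
    #   odt   = parts[1].strip()                  # '1800-01-01 00:00:00'
    #   (date_str, time_str) = odt.split(' ')     # ('1800-01-01', '00:00:00')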
#--------------------------------------------------------------------
def get_years_from_time_since(self, data_time_since):
#----------------------------------------------------
# Notes: self.time_var contains "times since" some
# origin time, in days, hours or seconds,
# unrestricted by user start/end times.
# self.time_range[0] = self.time_var.min()
# self.time_range[1] = self.time_var.max()
#----------------------------------------------------
# For plots, want to convert these time
# offsets to decimal years, keeping in mind
# that user may have restricted the time
# range further.
#----------------------------------------------------
units_per_year = {
'years':1.0, 'days':365.0, 'hours':8760.0,
'minutes':525600.0, 'seconds':31536000.0 }
min_data_time_since = self.time_range[0]
time_since_start = (data_time_since - min_data_time_since)
#----------------------------------------------------
units = self.time_units
if (units in units_per_year.keys()):
factor = units_per_year[ units ]
years_since_start = (time_since_start / factor)
else:
print('ERROR, Unsupported units:', units)
return None
#----------------------------------------------------
start_year = self.start_year
dec_years = (years_since_start + start_year)
return dec_years
# get_years_from_time_since()
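    # Example (sketch): converting a "days since" offset to a decimal
    # year with the arithmetic above.  All numbers are illustrative.
    #
    #   factor          = 365.0       # units_per_year for 'days'
    #   min_time_since  = 73048.0     # self.time_range[0]
    #   data_time_since = 73230.5
    #   start_year      = 2000        # self.start_year
    #   dec_year = ((data_time_since - min_time_since) / factor) + start_year
    #   # -> 2000.5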
#--------------------------------------------------------------------
def clear_datetime_notes(self):
self.datetime_notes.value = ''
# clear_datetime_notes()
#--------------------------------------------------------------------
def append_datetime_notes(self, msg):
self.datetime_notes.value += (msg + '\n')
# append_datetime_notes()
#--------------------------------------------------------------------
# def list_to_string( self, array ):
#
# s = ''
# for item in array:
# s = s + item + '\n'
# return s
#
# # list_to_string()
#--------------------------------------------------------------------
def pad_with_zeros(self, num, target_len):
num_string = str( int(num) ) # int removes decimal part
n = len( num_string )
m = (target_len - n)
num_string = ('0'*m) + num_string
return num_string
# pad_with_zeros()
#--------------------------------------------------------------------
def get_actual_time_units(self):
# secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
# next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units_list = ['second', 'minute', 'hour',
'day', 'year', 'None'] # ascending, skip month
for units in units_list:
if (self.time_units_str.startswith(units)):
break
        if (units != 'None'):
units += 's' # (make units plural now; not before)
else:
print('ERROR: No match found for units.')
return
self.time_units = units
# get_actual_time_units()
#--------------------------------------------------------------------
def get_time_delta_str(self):
## print('### self.time_var.size =', self.time_var.size )
## print('###')
#-----------------------------------
# Check size of the time_var array
#-----------------------------------
if (self.time_var.size == 1):
dt = 0
self.time_delta = '0000-00-00 00:00:00'
# print('At top of get_time_delta_str():')
# print('self.time_var.size =', self.time_var.size )
# print('self.time_delta =', self.time_delta )
return
if (self.time_var.size > 1):
dt = (self.time_var[1] - self.time_var[0])
print('dt1 =', dt)
if (self.time_var.size > 3):
dt2 = (self.time_var[2] - self.time_var[1]) ###
dt3 = (self.time_var[3] - self.time_var[2]) ###
print('dt2 =', dt2) # check if evenly spaced
print('dt3 =', dt3)
#---------------------------------------------------
# Note: Actual time units were stripped from units
# string and saved as self.time_units.
# A full units attribute string may be:
# 'hour since 0000-00-00 00:00:00'
#---------------------------------------------------
units_list = ['seconds', 'minutes', 'hours',
'days', 'years', 'None'] # ascending, skip month
secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units = self.time_units
units_index = units_list.index( units )
#----------------------------------------
if (units == 'years'):
s = self.pad_with_zeros(dt,4)
else:
if (len(str(dt)) <= 2):
s = self.pad_with_zeros(dt,2)
else:
#-------------------------------
# Must convert units to get dt
# down to 1 or 2 digits.
#-------------------------------
old_dt = dt
old_units = units
k = units_index
n = len( str(int(dt)) )
while (n > 2) and (units != 'None'):
k = k + 1
dt = (dt / next_unit_factor[k-1])
units = units_list[k]
n = len( str(int(dt)) )
if (units == 'None'):
print('#####################################')
print('ERROR in get_time_delta_str():')
print(' dt has too many digits.')
print('#####################################')
return
else:
# Note that any remainder has been dropped.
s = self.pad_with_zeros(dt,2)
print('Old dt and units =', old_dt, old_units)
print('New dt and units =', dt, units)
print('Remainder not retained yet.')
#----------------------------------------------
if (units == 'years'):
td = (s + '-00-00 00:00:00')
# if (units == 'months'):
# td= ('0000-' + s + '-00 00:00:00')
if (units == 'days'):
td = ('0000-00-' + s + ' 00:00:00')
if (units == 'hours'):
td = ('0000-00-00 ' + s + ':00:00')
if (units == 'minutes'):
td = ('0000-00-00 00:' + s + ':00')
if (units == 'seconds'):
td = ('0000-00-00 00:00:' + s)
#------------------------------------------------
self.time_delta = td
# print('At bottom of get_time_delta_str():')
# print('self.time_delta =', td)
# print()
# get_time_delta_str()
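    # Example (sketch): the datetime-style delta strings produced above
    # for a few illustrative (dt, units) pairs.
    #
    #   dt = 6, units = 'hours'   ->  '0000-00-00 06:00:00'
    #   dt = 1, units = 'days'    ->  '0000-00-01 00:00:00'
    #   dt = 1, units = 'years'   ->  '0001-00-00 00:00:00'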
#--------------------------------------------------------------------
def get_datetime_obj_from_str(self, date_str, time_str='00:00:00'):
#---------------------------------------------------
# date_str = 'YYYY-MM-DD', time_str = 'HH:MM:SS'
#---------------------------------------------------
## e.g. d1 = str(self.datetime_end_date.value)
## e.g. t1 = self.datetime_end_time.value
(y, m1, d) = self.split_date_str(date_str)
(h, m2, s) = self.split_time_str(time_str)
if( y <= 0 ):
# msg = 'Year cannot be < 1 in start date.\n'
# msg += 'Changed year from ' + str(y) + ' to 1.'
# self.datetime_notes.value = msg
print('Year cannot be < 1 in start date.')
print('Changed year from ' + str(y) + ' to 1.')
print()
y = 1
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_str()
#--------------------------------------------------------------------
def get_datetime_obj_from_one_str(self, datetime_str):
(date, time) = self.split_datetime_str( datetime_str )
(y, m1, d) = self.split_date_str( date )
(h, m2, s) = self.split_time_str( time )
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_one_str()
#--------------------------------------------------------------------
def get_start_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_start_date
t1 = self.datetime_start_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_start_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_start_datetime_obj()
#--------------------------------------------------------------------
def get_end_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_end_date
t1 = self.datetime_end_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_end_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_end_datetime_obj()
#--------------------------------------------------------------------
def split_datetime_str(self, datetime_obj, datetime_sep=' ',
ALL=False):
#-----------------------------------------------
# Note: Still works if datetime_obj is string.
#-----------------------------------------------
datetime_str = str(datetime_obj)
parts = datetime_str.split( datetime_sep )
## print('## datetime_str =', datetime_str )
## print('## parts =', str(parts) )
date_str = parts[0]
time_str = parts[1]
if not(ALL):
return (date_str, time_str)
else:
(y,m1,d) = self.split_date_str( date_str )
(h,m2,s) = self.split_time_str( time_str )
return (y,m1,d,h,m2,s)
# split_datetime_str()
#--------------------------------------------------------------------
def split_date_str(self, date_str, date_sep='-'):
date_parts = date_str.split( date_sep )
year = int(date_parts[0])
month = int(date_parts[1]) # NOTE: int('08') = 8
day = int(date_parts[2])
return (year, month, day)
# split_date_str()
#--------------------------------------------------------------------
def split_time_str(self, time_str, time_sep=':'):
time_parts = time_str.split( time_sep )
hour = int(time_parts[0])
minute = int(time_parts[1])
second = int(time_parts[2])
return (hour, minute, second)
# split_time_str()
#--------------------------------------------------------------------
def get_datetime_from_time_since(self, time_since):
# For testing
# print('## type(times_since) =', type(time_since) )
# print('## time_since =', time_since )
# print('## int(time_since) =', int(time_since) )
#---------------------------------------------------
# Note: datetime.timedelta() can take integer or
# float arguments, and the arguments can be
# very large numbers. However, it does not
# accept any numpy types, whether float or
# int (e.g. np.int16, np.float32).
# https://docs.python.org/3/library/datetime.html
#---------------------------------------------------
units = self.time_units # ('days', 'hours', etc.)
delta = None
time_since2 = float(time_since) ## No numpy types
#------------------------------------------------------
if (units == 'days'):
delta = datetime.timedelta( days=time_since2 )
if (units == 'hours'):
delta = datetime.timedelta( hours=time_since2 )
if (units == 'minutes'):
delta = datetime.timedelta( minutes=time_since2 )
if (units == 'seconds'):
delta = datetime.timedelta( seconds=time_since2 )
#------------------------------------------------------
if (delta is None):
msg = 'ERROR: Units: ' + units + ' not supported.'
self.append_datetime_notes( msg )
return
# For testing
## print('#### delta =', delta)
#---------------------------------------------
# Create new datetime object from time_since
#---------------------------------------------
origin_obj = self.origin_datetime_obj
new_dt_obj = (origin_obj + delta)
return new_dt_obj
# get_datetime_from_time_since()
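    # Example (sketch): how a "time since" offset becomes a datetime,
    # as above.  The origin and offset values are illustrative.
    #
    #   import datetime
    #   origin = datetime.datetime(1800, 1, 1, 0, 0, 0)
    #   delta  = datetime.timedelta( days=float(73048) )
    #   new_dt = (origin + delta)    # -> datetime.datetime(2000, 1, 1, 0, 0)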
#--------------------------------------------------------------------
# def get_datetime_from_time_since_OLD(self, time_since):
#
# #---------------------------------------------------
# # datetime.timedelta has limits on inputs, e.g.
# # numpy.int32 is unsupported time for seconds arg.
# # So here we adjust big numbers for timedelta.
# # The days argument can handle really big numbers.
# #---------------------------------------------------
# maxint = 32767
# units = self.time_units # ('days', 'hours', etc.)
# n_per_day = {'seconds':86400.0, 'minutes':1440.0,
# 'hours':24.0, 'days':1.0}
# if (time_since > maxint):
# time_since = time_since / n_per_day[ units ]
# units = 'days' # (new units)
#
# #-------------------------------------------------
# # Note: We now save self.time_units_str separate
# # from self.time_units.
# #-------------------------------------------------
# delta = None
# if (units == 'days'):
# delta = datetime.timedelta( days=time_since )
# if (units == 'hours'):
# delta = datetime.timedelta( hours=time_since )
# if (units == 'minutes'):
# delta = datetime.timedelta( minutes=time_since )
# if (units == 'seconds'):
# delta = datetime.timedelta( seconds=time_since )
# #-----------------------------------------------------
# if (delta is None):
# msg = 'ERROR: Units: ' + units + ' not supported.'
# self.append_datetime_notes( msg )
# return
#
# #---------------------------------------------
# # Create new datetime object from time_since
# #---------------------------------------------
# origin_obj = self.origin_datetime_obj
# new_dt_obj = (origin_obj + delta)
# return new_dt_obj
#
# # For testing
# ## print('origin_datetime_obj =', str(origin_obj) )
# ## print('time_since delta =', str(delta) )
# ## print('new_dt_obj =', str(new_dt_obj) )
# ## return new_dt_obj
#
# # get_datetime_from_time_since()
#--------------------------------------------------------------------
def get_time_since_from_datetime(self, datetime_obj, units='days'):
#-------------------------------------------------
# Compute time duration between datetime objects
#-------------------------------------------------
origin_obj = self.origin_datetime_obj
duration_obj = (datetime_obj - origin_obj)
duration_secs = duration_obj.total_seconds()
#---------------------------------------------------
# There is not a fixed number of seconds per month
# Also 52 (weeks/year) * 7 (days/week) = 364.
#---------------------------------------------------
secs_per_unit_map = {
'years':31536000.0, 'weeks':604800.0, 'days':86400.0,
'hours':3600.0, 'minutes':60.0, 'seconds':1 }
secs_per_unit = secs_per_unit_map[ units ]
duration = (duration_secs / secs_per_unit )
time_since = duration # (in units provided)
return time_since
# get_time_since_from_datetime()
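    # Example (sketch): the inverse conversion, from a datetime back to
    # a "time since origin" value.  Values are illustrative.
    #
    #   import datetime
    #   origin = datetime.datetime(1800, 1, 1)
    #   later  = datetime.datetime(1800, 1, 11)
    #   secs   = (later - origin).total_seconds()   # 864000.0
    #   days   = (secs / 86400.0)                   # -> 10.0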
#--------------------------------------------------------------------
def get_month_difference(self, start_datetime_obj, end_datetime_obj ):
#-------------------------------------------
# Example 0: 2017-09 to 2017-09
# months = (2017-2017)*12 = 0
# months = (months - 9) = (0-9) = -0
# months = (months + 9) = 0 (as index)
#-------------------------------------------
# Example 1: 2017-09 to 2018-02
# 9:10, 10:11, 11:12, 12:1, 1:2 = 5 (if same days)
# months = (2018-2017)*12 = 12
# months = (months - 9) = 3
# months = (months + 2) = 3 + 2 = 5
#-------------------------------------------
start_year = start_datetime_obj.year
end_year = end_datetime_obj.year
months = (end_year - start_year) * 12
#-------------------------------------------
start_month = start_datetime_obj.month
end_month = end_datetime_obj.month
months = months - start_month
months = months + end_month
## months = months + 1 # (no: get 1 if dates same)
## print('month difference =', months)
return months
# get_month_difference()
#--------------------------------------------------------------------
def get_new_time_index_range(self, REPORT=True):
if not(hasattr(self, 'origin_datetime_str')):
msg = 'Sorry, origin datetime is not set.'
self.append_download_log( [msg, ' '] )
if (hasattr(self, 'time_var')):
nt = len(self.time_var)
return (0, nt - 1) # (unrestricted by choices)
else:
return (None, None)
#----------------------------------------------------
# Get min possible datetime, from time_vars.min().
# Every time_var value is measured from an "origin"
# such as: '1800-01-01 00:00:00'
#----------------------------------------------------
## origin_datetime_obj = self.origin_datetime_obj
time_since_min = self.time_var.min()
min_datetime_obj = self.get_datetime_from_time_since( time_since_min )
#-----------------------------------------------
# Get current settings from the datetime panel
#-----------------------------------------------
start_datetime_obj = self.get_start_datetime_obj()
end_datetime_obj = self.get_end_datetime_obj()
#---------------------------------------------------
# Convert dt datetime string to "timedelta" object
# e.g. 00-01-00 00:00:00
#---------------------------------------------------
# Note: datetime.timedelta() does not do "months",
# since they're not a fixed number of days,
# so we use "get_month_difference()". Also
# it does not have a "years" argument.
#---------------------------------------------------
## print('In get_new_time_index_range():')
## print('self.time_delta =', self.time_delta)
USE_LOOPS = True
(y,m1,d,h,m2,s) = self.split_datetime_str(self.time_delta, ALL=True)
## print('time_delta =', self.time_delta )
## print('y, m1, d, h, m2, s =', y, m1, d, h, m2, s )
if (m1 == 0):
d = (y*365) + d # python int(), not 2-byte int.
# print('days =', d)
dt_timedelta_obj = datetime.timedelta(days=d, hours=h, minutes=m2, seconds=s)
elif (m1 > 0 and (y+d+h+m2+s == 0)):
n_months1 = self.get_month_difference( min_datetime_obj, start_datetime_obj )
n_months2 = self.get_month_difference( min_datetime_obj, end_datetime_obj )
start_index = int(n_months1 / m1)
end_index = int(n_months2 / m1)
USE_LOOPS = False
else:
# Note: I think there is a "monthdelta" package ?
# Or we may be able to use dateutils.
print('ERROR: Cannot handle this dt case yet.')
return None
#-------------------------------------------------
# Compute start and end index into time array.
# General method, if delta_t is datetime string.
#-------------------------------------------------
if (USE_LOOPS):
start_index = 0
# print('min_datetime_str =', str(min_datetime_obj) )
# print('dt_timedelta_str =', str(dt_timedelta_obj) )
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
## print('next =', str(next))
if (next < start_datetime_obj):
start_index += 1
else: break
#-------------------------------------------------
end_index = 0
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
if (next < end_datetime_obj):
end_index += 1
else: break
#---------------------------------
# Make sure indices are in range
#---------------------------------
nt = len( self.time_var )
start_index = max(0, start_index)
end_index = min(end_index, nt-1)
#---------------------------------------
# User time period may be smaller than
# time spacing (dt).
#----------------------------------------------------
# We are using these indices like this:
# a[ t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
# So if indices are equal, result will be empty.
# If indices differ by 1, get 1 value for that dim.
#----------------------------------------------------
if (start_index == end_index):
end_index = start_index + 1
if (REPORT):
# print('n_times =', nt)
# print('New time indices =', start_index, ',', end_index)
# print()
#--------------------------
i1s = str(start_index)
i2s = str(end_index)
msg1 = 'n_times = ' + str(nt)
msg2 = 'New time indices = ' + i1s + ',' + i2s
self.append_download_log( [msg1, msg2, ' '] )
return (start_index, end_index)
# Not needed for current problem.
# days_since1 = self.get_days_since_from_datetime(start_datetime_obj)
# days_since2 = self.get_days_since_from_datetime(end_datetime_obj)
# For testing
# print('type(start_index) =', type(start_index) )
# print('type(end_index) =', type(end_index) )
# print('start_index =', start_index)
# print('end_index =', end_index)
# print('n_times =', nt)
# return (start_index, end_index)
# get_new_time_index_range()
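    # Example (sketch): how the stepping loop above restricts the start
    # index.  Numbers are illustrative: daily data (dt = 1 day) whose
    # earliest time is 1800-01-01, with a user start date of 1800-01-11.
    #
    #   import datetime, copy
    #   dt_obj = datetime.timedelta( days=1 )
    #   next   = copy.copy( datetime.datetime(1800, 1, 1) )
    #   start_datetime_obj = datetime.datetime(1800, 1, 11)
    #   start_index = 0
    #   while (True):
    #       next = (next + dt_obj)
    #       if (next < start_datetime_obj):
    #           start_index += 1
    #       else: break
    #   # -> start_index = 9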
#--------------------------------------------------------------------
def get_new_lat_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lat_name_list = ['lat', 'LAT', 'coadsy', 'COADSY',
'latitude', 'LATITUDE', 'None']
for lat_name in lat_name_list:
if (lat_name in dim_list):
break
if (lat_name == 'None'):
msg1 = 'Sorry, could not find a "latitude" variable.'
msg2 = 'Checked: lat, LAT, coadsy, COADSY,'
msg3 = ' latitude and LATITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lats for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lat_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
# Get user-select minlat and maxlat
#------------------------------------
user_minlat = self.map_minlat.value
user_maxlat = self.map_maxlat.value
#----------------------------------
# Get the array of lats, and info
#-----------------------------------------
# <class 'pydap.model.BaseType'>' object
# has no attribute 'array'
#--------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and has no attribute "array".
#--------------------------------------------------
# lats = self.dataset[ lat_name ]
# lats = self.dataset[ lat_name ].array
#----------------------------------------------------------
# Next line type: <class 'pydap.handlers.dap.BaseProxy'>
# and has no attribute "size".
#----------------------------------------------------------
# lats = self.dataset[ lat_name ].data
#----------------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and data is downloaded from server.
#----------------------------------------------------------
# lats = self.dataset[ lat_name ][:]
#----------------------------------------------------------
# Next line type: <class 'numpy.ndarray'>
#----------------------------------------------------------
lats = self.dataset[ lat_name ][:].data
if (lats.ndim > 1):
msg1 = 'Sorry, cannot yet restrict latitude indices'
msg2 = ' when lat array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lats) =', type(lats) )
# print('## lats.shape =', lats.shape )
# print('## lats =', lats )
#------------------------------------------------
# It seems that values may be reverse sorted to
# indicate that the origin is upper left corner
# Don't sort them, need indices into original.
#------------------------------------------------
if (lats[0] > lats[-1]):
origin = 'upper'
else:
origin = 'lower'
#------------------------------------------
# Compute the latitude spacing, dlat
#------------------------------------------
# This only works if lats are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlats = lats.size
minlat = lats.min()
maxlat = lats.max()
dlat = np.abs(lats[1] - lats[0])
#--------------
# Another way
#--------------
# latdif = (maxlat - minlat)
# if (CENTERS):
# dlat = (latdif / (nlats - 1))
# else:
# dlat = (latdif / nlats)
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlats )
w = np.logical_and(lats > user_minlat, lats < user_maxlat) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lat_i1 = indices[0]
lat_i2 = indices[-1]
else:
lat_i1 = 0
lat_i2 = nlats-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lat_i2 ???
#--------------------------------------
# lat_i1 = int( (user_minlat - minlat) / dlat )
# lat_i2 = int( (user_maxlat - minlat) / dlat )
# lat_i2 = (lat_i2 + 1) ########
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lat_i1 = min( max(lat_i1, 0), nlats-1 )
# lat_i2 = min( max(lat_i2, 0), nlats-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lat_i1 == lat_i2): # (still possible?)
# lat_i2 = lat_i1 + 1
if (REPORT):
print('lat_name =', lat_name)
print('minlat =', minlat, '(var)' )
print('maxlat =', maxlat, '(var)' )
print('dlat =', dlat)
print('u_minlat =', user_minlat, '(user)' )
print('u_maxlat =', user_maxlat, '(user)' )
print('lat_i1 =', lat_i1, '(new index)')
print('lat_i2 =', lat_i2, '(new index)')
# print('nlats =', nlats)
# print('New latitude indices =', lat_i1, ',', lat_i2)
# print()
#-------------------------------
i1s = str(lat_i1)
i2s = str(lat_i2)
msg1 = 'lat_name = ' + lat_name
msg2 = 'dlat = ' + str(dlat)
msg3 = 'nlats = ' + str(nlats)
msg4 = 'min, max = ' + str(minlat) + ', ' + str(maxlat) + ' (data)'
msg5 = 'min, max = ' + str(user_minlat) + ', ' + str(user_maxlat) + ' (user)'
msg6 = 'New latitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lat_i1, lat_i2)
# get_new_lat_index_range()
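    # Example (sketch): the index-restriction step above, shown on a
    # small, illustrative latitude vector.
    #
    #   import numpy as np
    #   lats        = np.array([ -30., -10., 10., 30., 50. ])
    #   all_indices = np.arange( lats.size )
    #   w           = np.logical_and( lats > -15.0, lats < 35.0 )
    #   indices     = all_indices[w]                 # -> array([1, 2, 3])
    #   lat_i1, lat_i2 = indices[0], indices[-1]     # -> 1, 3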
#--------------------------------------------------------------------
def get_new_lon_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lon_name_list = ['lon', 'LON', 'coadsx', 'COADSX',
'longitude', 'LONGITUDE', 'None']
for lon_name in lon_name_list:
if (lon_name in dim_list):
break
if (lon_name == 'None'):
msg1 = 'Sorry, could not find a "longitude" variable.'
msg2 = 'Checked: lon, LON, coadsx, COADSX,'
msg3 = ' longitude and LONGITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lons for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lon_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
# Get user-select minlat and maxlat
#------------------------------------
user_minlon = self.map_minlon.value
user_maxlon = self.map_maxlon.value
#----------------------------------
# Get the array of lons, and info
#----------------------------------
lons = self.dataset[ lon_name ][:].data
if (lons.ndim > 1):
msg1 = 'Sorry, cannot yet restrict longitude indices'
msg2 = ' when lon array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lons) =', type(lons) )
# print('## lons.shape =', lons.shape )
# print('## lons.ndim =', lons.ndim )
#------------------------------------------
# Compute the longitude spacing, dlon
#------------------------------------------
# This only works if lons are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlons = lons.size
minlon = lons.min()
maxlon = lons.max()
dlon = np.abs(lons[1] - lons[0])
#--------------
# Another way
#--------------
# londif = (maxlon - minlon)
# if (CENTERS):
# dlon = (londif / (nlons - 1))
# else:
# dlon = (londif / nlons)
#-----------------------------------------
# Convert lons to have range [-180,180]?
#-----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #####################
# user_maxlon = ((user_maxlon + 180.0) % 360) - 180
# user_minlon = ((user_minlon + 180.0) % 360) - 180
# if (user_minlon > user_maxlon):
# user_minlon -= 180.0
#-------------------------------------------
# Convert user lons to have range [0,360]?
#-------------------------------------------
if (minlon >= 0) and (maxlon <= 360):
user_minlon = (user_minlon + 360.0) % 360
user_maxlon = (user_maxlon + 360.0) % 360
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlons )
w = np.logical_and(lons > user_minlon, lons < user_maxlon) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lon_i1 = indices[0]
lon_i2 = indices[-1]
else:
lon_i1 = 0
lon_i2 = nlons-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lon_i2 ???
#--------------------------------------
# lon_i1 = int( (user_minlon - minlon) / dlon )
# lon_i2 = int( (user_maxlon - minlon) / dlon )
# lon_i2 = lon_i2 + 1 #######
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lon_i1 = min( max(lon_i1, 0), nlons-1 )
# lon_i2 = min( max(lon_i2, 0), nlons-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lon_i1 == lon_i2): # (still needed?)
# lon_i2 = lon_i1 + 1
if (REPORT):
print()
print('lon_name =', lon_name)
print('minlon =', minlon, '(var)')
print('maxlon =', maxlon, '(var)')
print('dlon =', dlon)
print('u_minlon =', user_minlon, '(user)')
print('u_maxlon =', user_maxlon, '(user)')
print('lon_i1 =', lon_i1, '(new index)')
print('lon_i2 =', lon_i2, '(new index)')
# print('nlons =', nlons)
# print('New longitude indices =', lon_i1, ',', lon_i2 )
# print()
#--------------------------------------------------
i1s = str(lon_i1)
i2s = str(lon_i2)
msg1 = 'lon_name = ' + lon_name
msg2 = 'dlon = ' + str(dlon)
msg3 = 'nlons = ' + str(nlons)
msg4 = 'min, max = ' + str(minlon) + ', ' + str(maxlon) + ' (data)'
msg5 = 'min, max = ' + str(user_minlon) + ', ' + str(user_maxlon) + ' (user)'
msg6 = 'New longitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lon_i1, lon_i2)
# get_new_lon_index_range()
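    # Example (sketch): the longitude-range conversion used above when
    # the data longitudes span [0, 360] but the user entered values in
    # [-180, 180].  Values are illustrative.
    #
    #   user_minlon = -70.0
    #   user_maxlon = -60.0
    #   user_minlon = (user_minlon + 360.0) % 360    # -> 290.0
    #   user_maxlon = (user_maxlon + 360.0) % 360    # -> 300.0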
#--------------------------------------------------------------------
def get_duration(self, start_date=None, start_time=None,
end_date=None, end_time=None,
dur_units=None, REPORT=False):
#------------------------------------------------
# Note: Compute time span between 2 datetimes.
#------------------------------------------------
## date_sep = '/'
date_sep = '-'
time_sep = ':'
#-------------------------------------
# Get parts of the start date & time
#-------------------------------------
(y1, m1, d1) = self.split_date_str( start_date )
(h1, mm1, s1) = self.split_time_str( start_time )
#-----------------------------------
# Get parts of the end date & time
#-----------------------------------
(y2, m2, d2) = self.split_date_str( end_date )
(h2, mm2, s2) = self.split_time_str( end_time )
#------------------------------
# Convert to datetime objects
#------------------------------
start_obj = datetime.datetime(y1, m1, d1, h1, mm1, s1)
end_obj = datetime.datetime(y2, m2, d2, h2, mm2, s2)
#---------------------------------------------
        # Compute time duration between start and end
#---------------------------------------------
duration_obj = (end_obj - start_obj)
duration_secs = duration_obj.total_seconds()
#-----------------------------------------
# Convert duration to dur_units provided
#-----------------------------------------
if (dur_units == 'seconds'):
duration = duration_secs
elif (dur_units == 'minutes'):
duration = (duration_secs / 60.0)
elif (dur_units == 'hours'):
duration = (duration_secs / 3600.0)
elif (dur_units == 'days'):
duration = (duration_secs / 86400.0)
elif (dur_units == 'years'):
duration = (duration_secs / 31536000.0)
else:
print('Unknown duration units = ' + dur_units + '.')
print('Returning duration in hours.')
duration = (duration_secs / 3600.0)
if (REPORT):
print( 'duration =', duration, '[' + dur_units + ']' )
return duration
#-----------------------------------------
# Alternate approach, where dur_units is
# determined and then returned
#-----------------------------------------
# if (duration_secs < 60):
# duration = duration_secs
# dur_units = 'seconds'
# elif (duration_secs < 3600):
# duration = divmod( duration_secs, 60 )[0]
# dur_units = 'minutes'
# elif (duration_secs < 86400):
# duration = divmod( duration_secs, 3600 )[0]
# dur_units = 'hours'
# elif (duration_secs < 31536000):
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
# else:
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
#
# return (duration, dur_units)
# get_duration()
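    # Example (sketch): computing a duration in hours between two
    # illustrative datetimes, as done above.
    #
    #   import datetime
    #   start_obj = datetime.datetime(2000, 1, 1, 0, 0, 0)
    #   end_obj   = datetime.datetime(2000, 1, 2, 12, 0, 0)
    #   secs      = (end_obj - start_obj).total_seconds()   # 129600.0
    #   hours     = (secs / 3600.0)                         # -> 36.0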
#--------------------------------------------------------------------
def get_download_format(self):
return self.download_format.value
# get_download_format()
#--------------------------------------------------------------------
def clear_download_log(self):
self.download_log.value = ''
# clear_download_log()
#--------------------------------------------------------------------
def append_download_log(self, msg):
## type_str = str( type(msg) )
## if (type_str == "<class 'list'>"):
if (isinstance( msg, list)):
for string in msg:
self.download_log.value += (string + '\n')
else:
self.download_log.value += (msg + '\n')
# append_download_log()
#--------------------------------------------------------------------
def print_user_choices(self):
if not(hasattr(self, 'dataset')):
msg = 'ERROR: No dataset has been selected.'
self.append_download_log( msg )
return ############
start_datetime_obj = self.get_start_datetime_obj()
if (start_datetime_obj is not None):
start_date = str( start_datetime_obj.date() )
start_time = str( start_datetime_obj.time() )
else:
start_date = 'unknown'
start_time = 'unknown'
end_datetime_obj = self.get_end_datetime_obj()
if (end_datetime_obj is not None):
end_date = str( end_datetime_obj.date() )
end_time = str( end_datetime_obj.time() )
else:
end_date = 'unknown'
end_time = 'unknown'
#------------------------------------------
# Show message in downloads panel log box
#------------------------------------------
msg1 = 'var short name = ' + self.get_var_shortname()
msg2 = 'download format = ' + self.get_download_format()
msg3 = 'map bounds = ' + str(self.get_map_bounds( FROM_MAP=False ))
msg4 = 'start date and time = ' + start_date + ' ' + start_time
msg5 = 'end date and time = ' + end_date + ' ' + end_time
## msg6 = 'opendap package = ' + self.get_opendap_package()
msgs = [msg1, msg2, msg3, msg4, msg5]
self.append_download_log( msgs )
# print_user_choices()
#--------------------------------------------------------------------
def download_data(self, caller_obj=None):
#-------------------------------------------------
# Note: After a reset, self still has a dataset,
# but short_name was reset to ''.
#-------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
msg = 'Sorry, no variable has been selected.'
self.download_log.value = msg
return
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## status = self.download_status
self.print_user_choices()
#--------------------------------------------------
# print_user_choices() already displayed error msg
#--------------------------------------------------
if not(hasattr(self, 'dataset')):
return
#----------------------------------------
# Get names of the variables dimensions
#----------------------------------------
dim_list = self.dataset[ short_name ].dimensions
#--------------------------------------
# Uncomment to test other time_deltas
#------------------------------------------
# If test time_delta is too small, we'll
# get a start_index that is out of range.
# Next 3 worked in some SST tests.
#------------------------------------------
# self.time_delta = '0000-02-00 00:00:00'
# self.time_delta = '0000-00-30 12:00:00'
# self.time_delta = '0001-00-00 00:00:00'
#----------------------------------------------
# Is there a time variable ? If so, use time
# range selected in GUI to clip the data.
#----------------------------------------------
(t_i1, t_i2) = self.get_new_time_index_range( REPORT=True)
#--------------------------------------------
# Is there a lat variable ? If so, use lat
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lat_i1, lat_i2) = self.get_new_lat_index_range( REPORT=True)
#--------------------------------------------
# Is there a lon variable ? If so, use lon
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lon_i1, lon_i2) = self.get_new_lon_index_range( REPORT=True)
#--------------------------------------
# Did user set a spatial resolution ?
#--------------------------------------
        # Asynchronous download. How do we know it's here?
# print('Downloading variable:', short_name, '...' )
# print('Variable saved in: balto.user_var')
# print()
msg1 = 'Downloading variable: ' + short_name + '...'
msg2 = 'Variable saved in: balto.user_var'
msg3 = ' '
self.append_download_log( [msg1, msg2, msg3] )
#---------------------------------------------
# Convert reference to actual numpy variable
# which causes it to be downloaded, and then
# store it into balto.user_var.
#---------------------------------------------------
# This grid includes var and its dimension vectors.
# Note: type(pydap_grid) = pydap.model.GridType
#---------------------------------------------------
pydap_grid = self.dataset[ short_name ]
ndims = len( pydap_grid.dimensions ) # (e.g. time, lat, lon)
## data_obj = self.dataset[ short_name ]
## data_dims = data_obj.dimensions
## ndim = len( data_dims )
#------------------------------------------------
# Actually download the data here to a variable
# in the notebook, but restrict indices first,
# to only download the required data.
#------------------------------------------------
if (ndims == 3):
#-------------------------------------
# Assume dims are: (time, lat, lon)
#------------------------------------------
# After subscripting, grid still has type:
# pydap.model.GridType
#------------------------------------------
if (lat_i1 is None) or (lon_i1 is None):
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2, :, :]
else:
if (t_i1 is None):
grid = pydap_grid[:, lat_i1:lat_i2, lon_i1:lon_i2]
else:
grid = pydap_grid[t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
#----------------------------------------
elif (ndims == 1): # time series
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2]
#-----------------------------------
elif (ndims == 2): # spatial grid
#-------------------------------
# Assume dims are: (lat, lon)
#-------------------------------
if (lat_i1 is None) or (lon_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[lat_i1:lat_i2, lon_i1:lon_i2]
#------------------------------------
else:
grid = pydap_grid[:]
#--------------------------------------------------
# Note: type(pydap_grid) = pydap.model.gridtype
# type(grid) = pydap.model.gridtype
# type(grid[:].data) = list
# type(grid.data) = list
#--------------------------------------------------
# Subscript by *ranges* doesn't change data type.
#--------------------------------------------------
grid_list = grid.data ########
n_list = len(grid_list)
var = grid_list[0]
# For testing
# print('## type(grid) =', type(grid) )
# print('## type(grid.data) =', type(grid_list) )
# print('## len(grid.data) =', n_list )
# print('## type(var) =', type(var) )
# print()
times = None # (defaults)
lats = None
lons = None
if (n_list > 1):
times = grid_list[1]
if (n_list > 2):
lats = grid_list[2]
if (n_list > 3):
lons = grid_list[3]
#----------------------------------------------
# Are lats in reverse order ? (2020-12-12)
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# origin = None
# if (lats is not None):
# if (lats[0] > lats[-1]):
# origin = 'upper' # (row major?)
# lats.sort() #############################
# else:
# origin = 'lower'
#----------------------------------------------
# Adjust the longitudes ?
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# if (n_list > 3):
# SIGNED_LONS = True
# if (SIGNED_LONS):
# #----------------------------------------
# # Convert lons to have range [-180,180]
# #----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #################
#-----------------------------
# Is there a missing value ?
# Is there a fill value ?
#-----------------------------
atts = pydap_grid.attributes
REPLACE_MISSING = False
if ('missing_value' in atts.keys()):
REPLACE_MISSING = True
missing_value = pydap_grid.attributes['missing_value']
w = (var == missing_value)
#---------------------------------------
# Is there a scale factor and offset ?
#---------------------------------------
if ('scale_factor' in atts.keys()):
#---------------------------------------------------
# Note: var may have type ">i2" while scale_factor
# may have type "float64", so need to upcast
# var and can't use "*="
#---------------------------------------------------
factor = pydap_grid.attributes['scale_factor']
## print('type(var) =', type(var))
## print('type(factor) =', type(factor))
var = var * factor
if ('add_offset' in atts.keys()):
offset = pydap_grid.attributes['add_offset']
## print('type(var) =', type(var))
## print('type(offset) =', type(offset))
var = var + offset
#-----------------------------------------
# Restore missing values after scaling ?
#-----------------------------------------
if (REPLACE_MISSING):
var[w] = missing_value
#-----------------------------------------
# Save var into balto object as user_var
#-----------------------------------------
self.user_var = var
self.user_var_times = times # (maybe None)
self.user_var_lats = lats # (maybe None)
self.user_var_lons = lons # (maybe None)
#----------------------------------------------------
# Could define self.user_var as a list, and append
# new variables to the list as downloaded.
# Could also put them into a dictionary.
#----------------------------------------------------
# download_data()
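    # Example (sketch): how packed data values are unpacked above using
    # the CF "scale_factor" and "add_offset" attributes.  The numbers
    # are illustrative (e.g. SST stored as 2-byte integers).
    #
    #   import numpy as np
    #   packed = np.array([ 1200, 1250, -32768 ], dtype='int16')
    #   missing_value = -32768
    #   w   = (packed == missing_value)
    #   var = (packed * 0.01) + 273.15     # scale_factor, add_offset
    #   var[w] = missing_value             # restore missing values
    #   # -> array([ 285.15,  285.65, -32768. ])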
#--------------------------------------------------------------------
def show_grid(self, grid, var_name=None, extent=None,
cmap='rainbow', xsize=8, ysize=8 ):
#---------------------------------------------------
# Note: extent = [minlon, maxlon, minlat, maxlat]
# But get_map_bounds() returns:
# (minlon, minlat, maxlon, maxlat)
#---------------------------------------------------
if (grid.ndim != 2):
print('Sorry, show_grid() only works for 2D arrays.')
return
if (var_name is None):
var_name = self.data_var_long_name.value
## var_name = self.data_var_name.value
if (extent is None):
extent = self.get_map_bounds(style='plt.imshow')
## (minlon, minlat, maxlon, maxlat) = self.get_map_bounds()
## extent = [minlon, maxlon, minlat, maxlat]
bp.show_grid_as_image( grid, var_name, extent=extent,
cmap='rainbow', stretch='hist_equal',
xsize=xsize, ysize=ysize,
nodata_value=None )
## NO_SHOW=False, im_file=None,
# show_grid()
#--------------------------------------------------------------------
def get_opendap_package(self):
return self.prefs_package.value
#--------------------------------------------------------------------
def get_abbreviated_var_name(self, abbreviation ):
map = {
'lat' : ['geodetic_latitude', 'quantity'],
'lon' : ['geodetic_longitude', 'quantity'],
'sst' : ['sea_surface__temperature', 'variable'],
'temp': ['temperature', 'quantity'],
'x' : ['x-coordinate', 'quantity'],
'y' : ['y-coordinate', 'quantity'],
'z' : ['z-coordinate', 'quantity'] }
try:
return map[ abbreviation ]
except:
print('Sorry, no matches found for abbreviation.')
# get_abbreviated_var_name()
#--------------------------------------------------------------------
def get_possible_svo_names(self, var_name, SHOW_IRI=False):
#-----------------------------------------------------
# Use the SVO "match phrase" service to get a
# ranked list of possible SVO variable name matches.
#-----------------------------------------------------
# var_name should be a list of words, as a single
# string, separated by underscores.
#-----------------------------------------------------
var_name2 = var_name.replace(' ', '_')
match_phrase_svc = 'http://34.73.227.230:8000/match_phrase/'
match_phrase_url = match_phrase_svc + var_name2 + '/'
print('Working...')
#-----------------------------------------------------------------
# The result is in JSON format, for example:
# result = { "results": [
# {"IRI":"result1_IRI", "label":"result1_label", "matchrank": "result1_rank"},
# {"IRI":"result2_IRI", "label":"result2_label", "matchrank": "result2_rank"} ] }
#------------------------------------------------------------------
result = requests.get( match_phrase_url )
print('Finished.')
print()
json_str = result.text
# print( json_str )
json_data = json.loads( json_str )
match_list = json_data['results']
for item in match_list:
## print('item =', item)
if (SHOW_IRI):
print('IRI =', item['IRI'])
print('label =', item['label'])
print('rank =', item['matchrank'])
print()
# get_possible_svo_names()
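    # Example (sketch): a typical call to the SVO "match phrase" wrapper
    # above, assuming "balto" is an instance of this class.  The printed
    # labels shown are illustrative, not guaranteed output.
    #
    #   balto.get_possible_svo_names( 'sea surface temperature' )
    #   # Prints a ranked list of candidate SVO labels, e.g.
    #   #   label = sea_surface__temperature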
#-------------------------------------------------------------------
|
[
"IPython.display.display",
"ipywidgets.VBox",
"ipywidgets.Dropdown",
"ipyleaflet.FullScreenControl",
"ipywidgets.BoundedIntText",
"datetime.timedelta",
"copy.copy",
"numpy.arange",
"ipywidgets.HBox",
"datetime.datetime",
"ipywidgets.Button",
"balto_plot.show_grid_as_image",
"ipywidgets.Output",
"ipyleaflet.MeasureControl",
"traitlets.Tuple",
"ipywidgets.HTML",
"numpy.abs",
"json.loads",
"requests.get",
"ipywidgets.Layout",
"numpy.logical_and"
] |
[((7936, 7952), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (7950, 7952), True, 'import ipywidgets as widgets\n'), ((7961, 7990), 'IPython.display.display', 'display', (['self.gui', 'gui_output'], {}), '(self.gui, gui_output)\n', (7968, 7990), False, 'from IPython.display import display, HTML\n'), ((9529, 9599), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<b><font size=4>BALTO User Interface</font></b>"""'}), "(value=f'<b><font size=4>BALTO User Interface</font></b>')\n", (9541, 9599), True, 'import ipywidgets as widgets\n'), ((9742, 9767), 'ipywidgets.VBox', 'widgets.VBox', (['[head, acc]'], {}), '([head, acc])\n', (9754, 9767), True, 'import ipywidgets as widgets\n'), ((11753, 11823), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<b><font size=5>BALTO User Interface</font></b>"""'}), "(value=f'<b><font size=5>BALTO User Interface</font></b>')\n", (11765, 11823), True, 'import ipywidgets as widgets\n'), ((11951, 11976), 'ipywidgets.VBox', 'widgets.VBox', (['[head, tab]'], {}), '([head, tab])\n', (11963, 11976), True, 'import ipywidgets as widgets\n'), ((15890, 15912), 'ipywidgets.HBox', 'widgets.HBox', (['[o1, b1]'], {}), '([o1, b1])\n', (15902, 15912), True, 'import ipywidgets as widgets\n'), ((15961, 15983), 'ipywidgets.HBox', 'widgets.HBox', (['[o9, b2]'], {}), '([o9, b2])\n', (15973, 15983), True, 'import ipywidgets as widgets\n'), ((16032, 16054), 'ipywidgets.VBox', 'widgets.VBox', (['[o3, o5]'], {}), '([o3, o5])\n', (16044, 16054), True, 'import ipywidgets as widgets\n'), ((16119, 16141), 'ipywidgets.VBox', 'widgets.VBox', (['[o4, o6]'], {}), '([o4, o6])\n', (16131, 16141), True, 'import ipywidgets as widgets\n'), ((16161, 16195), 'ipywidgets.HBox', 'widgets.HBox', (['[name_box, unit_box]'], {}), '([name_box, unit_box])\n', (16173, 16195), True, 'import ipywidgets as widgets\n'), ((16281, 16339), 'ipywidgets.VBox', 'widgets.VBox', (['[url_box, o2, oL, mid_box, o7, o8, stat_box]'], {}), '([url_box, o2, oL, mid_box, o7, o8, stat_box])\n', (16293, 16339), True, 'import ipywidgets as widgets\n'), ((22714, 22740), 'ipywidgets.HTML', 'widgets.HTML', (["(' ' * 2)"], {}), "(' ' * 2)\n", (22726, 22740), True, 'import ipywidgets as widgets\n'), ((23519, 23541), 'ipywidgets.VBox', 'widgets.VBox', (['[w1, w2]'], {}), '([w1, w2])\n', (23531, 23541), True, 'import ipywidgets as widgets\n'), ((23558, 23580), 'ipywidgets.VBox', 'widgets.VBox', (['[w3, w4]'], {}), '([w3, w4])\n', (23570, 23580), True, 'import ipywidgets as widgets\n'), ((23597, 23619), 'ipywidgets.VBox', 'widgets.VBox', (['[pd, pd]'], {}), '([pd, pd])\n', (23609, 23619), True, 'import ipywidgets as widgets\n'), ((23636, 23658), 'ipywidgets.VBox', 'widgets.VBox', (['[b1, b2]'], {}), '([b1, b2])\n', (23648, 23658), True, 'import ipywidgets as widgets\n'), ((23675, 23713), 'ipywidgets.HBox', 'widgets.HBox', (['[lons, lats, pads, btns]'], {}), '([lons, lats, pads, btns])\n', (23687, 23713), True, 'import ipywidgets as widgets\n'), ((28738, 28764), 'ipywidgets.HTML', 'widgets.HTML', (["(' ' * 3)"], {}), "(' ' * 3)\n", (28750, 28764), True, 'import ipywidgets as widgets\n'), ((29995, 30017), 'ipywidgets.VBox', 'widgets.VBox', (['[d1, d2]'], {}), '([d1, d2])\n', (30007, 30017), True, 'import ipywidgets as widgets\n'), ((30034, 30056), 'ipywidgets.VBox', 'widgets.VBox', (['[d3, d4]'], {}), '([d3, d4])\n', (30046, 30056), True, 'import ipywidgets as widgets\n'), ((30073, 30095), 'ipywidgets.VBox', 'widgets.VBox', (['[d5, d6]'], {}), '([d5, d6])\n', (30085, 30095), True, 'import ipywidgets as widgets\n'), 
((30112, 30134), 'ipywidgets.VBox', 'widgets.VBox', (['[pp, pp]'], {}), '([pp, pp])\n', (30124, 30134), True, 'import ipywidgets as widgets\n'), ((30151, 30191), 'ipywidgets.HBox', 'widgets.HBox', (['[dates, times, pad, hints]'], {}), '([dates, times, pad, hints])\n', (30163, 30191), True, 'import ipywidgets as widgets\n'), ((30208, 30235), 'ipywidgets.VBox', 'widgets.VBox', (['[top, d7, d8]'], {}), '([top, d7, d8])\n', (30220, 30235), True, 'import ipywidgets as widgets\n'), ((30780, 30929), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""Download Format:"""', 'options': "['HDF', 'netCDF', 'netCDF4', 'ASCII']", 'value': '"""netCDF"""', 'disabled': '(False)', 'style': 'init_style'}), "(description='Download Format:', options=['HDF', 'netCDF',\n 'netCDF4', 'ASCII'], value='netCDF', disabled=False, style=init_style)\n", (30796, 30929), True, 'import ipywidgets as widgets\n'), ((31034, 31065), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<p> </p>"""'}), "(value=f'<p> </p>')\n", (31046, 31065), True, 'import ipywidgets as widgets\n'), ((31092, 31130), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Download"""'}), "(description='Download')\n", (31106, 31130), True, 'import ipywidgets as widgets\n'), ((31145, 31172), 'ipywidgets.HBox', 'widgets.HBox', (['[f1, pad, b3]'], {}), '([f1, pad, b3])\n', (31157, 31172), True, 'import ipywidgets as widgets\n'), ((31817, 31840), 'ipywidgets.VBox', 'widgets.VBox', (['[h3, log]'], {}), '([h3, log])\n', (31829, 31840), True, 'import ipywidgets as widgets\n'), ((32418, 32549), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""OpenDAP package:"""', 'options': "['pydap', 'netcdf4']", 'value': '"""pydap"""', 'disabled': '(False)', 'style': 'left_style'}), "(description='OpenDAP package:', options=['pydap',\n 'netcdf4'], value='pydap', disabled=False, style=left_style)\n", (32434, 32549), True, 'import ipywidgets as widgets\n'), ((32711, 32831), 'ipywidgets.BoundedIntText', 'widgets.BoundedIntText', ([], {'description': '"""Timeout:"""', 'value': 'ts', 'min': '(10)', 'max': '(1000)', 'step': '(1)', 'disabled': '(False)', 'style': 'left_style'}), "(description='Timeout:', value=ts, min=10, max=1000,\n step=1, disabled=False, style=left_style)\n", (32733, 32831), True, 'import ipywidgets as widgets\n'), ((33034, 33056), 'ipywidgets.HBox', 'widgets.HBox', (['[t1, t2]'], {}), '([t1, t2])\n', (33046, 33056), True, 'import ipywidgets as widgets\n'), ((33378, 33404), 'ipywidgets.VBox', 'widgets.VBox', (['[w1, w2, w3]'], {}), '([w1, w2, w3])\n', (33390, 33404), True, 'import ipywidgets as widgets\n'), ((38674, 38681), 'traitlets.Tuple', 'Tuple', ([], {}), '()\n', (38679, 38681), False, 'from traitlets import Tuple\n'), ((42594, 42631), 'requests.get', 'requests.get', (['self.data_url_dir.value'], {}), '(self.data_url_dir.value)\n', (42606, 42631), False, 'import requests\n'), ((70721, 70758), 'datetime.datetime', 'datetime.datetime', (['y', 'm1', 'd', 'h', 'm2', 's'], {}), '(y, m1, d, h, m2, s)\n', (70738, 70758), False, 'import datetime\n'), ((71182, 71219), 'datetime.datetime', 'datetime.datetime', (['y', 'm1', 'd', 'h', 'm2', 's'], {}), '(y, m1, d, h, m2, s)\n', (71199, 71219), False, 'import datetime\n'), ((91452, 91477), 'numpy.abs', 'np.abs', (['(lats[1] - lats[0])'], {}), '(lats[1] - lats[0])\n', (91458, 91477), True, 'import numpy as np\n'), ((91908, 91924), 'numpy.arange', 'np.arange', (['nlats'], {}), '(nlats)\n', (91917, 91924), True, 'import numpy as np\n'), ((91939, 91993), 'numpy.logical_and', 
'np.logical_and', (['(lats > user_minlat)', '(lats < user_maxlat)'], {}), '(lats > user_minlat, lats < user_maxlat)\n', (91953, 91993), True, 'import numpy as np\n'), ((97331, 97356), 'numpy.abs', 'np.abs', (['(lons[1] - lons[0])'], {}), '(lons[1] - lons[0])\n', (97337, 97356), True, 'import numpy as np\n'), ((98555, 98571), 'numpy.arange', 'np.arange', (['nlons'], {}), '(nlons)\n', (98564, 98571), True, 'import numpy as np\n'), ((98586, 98640), 'numpy.logical_and', 'np.logical_and', (['(lons > user_minlon)', '(lons < user_maxlon)'], {}), '(lons > user_minlon, lons < user_maxlon)\n', (98600, 98640), True, 'import numpy as np\n'), ((102245, 102287), 'datetime.datetime', 'datetime.datetime', (['y1', 'm1', 'd1', 'h1', 'mm1', 's1'], {}), '(y1, m1, d1, h1, mm1, s1)\n', (102262, 102287), False, 'import datetime\n'), ((102308, 102350), 'datetime.datetime', 'datetime.datetime', (['y2', 'm2', 'd2', 'h2', 'mm2', 's2'], {}), '(y2, m2, d2, h2, mm2, s2)\n', (102325, 102350), False, 'import datetime\n'), ((118063, 118202), 'balto_plot.show_grid_as_image', 'bp.show_grid_as_image', (['grid', 'var_name'], {'extent': 'extent', 'cmap': '"""rainbow"""', 'stretch': '"""hist_equal"""', 'xsize': 'xsize', 'ysize': 'ysize', 'nodata_value': 'None'}), "(grid, var_name, extent=extent, cmap='rainbow',\n stretch='hist_equal', xsize=xsize, ysize=ysize, nodata_value=None)\n", (118084, 118202), True, 'import balto_plot as bp\n'), ((120396, 120426), 'requests.get', 'requests.get', (['match_phrase_url'], {}), '(match_phrase_url)\n', (120408, 120426), False, 'import requests\n'), ((120553, 120573), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (120563, 120573), False, 'import json\n'), ((12562, 12583), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 's'}), '(value=s)\n', (12574, 12583), True, 'import ipywidgets as widgets\n'), ((12647, 12668), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 's'}), '(value=s)\n', (12659, 12668), True, 'import ipywidgets as widgets\n'), ((21018, 21117), 'ipyleaflet.MeasureControl', 'MeasureControl', ([], {'position': '"""bottomright"""', 'active_color': '"""orange"""', 'primary_length_unit': '"""kilometers"""'}), "(position='bottomright', active_color='orange',\n primary_length_unit='kilometers')\n", (21032, 21117), False, 'from ipyleaflet import MeasureControl, Rectangle\n'), ((24135, 24162), 'ipywidgets.VBox', 'widgets.VBox', (['[m, bbox, bm]'], {}), '([m, bbox, bm])\n', (24147, 24162), True, 'import ipywidgets as widgets\n'), ((24199, 24223), 'ipywidgets.VBox', 'widgets.VBox', (['[bbox, bm]'], {}), '([bbox, bm])\n', (24211, 24223), True, 'import ipywidgets as widgets\n'), ((75577, 75613), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'time_since2'}), '(days=time_since2)\n', (75595, 75613), False, 'import datetime\n'), ((75667, 75704), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'time_since2'}), '(hours=time_since2)\n', (75685, 75704), False, 'import datetime\n'), ((75760, 75799), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'time_since2'}), '(minutes=time_since2)\n', (75778, 75799), False, 'import datetime\n'), ((75855, 75894), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'time_since2'}), '(seconds=time_since2)\n', (75873, 75894), False, 'import datetime\n'), ((83443, 83501), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd', 'hours': 'h', 'minutes': 'm2', 'seconds': 's'}), '(days=d, hours=h, minutes=m2, seconds=s)\n', (83461, 83501), False, 'import datetime\n'), ((84509, 84536), 'copy.copy', 
'copy.copy', (['min_datetime_obj'], {}), '(min_datetime_obj)\n', (84518, 84536), False, 'import copy\n'), ((84881, 84908), 'copy.copy', 'copy.copy', (['min_datetime_obj'], {}), '(min_datetime_obj)\n', (84890, 84908), False, 'import copy\n'), ((9086, 9112), 'ipywidgets.Layout', 'Layout', ([], {'width': 'gui_width_px'}), '(width=gui_width_px)\n', (9092, 9112), False, 'from ipywidgets import Layout\n'), ((11237, 11263), 'ipywidgets.Layout', 'Layout', ([], {'width': 'gui_width_px'}), '(width=gui_width_px)\n', (11243, 11263), False, 'from ipywidgets import Layout\n'), ((13568, 13595), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (13574, 13595), False, 'from ipywidgets import Layout\n'), ((13650, 13676), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (13656, 13676), False, 'from ipywidgets import Layout\n'), ((13892, 13919), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (13898, 13919), False, 'from ipywidgets import Layout\n'), ((14111, 14138), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (14117, 14138), False, 'from ipywidgets import Layout\n'), ((14412, 14439), 'ipywidgets.Layout', 'Layout', ([], {'width': 'left_width_px'}), '(width=left_width_px)\n', (14418, 14439), False, 'from ipywidgets import Layout\n'), ((14551, 14578), 'ipywidgets.Layout', 'Layout', ([], {'width': 'next_width_px'}), '(width=next_width_px)\n', (14557, 14578), False, 'from ipywidgets import Layout\n'), ((14771, 14798), 'ipywidgets.Layout', 'Layout', ([], {'width': 'left_width_px'}), '(width=left_width_px)\n', (14777, 14798), False, 'from ipywidgets import Layout\n'), ((14910, 14937), 'ipywidgets.Layout', 'Layout', ([], {'width': 'next_width_px'}), '(width=next_width_px)\n', (14916, 14937), False, 'from ipywidgets import Layout\n'), ((15129, 15156), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15135, 15156), False, 'from ipywidgets import Layout\n'), ((15375, 15402), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15381, 15402), False, 'from ipywidgets import Layout\n'), ((15521, 15548), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15527, 15548), False, 'from ipywidgets import Layout\n'), ((15619, 15645), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (15625, 15645), False, 'from ipywidgets import Layout\n'), ((20395, 20443), 'ipywidgets.Layout', 'Layout', ([], {'width': 'map_width_px', 'height': 'map_height_px'}), '(width=map_width_px, height=map_height_px)\n', (20401, 20443), False, 'from ipywidgets import Layout\n'), ((20625, 20663), 'ipyleaflet.FullScreenControl', 'FullScreenControl', ([], {'position': '"""topright"""'}), "(position='topright')\n", (20642, 20663), False, 'from ipyleaflet import Map, basemaps, FullScreenControl\n'), ((21874, 21901), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (21880, 21901), False, 'from ipywidgets import Layout\n'), ((22110, 22137), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22116, 22137), False, 'from ipywidgets import Layout\n'), ((22389, 22416), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22395, 22416), False, 'from ipywidgets import Layout\n'), ((22669, 22696), 
'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22675, 22696), False, 'from ipywidgets import Layout\n'), ((22843, 22869), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (22849, 22869), False, 'from ipywidgets import Layout\n'), ((22955, 22981), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (22961, 22981), False, 'from ipywidgets import Layout\n'), ((23342, 23363), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""360px"""'}), "(width='360px')\n", (23348, 23363), False, 'from ipywidgets import Layout\n'), ((27953, 27980), 'ipywidgets.Layout', 'Layout', ([], {'width': 'date_width_px'}), '(width=date_width_px)\n', (27959, 27980), False, 'from ipywidgets import Layout\n'), ((28122, 28149), 'ipywidgets.Layout', 'Layout', ([], {'width': 'date_width_px'}), '(width=date_width_px)\n', (28128, 28149), False, 'from ipywidgets import Layout\n'), ((28287, 28314), 'ipywidgets.Layout', 'Layout', ([], {'width': 'time_width_px'}), '(width=time_width_px)\n', (28293, 28314), False, 'from ipywidgets import Layout\n'), ((28450, 28477), 'ipywidgets.Layout', 'Layout', ([], {'width': 'time_width_px'}), '(width=time_width_px)\n', (28456, 28477), False, 'from ipywidgets import Layout\n'), ((28859, 28886), 'ipywidgets.Layout', 'Layout', ([], {'width': 'hint_width_px'}), '(width=hint_width_px)\n', (28865, 28886), False, 'from ipywidgets import Layout\n'), ((29101, 29128), 'ipywidgets.Layout', 'Layout', ([], {'width': 'hint_width_px'}), '(width=hint_width_px)\n', (29107, 29128), False, 'from ipywidgets import Layout\n'), ((29482, 29513), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px'}), '(width=full_box_width_px)\n', (29488, 29513), False, 'from ipywidgets import Layout\n'), ((29867, 29914), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px', 'height': '"""140px"""'}), "(width=full_box_width_px, height='140px')\n", (29873, 29914), False, 'from ipywidgets import Layout\n'), ((31704, 31744), 'ipywidgets.Layout', 'Layout', ([], {'width': 'width_px', 'height': 'height_px'}), '(width=width_px, height=height_px)\n', (31710, 31744), False, 'from ipywidgets import Layout\n'), ((32998, 33018), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (33004, 33018), False, 'from ipywidgets import Layout\n'), ((33291, 33337), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px', 'height': '"""50px"""'}), "(width=full_box_width_px, height='50px')\n", (33297, 33337), False, 'from ipywidgets import Layout\n')]
|
import multiprocessing as mp
import os
import shutil
from functools import partial
from tqdm import tqdm
import data
from chemhelp import mndo
# def calculate(binary, filename, scr=None):
# """
# Collect sets of lines for each molecule as they become available
# and then call a parser to extract the dictionary of properties.
# DEPRECATED
# """
# props_list = mndo.calculate_file(filename, scr=scr, mndo_cmd=binary)
# props_list = list(props_list) # NOTE that calculate_file returns an iterator
# return props_list
def calculate_parallel(
params_joblist,
param_keys,
mean_params,
scale_params,
filename,
binary,
n_procs=2,
mndo_input=None,
scr="_tmp_optim",
**kwargs,
):
worker_kwargs = {
"scr": scr,
"filename": filename,
"param_keys": param_keys,
"mean_params": mean_params,
"scale_params": scale_params,
"binary": binary,
}
mapfunc = partial(worker, **worker_kwargs)
# NOTE generating multiple pools each iteration was leading to a memory leak
# NOTE using imap may be slower but done for development purposes to check
# it's working
with mp.Pool(n_procs) as p:
# results = p.map(mapfunc, params_joblist)
results = list(tqdm(p.imap(mapfunc, params_joblist), total=len(params_joblist)))
return results
def worker(*args, **kwargs):
"""
"""
scr = kwargs["scr"]
filename = kwargs["filename"]
param_keys = kwargs["param_keys"]
mean_params = kwargs["mean_params"]
scale_params = kwargs["scale_params"]
binary = kwargs["binary"]
# Ensure unique directory for this worker in scratch directory
pid = os.getpid()
cwd = os.path.join(scr, str(pid))
if not os.path.exists(cwd):
os.mkdir(cwd)
if not os.path.exists(os.path.join(cwd, filename)):
shutil.copy2(os.path.join(scr, filename), os.path.join(cwd, filename))
# Set params in worker dir
param_list = args[0]
data.set_params(
param_list, param_keys, mean_params, scale_params, scr=cwd,
)
# Calculate properties
properties_list = mndo.calculate_file(filename, scr=cwd, mndo_cmd=binary)
# NOTE JCK properties_list is a generator, so complete parsing on worker
properties_list = list(properties_list)
shutil.rmtree(cwd)
return properties_list
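

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only).  The parameter keys, means/scales,
# input file name, mndo binary path and candidate parameter vectors below are
# hypothetical placeholders; their real structure is whatever data.set_params()
# and the mndo binary expect.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    param_keys = [("O", "USS"), ("O", "ZS")]          # hypothetical parameter keys
    mean_params = {"O": {"USS": -99.0, "ZS": 2.7}}     # hypothetical means
    scale_params = {"O": {"USS": 10.0, "ZS": 0.3}}     # hypothetical scales
    params_joblist = [[0.0, 0.0], [0.5, -0.5]]         # two candidate parameter vectors

    results = calculate_parallel(
        params_joblist,
        param_keys,
        mean_params,
        scale_params,
        filename="molecules.inp",   # hypothetical MNDO input file located in scr
        binary="mndo",              # hypothetical path to the mndo executable
        n_procs=2,
        scr="_tmp_optim",
    )
    print(len(results), "jobs finished")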
|
[
"os.path.exists",
"os.path.join",
"functools.partial",
"chemhelp.mndo.calculate_file",
"os.getpid",
"shutil.rmtree",
"multiprocessing.Pool",
"os.mkdir",
"data.set_params"
] |
[((982, 1014), 'functools.partial', 'partial', (['worker'], {}), '(worker, **worker_kwargs)\n', (989, 1014), False, 'from functools import partial\n'), ((1720, 1731), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1729, 1731), False, 'import os\n'), ((2022, 2097), 'data.set_params', 'data.set_params', (['param_list', 'param_keys', 'mean_params', 'scale_params'], {'scr': 'cwd'}), '(param_list, param_keys, mean_params, scale_params, scr=cwd)\n', (2037, 2097), False, 'import data\n'), ((2163, 2218), 'chemhelp.mndo.calculate_file', 'mndo.calculate_file', (['filename'], {'scr': 'cwd', 'mndo_cmd': 'binary'}), '(filename, scr=cwd, mndo_cmd=binary)\n', (2182, 2218), False, 'from chemhelp import mndo\n'), ((2346, 2364), 'shutil.rmtree', 'shutil.rmtree', (['cwd'], {}), '(cwd)\n', (2359, 2364), False, 'import shutil\n'), ((1204, 1220), 'multiprocessing.Pool', 'mp.Pool', (['n_procs'], {}), '(n_procs)\n', (1211, 1220), True, 'import multiprocessing as mp\n'), ((1782, 1801), 'os.path.exists', 'os.path.exists', (['cwd'], {}), '(cwd)\n', (1796, 1801), False, 'import os\n'), ((1811, 1824), 'os.mkdir', 'os.mkdir', (['cwd'], {}), '(cwd)\n', (1819, 1824), False, 'import os\n'), ((1852, 1879), 'os.path.join', 'os.path.join', (['cwd', 'filename'], {}), '(cwd, filename)\n', (1864, 1879), False, 'import os\n'), ((1903, 1930), 'os.path.join', 'os.path.join', (['scr', 'filename'], {}), '(scr, filename)\n', (1915, 1930), False, 'import os\n'), ((1932, 1959), 'os.path.join', 'os.path.join', (['cwd', 'filename'], {}), '(cwd, filename)\n', (1944, 1959), False, 'import os\n')]
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that contains common logging commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
def FetchLogs(log_filter=None,
order_by='DESC',
limit=None,
parent=None):
"""Fetches log entries.
This method uses Cloud Logging V2 api.
https://cloud.google.com/logging/docs/api/introduction_v2
Entries are sorted on the timestamp field, and afterwards filter is applied.
If limit is passed, returns only up to that many matching entries.
If neither log_filter nor log_ids are passed, no filtering is done.
Args:
log_filter: filter expression used in the request.
order_by: the sort order, either DESC or ASC.
limit: how many entries to return.
parent: the name of the log's parent resource, e.g. "projects/foo" or
"organizations/123" or "folders/123". Defaults to the current project.
Returns:
A generator that returns matching log entries.
Callers are responsible for handling any http exceptions.
"""
if parent:
if not ('projects/' in parent or 'organizations/' in parent
or 'folders/' in parent or 'billingAccounts/' in parent):
raise exceptions.InvalidArgumentException(
'parent', 'Unknown parent type in parent %s' % parent)
else:
parent = 'projects/%s' % properties.VALUES.core.project.Get(required=True)
# The backend has an upper limit of 1000 for page_size.
# However, there is no need to retrieve more entries if limit is specified.
page_size = min(limit or 1000, 1000)
if order_by.upper() == 'DESC':
order_by = 'timestamp desc'
else:
order_by = 'timestamp asc'
client = util.GetClient()
request = client.MESSAGES_MODULE.ListLogEntriesRequest(resourceNames=[parent],
filter=log_filter,
orderBy=order_by)
return list_pager.YieldFromList(
client.entries, request, field='entries', limit=limit,
batch_size=page_size, batch_size_attribute='pageSize')
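

# Usage sketch (illustrative only; the filter string and project name below
# are hypothetical).  FetchLogs returns a generator, so entries are retrieved
# lazily as the caller iterates:
#
#   entries = FetchLogs(log_filter='severity>=ERROR',
#                       order_by='DESC',
#                       limit=10,
#                       parent='projects/my-project')
#   for entry in entries:
#     print(entry)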
|
[
"googlecloudsdk.calliope.exceptions.InvalidArgumentException",
"apitools.base.py.list_pager.YieldFromList",
"googlecloudsdk.api_lib.logging.util.GetClient",
"googlecloudsdk.core.properties.VALUES.core.project.Get"
] |
[((2509, 2525), 'googlecloudsdk.api_lib.logging.util.GetClient', 'util.GetClient', ([], {}), '()\n', (2523, 2525), False, 'from googlecloudsdk.api_lib.logging import util\n'), ((2767, 2906), 'apitools.base.py.list_pager.YieldFromList', 'list_pager.YieldFromList', (['client.entries', 'request'], {'field': '"""entries"""', 'limit': 'limit', 'batch_size': 'page_size', 'batch_size_attribute': '"""pageSize"""'}), "(client.entries, request, field='entries', limit=\n limit, batch_size=page_size, batch_size_attribute='pageSize')\n", (2791, 2906), False, 'from apitools.base.py import list_pager\n'), ((2029, 2124), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'exceptions.InvalidArgumentException', (['"""parent"""', "('Unknown parent type in parent %s' % parent)"], {}), "('parent', \n 'Unknown parent type in parent %s' % parent)\n", (2064, 2124), False, 'from googlecloudsdk.calliope import exceptions\n'), ((2168, 2217), 'googlecloudsdk.core.properties.VALUES.core.project.Get', 'properties.VALUES.core.project.Get', ([], {'required': '(True)'}), '(required=True)\n', (2202, 2217), False, 'from googlecloudsdk.core import properties\n')]
|
__author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
plt.rc("font", size=10)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
if potential is False: #plot Alanine Dipeptide
for i in clusters:
if i != outliers:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
#else:
# point = np.where(labels == i)
# plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-110, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
else: # if plot 2D potential
plt.figure(figsize=(10, 10))
for i in clusters:
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7) #markersize=20.0, color=colors_jet[i])
#plt.plot(phi_angles[np.where(labels == i)],
# psi_angles[np.where(labels == i)],
# '.', color=colors_jet[i], label='State %d' % i)
#plt.title("2D potential " + name + " states", fontsize=20)
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.xlabel(r"$\phi$", fontsize=25)
plt.ylabel(r"$\psi$", fontsize=25)
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
for i in np.unique(clusters):
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)],
'x', color=colors_jet[i], label='State %d' % i)
#plt.title("Alanine Dipeptide " + name + " state_" + str(i))
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
# Save the result figure
plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
plt.close()
#plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
# lables_array = np.array(labels)
# colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
for i in np.unique(labels):
#if i != outliers:
if i == 1:
print("i=", i)
x = phi_angles[np.where(labels == i)]
y = psi_angles[np.where(labels == i)]
indices = get_subindices(assignments=x, state=None, samples=1000)
x = x[indices]
y = y[indices]
X, Y= np.meshgrid(x, y)
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
#kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
#kde_results = kde.score_samples([x,y])
#X, Y, Z = np.meshgrid(x, y, kde_results)
#Z = np.reshape(kernel([x,y]).T, x.shape)
#Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
# difference of Gaussians
#Z = 10.0 * (Z2 - Z1)
#step = Z.max()-Z.min()/10
#print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
#levels = np.arange(Z.min(), Z.min(), Z.max())
#print levels
plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)
plt.title("Alanine Dipeptide " + name + " states")
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_matrix(tProb_=None, name=None):
'''
if labels is not None:
n_states = len(set(labels)) - (1 if -1 in labels else 0)
print 'n_states=', n_states
#diagC = tProb_.diagonal()
length = len(labels)
print "length=", length
Cmn = scipy.sparse.lil_matrix(n_states, n_states, dtype=np.float32)
Cmn = np.zeros((n_states, n_states))
print "size of tProb", tProb_.shape
if scipy.sparse.issparse(tProb_):
tProb_ = tProb_.todense()
for i in xrange(length):
for j in xrange(length):
Cmn[labels[i], labels[j]] += tProb_[i, j]
#for i in xrange(n_states):
#Cmn[i,i] += diagC[i]
# for j in xrange(n_states):
# Cmn[i, j] += Cmn[j, i]
# Cmn[j, i] = Cmn[i, j]
for j in xrange(n_states):
sum_row = np.sum(Cmn[j,:])
if sum_row is not 0:
Cmn[j,:] /= sum_row
pylab.matshow(Cmn, cmap=plt.cm.OrRd)
else:
'''
pylab.matshow(tProb_, cmap=plt.cm.OrRd)
plt.colorbar()
#pylab.show()
plt.savefig('./' + name + 'Matrix.png', dpi=400)
plt.close()
def plot_block_matrix(labels, tProb_, name='BlockMatrix'):
print("Plot Block Matrix")
indices = np.argsort(labels)
#print indices
block_matrix = tProb_[:,indices]
block_matrix = block_matrix[indices,:]
block_matrix = 1 - block_matrix
#print block_matrix
pylab.matshow(block_matrix, cmap=plt.cm.OrRd)
plt.colorbar()
plt.savefig('./' + name + '.png', dpi=400)
#pylab.show()
plt.close()
def plot_cluster_size_distribution(populations, name='Populations'):
fig = plt.figure(1, (10,6))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.rc("font", size=30)
plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = range(len(populations))
X_xtick = ['']
    for i in range(1, len(populations)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
plt.xticks(np.arange(len(populations)+1), X_xtick)
plt.ylabel(r"Probability")
plt.ylim([0,100])
print("X:", X)
distrib.bar(X, populations*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181',
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
def plot_compare_cluster_size_distribution(populations_1, populations_2, name='Populations'):
fig = plt.figure(1, (10,8))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
bar_width = 0.45
plt.rc("font", size=20)
#plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = np.arange(len(populations_1))
X_xtick = ['']
    for i in range(1, len(populations_1)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X, populations_1*100, facecolor='black', edgecolor='white', width=bar_width,label="kNN Density Peaks 3645 states") #facecolor='#f78181',
# populations_2
#X = range(len(populations_2))
X_xtick = ['']
    for i in range(1, len(populations_2)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X+bar_width, populations_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states") #facecolor='#f78181',
plt.xticks(np.arange(len(populations_1)+1+bar_width), X_xtick)
#plt.ylabel(r"Fraction number of clusters")
plt.ylabel(r"Probability")
plt.ylim([0,60])
plt.legend()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
#From Wang Wei's code
def plot_landscape(labels=None, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=80, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '.', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
for i in range(0, len(phi_angles)):
if psi_angles[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_angles[i])[0][0] - 1
if phi_angles[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_angles[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_angles)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
if potential is False:
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
else:
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
#Cluster Centers on Free energy landscape distribution
fig = plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.title('Cluster Centers on Free energy landscape distribution', fontsize=20)
plt.xlabel("$k_B T$")
plt.ylabel(r"Probability")
plt.ylim([0, 100])
plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
distrib.bar(np.arange(10), distribution*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181'
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_compare_distribution(labels_1=None, labels_2=None, phi_angles=None, psi_angles=None, phi_ctr_1=None, psi_ctr_1=None, phi_ctr_2=None, psi_ctr_2=None, name='Energy_Landscape', bins=36, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
#extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
#plt.figure(figsize=(10, 10))
#plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
#if labels_1 is not None:
# plt.plot(phi_ctr_1, psi_ctr_1, '*', markersize=8, color='r')
distribution_1 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_1)):
if psi_ctr_1[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_1[i])[0][0] - 1
if phi_ctr_1[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_1[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_1[index_distrib] += 1
distribution_1 /= len(phi_ctr_1)
print(distribution_1)
distribution_2 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_2)):
if psi_ctr_2[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_2[i])[0][0] - 1
if phi_ctr_2[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_2[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_2[index_distrib] += 1
distribution_2 /= len(phi_ctr_2)
print(distribution_2)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
#cbar = plt.colorbar(shrink=0.77)
##plt.title('Free energy landscape', fontsize=20)
#cbar.set_label("$k_B T$", size=20)
#cbar.ax.tick_params(labelsize=20)
#if potential is False:
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-120, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
#else:
# plt.xlim([-75, 75])
# plt.ylim([-75, 75])
# plt.xticks([-50, 0, 50])
# plt.yticks([-50, 0, 50])
#plt.savefig('./' + name + '.png', dpi=400)
##plt.show()
#plt.close()
#Cluster Centers on Free energy landscape distribution
fig=plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
# plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
n_groups = 10
index = np.arange(n_groups)
bar_width = 0.45
distrib.bar(index, distribution_1*100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states") #facecolor='#f78181'
distrib.bar(index+bar_width, distribution_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
#plt.title('Cluster Centers on Free energy landscape distribution', fontsize=10)
plt.xlabel("$k_B T$")
plt.ylabel(r"Fraction number of clusters")
plt.ylim([0, 50])
    plt.xticks(index+bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9'))
plt.legend()
#plt.tight_layout()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_landscape_barrier(labels=None, selected=1, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=36, potential=False, outliers=-1):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot points
colors = ['y', 'b', 'tomato', 'm', 'g', 'c', 'yellowgreen']
color_index = 0
clusters = np.unique(labels)
for i in clusters:
if i != outliers:
if i in selected:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '2', alpha=0.20, color=colors[color_index])#, color=colors_jet[i])
color_index += 1
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '*', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
    for i in range(0, len(phi_ctr)):
if psi_ctr[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr[i])[0][0] - 1
if phi_ctr[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_ctr)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel('$\phi$', fontsize=20)
plt.ylabel('$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
plt.plot([-103,-103],[30,180],'w') #plot the barrier
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
def calculate_population(labels, name='Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels).values())
total_states = np.max(labels) + 1
#states_magnitude = int(np.ceil(np.log10(total_states)))
total_frames = len(labels)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations[magnitude] += 1
#print magnitude populations
print("Populations Probability:")
#bins = [0]
    for i in range(len(populations)):
populations[i] = populations[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations[i]*100, "%")
#bins.append(10**(i+1))
name += '_Populations'
print("name:", name)
plot_cluster_size_distribution(populations=populations, name=name)
print("Done.")
def compare_population(labels_1, labels_2, name='Compare_Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels_1).values())
total_states = np.max(labels_1) + 1
total_frames = len(labels_1)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_1 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations_1[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_1)):
populations_1[i] = populations_1[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_1[i]*100, "%")
counts = list(Counter(labels_2).values())
total_states = np.max(labels_2) + 1
total_frames = len(labels_2)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_2 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations_2[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_2)):
populations_2[i] = populations_2[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_2[i]*100, "%")
name += '_Populations'
print("name:", name)
plot_compare_cluster_size_distribution(populations_1=populations_1, populations_2=populations_2, name=name)
#plot_cluster_size_distribution(populations_1=populations_1, name=name)
print("Done.")
def calculate_landscape(labels, centers, phi_angles, psi_angles, potential=False, name='Energy_Landscape'):
print("Calculating and plotting Landscape...")
phi_ctr = phi_angles[centers]
psi_ctr = psi_angles[centers]
labels_ctr = labels[centers]
name = name + '_Energy_Landscape'
print("name:", name)
plot_landscape(labels=labels_ctr, phi_angles=phi_angles, psi_angles=psi_angles, phi_ctr=phi_ctr, psi_ctr=psi_ctr, potential=potential, name=name)
print("Done")
#plot_landscape(labels=None, phi_angles=phi_angles, psi_angles=psi_angles)
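

# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the labels, dihedral angles and
# cluster-center indices below are synthetic stand-ins for real MD assignment
# data; only the function signatures defined above are assumed.  Because of
# the relative import at the top, run this module as part of its package,
# e.g. `python -m <package>.<module>`.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_frames, n_states = 5000, 20
    labels = rng.randint(0, n_states, size=n_frames)        # cluster label per frame
    phi_angles = rng.uniform(-180.0, 180.0, size=n_frames)  # phi dihedrals (degrees)
    psi_angles = rng.uniform(-180.0, 180.0, size=n_frames)  # psi dihedrals (degrees)
    # one representative frame index per state, used as that state's "center"
    centers = np.array([np.argmax(labels == i) for i in range(n_states)])

    plot_cluster(labels, phi_angles, psi_angles, name='demo_clusters')
    calculate_population(labels, name='demo')
    calculate_landscape(labels, centers, phi_angles, psi_angles, name='demo')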
|
[
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"scipy.stats.gaussian_kde",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.min",
"numpy.histogram2d",
"matplotlib.pyplot.ylim",
"numpy.meshgrid",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.legend",
"matplotlib.pylab.matshow",
"matplotlib.pyplot.rc",
"numpy.unique",
"matplotlib.pyplot.colorbar",
"collections.Counter",
"matplotlib.pyplot.figure",
"numpy.zeros"
] |
[((96, 117), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (110, 117), False, 'import matplotlib\n'), ((818, 835), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (827, 835), True, 'import numpy as np\n'), ((840, 863), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(10)'}), "('font', size=10)\n", (846, 863), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(25)'}), "('$\\\\phi$', fontsize=25)\n", (2415, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {'fontsize': '(25)'}), "('$\\\\psi$', fontsize=25)\n", (2454, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2554), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (2523, 2554), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2566), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2564, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2966), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2958, 2966), True, 'import numpy as np\n'), ((3115, 3134), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (3124, 3134), True, 'import numpy as np\n'), ((4273, 4290), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4282, 4290), True, 'import numpy as np\n'), ((5550, 5600), 'matplotlib.pyplot.title', 'plt.title', (["('Alanine Dipeptide ' + name + ' states')"], {}), "('Alanine Dipeptide ' + name + ' states')\n", (5559, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {}), "('$\\\\phi$')\n", (5615, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5631, 5652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {}), "('$\\\\psi$')\n", (5641, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5658, 5679), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (5666, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5705), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (5692, 5705), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5781), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (5750, 5781), True, 'import matplotlib.pyplot as plt\n'), ((5782, 5793), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5791, 5793), True, 'import matplotlib.pyplot as plt\n'), ((6859, 6898), 'matplotlib.pylab.matshow', 'pylab.matshow', (['tProb_'], {'cmap': 'plt.cm.OrRd'}), '(tProb_, cmap=plt.cm.OrRd)\n', (6872, 6898), True, 'import matplotlib.pylab as pylab\n'), ((6903, 6917), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6915, 6917), True, 'import matplotlib.pyplot as plt\n'), ((6940, 6988), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + 'Matrix.png')"], {'dpi': '(400)'}), "('./' + name + 'Matrix.png', dpi=400)\n", (6951, 6988), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7004), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7002, 7004), True, 'import matplotlib.pyplot as plt\n'), ((7110, 7128), 'numpy.argsort', 'np.argsort', (['labels'], {}), '(labels)\n', (7120, 7128), True, 'import numpy as np\n'), ((7292, 7337), 'matplotlib.pylab.matshow', 'pylab.matshow', (['block_matrix'], {'cmap': 
'plt.cm.OrRd'}), '(block_matrix, cmap=plt.cm.OrRd)\n', (7305, 7337), True, 'import matplotlib.pylab as pylab\n'), ((7342, 7356), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7354, 7356), True, 'import matplotlib.pyplot as plt\n'), ((7361, 7403), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (7372, 7403), True, 'import matplotlib.pyplot as plt\n'), ((7426, 7437), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7435, 7437), True, 'import matplotlib.pyplot as plt\n'), ((7518, 7540), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (7528, 7540), True, 'import matplotlib.pyplot as plt\n'), ((7649, 7678), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (7673, 7678), True, 'import matplotlib.ticker as mtick\n'), ((7729, 7752), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(30)'}), "('font', size=30)\n", (7735, 7752), True, 'import matplotlib.pyplot as plt\n'), ((7757, 7809), 'matplotlib.pyplot.title', 'plt.title', (['"""Cluster size distributions"""'], {'fontsize': '(20)'}), "('Cluster size distributions', fontsize=20)\n", (7766, 7809), True, 'import matplotlib.pyplot as plt\n'), ((8140, 8165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (8150, 8165), True, 'import matplotlib.pyplot as plt\n'), ((8171, 8189), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (8179, 8189), True, 'import matplotlib.pyplot as plt\n'), ((8319, 8374), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (8330, 8374), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8388, 8390), True, 'import matplotlib.pyplot as plt\n'), ((8513, 8535), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 8)'], {}), '(1, (10, 8))\n', (8523, 8535), True, 'import matplotlib.pyplot as plt\n'), ((8644, 8673), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (8668, 8673), True, 'import matplotlib.ticker as mtick\n'), ((8745, 8768), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(20)'}), "('font', size=20)\n", (8751, 8768), True, 'import matplotlib.pyplot as plt\n'), ((9856, 9881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (9866, 9881), True, 'import matplotlib.pyplot as plt\n'), ((9887, 9904), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 60]'], {}), '([0, 60])\n', (9895, 9904), True, 'import matplotlib.pyplot as plt\n'), ((9908, 9920), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9918, 9920), True, 'import matplotlib.pyplot as plt\n'), ((9925, 9980), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (9936, 9980), True, 'import matplotlib.pyplot as plt\n'), ((9985, 9996), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9994, 9996), True, 'import matplotlib.pyplot as plt\n'), ((10206, 10255), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (10220, 10255), True, 'import numpy as np\n'), ((10411, 10434), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), 
"('font', size=25)\n", (10417, 10434), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10455), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (10452, 10455), True, 'import numpy as np\n'), ((10747, 10775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (10757, 10775), True, 'import matplotlib.pyplot as plt\n'), ((10780, 10842), 'matplotlib.pyplot.imshow', 'plt.imshow', (['H'], {'extent': 'extent', 'origin': '"""lower"""', 'cmap': 'plt.cm.gray'}), "(H, extent=extent, origin='lower', cmap=plt.cm.gray)\n", (10790, 10842), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11066), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (11016, 11066), True, 'import numpy as np\n'), ((11762, 11796), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (11772, 11796), True, 'import matplotlib.pyplot as plt\n'), ((11800, 11834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (11810, 11834), True, 'import matplotlib.pyplot as plt\n'), ((11846, 11871), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.77)'}), '(shrink=0.77)\n', (11858, 11871), True, 'import matplotlib.pyplot as plt\n'), ((12313, 12355), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (12324, 12355), True, 'import matplotlib.pyplot as plt\n'), ((12376, 12387), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12385, 12387), True, 'import matplotlib.pyplot as plt\n'), ((12458, 12480), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (12468, 12480), True, 'import matplotlib.pyplot as plt\n'), ((12484, 12507), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (12490, 12507), True, 'import matplotlib.pyplot as plt\n'), ((12640, 12669), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (12664, 12669), True, 'import matplotlib.ticker as mtick\n'), ((12720, 12799), 'matplotlib.pyplot.title', 'plt.title', (['"""Cluster Centers on Free energy landscape distribution"""'], {'fontsize': '(20)'}), "('Cluster Centers on Free energy landscape distribution', fontsize=20)\n", (12729, 12799), True, 'import matplotlib.pyplot as plt\n'), ((12804, 12825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k_B T$"""'], {}), "('$k_B T$')\n", (12814, 12825), True, 'import matplotlib.pyplot as plt\n'), ((12830, 12855), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (12840, 12855), True, 'import matplotlib.pyplot as plt\n'), ((12861, 12879), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (12869, 12879), True, 'import matplotlib.pyplot as plt\n'), ((13084, 13139), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (13095, 13139), True, 'import matplotlib.pyplot as plt\n'), ((13160, 13171), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13169, 13171), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13456), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (13421, 13456), True, 'import numpy 
as np\n'), ((13612, 13635), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), "('font', size=25)\n", (13618, 13635), True, 'import matplotlib.pyplot as plt\n'), ((13647, 13656), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (13653, 13656), True, 'import numpy as np\n'), ((14221, 14279), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (14229, 14279), True, 'import numpy as np\n'), ((14825, 14883), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (14833, 14883), True, 'import numpy as np\n'), ((15498, 15532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (15508, 15532), True, 'import matplotlib.pyplot as plt\n'), ((15536, 15570), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (15546, 15570), True, 'import matplotlib.pyplot as plt\n'), ((16209, 16231), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (16219, 16231), True, 'import matplotlib.pyplot as plt\n'), ((16235, 16258), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (16241, 16258), True, 'import matplotlib.pyplot as plt\n'), ((16391, 16420), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (16415, 16420), True, 'import matplotlib.ticker as mtick\n'), ((16580, 16599), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (16589, 16599), True, 'import numpy as np\n'), ((17014, 17035), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k_B T$"""'], {}), "('$k_B T$')\n", (17024, 17035), True, 'import matplotlib.pyplot as plt\n'), ((17040, 17081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction number of clusters"""'], {}), "('Fraction number of clusters')\n", (17050, 17081), True, 'import matplotlib.pyplot as plt\n'), ((17087, 17104), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 50]'], {}), '([0, 50])\n', (17095, 17104), True, 'import matplotlib.pyplot as plt\n'), ((17109, 17194), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width)', "('', '1', '', '3', '', '5', '', '7', '', '9', '')"], {}), "(index + bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9', '')\n )\n", (17119, 17194), True, 'import matplotlib.pyplot as plt\n'), ((17192, 17204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17202, 17204), True, 'import matplotlib.pyplot as plt\n'), ((17233, 17288), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (17244, 17288), True, 'import matplotlib.pyplot as plt\n'), ((17309, 17320), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17318, 17320), True, 'import matplotlib.pyplot as plt\n'), ((17525, 17574), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (17539, 17574), True, 'import numpy as np\n'), ((17730, 17753), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), "('font', size=25)\n", (17736, 17753), True, 'import matplotlib.pyplot as plt\n'), ((17765, 17774), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (17771, 17774), True, 'import numpy as np\n'), ((18066, 18094), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (18076, 18094), True, 'import matplotlib.pyplot as plt\n'), ((18099, 18161), 'matplotlib.pyplot.imshow', 'plt.imshow', (['H'], {'extent': 'extent', 'origin': '"""lower"""', 'cmap': 'plt.cm.gray'}), "(H, extent=extent, origin='lower', cmap=plt.cm.gray)\n", (18109, 18161), True, 'import matplotlib.pyplot as plt\n'), ((18292, 18309), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (18301, 18309), True, 'import numpy as np\n'), ((18751, 18809), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (18759, 18809), True, 'import numpy as np\n'), ((19488, 19522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (19498, 19522), True, 'import matplotlib.pyplot as plt\n'), ((19526, 19560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (19536, 19560), True, 'import matplotlib.pyplot as plt\n'), ((19572, 19597), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.77)'}), '(shrink=0.77)\n', (19584, 19597), True, 'import matplotlib.pyplot as plt\n'), ((19732, 19753), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (19740, 19753), True, 'import matplotlib.pyplot as plt\n'), ((19758, 19779), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (19766, 19779), True, 'import matplotlib.pyplot as plt\n'), ((19784, 19819), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (19794, 19819), True, 'import matplotlib.pyplot as plt\n'), ((19824, 19859), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (19834, 19859), True, 'import matplotlib.pyplot as plt\n'), ((19864, 19902), 'matplotlib.pyplot.plot', 'plt.plot', (['[-103, -103]', '[30, 180]', '"""w"""'], {}), "([-103, -103], [30, 180], 'w')\n", (19872, 19902), True, 'import matplotlib.pyplot as plt\n'), ((19921, 19963), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (19932, 19963), True, 'import matplotlib.pyplot as plt\n'), ((19984, 19995), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19993, 19995), True, 'import matplotlib.pyplot as plt\n'), ((20414, 20444), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (20422, 20444), True, 'import numpy as np\n'), ((21406, 21436), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (21414, 21436), True, 'import numpy as np\n'), ((22061, 22091), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (22069, 22091), True, 'import numpy as np\n'), ((1481, 1544), 'matplotlib.pyplot.title', 'plt.title', (["('Alanine Dipeptide ' + name + ' states')"], {'fontsize': '(10)'}), "('Alanine Dipeptide ' + name + ' states', fontsize=10)\n", (1490, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1749, 1767), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2304), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-75, 75]'], {}), '([-75, 75])\n', (2293, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2313, 2332), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[-75, 75]'], {}), '([-75, 75])\n', (2321, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2365), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (2351, 2365), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2398), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (2384, 2398), True, 'import matplotlib.pyplot as plt\n'), ((10679, 10693), 'numpy.min', 'np.min', (['xedges'], {}), '(xedges)\n', (10685, 10693), True, 'import numpy as np\n'), ((10695, 10709), 'numpy.max', 'np.max', (['xedges'], {}), '(xedges)\n', (10701, 10709), True, 'import numpy as np\n'), ((10711, 10725), 'numpy.min', 'np.min', (['yedges'], {}), '(yedges)\n', (10717, 10725), True, 'import numpy as np\n'), ((10727, 10741), 'numpy.max', 'np.max', (['yedges'], {}), '(yedges)\n', (10733, 10741), True, 'import numpy as np\n'), ((10930, 10987), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_ctr', 'psi_ctr', '"""."""'], {'markersize': '(10)', 'color': '"""r"""'}), "(phi_ctr, psi_ctr, '.', markersize=10, color='r')\n", (10938, 10987), True, 'import matplotlib.pyplot as plt\n'), ((12037, 12058), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (12045, 12058), True, 'import matplotlib.pyplot as plt\n'), ((12067, 12088), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (12075, 12088), True, 'import matplotlib.pyplot as plt\n'), ((12097, 12132), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (12107, 12132), True, 'import matplotlib.pyplot as plt\n'), ((12141, 12176), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (12151, 12176), True, 'import matplotlib.pyplot as plt\n'), ((12195, 12214), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-75, 75]'], {}), '([-75, 75])\n', (12203, 12214), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12242), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-75, 75]'], {}), '([-75, 75])\n', (12231, 12242), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12275), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (12261, 12275), True, 'import matplotlib.pyplot as plt\n'), ((12284, 12308), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (12294, 12308), True, 'import matplotlib.pyplot as plt\n'), ((12895, 12908), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (12904, 12908), True, 'import numpy as np\n'), ((12977, 12990), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (12986, 12990), True, 'import numpy as np\n'), ((17998, 18012), 'numpy.min', 'np.min', (['xedges'], {}), '(xedges)\n', (18004, 18012), True, 'import numpy as np\n'), ((18014, 18028), 'numpy.max', 'np.max', (['xedges'], {}), '(xedges)\n', (18020, 18028), True, 'import numpy as np\n'), ((18030, 18044), 'numpy.min', 'np.min', (['yedges'], {}), '(yedges)\n', (18036, 18044), True, 'import numpy as np\n'), ((18046, 18060), 'numpy.max', 'np.max', (['yedges'], {}), '(yedges)\n', (18052, 18060), True, 'import numpy as np\n'), ((18673, 18730), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_ctr', 'psi_ctr', '"""*"""'], {'markersize': '(10)', 'color': '"""r"""'}), "(phi_ctr, psi_ctr, '*', markersize=10, color='r')\n", (18681, 18730), True, 'import matplotlib.pyplot as plt\n'), ((20166, 20180), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (20172, 20180), True, 
'import numpy as np\n'), ((21213, 21229), 'numpy.max', 'np.max', (['labels_1'], {}), '(labels_1)\n', (21219, 21229), True, 'import numpy as np\n'), ((21868, 21884), 'numpy.max', 'np.max', (['labels_2'], {}), '(labels_2)\n', (21874, 21884), True, 'import numpy as np\n'), ((3426, 3447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {}), "('$\\\\phi$')\n", (3436, 3447), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {}), "('$\\\\psi$')\n", (3470, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3516), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (3503, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3550), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (3537, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3598), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (3573, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3646), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (3621, 3646), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3782), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4632), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4626, 4632), True, 'import numpy as np\n'), ((4712, 4729), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (4721, 4729), True, 'import numpy as np\n'), ((4751, 4777), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (4769, 4777), False, 'from scipy import stats\n'), ((5470, 5506), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z'], {'origin': '"""lower"""'}), "(X, Y, Z, origin='lower')\n", (5481, 5506), True, 'import matplotlib.pyplot as plt\n'), ((20312, 20334), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (20320, 20334), True, 'import numpy as np\n'), ((20502, 20513), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (20510, 20513), True, 'import numpy as np\n'), ((20538, 20552), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (20545, 20552), True, 'import numpy as np\n'), ((21302, 21324), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (21310, 21324), True, 'import numpy as np\n'), ((21494, 21505), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (21502, 21505), True, 'import numpy as np\n'), ((21530, 21544), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (21537, 21544), True, 'import numpy as np\n'), ((21957, 21979), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (21965, 21979), True, 'import numpy as np\n'), ((22149, 22160), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (22157, 22160), True, 'import numpy as np\n'), ((22185, 22199), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (22192, 22199), True, 'import numpy as np\n'), ((977, 993), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (983, 993), True, 'import numpy as np\n'), ((1130, 1151), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1138, 1151), True, 'import numpy as np\n'), ((1168, 1246), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_angles[point]', 'psi_angles[point]', '"""."""'], {'markersize': '(1.0)', 'alpha': '(0.7)'}), "(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)\n", 
(1176, 1246), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3096), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (3086, 3096), True, 'import numpy as np\n'), ((4392, 4413), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (4400, 4413), True, 'import numpy as np\n'), ((4442, 4463), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (4450, 4463), True, 'import numpy as np\n'), ((18413, 18434), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (18421, 18434), True, 'import numpy as np\n'), ((18451, 18545), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_angles[point]', 'psi_angles[point]', '"""2"""'], {'alpha': '(0.2)', 'color': 'colors[color_index]'}), "(phi_angles[point], psi_angles[point], '2', alpha=0.2, color=colors\n [color_index])\n", (18459, 18545), True, 'import matplotlib.pyplot as plt\n'), ((20121, 20136), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (20128, 20136), False, 'from collections import Counter\n'), ((21166, 21183), 'collections.Counter', 'Counter', (['labels_1'], {}), '(labels_1)\n', (21173, 21183), False, 'from collections import Counter\n'), ((21821, 21838), 'collections.Counter', 'Counter', (['labels_2'], {}), '(labels_2)\n', (21828, 21838), False, 'from collections import Counter\n'), ((3194, 3215), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (3202, 3215), True, 'import numpy as np\n'), ((3249, 3270), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (3257, 3270), True, 'import numpy as np\n'), ((10614, 10636), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (10620, 10636), True, 'import numpy as np\n'), ((13815, 13837), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (13821, 13837), True, 'import numpy as np\n'), ((17933, 17955), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (17939, 17955), True, 'import numpy as np\n'), ((1861, 1882), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1869, 1882), True, 'import numpy as np\n'), ((1920, 1941), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1928, 1941), True, 'import numpy as np\n'), ((11236, 11260), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (11244, 11260), True, 'import numpy as np\n'), ((11307, 11339), 'numpy.where', 'np.where', (['(xedges > psi_angles[i])'], {}), '(xedges > psi_angles[i])\n', (11315, 11339), True, 'import numpy as np\n'), ((11406, 11430), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (11414, 11430), True, 'import numpy as np\n'), ((11477, 11509), 'numpy.where', 'np.where', (['(yedges > phi_angles[i])'], {}), '(yedges > phi_angles[i])\n', (11485, 11509), True, 'import numpy as np\n'), ((14366, 14390), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (14374, 14390), True, 'import numpy as np\n'), ((14437, 14468), 'numpy.where', 'np.where', (['(xedges > psi_ctr_1[i])'], {}), '(xedges > psi_ctr_1[i])\n', (14445, 14468), True, 'import numpy as np\n'), ((14534, 14558), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (14542, 14558), True, 'import numpy as np\n'), ((14605, 14636), 'numpy.where', 'np.where', (['(yedges > phi_ctr_1[i])'], {}), '(yedges > phi_ctr_1[i])\n', (14613, 14636), True, 'import numpy as np\n'), ((14970, 14994), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (14978, 
14994), True, 'import numpy as np\n'), ((15041, 15072), 'numpy.where', 'np.where', (['(xedges > psi_ctr_2[i])'], {}), '(xedges > psi_ctr_2[i])\n', (15049, 15072), True, 'import numpy as np\n'), ((15138, 15162), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (15146, 15162), True, 'import numpy as np\n'), ((15209, 15240), 'numpy.where', 'np.where', (['(yedges > phi_ctr_2[i])'], {}), '(yedges > phi_ctr_2[i])\n', (15217, 15240), True, 'import numpy as np\n'), ((18974, 18998), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (18982, 18998), True, 'import numpy as np\n'), ((19045, 19074), 'numpy.where', 'np.where', (['(xedges > psi_ctr[i])'], {}), '(xedges > psi_ctr[i])\n', (19053, 19074), True, 'import numpy as np\n'), ((19138, 19162), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (19146, 19162), True, 'import numpy as np\n'), ((19209, 19238), 'numpy.where', 'np.where', (['(yedges > phi_ctr[i])'], {}), '(yedges > phi_ctr[i])\n', (19217, 19238), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
def drawPoint(canvas,x,y):
canvas[y,x] = 0
def drawLine(canvas,x1,y1,x2,y2):
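    # Bresenham-style line drawing: step one pixel at a time along x and use the
    # decision parameter pi to choose when to also step in y (assumes |dy| <= |dx|).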
dx, dy = abs(x2 - x1), abs(y2 - y1)
xi, yi = x1, y1
sx, sy = 1 if (x2 - x1) > 0 else -1, 1 if (y2 - y1) > 0 else -1
pi = 2*dy - dx
while xi != x2 + 1:
if pi < 0:
pi += 2 * dy
else:
pi += 2 * dy - 2 * dx
yi += 1 * sy
drawPoint(canvas,xi,yi)
xi += 1 * sx
def drawCircle(canvas,x,y,r):
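    # Midpoint circle algorithm: each loop walks one octant and the four drawPoint
    # calls mirror every computed point across both axes into the other quadrants.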
x0, y0 = x, y
xi = 0
yi = r
pi = 5/4 - r
while xi <= yi:
if pi < 0:
pi += 2 * (xi + 1) + 1
else:
pi += 2 * (xi + 1) + 1 - 2 * (yi - 1)
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi = r
yi = 0
pi = 5/4 - r
while not (xi == yi+1 or xi == yi):
if pi < 0:
pi += 2 * (yi + 1) + 1
else:
pi += 2 * (yi + 1) + 1 - 2 * (xi - 1)
xi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi += 1
def drawEllipse(canvas,x,y,rx,ry):
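    # Midpoint ellipse algorithm: region 1 steps along x (gentle slope), region 2
    # steps along y (steep slope); each point is mirrored into all four quadrants.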
x0, y0 = x, y
xi, yi = 0, ry
rx2 = rx ** 2
ry2 = ry ** 2
p1i = ry2 - rx2 * ry + rx2 / 4
while 2*ry2*xi < 2*rx2*yi:
if p1i < 0:
p1i += 2 * ry2 * (xi + 1) + ry2
else:
p1i += 2 * ry2 * (xi + 1) - 2* rx2 * (yi - 1) + ry2
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi -= 1
p2i = ry2 * (xi + .5) ** 2 + rx2 * (yi - 1) ** 2 - rx2 * ry2
while yi >= 0:
if p2i > 0:
p2i += -2 * rx2 * (yi - 1) + rx2
else:
p2i += 2 * ry2 * (xi + 1) - 2 * rx2 * (yi - 1) + rx2
xi += 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi -= 1
if __name__ == '__main__':
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawLine(canvas,800,100,100,600)
cv2.imwrite('line.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawCircle(canvas,500,500,300)
cv2.imwrite('circle.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawEllipse(canvas,500,500,100,200)
cv2.imwrite('ellipse.png',canvas)
|
[
"cv2.imwrite",
"numpy.ones"
] |
[((2335, 2366), 'cv2.imwrite', 'cv2.imwrite', (['"""line.png"""', 'canvas'], {}), "('line.png', canvas)\n", (2346, 2366), False, 'import cv2\n'), ((2460, 2493), 'cv2.imwrite', 'cv2.imwrite', (['"""circle.png"""', 'canvas'], {}), "('circle.png', canvas)\n", (2471, 2493), False, 'import cv2\n'), ((2592, 2626), 'cv2.imwrite', 'cv2.imwrite', (['"""ellipse.png"""', 'canvas'], {}), "('ellipse.png', canvas)\n", (2603, 2626), False, 'import cv2\n'), ((2252, 2289), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2259, 2289), True, 'import numpy as np\n'), ((2379, 2416), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2386, 2416), True, 'import numpy as np\n'), ((2506, 2543), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2513, 2543), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, unicode_literals
import unittest
from fluent.runtime import FluentBundle
from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError
from ..utils import dedent_ftl
class TestPlaceables(unittest.TestCase):
def setUp(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages(dedent_ftl("""
message = Message
.attr = Message Attribute
-term = Term
.attr = Term Attribute
-term2 = {
*[variant1] Term Variant 1
[variant2] Term Variant 2
}
uses-message = { message }
uses-message-attr = { message.attr }
uses-term = { -term }
bad-message-ref = Text { not-a-message }
bad-message-attr-ref = Text { message.not-an-attr }
bad-term-ref = Text { -not-a-term }
self-referencing-message = Text { self-referencing-message }
cyclic-msg1 = Text1 { cyclic-msg2 }
cyclic-msg2 = Text2 { cyclic-msg1 }
self-cyclic-message = Parent { self-cyclic-message.attr }
.attr = Attribute { self-cyclic-message }
self-attribute-ref-ok = Parent { self-attribute-ref-ok.attr }
.attr = Attribute
self-parent-ref-ok = Parent
.attr = Attribute { self-parent-ref-ok }
"""))
def test_placeable_message(self):
val, errs = self.ctx.format('uses-message', {})
self.assertEqual(val, 'Message')
self.assertEqual(len(errs), 0)
def test_placeable_message_attr(self):
val, errs = self.ctx.format('uses-message-attr', {})
self.assertEqual(val, 'Message Attribute')
self.assertEqual(len(errs), 0)
def test_placeable_term(self):
val, errs = self.ctx.format('uses-term', {})
self.assertEqual(val, 'Term')
self.assertEqual(len(errs), 0)
def test_placeable_bad_message(self):
val, errs = self.ctx.format('bad-message-ref', {})
self.assertEqual(val, 'Text not-a-message')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown message: not-a-message")])
def test_placeable_bad_message_attr(self):
val, errs = self.ctx.format('bad-message-attr-ref', {})
self.assertEqual(val, 'Text Message')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown attribute: message.not-an-attr")])
def test_placeable_bad_term(self):
val, errs = self.ctx.format('bad-term-ref', {})
self.assertEqual(val, 'Text -not-a-term')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentReferenceError("Unknown term: -not-a-term")])
def test_cycle_detection(self):
val, errs = self.ctx.format('self-referencing-message', {})
self.assertEqual(val, 'Text ???')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentCyclicReferenceError("Cyclic reference")])
def test_mutual_cycle_detection(self):
val, errs = self.ctx.format('cyclic-msg1', {})
self.assertEqual(val, 'Text1 Text2 ???')
self.assertEqual(len(errs), 1)
self.assertEqual(
errs,
[FluentCyclicReferenceError("Cyclic reference")])
def test_allowed_self_reference(self):
val, errs = self.ctx.format('self-attribute-ref-ok', {})
self.assertEqual(val, 'Parent Attribute')
self.assertEqual(len(errs), 0)
val, errs = self.ctx.format('self-parent-ref-ok.attr', {})
self.assertEqual(val, 'Attribute Parent')
self.assertEqual(len(errs), 0)
class TestSingleElementPattern(unittest.TestCase):
def test_single_literal_number_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { 1 }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_literal_number_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { 1 }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_number_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo', {'arg': 1})
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_number_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo', {'arg': 1})
self.assertEqual(val, '1')
self.assertEqual(errs, [])
def test_single_arg_missing_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=True)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, 'arg')
self.assertEqual(len(errs), 1)
def test_single_arg_missing_non_isolating(self):
self.ctx = FluentBundle(['en-US'], use_isolating=False)
self.ctx.add_messages('foo = { $arg }')
val, errs = self.ctx.format('foo')
self.assertEqual(val, 'arg')
self.assertEqual(len(errs), 1)
|
[
"fluent.runtime.FluentBundle",
"fluent.runtime.errors.FluentCyclicReferenceError",
"fluent.runtime.errors.FluentReferenceError"
] |
[((313, 357), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(False)'}), "(['en-US'], use_isolating=False)\n", (325, 357), False, 'from fluent.runtime import FluentBundle\n'), ((4043, 4086), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(True)'}), "(['en-US'], use_isolating=True)\n", (4055, 4086), False, 'from fluent.runtime import FluentBundle\n'), ((4321, 4365), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(False)'}), "(['en-US'], use_isolating=False)\n", (4333, 4365), False, 'from fluent.runtime import FluentBundle\n'), ((4592, 4635), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(True)'}), "(['en-US'], use_isolating=True)\n", (4604, 4635), False, 'from fluent.runtime import FluentBundle\n'), ((4881, 4925), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(False)'}), "(['en-US'], use_isolating=False)\n", (4893, 4925), False, 'from fluent.runtime import FluentBundle\n'), ((5168, 5211), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(True)'}), "(['en-US'], use_isolating=True)\n", (5180, 5211), False, 'from fluent.runtime import FluentBundle\n'), ((5452, 5496), 'fluent.runtime.FluentBundle', 'FluentBundle', (["['en-US']"], {'use_isolating': '(False)'}), "(['en-US'], use_isolating=False)\n", (5464, 5496), False, 'from fluent.runtime import FluentBundle\n'), ((2310, 2364), 'fluent.runtime.errors.FluentReferenceError', 'FluentReferenceError', (['"""Unknown message: not-a-message"""'], {}), "('Unknown message: not-a-message')\n", (2330, 2364), False, 'from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError\n'), ((2621, 2683), 'fluent.runtime.errors.FluentReferenceError', 'FluentReferenceError', (['"""Unknown attribute: message.not-an-attr"""'], {}), "('Unknown attribute: message.not-an-attr')\n", (2641, 2683), False, 'from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError\n'), ((2928, 2977), 'fluent.runtime.errors.FluentReferenceError', 'FluentReferenceError', (['"""Unknown term: -not-a-term"""'], {}), "('Unknown term: -not-a-term')\n", (2948, 2977), False, 'from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError\n'), ((3223, 3269), 'fluent.runtime.errors.FluentCyclicReferenceError', 'FluentCyclicReferenceError', (['"""Cyclic reference"""'], {}), "('Cyclic reference')\n", (3249, 3269), False, 'from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError\n'), ((3516, 3562), 'fluent.runtime.errors.FluentCyclicReferenceError', 'FluentCyclicReferenceError', (['"""Cyclic reference"""'], {}), "('Cyclic reference')\n", (3542, 3562), False, 'from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError\n')]
|
import pytest
from brownie import Wei
@pytest.fixture(scope="function", autouse=True)
def shared_setup(fn_isolation):
pass
@pytest.fixture(scope='module')
def nocoiner(accounts, lido):
assert lido.balanceOf(accounts[9]) == 0
return accounts[9]
@pytest.fixture(scope='module')
def ape(accounts):
return accounts[0]
@pytest.fixture(scope='module')
def whale(accounts):
return accounts[1]
@pytest.fixture()
def vault(LidoVault, ape):
return LidoVault.deploy({"from": ape})
@pytest.fixture(scope='module')
def lido(interface, accounts):
lido = interface.Lido("0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84")
oracle = accounts.at(lido.getOracle(), force=True)
return interface.Lido(lido, owner=oracle)
class Helpers:
@staticmethod
def filter_events_from(addr, events):
return list(filter(lambda evt: evt.address == addr, events))
@staticmethod
def assert_single_event_named(evt_name, tx, evt_keys_dict):
receiver_events = Helpers.filter_events_from(tx.receiver, tx.events[evt_name])
assert len(receiver_events) == 1
assert dict(receiver_events[0]) == evt_keys_dict
@staticmethod
def report_beacon_balance_increase(lido):
beacon_stat = lido.getBeaconStat().dict()
total_pooled_ether = lido.getTotalPooledEther()
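        # brownie's Wei type supports arithmetic with human-readable strings such as "1 ether"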
new_beacon_balance = Wei(total_pooled_ether * 1.5) + "1 ether"
lido.pushBeacon(beacon_stat['beaconValidators'], new_beacon_balance)
@pytest.fixture(scope='module')
def helpers():
return Helpers
|
[
"pytest.fixture",
"brownie.Wei"
] |
[((41, 87), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'autouse': '(True)'}), "(scope='function', autouse=True)\n", (55, 87), False, 'import pytest\n'), ((132, 162), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (146, 162), False, 'import pytest\n'), ((263, 293), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (277, 293), False, 'import pytest\n'), ((339, 369), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (353, 369), False, 'import pytest\n'), ((417, 433), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (431, 433), False, 'import pytest\n'), ((507, 537), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (521, 537), False, 'import pytest\n'), ((1470, 1500), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1484, 1500), False, 'import pytest\n'), ((1348, 1377), 'brownie.Wei', 'Wei', (['(total_pooled_ether * 1.5)'], {}), '(total_pooled_ether * 1.5)\n', (1351, 1377), False, 'from brownie import Wei\n')]
|
from codecs import open as codecs_open
from setuptools import setup, find_packages
# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(name='fio_taxa',
version='1.0.0',
description=u"Classification of GeoJSON features",
long_description=long_description,
classifiers=[],
keywords='',
author=u"<NAME>",
author_email='<EMAIL>',
url='https://github.com/sgillies/fio-taxa',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'click', 'fiona'
],
extras_require={
'test': ['pytest'],
},
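      # the fiona.fio_commands entry point below registers `taxa` as a `fio` CLI subcommand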
entry_points="""
[fiona.fio_commands]
taxa=fio_taxa.scripts.cli:taxa
"""
)
|
[
"codecs.open",
"setuptools.find_packages"
] |
[((140, 183), 'codecs.open', 'codecs_open', (['"""README.rst"""'], {'encoding': '"""utf-8"""'}), "('README.rst', encoding='utf-8')\n", (151, 183), True, 'from codecs import open as codecs_open\n'), ((549, 605), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ez_setup', 'examples', 'tests']"}), "(exclude=['ez_setup', 'examples', 'tests'])\n", (562, 605), False, 'from setuptools import setup, find_packages\n')]
|
import requests
import json
class test(object):
def __init__(self):
self._debug = False
self._http_debug = False
self._https = True
self._session = requests.session() # use single session for all requests
def update_csrf(self):
# Retrieve server csrf and update session's headers
for cookie in self._session.cookies:
if cookie.name == 'ccsrftoken':
                csrftoken = cookie.value[1:-1] # cookie value is wrapped in quotes; strip them
self._session.headers.update({'X-CSRFTOKEN': csrftoken})
def login(self,host,username,password):
self.host = host
if self._https is True:
self.url_prefix = 'https://' + self.host
else:
self.url_prefix = 'http://' + self.host
url = self.url_prefix + '/logincheck'
res = self._session.post(url,
data='username='+username+'&secretkey='+password,
verify = False)
#self.dprint(res)
# Update session's csrftoken
self.update_csrf()
def get(self, url):
url = url
res = self._session.get(url)
return res.content
f = test()
f.login(ip,username, password)
|
[
"requests.session"
] |
[((177, 195), 'requests.session', 'requests.session', ([], {}), '()\n', (193, 195), False, 'import requests\n')]
|
import numpy as np
import numpy.matlib
# sum of the matrices
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A + B
print(C)
# sum of the rows
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
s_linha = sum(A)
print(s_linha)
# sum of the elements
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
soma = sum(sum(A))
print(soma)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A - B
print(C)
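# matrix multiplication (np.matmul), as opposed to the element-wise product A * B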
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = np.matmul(A,B)
print(C)
# transpose
A = np.array([[1,0],[0,2]])
A_transposta = A.T
print(A_transposta)
# inverse
from numpy.linalg import *
from numpy import linalg as LA
A = np.array([[1,3],[2,0]])
A_inv = inv(A)
print(A_inv)
I = np.matmul(A,A_inv)
print(I)
A = ([2,2],[4,8])
A_det = LA.det(A)
print(A_det)
A = ([[1,2],[1,2]])
A_n = LA.matrix_power(A, 2)
|
[
"numpy.array",
"numpy.linalg.matrix_power",
"numpy.matmul",
"numpy.linalg.det"
] |
[((66, 92), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (74, 92), True, 'import numpy as np\n'), ((94, 120), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (102, 120), True, 'import numpy as np\n'), ((161, 187), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (169, 187), True, 'import numpy as np\n'), ((189, 215), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (197, 215), True, 'import numpy as np\n'), ((270, 296), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (278, 296), True, 'import numpy as np\n'), ((298, 324), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (306, 324), True, 'import numpy as np\n'), ((358, 384), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (366, 384), True, 'import numpy as np\n'), ((386, 412), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (394, 412), True, 'import numpy as np\n'), ((434, 460), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (442, 460), True, 'import numpy as np\n'), ((462, 488), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (470, 488), True, 'import numpy as np\n'), ((490, 505), 'numpy.matmul', 'np.matmul', (['A', 'B'], {}), '(A, B)\n', (499, 505), True, 'import numpy as np\n'), ((532, 558), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (540, 558), True, 'import numpy as np\n'), ((670, 696), 'numpy.array', 'np.array', (['[[1, 3], [2, 0]]'], {}), '([[1, 3], [2, 0]])\n', (678, 696), True, 'import numpy as np\n'), ((726, 745), 'numpy.matmul', 'np.matmul', (['A', 'A_inv'], {}), '(A, A_inv)\n', (735, 745), True, 'import numpy as np\n'), ((782, 791), 'numpy.linalg.det', 'LA.det', (['A'], {}), '(A)\n', (788, 791), True, 'from numpy import linalg as LA\n'), ((832, 853), 'numpy.linalg.matrix_power', 'LA.matrix_power', (['A', '(2)'], {}), '(A, 2)\n', (847, 853), True, 'from numpy import linalg as LA\n')]
|
import sys, requests, json, time
METRIC_NAME = "builtin:billing.ddu.metrics.byEntity"
PAGE_SIZE = 500
sys.tracebacklimit = 0
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 MyManagementZone
arguments = len(sys.argv) - 1
if arguments != 5 and arguments != 6:
print(
"The script was called with {} arguments but expected 5 or 6: \nFROM_DATE_AND_TIME TO_DATE_AND_TIME URL_TO_ENVIRONMENT API_TOKEN MAX_REQUESTS_PER_MINUTE [SELECTED_MANAGEMENT_ZONE]\n"
"Example: python dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 [myManagementZone]\n"
"Note: The SELECTED_MANAGEMENT_ZONE is optional. Specify it if you only want the calculate the ddu consumption for a single management zone.".format(
arguments
)
)
exit()
FROM = str(sys.argv[1])
TO = str(sys.argv[2])
BASE_URL = str(sys.argv[3])
API_TOKEN = str(sys.argv[4])
MAX_REQUESTS_PER_MINUTE = int(sys.argv[5])
if arguments == 6:
SELECTED_MANAGEMENT_ZONE_NAME = str(sys.argv[6])
else:
SELECTED_MANAGEMENT_ZONE_NAME = None
# Get all available management zones
# https://mySampleEnv.live.dynatrace.com/api/config/v1/managementZones
# try:
response = requests.get(
BASE_URL + "config/v1/managementZones",
headers={"Authorization": "Api-Token " + API_TOKEN},
)
# Show error message when a connection can’t be established. Terminates the script when there’s an error.
response.raise_for_status()
allManagemementZones = json.loads(response.content)["values"]
# print("Amount of different management zones: ", len(allManagemementZones))
# If the management zone is specified: Get the index of the occurrence
if SELECTED_MANAGEMENT_ZONE_NAME != None:
for mzIndex, managementZone in enumerate(allManagemementZones):
if allManagemementZones[mzIndex].get("name") == SELECTED_MANAGEMENT_ZONE_NAME:
SELECTED_MANAGEMENT_ZONE_INDEX = mzIndex
# Get all different entityTypes. Due to the high number of different types you can't fetch all at once => Loop through every page with nextPageKey
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes?nextPageKey=AQAAADIBAAAAMg==
response = requests.get(
BASE_URL + "v2/entityTypes", headers={"Authorization": "Api-Token " + API_TOKEN}
)
response.raise_for_status()
allEntityTypes = json.loads(response.content)["types"]
nextPage = json.loads(response.content)["nextPageKey"]
while nextPage != None:
response = requests.get(
BASE_URL + "v2/entityTypes?nextPageKey=" + nextPage,
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
nextPage = (json.loads(response.content)).get("nextPageKey", None)
allEntityTypes.extend(json.loads(response.content)["types"])
# print("Amount of different entity types: ", len(allEntityTypes))
# print()
dduConsumptionObjectOfManagementZone = {}
# Result JSON Object with Array of dduConsumption for each management zone
dduConsumptionPerManagementZone = "[ "
dduConsumptionOfEntityType = 0
dduConsumptionOfManagementZone = 0
# https://mySampleEnv.live.dynatrace.com/api/v2/metrics/query?metricSelector=builtin:billing.ddu.metrics.byEntity&entitySelector=type(HOST),mzId(123456789)&from=2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00
# Loop through every entityType of every management zone
# If there is a specific management zone selected: "loop through" the single management zone
for managementZoneIndex, managementZone in (
enumerate([allManagemementZones[SELECTED_MANAGEMENT_ZONE_INDEX]])
if SELECTED_MANAGEMENT_ZONE_NAME != None
else enumerate(allManagemementZones)
):
# If a management zone got specified: access it via the index in all management zones
if SELECTED_MANAGEMENT_ZONE_NAME != None:
managementZoneIndex = SELECTED_MANAGEMENT_ZONE_INDEX
for entityTypeIndex, entityType in enumerate(allEntityTypes):
"""
print(
"MZId: {:21} MZName: {:20} ET Name: {:5}".format(
allManagemementZones[managementZoneIndex]["id"],
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
)
)
"""
# Replace the "+" of Timezone to the encoded %2B
response = requests.get(
"{}v2/metrics/query?metricSelector={}:splitBy()&entitySelector=mzId({}),type({})&pageSize={}&from={}&to={}".format(
BASE_URL,
METRIC_NAME,
allManagemementZones[managementZoneIndex]["id"],
allEntityTypes[entityTypeIndex]["type"],
str(PAGE_SIZE),
FROM.replace("+", "%2B", 1),
TO.replace("+", "%2B", 1),
),
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
# print("Waiting for ", 60 / MAX_REQUESTS_PER_MINUTE, " seconds")
time.sleep(60 / MAX_REQUESTS_PER_MINUTE)
dduConsumptionOfMZandETDict = json.loads(response.content)["result"][0]["data"]
# If there are any results
if dduConsumptionOfMZandETDict:
# Filter out every empty usage values and create the sum of ddu usage
dduConsumptionOfMZandET = sum(
filter(None, dduConsumptionOfMZandETDict[0]["values"])
)
"""
print(
"Ddu consumption of manangement zone {} and entityType {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
round(dduConsumptionOfMZandET, 3),
)
)
"""
dduConsumptionOfManagementZone += dduConsumptionOfMZandET
dduConsumptionOfMZandET = 0
"""
print(
"Ddu consumption of management zone {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
round(dduConsumptionOfManagementZone, 3),
)
)
"""
# print()
# Populate JSON Object
dduConsumptionObjectOfManagementZone["MZId"] = allManagemementZones[
managementZoneIndex
]["id"]
dduConsumptionObjectOfManagementZone["MZName"] = allManagemementZones[
managementZoneIndex
]["name"]
dduConsumptionObjectOfManagementZone["dduConsumption"] = round(
dduConsumptionOfManagementZone, 3
)
dduConsumptionOfManagementZone = 0
    # the initial "[ " already takes 2 characters, so a longer string means entries were appended
if len(dduConsumptionPerManagementZone) > 2:
dduConsumptionPerManagementZone = (
dduConsumptionPerManagementZone
+ ", "
+ json.dumps(dduConsumptionObjectOfManagementZone)
)
else:
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + json.dumps(
dduConsumptionObjectOfManagementZone
)
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + " ]"
print(dduConsumptionPerManagementZone)
|
[
"json.dumps",
"json.loads",
"time.sleep",
"requests.get"
] |
[((1509, 1619), 'requests.get', 'requests.get', (["(BASE_URL + 'config/v1/managementZones')"], {'headers': "{'Authorization': 'Api-Token ' + API_TOKEN}"}), "(BASE_URL + 'config/v1/managementZones', headers={\n 'Authorization': 'Api-Token ' + API_TOKEN})\n", (1521, 1619), False, 'import sys, requests, json, time\n'), ((2530, 2629), 'requests.get', 'requests.get', (["(BASE_URL + 'v2/entityTypes')"], {'headers': "{'Authorization': 'Api-Token ' + API_TOKEN}"}), "(BASE_URL + 'v2/entityTypes', headers={'Authorization': \n 'Api-Token ' + API_TOKEN})\n", (2542, 2629), False, 'import sys, requests, json, time\n'), ((1784, 1812), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (1794, 1812), False, 'import sys, requests, json, time\n'), ((2676, 2704), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2686, 2704), False, 'import sys, requests, json, time\n'), ((2726, 2754), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2736, 2754), False, 'import sys, requests, json, time\n'), ((2809, 2932), 'requests.get', 'requests.get', (["(BASE_URL + 'v2/entityTypes?nextPageKey=' + nextPage)"], {'headers': "{'Authorization': 'Api-Token ' + API_TOKEN}"}), "(BASE_URL + 'v2/entityTypes?nextPageKey=' + nextPage, headers={\n 'Authorization': 'Api-Token ' + API_TOKEN})\n", (2821, 2932), False, 'import sys, requests, json, time\n'), ((5290, 5330), 'time.sleep', 'time.sleep', (['(60 / MAX_REQUESTS_PER_MINUTE)'], {}), '(60 / MAX_REQUESTS_PER_MINUTE)\n', (5300, 5330), False, 'import sys, requests, json, time\n'), ((2999, 3027), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (3009, 3027), False, 'import sys, requests, json, time\n'), ((3080, 3108), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (3090, 3108), False, 'import sys, requests, json, time\n'), ((7015, 7063), 'json.dumps', 'json.dumps', (['dduConsumptionObjectOfManagementZone'], {}), '(dduConsumptionObjectOfManagementZone)\n', (7025, 7063), False, 'import sys, requests, json, time\n'), ((7160, 7208), 'json.dumps', 'json.dumps', (['dduConsumptionObjectOfManagementZone'], {}), '(dduConsumptionObjectOfManagementZone)\n', (7170, 7208), False, 'import sys, requests, json, time\n'), ((5369, 5397), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (5379, 5397), False, 'import sys, requests, json, time\n')]
|
import numpy as np
import scipy
import matplotlib.pyplot as plt
import sys
def compute_r_squared(data, predictions):
'''
    In exercise 5, we calculated the R^2 value for you. But why don't you try
    and calculate the R^2 value yourself.
Given a list of original data points, and also a list of predicted data points,
write a function that will compute and return the coefficient of determination (R^2)
for this data. numpy.mean() and numpy.sum() might both be useful here, but
not necessary.
Documentation about numpy.mean() and numpy.sum() below:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
http://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html
'''
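    # R^2 = 1 - SS_res / SS_tot, with SS_res = sum((data - predictions)^2)
    # and SS_tot = sum((data - mean(data))^2)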
mean = data.mean()
numerator = np.sum((data - predictions)**2)
denom = np.sum((data-mean)**2)
r_squared = 1 - numerator/denom
return r_squared
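# Illustrative check with made-up numbers (not part of the original exercise):
#   data = np.array([1.0, 2.0, 3.0]); predictions = np.array([1.1, 1.9, 3.2])
#   compute_r_squared(data, predictions) -> 1 - 0.06/2.0 = 0.97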
|
[
"numpy.sum"
] |
[((804, 837), 'numpy.sum', 'np.sum', (['((data - predictions) ** 2)'], {}), '((data - predictions) ** 2)\n', (810, 837), True, 'import numpy as np\n'), ((849, 875), 'numpy.sum', 'np.sum', (['((data - mean) ** 2)'], {}), '((data - mean) ** 2)\n', (855, 875), True, 'import numpy as np\n')]
|