code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
"""
比requests更强大python库,让你的爬虫效率提高一倍
https://mp.weixin.qq.com/s/jqGx-4t4ytDDnXxDkzbPqw
HTTPX 基础教程
https://zhuanlan.zhihu.com/p/103824900
"""
def interface(url, data):
    """Send *data* as a JSON-encoded POST request to *url*.

    Returns the raw httpx response object for the caller to inspect.
    """
    import httpx
    json_headers = {"Content-Type": "application/json; charset=UTF-8"}
    return httpx.request('POST', url, json=data, headers=json_headers)
if __name__ == '__main__':
    # Smoke-test the helper against a locally running service.
    response = interface("http://127.0.0.1:8888", {"image": 112, "name": 1})
    print('status_code: ', response.status_code)  # HTTP status code
    # print('url: ', response.url)          # request URL
    # print('headers: ', response.headers)  # response headers
    # print('cookies: ', response.cookies)  # cookie information
    print('text: ', response.text)  # response body as text
    # print('content: ', response.content)  # response body as raw bytes
|
[
"httpx.request"
] |
[((261, 312), 'httpx.request', 'httpx.request', (['"""POST"""', 'url'], {'json': 'data', 'headers': 'head'}), "('POST', url, json=data, headers=head)\n", (274, 312), False, 'import httpx\n')]
|
from http import HTTPStatus
from typing import Tuple
from uuid import UUID
from fastapi import status, APIRouter, Response, Depends, HTTPException
from command.command_handler import Status
from domain.classroom.classroom_creation_command_handler import ClassroomCreated
from domain.classroom.classroom_type import ClassroomSubject
from domain.commands import ClassroomCreationCommand, ClassroomPatchCommand
from domain.exceptions import DomainException, AggregateNotFoundException
from infrastructure.command_bus_provider import CommandBusProvider
from web.presentation.domain.detailed_classroom import DetailedClassroom
from web.presentation.service.classroom_service import get_detailed_classroom
from web.schema.classroom_response import ClassroomReadResponse, ClassroomCreatedResponse
from web.schema.classroom_schemas import ClassroomCreation, ClassroomPatch
router = APIRouter()
@router.post("/classrooms",
             response_model=ClassroomCreatedResponse,
             status_code=status.HTTP_201_CREATED,
             responses={
                 201: {
                     "description": "Create a classroom",
                     "headers": {
                         "location": {
                             "description": "The absolute path URL location of the newly created classroom",
                             "schema": {"type": "URL"},
                         }
                     }
                 },
                 404: {
                     "description": "See body message details"
                 },
                 409: {
                     "description": "See body message details"
                 }
             }
             )
def create_classroom(classroom_creation: ClassroomCreation, response: Response,
                     command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)):
    """Create a classroom from the request payload.

    Sends a ClassroomCreationCommand through the command bus, sets the
    ``location`` response header to the new resource path and returns the
    created representation. Maps AggregateNotFoundException to 404 and any
    other DomainException to 409.
    """
    try:
        command = ClassroomCreationCommand(classroom_creation.name, classroom_creation.position,
                                          classroom_creation.duration,
                                          ClassroomSubject[classroom_creation.subject],
                                          classroom_creation.start_date, classroom_creation.stop_date,
                                          list(map(lambda attendee: attendee.id, classroom_creation.attendees)))
        # NOTE(review): this local import shadows fastapi.Response for the
        # annotation below — presumably deliberate (the command-bus Response
        # type, imported locally, likely to avoid a circular import); confirm.
        from command.response import Response
        result: Tuple[Response, Status] = command_bus_provider.command_bus.send(command)
        event: ClassroomCreated = result[0].event
        # Point clients at the newly created resource.
        response.headers["location"] = f"/classrooms/{event.root_id}"
        return {
            "name": event.name,
            "id": event.root_id,
            "position": event.position,
            "subject": event.subject.value,
            "schedule": {
                "start": event.schedule.start,
                "stop": event.schedule.stop
            },
            "duration": ClassroomReadResponse.to_duration(event.duration),
            "attendees": list(map(lambda attendee: {"id": attendee["id"]}, event.attendees))
        }
    except AggregateNotFoundException as e:
        raise HTTPException(status_code=HTTPStatus.NOT_FOUND,
                            detail=f"One of the attendees with id '{e.unknown_id}' has not been found")
    except DomainException as e:
        raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message)
@router.get("/classrooms/{id}",
            response_model=ClassroomReadResponse,
            responses={
                404: {
                    "description": "Classroom has not been found"
                }
            }
            )
def get_classroom(id: UUID):
    """Fetch the classroom identified by *id* and shape it for the read model.

    Raises a 404 HTTPException when no classroom with that id exists.
    """
    try:
        classroom: DetailedClassroom = get_detailed_classroom(id)
        schedule = {
            "start": classroom.start,
            "stop": classroom.stop
        }
        duration = {
            "duration": classroom.duration.duration,
            "time_unit": classroom.duration.time_unit
        }
        return {
            "name": classroom.name,
            "id": classroom.id,
            "position": classroom.position,
            "subject": classroom.subject.value,
            "schedule": schedule,
            "duration": duration,
            "attendees": classroom.attendees
        }
    except AggregateNotFoundException:
        raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"Classroom with id '{str(id)}' not found")
@router.patch("/classrooms/{id}",
              status_code=status.HTTP_204_NO_CONTENT,
              description="Add attendees to a classroom. This resource works as a patch, "
                          "you must provide all classroom attendees (i.e: you had Clara already added to the classroom,"
                          " if you want John to join, you must provide both Clara and John "
                          "otherwise Clara will be removed",
              responses={
                  404: {
                      "description": "See body message details"
                  },
                  409: {
                      "description": "See body message details"
                  }
              }
              )
def update_classroom(id: UUID, classroom_patch: ClassroomPatch,
                     command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)):
    """Replace the classroom's attendee list with the ids from the patch payload.

    Maps AggregateNotFoundException to 404 and any other DomainException to 409.
    """
    try:
        attendee_ids = [client.id for client in classroom_patch.attendees]
        command_bus_provider.command_bus.send(ClassroomPatchCommand(id, attendee_ids))
    except AggregateNotFoundException as e:
        raise HTTPException(status_code=HTTPStatus.NOT_FOUND,
                            detail=f"One of the attendees with id '{e.unknown_id}' has not been found")
    except DomainException as e:
        raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message)
|
[
"fastapi.HTTPException",
"web.presentation.service.classroom_service.get_detailed_classroom",
"web.schema.classroom_response.ClassroomReadResponse.to_duration",
"fastapi.APIRouter",
"fastapi.Depends"
] |
[((877, 888), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (886, 888), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((1825, 1852), 'fastapi.Depends', 'Depends', (['CommandBusProvider'], {}), '(CommandBusProvider)\n', (1832, 1852), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((5353, 5380), 'fastapi.Depends', 'Depends', (['CommandBusProvider'], {}), '(CommandBusProvider)\n', (5360, 5380), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((3722, 3748), 'web.presentation.service.classroom_service.get_detailed_classroom', 'get_detailed_classroom', (['id'], {}), '(id)\n', (3744, 3748), False, 'from web.presentation.service.classroom_service import get_detailed_classroom\n'), ((2917, 2966), 'web.schema.classroom_response.ClassroomReadResponse.to_duration', 'ClassroomReadResponse.to_duration', (['event.duration'], {}), '(event.duration)\n', (2950, 2966), False, 'from web.schema.classroom_response import ClassroomReadResponse, ClassroomCreatedResponse\n'), ((3129, 3257), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'HTTPStatus.NOT_FOUND', 'detail': 'f"""One of the attendees with id \'{e.unknown_id}\' has not been found"""'}), '(status_code=HTTPStatus.NOT_FOUND, detail=\n f"One of the attendees with id \'{e.unknown_id}\' has not been found")\n', (3142, 3257), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((3328, 3392), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'HTTPStatus.CONFLICT', 'detail': 'e.message'}), '(status_code=HTTPStatus.CONFLICT, detail=e.message)\n', (3341, 3392), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((5600, 5728), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'HTTPStatus.NOT_FOUND', 'detail': 'f"""One of the attendees with id \'{e.unknown_id}\' has not been found"""'}), 
'(status_code=HTTPStatus.NOT_FOUND, detail=\n f"One of the attendees with id \'{e.unknown_id}\' has not been found")\n', (5613, 5728), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n'), ((5799, 5863), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'HTTPStatus.CONFLICT', 'detail': 'e.message'}), '(status_code=HTTPStatus.CONFLICT, detail=e.message)\n', (5812, 5863), False, 'from fastapi import status, APIRouter, Response, Depends, HTTPException\n')]
|
import logging
l = logging.getLogger("angr.codenode")
class CodeNode(object):
    """A node in a code graph: an (addr, size) span, optionally attached to a
    graph object through which successors/predecessors are resolved."""

    __slots__ = ['addr', 'size', '_graph', 'thumb']

    # Overridden by subclasses; None on the base class.
    is_hook = None

    def __init__(self, addr, size, graph=None, thumb=False):
        self.addr = addr
        self.size = size
        self.thumb = thumb
        self._graph = graph

    def __len__(self):
        return self.size

    def __eq__(self, other):
        if type(other) is Block:  # pylint: disable=unidiomatic-typecheck
            raise TypeError("You do not want to be comparing a CodeNode to a Block")
        if type(self) is not type(other):
            return False
        return (self.addr == other.addr
                and self.size == other.size
                and self.is_hook == other.is_hook
                and self.thumb == other.thumb)

    def __ne__(self, other):
        return not self == other

    def __cmp__(self, other):
        raise TypeError("Comparison with a code node")

    def __hash__(self):
        # Hash only on (addr, size) so that equal nodes hash equally.
        return hash((self.addr, self.size))

    def successors(self):
        """Successor nodes of this node in the attached graph."""
        graph = self._graph
        if graph is None:
            raise ValueError("Cannot calculate successors for graphless node")
        return list(graph.successors(self))

    def predecessors(self):
        """Predecessor nodes of this node in the attached graph."""
        graph = self._graph
        if graph is None:
            raise ValueError("Cannot calculate predecessors for graphless node")
        return list(graph.predecessors(self))

    def __getstate__(self):
        return (self.addr, self.size)

    def __setstate__(self, state):
        self.__init__(*state)
class BlockNode(CodeNode):
    """A CodeNode backed by the raw bytes of a basic block."""

    __slots__ = ['bytestr']

    # Basic blocks are never hooks.
    is_hook = False

    def __init__(self, addr, size, bytestr=None, **kwargs):
        super(BlockNode, self).__init__(addr, size, **kwargs)
        self.bytestr = bytestr

    def __repr__(self):
        return '<BlockNode at %#x (size %d)>' % (self.addr, self.size)

    def __getstate__(self):
        return (self.addr, self.size, self.bytestr, self.thumb)

    def __setstate__(self, state):
        # Last element is the thumb flag; the rest map onto __init__ positionally.
        self.__init__(*state[:-1], thumb=state[-1])
class HookNode(CodeNode):
    """A CodeNode representing a hooked address backed by a SimProcedure."""

    __slots__ = ['sim_procedure']

    is_hook = True

    def __init__(self, addr, size, sim_procedure, **kwargs):
        super(HookNode, self).__init__(addr, size, **kwargs)
        self.sim_procedure = sim_procedure

    def __repr__(self):
        return '<HookNode %r at %#x (size %s)>' % (self.sim_procedure, self.addr, self.size)

    def __hash__(self):
        # Unlike the base class, the hooked procedure participates in the hash.
        return hash((self.addr, self.size, self.sim_procedure))

    def __eq__(self, other):
        if not super(HookNode, self).__eq__(other):
            return False
        return self.sim_procedure == other.sim_procedure

    def __getstate__(self):
        return (self.addr, self.size, self.sim_procedure)

    def __setstate__(self, state):
        self.__init__(*state)
from .block import Block
|
[
"logging.getLogger"
] |
[((19, 53), 'logging.getLogger', 'logging.getLogger', (['"""angr.codenode"""'], {}), "('angr.codenode')\n", (36, 53), False, 'import logging\n')]
|
import os

# Root directory of the Scrapy project to deploy via scrapyd.
scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path)  # switch into the project root so the command runs from there
os.system('scrapyd-deploy')  # deploy the project to the scrapyd server
import json
import time
import requests

# scrapyd endpoints: schedule a spider run, and cancel a running job.
start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
              'spider': 'Example'}
end_url = 'http://172.16.31.10:6800/cancel.json'
end_data = {'project': 'DeploySpider'}

# NOTE(review): credentials are hard-coded; the second assignment overwrites
# the first result, and cancel.json is called without a 'job' id — the
# commented-out block below looks like the intended flow; confirm.
result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text
# result_dict = json.loads(result)
# job_id = result_dict['jobid']
# print(f'Started spider, jobid: {job_id}')
#
# time.sleep(5)
# end_data['job'] = job_id
# result = requests.post(end_url, data=end_data).text
# print(result)
|
[
"os.chdir",
"os.system",
"requests.post"
] |
[((81, 110), 'os.chdir', 'os.chdir', (['scrapy_project_path'], {}), '(scrapy_project_path)\n', (89, 110), False, 'import os\n'), ((132, 159), 'os.system', 'os.system', (['"""scrapyd-deploy"""'], {}), "('scrapyd-deploy')\n", (141, 159), False, 'import os\n'), ((431, 501), 'requests.post', 'requests.post', (['start_url'], {'data': 'start_data', 'auth': "('kingname', 'genius')"}), "(start_url, data=start_data, auth=('kingname', 'genius'))\n", (444, 501), False, 'import requests\n'), ((516, 582), 'requests.post', 'requests.post', (['end_url'], {'data': 'end_data', 'auth': "('kingname', 'genius')"}), "(end_url, data=end_data, auth=('kingname', 'genius'))\n", (529, 582), False, 'import requests\n')]
|
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def _eps(arr):
    """Return a comparison tolerance for *arr*: the dtype's machine epsilon
    for float numpy arrays, otherwise a fixed fallback of 1e-4."""
    is_float_array = ia.is_np_array(arr) and arr.dtype.kind == "f"
    if not is_float_array:
        return 1e-4
    return np.finfo(arr.dtype).eps
class Test_handle_continuous_param(unittest.TestCase):
    """Tests for iap.handle_continuous_param(): value/range validation and
    the tuple_to_uniform / list_to_choice conversion switches."""

    def test_value_range_is_none(self):
        result = iap.handle_continuous_param(
            1, "[test1]",
            value_range=None, tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_range_is_tuple_of_nones(self):
        result = iap.handle_continuous_param(
            1, "[test1b]",
            value_range=(None, None),
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_param_is_stochastic_parameter(self):
        result = iap.handle_continuous_param(
            iap.Deterministic(1), "[test2]",
            value_range=None, tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_range_is_tuple_of_integers(self):
        result = iap.handle_continuous_param(
            1, "[test3]",
            value_range=(0, 10),
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_param_is_outside_of_value_range(self):
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                1, "[test4]",
                value_range=(2, 12),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test4]" in str(context.exception))

    def test_param_is_inside_value_range_and_no_lower_bound(self):
        # value within value range (without lower bound)
        result = iap.handle_continuous_param(
            1, "[test5]",
            value_range=(None, 12),
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_param_is_outside_of_value_range_and_no_lower_bound(self):
        # value outside of value range (without lower bound)
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                1, "[test6]",
                value_range=(None, 0),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test6]" in str(context.exception))

    def test_param_is_inside_value_range_and_no_upper_bound(self):
        # value within value range (without upper bound)
        result = iap.handle_continuous_param(
            1, "[test7]",
            value_range=(-1, None),
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_param_is_outside_of_value_range_and_no_upper_bound(self):
        # value outside of value range (without upper bound)
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                1, "[test8]",
                value_range=(2, None),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test8]" in str(context.exception))

    def test_tuple_as_value_but_no_tuples_allowed(self):
        # tuple as value, but no tuples allowed
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                (1, 2), "[test9]",
                value_range=None,
                tuple_to_uniform=False,
                list_to_choice=True)
        self.assertTrue("[test9]" in str(context.exception))

    def test_tuple_as_value_and_tuples_allowed(self):
        # tuple as value and tuple allowed
        result = iap.handle_continuous_param(
            (1, 2), "[test10]",
            value_range=None,
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Uniform))

    def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
        # tuple as value and tuple allowed and tuple within value range
        result = iap.handle_continuous_param(
            (1, 2), "[test11]",
            value_range=(0, 10),
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Uniform))

    def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
        # tuple as value and tuple allowed and tuple partially outside of
        # value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                (1, 2), "[test12]",
                value_range=(1.5, 13),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test12]" in str(context.exception))

    def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
        # tuple as value and tuple allowed and tuple fully outside of value
        # range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                (1, 2), "[test13]",
                value_range=(3, 13),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test13]" in str(context.exception))

    def test_list_as_value_but_no_lists_allowed(self):
        # list as value, but no list allowed
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                [1, 2, 3], "[test14]",
                value_range=None,
                tuple_to_uniform=True,
                list_to_choice=False)
        self.assertTrue("[test14]" in str(context.exception))

    def test_list_as_value_and_lists_allowed(self):
        # list as value and list allowed
        result = iap.handle_continuous_param(
            [1, 2, 3], "[test15]",
            value_range=None,
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Choice))

    def test_list_value_and_allowed_and_partially_outside_value_range(self):
        # list as value and list allowed and list partially outside of value
        # range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                [1, 2], "[test16]",
                value_range=(1.5, 13),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test16]" in str(context.exception))

    def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
        # list as value and list allowed and list fully outside of value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                [1, 2], "[test17]",
                value_range=(3, 13),
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue("[test17]" in str(context.exception))

    def test_value_inside_value_range_and_value_range_given_as_callable(self):
        # single value within value range given as callable
        def _value_range(x):
            return -1 < x < 1

        result = iap.handle_continuous_param(
            1, "[test18]",
            value_range=_value_range,
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_bad_datatype_as_value_range(self):
        # bad datatype for value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_continuous_param(
                1, "[test19]",
                value_range=False,
                tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue(
            "Unexpected input for value_range" in str(context.exception))
class Test_handle_discrete_param(unittest.TestCase):
    """Tests for iap.handle_discrete_param(): value/range validation, the
    tuple_to_uniform / list_to_choice switches and the allow_floats flag."""

    def test_float_value_inside_value_range_but_no_floats_allowed(self):
        # float value without value range when no float value is allowed
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                1.5, "[test0]",
                value_range=None,
                tuple_to_uniform=True,
                list_to_choice=True, allow_floats=False)
        self.assertTrue("[test0]" in str(context.exception))

    def test_value_range_is_none(self):
        # value without value range
        result = iap.handle_discrete_param(
            1, "[test1]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_range_is_tuple_of_nones(self):
        # value without value range as (None, None)
        result = iap.handle_discrete_param(
            1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_is_stochastic_parameter(self):
        # stochastic parameter
        result = iap.handle_discrete_param(
            iap.Deterministic(1), "[test2]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_inside_value_range(self):
        # value within value range
        result = iap.handle_discrete_param(
            1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_outside_value_range(self):
        # value outside of value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test4]" in str(context.exception))

    def test_value_inside_value_range_no_lower_bound(self):
        # value within value range (without lower bound)
        result = iap.handle_discrete_param(
            1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_outside_value_range_no_lower_bound(self):
        # value outside of value range (without lower bound)
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test6]" in str(context.exception))

    def test_value_inside_value_range_no_upper_bound(self):
        # value within value range (without upper bound)
        result = iap.handle_discrete_param(
            1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_value_outside_value_range_no_upper_bound(self):
        # value outside of value range (without upper bound)
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test8]" in str(context.exception))

    def test_value_is_tuple_but_no_tuples_allowed(self):
        # tuple as value, but no tuples allowed
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                (1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test9]" in str(context.exception))

    def test_value_is_tuple_and_tuples_allowed(self):
        # tuple as value and tuple allowed
        result = iap.handle_discrete_param(
            (1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_inside_value_range(self):
        # tuple as value and tuple allowed and tuple within value range
        result = iap.handle_discrete_param(
            (1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
        # tuple as value and tuple allowed and tuple within value range with
        # allow_floats=False
        result = iap.handle_discrete_param(
            (1, 2), "[test11b]", value_range=(0, 10),
            tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.assertTrue(isinstance(result, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
        # tuple as value and tuple allowed and tuple partially outside of
        # value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                (1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test12]" in str(context.exception))

    def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
        # tuple as value and tuple allowed and tuple fully outside of value
        # range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                (1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test13]" in str(context.exception))

    def test_value_list_but_not_allowed(self):
        # list as value, but no list allowed
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                [1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True,
                list_to_choice=False, allow_floats=True)
        self.assertTrue("[test14]" in str(context.exception))

    def test_value_list_and_allowed(self):
        # list as value and list allowed
        result = iap.handle_discrete_param(
            [1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(result, iap.Choice))

    def test_value_list_and_allowed_and_partially_outside_value_range(self):
        # list as value and list allowed and list partially outside of value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                [1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test16]" in str(context.exception))

    def test_value_list_and_allowed_and_fully_outside_value_range(self):
        # list as value and list allowed and list fully outside of value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                [1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test17]" in str(context.exception))

    def test_value_inside_value_range_given_as_callable(self):
        # single value within value range given as callable
        def _value_range(x):
            return -1 < x < 1

        result = iap.handle_discrete_param(
            1, "[test18]",
            value_range=_value_range,
            tuple_to_uniform=True,
            list_to_choice=True)
        self.assertTrue(isinstance(result, iap.Deterministic))

    def test_bad_datatype_as_value_range(self):
        # bad datatype for value range
        with self.assertRaises(Exception) as context:
            _ = iap.handle_discrete_param(
                1, "[test19]", value_range=False, tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue(
            "Unexpected input for value_range" in str(context.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
    """Tests for iap.handle_categorical_string_param(): ia.ALL, single
    strings, lists of strings, stochastic parameters and invalid input."""

    def test_arg_is_all(self):
        valid_values = ["class1", "class2"]
        param = iap.handle_categorical_string_param(
            ia.ALL, "foo", valid_values)
        assert isinstance(param, iap.Choice)
        assert param.a == valid_values

    def test_arg_is_valid_str(self):
        valid_values = ["class1", "class2"]
        param = iap.handle_categorical_string_param(
            "class1", "foo", valid_values)
        assert isinstance(param, iap.Deterministic)
        assert param.value == "class1"

    def test_arg_is_invalid_str(self):
        valid_values = ["class1", "class2"]
        with self.assertRaises(AssertionError) as ctx:
            _param = iap.handle_categorical_string_param(
                "class3", "foo", valid_values)
        expected = (
            "Expected parameter 'foo' to be one of: class1, class2. "
            "Got: class3.")
        assert expected == str(ctx.exception)

    def test_arg_is_valid_list(self):
        valid_values = ["class1", "class2", "class3"]
        param = iap.handle_categorical_string_param(
            ["class1", "class3"], "foo", valid_values)
        assert isinstance(param, iap.Choice)
        assert param.a == ["class1", "class3"]

    def test_arg_is_list_with_invalid_types(self):
        valid_values = ["class1", "class2", "class3"]
        with self.assertRaises(AssertionError) as ctx:
            _param = iap.handle_categorical_string_param(
                ["class1", False], "foo", valid_values)
        expected = (
            "Expected list provided for parameter 'foo' to only contain "
            "strings, got types: str, bool."
        )
        assert expected in str(ctx.exception)

    def test_arg_is_invalid_list(self):
        valid_values = ["class1", "class2", "class3"]
        with self.assertRaises(AssertionError) as ctx:
            _param = iap.handle_categorical_string_param(
                ["class1", "class4"], "foo", valid_values)
        expected = (
            "Expected list provided for parameter 'foo' to only contain "
            "the following allowed strings: class1, class2, class3. "
            "Got strings: class1, class4."
        )
        assert expected in str(ctx.exception)

    def test_arg_is_stochastic_param(self):
        param = iap.Deterministic("class1")
        param_out = iap.handle_categorical_string_param(
            param, "foo", ["class1"])
        assert param_out is param

    def test_arg_is_invalid_datatype(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_categorical_string_param(
                False, "foo", ["class1"])
        expected = "Expected parameter 'foo' to be imgaug.ALL"
        assert expected in str(ctx.exception)
class Test_handle_probability_param(unittest.TestCase):
    """Tests for iap.handle_probability_param(): bool-like values, float
    probabilities, stochastic parameters and out-of-range input."""

    def test_bool_like_values(self):
        for val in [True, False, 0, 1, 0.0, 1.0]:
            with self.subTest(param=val):
                p = iap.handle_probability_param(val, "[test1]")
                assert isinstance(p, iap.Deterministic)
                assert p.value == int(val)

    def test_float_probabilities(self):
        for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
            with self.subTest(param=val):
                p = iap.handle_probability_param(val, "[test2]")
                assert isinstance(p, iap.Binomial)
                assert isinstance(p.p, iap.Deterministic)
                assert val-1e-8 < p.p.value < val+1e-8

    def test_probability_is_stochastic_parameter(self):
        det = iap.Deterministic(1)
        p = iap.handle_probability_param(det, "[test3]")
        assert p == det

    def test_probability_has_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _p = iap.handle_probability_param("test", "[test4]")
        self.assertTrue("Expected " in str(context.exception))

    def test_probability_is_negative(self):
        with self.assertRaises(AssertionError):
            _p = iap.handle_probability_param(-0.01, "[test5]")

    def test_probability_is_above_100_percent(self):
        with self.assertRaises(AssertionError):
            _p = iap.handle_probability_param(1.01, "[test6]")
class Test_force_np_float_dtype(unittest.TestCase):
    """Tests for iap.force_np_float_dtype(): float dtypes pass through,
    integer dtypes are promoted to float64."""

    def test_common_dtypes(self):
        # (input dtype, expected output dtype) pairs
        dtypes = [
            ("float16", "float16"),
            ("float32", "float32"),
            ("float64", "float64"),
            ("uint8", "float64"),
            ("int32", "float64")
        ]
        for dtype_in, expected in dtypes:
            with self.subTest(dtype_in=dtype_in):
                arr = np.zeros((1,), dtype=dtype_in)
                observed = iap.force_np_float_dtype(arr).dtype
                assert observed.name == expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
    """Tests for iap.both_np_float_if_one_is_float(): float inputs keep
    their dtype, non-float inputs are promoted to float64."""

    def test_float16_float32(self):
        a1 = np.zeros((1,), dtype=np.float16)
        b1 = np.zeros((1,), dtype=np.float32)
        a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
        assert a2.dtype.name == "float16"
        assert b2.dtype.name == "float32"

    def test_float16_int32(self):
        a1 = np.zeros((1,), dtype=np.float16)
        b1 = np.zeros((1,), dtype=np.int32)
        a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
        assert a2.dtype.name == "float16"
        assert b2.dtype.name == "float64"

    def test_int32_float16(self):
        a1 = np.zeros((1,), dtype=np.int32)
        b1 = np.zeros((1,), dtype=np.float16)
        a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
        assert a2.dtype.name == "float64"
        assert b2.dtype.name == "float16"

    def test_int32_uint8(self):
        a1 = np.zeros((1,), dtype=np.int32)
        b1 = np.zeros((1,), dtype=np.uint8)
        a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
        assert a2.dtype.name == "float64"
        assert b2.dtype.name == "float64"
class Test_draw_distributions_grid(unittest.TestCase):
    """Tests for iap.draw_distributions_grid(): per-parameter graph drawing
    and the delegation to imgaug.imgaug.draw_grid (mocked)."""

    def setUp(self):
        reseed()

    def test_basic_functionality(self):
        # two mocked parameters, each "drawing" a 1x1 RGB graph
        params = [mock.Mock(), mock.Mock()]
        params[0].draw_distribution_graph.return_value = \
            np.zeros((1, 1, 3), dtype=np.uint8)
        params[1].draw_distribution_graph.return_value = \
            np.zeros((1, 1, 3), dtype=np.uint8)
        draw_grid_mock = mock.Mock()
        draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
        with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock):
            grid_observed = iap.draw_distributions_grid(
                params, rows=2, cols=3, graph_sizes=(20, 21),
                sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])
        # the grid is whatever draw_grid returned; each parameter was drawn
        # once with its own sample size and title, then resized to graph_sizes
        assert grid_observed.shape == (4, 3, 2)
        assert params[0].draw_distribution_graph.call_count == 1
        assert params[1].draw_distribution_graph.call_count == 1
        assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
        assert params[0].draw_distribution_graph.call_args[1]["title"] == "A"
        assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
        assert params[1].draw_distribution_graph.call_args[1]["title"] == "B"
        assert draw_grid_mock.call_count == 1
        assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3)
        assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3)
        assert draw_grid_mock.call_args[1]["rows"] == 2
        assert draw_grid_mock.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
    """Smoke test for ``StochasticParameter.draw_distribution_graph()``."""
    def test_basic_functionality(self):
        # Only coarse properties can be checked here: the rendered
        # histogram image is not well-defined down to the pixel.
        param = iap.Uniform(0.0, 1.0)
        img_untitled = param.draw_distribution_graph(title=None,
                                                     size=(10000,),
                                                     bins=100)
        # at least 10% of the pixels should be white-ish background
        count_whiteish = np.sum(img_untitled[..., :] > [200, 200, 200])
        count_total = np.prod(img_untitled.shape)
        img_titled = param.draw_distribution_graph(title="test",
                                                   size=(10000,),
                                                   bins=100)
        assert img_untitled.ndim == 3
        assert img_untitled.shape[2] == 3
        assert count_whiteish > 0.1 * count_total
        assert img_titled.ndim == 3
        assert img_titled.shape[2] == 3
        # adding a title must visibly change the rendered image
        assert not np.array_equal(img_titled, img_untitled)
class TestStochasticParameter(unittest.TestCase):
    """Tests for ``copy()`` and ``deepcopy()`` of stochastic parameters."""
    def setUp(self):
        reseed()
    def test_copy(self):
        # A shallow copy shares nested parameter objects with the source,
        # so mutating the source's nested state is visible in the copy.
        inner = iap.Uniform(1.0, 10.0)
        outer = iap.Discretize(inner)
        inner.a = [1.0]
        shallow = outer.copy()
        outer.other_param.a[0] += 1
        assert isinstance(shallow, iap.Discretize)
        assert isinstance(shallow.other_param, iap.Uniform)
        assert shallow.other_param.a[0] == outer.other_param.a[0]
    def test_deepcopy(self):
        # A deep copy must not share mutable state with the source:
        # mutating the source's nested state leaves the copy untouched.
        inner = iap.Uniform(1.0, 10.0)
        outer = iap.Discretize(inner)
        inner.a = [1.0]
        deep = outer.deepcopy()
        outer.other_param.a[0] += 1
        assert isinstance(deep, iap.Discretize)
        assert isinstance(deep.other_param, iap.Uniform)
        assert deep.other_param.a[0] != outer.other_param.a[0]
class TestStochasticParameterOperators(unittest.TestCase):
    """Tests for the arithmetic operator overloads on StochasticParameter.

    Each operator (``*``, ``/``, ``//``, ``+``, ``-``, ``**``) must wrap
    its operands in the corresponding parameter class (Multiply, Divide,
    Discretize(Divide), Add, Subtract, Power), promote plain numbers to
    ``Deterministic``, and reject unsupported types (e.g. strings).
    """
    def setUp(self):
        reseed()
    # --- Multiply (__mul__ / __rmul__) ---
    def test_multiply_stochasic_params(self):
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 * param2
        assert isinstance(param3, iap.Multiply)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_multiply_stochastic_param_with_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 * 2
        assert isinstance(param3, iap.Multiply)
        assert param3.other_param == param1
        # plain numbers are auto-wrapped in Deterministic
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_multiply_integer_with_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 * param1
        assert isinstance(param3, iap.Multiply)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_multiply_string_with_stochastic_param_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" * param1
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_multiply_stochastic_param_with_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 * "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Divide (__truediv__ / __rtruediv__) ---
    def test_divide_stochastic_params(self):
        # Divide (__truediv__)
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 / param2
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_divide_stochastic_param_by_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 / 2
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_divide_integer_by_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 / param1
        assert isinstance(param3, iap.Divide)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_divide_string_by_stochastic_param_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" / param1
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_divide_stochastic_param_by_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 / "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Divide via the explicit __div__/__rdiv__ protocol (py2 legacy) ---
    def test_div_stochastic_params(self):
        # Divide (__div__)
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1.__div__(param2)
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_div_stochastic_param_by_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1.__div__(2)
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_div_stochastic_param_by_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1.__div__("test")
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_rdiv_stochastic_param_by_integer(self):
        # Divide (__rdiv__)
        param1 = iap.Normal(0, 1)
        param3 = param1.__rdiv__(2)
        assert isinstance(param3, iap.Divide)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_rdiv_stochastic_param_by_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1.__rdiv__("test")
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Floor division: Divide wrapped in Discretize ---
    def test_floordiv_stochastic_params(self):
        # Divide (__floordiv__)
        param1_int = iap.DiscreteUniform(0, 10)
        param2_int = iap.Choice([1, 2])
        param3 = param1_int // param2_int
        assert isinstance(param3, iap.Discretize)
        assert isinstance(param3.other_param, iap.Divide)
        assert param3.other_param.other_param == param1_int
        assert param3.other_param.val == param2_int
    def test_floordiv_symbol_stochastic_param_by_integer(self):
        param1_int = iap.DiscreteUniform(0, 10)
        param3 = param1_int // 2
        assert isinstance(param3, iap.Discretize)
        assert isinstance(param3.other_param, iap.Divide)
        assert param3.other_param.other_param == param1_int
        assert isinstance(param3.other_param.val, iap.Deterministic)
        assert param3.other_param.val.value == 2
    def test_floordiv_symbol_integer_by_stochastic_param(self):
        param1_int = iap.DiscreteUniform(0, 10)
        param3 = 2 // param1_int
        assert isinstance(param3, iap.Discretize)
        assert isinstance(param3.other_param, iap.Divide)
        assert isinstance(param3.other_param.other_param, iap.Deterministic)
        assert param3.other_param.other_param.value == 2
        assert param3.other_param.val == param1_int
    def test_floordiv_symbol_string_by_stochastic_should_fail(self):
        param1_int = iap.DiscreteUniform(0, 10)
        with self.assertRaises(Exception) as context:
            _ = "test" // param1_int
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
        param1_int = iap.DiscreteUniform(0, 10)
        with self.assertRaises(Exception) as context:
            _ = param1_int // "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Add (__add__ / __radd__) ---
    def test_add_stochastic_params(self):
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 + param2
        assert isinstance(param3, iap.Add)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_add_integer_to_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 + 2
        assert isinstance(param3, iap.Add)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_add_stochastic_param_to_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 + param1
        assert isinstance(param3, iap.Add)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_add_stochastic_param_to_string(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" + param1
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_add_string_to_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 + "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Subtract (__sub__ / __rsub__) ---
    def test_subtract_stochastic_params(self):
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 - param2
        assert isinstance(param3, iap.Subtract)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_subtract_integer_from_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 - 2
        assert isinstance(param3, iap.Subtract)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_subtract_stochastic_param_from_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 - param1
        assert isinstance(param3, iap.Subtract)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_subtract_stochastic_param_from_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" - param1
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_subtract_string_from_stochastic_param_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 - "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
    # --- Power (__pow__ / __rpow__) ---
    def test_exponentiate_stochastic_params(self):
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 ** param2
        assert isinstance(param3, iap.Power)
        assert param3.other_param == param1
        assert param3.val == param2
    def test_exponentiate_stochastic_param_by_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 ** 2
        assert isinstance(param3, iap.Power)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2
    def test_exponentiate_integer_by_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 ** param1
        assert isinstance(param3, iap.Power)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1
    def test_exponentiate_string_by_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" ** param1
        self.assertTrue("Invalid datatypes" in str(context.exception))
    def test_exponentiate_stochastic_param_by_string(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 ** "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
    """Tests for ``iap.Binomial``: repr, degenerate p values, sampling
    statistics for p=0.5, stochastic/tuple-valued p, and seed
    determinism."""
    def setUp(self):
        reseed()
    def test___init___p_is_zero(self):
        param = iap.Binomial(0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Binomial(Deterministic(int 0))"
        )
    def test___init___p_is_one(self):
        param = iap.Binomial(1.0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Binomial(Deterministic(float 1.00000000))"
        )
    def test_p_is_zero(self):
        # p=0 must always yield 0
        param = iap.Binomial(0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 0
        assert np.all(samples == 0)
    def test_p_is_one(self):
        # p=1 must always yield 1
        param = iap.Binomial(1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_p_is_50_percent(self):
        # p=0.5 over 10k samples: both outcomes within ~10% of 5000
        param = iap.Binomial(0.5)
        sample = param.draw_sample()
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert sample.shape == tuple()
        assert samples.shape == (10000,)
        assert sample in [0, 1]
        assert len(unique) == 2
        for val, count in zip(unique, counts):
            if val == 0:
                assert 5000 - 500 < count < 5000 + 500
            elif val == 1:
                assert 5000 - 500 < count < 5000 + 500
            else:
                assert False
    def test_p_is_list(self):
        # stochastic p: each batch's empirical rate must match one of
        # the two choices
        param = iap.Binomial(iap.Choice([0.25, 0.75]))
        for _ in sm.xrange(10):
            samples = param.draw_samples((1000,))
            p = np.sum(samples) / samples.size
            assert (
                (0.25 - 0.05 < p < 0.25 + 0.05)
                or (0.75 - 0.05 < p < 0.75 + 0.05)
            )
    def test_p_is_tuple(self):
        # tuple p means uniform(0, 1) per call; the empirical rate
        # should therefore change between most calls
        param = iap.Binomial((0.0, 1.0))
        last_p = 0.5
        diffs = []
        for _ in sm.xrange(30):
            samples = param.draw_samples((1000,))
            p = np.sum(samples).astype(np.float32) / samples.size
            diffs.append(abs(p - last_p))
            last_p = p
        nb_p_changed = sum([diff > 0.05 for diff in diffs])
        assert nb_p_changed > 15
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Binomial(0.5)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestChoice(unittest.TestCase):
    """Tests for ``iap.Choice``: repr, sampling from int/float/string
    lists, replace=False behaviour, custom probabilities, nested
    stochastic parameters, seed determinism, and argument validation."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Choice([0, 1, 2])
        assert (
            param.__str__()
            == param.__repr__()
            == "Choice(a=[0, 1, 2], replace=True, p=None)"
        )
    def test_value_is_list(self):
        param = iap.Choice([0, 1, 2])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )
    def test_sampled_values_match_expected_counts(self):
        # uniform choice: each value near 1/3 of 10k samples (5% tol)
        param = iap.Choice([0, 1, 2])
        samples = param.draw_samples((10000,))
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )
    def test_value_is_list_containing_negative_number(self):
        param = iap.Choice([-1, 1])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 1]
        assert np.all(np.logical_or(samples == -1, samples == 1))
    def test_value_is_list_of_floats(self):
        param = iap.Choice([-1.2, 1.7])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        # float comparisons use an epsilon around each candidate value
        assert (
            (
                -1.2 - _eps(sample)
                < sample <
                -1.2 + _eps(sample)
            )
            or
            (
                1.7 - _eps(sample)
                < sample <
                1.7 + _eps(sample)
            )
        )
        assert np.all(
            np.logical_or(
                np.logical_and(
                    -1.2 - _eps(sample) < samples,
                    samples < -1.2 + _eps(sample)
                ),
                np.logical_and(
                    1.7 - _eps(sample) < samples,
                    samples < 1.7 + _eps(sample)
                )
            )
        )
    def test_value_is_list_of_strings(self):
        param = iap.Choice(["first", "second", "third"])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in ["first", "second", "third"]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == "first",
                    samples == "second"
                ),
                samples == "third"
            )
        )
    def test_sample_without_replacing(self):
        # replace=False: drawing 50 of 100 values must yield no duplicates
        param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
        samples = param.draw_samples((50,))
        seen = [0 for _ in sm.xrange(100)]
        for sample in samples:
            seen[sample-1] += 1
        assert all([count in [0, 1] for count in seen])
    def test_non_uniform_probabilities_over_elements(self):
        # p=[0.25, 0.75]: counts near 2500/7500 over 10k samples
        param = iap.Choice([0, 1], p=[0.25, 0.75])
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert len(unique) == 2
        for val, count in zip(unique, counts):
            if val == 0:
                assert 2500 - 500 < count < 2500 + 500
            elif val == 1:
                assert 7500 - 500 < count < 7500 + 500
            else:
                assert False
    def test_list_contains_stochastic_parameter(self):
        # nested Choice is sampled when selected, splitting its 50%
        # share evenly between 0 and 1
        param = iap.Choice([iap.Choice([0, 1]), 2])
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert len(unique) == 3
        for val, count in zip(unique, counts):
            if val in [0, 1]:
                assert 2500 - 500 < count < 2500 + 500
            elif val == 2:
                assert 5000 - 500 < count < 5000 + 500
            else:
                assert False
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Choice([-1, 0, 1, 2, 3])
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
    def test_value_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice(123)
        self.assertTrue(
            "Expected a to be an iterable" in str(context.exception))
    def test_p_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=123)
        self.assertTrue("Expected p to be" in str(context.exception))
    def test_value_and_p_have_unequal_lengths(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=[1])
        self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
    """Tests for ``iap.DiscreteUniform``: repr, int/float/reversed
    bounds, expected value counts, degenerate bounds, and seed
    determinism.

    Fix: ``test_samples_same_values_for_same_seeds`` previously
    instantiated ``iap.Uniform`` (a copy-paste slip from TestUniform),
    so DiscreteUniform's determinism was never actually tested; it now
    uses ``iap.DiscreteUniform``.
    """
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.DiscreteUniform(0, 2)
        assert (
            param.__str__()
            == param.__repr__()
            == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
        )
    def test_bounds_are_ints(self):
        param = iap.DiscreteUniform(0, 2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )
    def test_samples_match_expected_counts(self):
        # each of the three values should get ~1/3 of 10k samples
        param = iap.DiscreteUniform(0, 2)
        samples = param.draw_samples((10000,))
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )
    def test_lower_bound_is_negative(self):
        param = iap.DiscreteUniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == -1, samples == 0),
                samples == 1
            )
        )
    def test_bounds_are_floats(self):
        # float bounds are truncated to ints before sampling
        param = iap.DiscreteUniform(-1.2, 1.2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )
    def test_lower_and_upper_bound_have_wrong_order(self):
        # reversed bounds are swapped internally
        param = iap.DiscreteUniform(1, -1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )
    def test_lower_and_upper_bound_are_the_same(self):
        # degenerate range collapses to a constant
        param = iap.DiscreteUniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((100,))
        assert sample == 1
        assert np.all(samples == 1)
    def test_samples_same_values_for_same_seeds(self):
        # was iap.Uniform(-1, 1) -- must test the class under test
        param = iap.DiscreteUniform(-1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
    """Tests for ``iap.Poisson``: repr, non-negativity, histogram
    comparison against numpy's poisson sampler, and seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Poisson(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Poisson(Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Poisson(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample
    def test_via_comparison_to_np_poisson(self):
        # per-value counts must roughly match a direct numpy poisson draw
        param = iap.Poisson(1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).poisson(
            lam=1, size=(100, 1000))
        assert samples.shape == (100, 1000)
        for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
            count_direct = int(np.sum(samples_direct == i))
            count = np.sum(samples == i)
            # floor of 250 keeps the tolerance sane for rare values
            tolerance = max(count_direct * 0.1, 250)
            assert count_direct - tolerance < count < count_direct + tolerance
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Poisson(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
    """Tests for ``iap.Normal``: repr, density-histogram comparison
    against numpy's normal sampler, stochastic loc, scale effect, and
    seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Normal(0, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Normal(0, 1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
    def test_via_comparison_to_np_normal(self):
        # compare clipped density histograms against a direct numpy draw
        param = iap.Normal(0, 1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
                                                   size=(100, 1000))
        samples = np.clip(samples, -1, 1)
        samples_direct = np.clip(samples_direct, -1, 1)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(-1.0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_loc_is_stochastic_parameter(self):
        # loc=Choice([-100, 100]): batch means cluster around one of
        # the two locations, roughly half the time each
        param = iap.Normal(iap.Choice([-100, 100]), 1)
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if -100 - 10 < exp < -100 + 10:
                seen[0] += 1
            elif 100 - 10 < exp < 100 + 10:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_scale(self):
        # larger scale -> larger sample standard deviation
        param1 = iap.Normal(0, 1)
        param2 = iap.Normal(0, 100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.std(samples1) < np.std(samples2)
        assert 100 - 10 < np.std(samples2) < 100 + 10
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Normal(0, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestTruncatedNormal(unittest.TestCase):
    """Tests for ``iap.TruncatedNormal``: repr with default/custom
    bounds, zero scale, scale effect, stochastic loc, bound clipping,
    and seed (in)equality."""
    def setUp(self):
        reseed()
    def test___init__(self):
        # default bounds are -inf/+inf
        param = iap.TruncatedNormal(0, 1)
        expected = (
            "TruncatedNormal("
            "loc=Deterministic(int 0), "
            "scale=Deterministic(int 1), "
            "low=Deterministic(float -inf), "
            "high=Deterministic(float inf)"
            ")"
        )
        assert (
            param.__str__()
            == param.__repr__()
            == expected
        )
    def test___init___custom_range(self):
        param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
        expected = (
            "TruncatedNormal("
            "loc=Deterministic(int 0), "
            "scale=Deterministic(int 1), "
            "low=Deterministic(int -100), "
            "high=Deterministic(float 50.00000000)"
            ")"
        )
        assert (
            param.__str__()
            == param.__repr__()
            == expected
        )
    def test_scale_is_zero(self):
        # scale=0 collapses the distribution onto loc
        param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
        samples = param.draw_samples((100,))
        assert np.allclose(samples, 0.5)
    def test_scale(self):
        param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
        param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.std(samples1) < np.std(samples2)
        assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
        assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
    def test_loc_is_stochastic_parameter(self):
        # tiny scale makes each batch mean sit close to the chosen loc
        param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
                                    low=-1000, high=1000)
        seen = [0, 0]
        for _ in sm.xrange(200):
            samples = param.draw_samples((5,))
            observed = np.mean(samples)
            dist1 = np.abs(-100 - observed)
            dist2 = np.abs(100 - observed)
            if dist1 < 1:
                seen[0] += 1
            elif dist2 < 1:
                seen[1] += 1
            else:
                assert False
        assert np.isclose(seen[0], 100, rtol=0, atol=20)
        assert np.isclose(seen[1], 100, rtol=0, atol=20)
    def test_samples_are_within_bounds(self):
        param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
        samples = param.draw_samples((1000,))
        # are all within bounds
        assert np.all(samples >= -5.0 - 1e-4)
        assert np.all(samples <= 7.5 + 1e-4)
        # at least some samples close to bounds
        assert np.any(samples <= -4.5)
        assert np.any(samples >= 7.0)
        # at least some samples close to loc
        assert np.any(np.abs(samples) < 0.5)
    def test_samples_same_values_for_same_seeds(self):
        param = iap.TruncatedNormal(0, 1)
        samples1 = param.draw_samples((10, 5), random_state=1234)
        samples2 = param.draw_samples((10, 5), random_state=1234)
        assert np.allclose(samples1, samples2)
    def test_samples_different_values_for_different_seeds(self):
        param = iap.TruncatedNormal(0, 1)
        samples1 = param.draw_samples((10, 5), random_state=1234)
        samples2 = param.draw_samples((10, 5), random_state=2345)
        assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
    """Tests for ``iap.Laplace``: repr, histogram comparison against
    numpy's laplace sampler, stochastic loc, scale effect (including
    scale=0), and seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Laplace(0, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Laplace(0, 1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
    def test_via_comparison_to_np_laplace(self):
        # compare clipped density histograms against a direct numpy draw
        param = iap.Laplace(0, 1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
                                                    size=(100, 1000))
        assert samples.shape == (100, 1000)
        samples = np.clip(samples, -1, 1)
        samples_direct = np.clip(samples_direct, -1, 1)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(-1.0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_loc_is_stochastic_parameter(self):
        # loc=Choice([-100, 100]): batch means cluster around one of
        # the two locations, roughly half the time each
        param = iap.Laplace(iap.Choice([-100, 100]), 1)
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if -100 - 10 < exp < -100 + 10:
                seen[0] += 1
            elif 100 - 10 < exp < 100 + 10:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_scale(self):
        param1 = iap.Laplace(0, 1)
        param2 = iap.Laplace(0, 100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.var(samples1) < np.var(samples2)
    def test_scale_is_zero(self):
        # scale=0 collapses the distribution onto loc (within epsilon)
        param1 = iap.Laplace(1, 0)
        samples = param1.draw_samples((100,))
        assert np.all(np.logical_and(
            samples > 1 - _eps(samples),
            samples < 1 + _eps(samples)
        ))
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Laplace(0, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
    """Tests for ``iap.ChiSquare``: repr, non-negativity, histogram
    comparison against numpy's chisquare sampler, stochastic df,
    variance growth with df, and seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.ChiSquare(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "ChiSquare(df=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.ChiSquare(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample
    def test_via_comparison_to_np_chisquare(self):
        # compare clipped density histograms against a direct numpy draw
        param = iap.ChiSquare(1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).chisquare(df=1,
                                                      size=(100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(0 <= samples)
        samples = np.clip(samples, 0, 3)
        samples_direct = np.clip(samples_direct, 0, 3)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 3.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_df_is_stochastic_parameter(self):
        # df=Choice([1, 10]): batch means cluster around df (the
        # chi-square mean equals its degrees of freedom)
        param = iap.ChiSquare(iap.Choice([1, 10]))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if 1 - 1.0 < exp < 1 + 1.0:
                seen[0] += 1
            elif 10 - 4.0 < exp < 10 + 4.0:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_larger_df_leads_to_more_variance(self):
        # chi-square variance is 2*df
        param1 = iap.ChiSquare(1)
        param2 = iap.ChiSquare(10)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.var(samples1) < np.var(samples2)
        assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
        assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
    def test_samples_same_values_for_same_seeds(self):
        param = iap.ChiSquare(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
    """Tests for ``iap.Weibull``: repr, non-negativity, histogram
    comparison against numpy's weibull sampler, stochastic shape
    parameter (checked via the analytic mean/variance using the gamma
    function), and seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Weibull(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Weibull(a=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Weibull(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample
    def test_via_comparison_to_np_weibull(self):
        # compare clipped density histograms against a direct numpy draw
        param = iap.Weibull(1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).weibull(a=1,
                                                    size=(100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(0 <= samples)
        samples = np.clip(samples, 0, 2)
        samples_direct = np.clip(samples_direct, 0, 2)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 2.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_argument_is_stochastic_parameter(self):
        # Weibull mean is gamma(1 + 1/a); batch means must match one
        # of the two candidate shapes per draw
        param = iap.Weibull(iap.Choice([1, 0.5]))
        expected_first = scipy.special.gamma(1 + 1/1)
        expected_second = scipy.special.gamma(1 + 1/0.5)
        seen = [0, 0]
        for _ in sm.xrange(100):
            samples = param.draw_samples((50000,))
            observed = np.mean(samples)
            matches_first = (
                expected_first - 0.2 * expected_first
                < observed <
                expected_first + 0.2 * expected_first
            )
            matches_second = (
                expected_second - 0.2 * expected_second
                < observed <
                expected_second + 0.2 * expected_second
            )
            if matches_first:
                seen[0] += 1
            elif matches_second:
                seen[1] += 1
            else:
                assert False
        assert 50 - 25 < seen[0] < 50 + 25
        assert 50 - 25 < seen[1] < 50 + 25
    def test_different_strengths(self):
        # Weibull variance is gamma(1 + 2/a) - gamma(1 + 1/a)^2
        param1 = iap.Weibull(1)
        param2 = iap.Weibull(0.5)
        samples1 = param1.draw_samples((10000,))
        samples2 = param2.draw_samples((10000,))
        expected_first = (
            scipy.special.gamma(1 + 2/1)
            - (scipy.special.gamma(1 + 1/1))**2
        )
        expected_second = (
            scipy.special.gamma(1 + 2/0.5)
            - (scipy.special.gamma(1 + 1/0.5))**2
        )
        assert np.var(samples1) < np.var(samples2)
        assert (
            expected_first - 0.2 * expected_first
            < np.var(samples1) <
            expected_first + 0.2 * expected_first
        )
        assert (
            expected_second - 0.2 * expected_second
            < np.var(samples2) <
            expected_second + 0.2 * expected_second
        )
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Weibull(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
    """Tests for ``iap.Uniform``: repr, sample ranges for various bound
    combinations (negative, reversed, integer, identical), density
    uniformity, and seed determinism."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Uniform(0, 1.0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
        )
    def test_draw_sample(self):
        param = iap.Uniform(0, 1.0)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
    def test_draw_samples(self):
        param = iap.Uniform(0, 1.0)
        samples = param.draw_samples((10, 5))
        assert samples.shape == (10, 5)
        assert np.all(
            np.logical_and(
                0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_via_density_histogram(self):
        # each of the 10 bins should hold ~10% of the samples
        param = iap.Uniform(0, 1.0)
        samples = param.draw_samples((10000,))
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
                               density=False)
        density_expected = 1.0/nb_bins
        density_tolerance = 0.05
        for nb_samples in hist:
            density = nb_samples / samples.size
            assert (
                density_expected - density_tolerance
                < density <
                density_expected + density_tolerance
            )
    def test_negative_value(self):
        param = iap.Uniform(-1.0, 1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_wrong_argument_order(self):
        # reversed bounds are swapped internally
        param = iap.Uniform(1.0, -1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_arguments_are_integers(self):
        param = iap.Uniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_arguments_are_identical(self):
        # degenerate range collapses to a constant
        param = iap.Uniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Uniform(-1.0, 1.0)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
    """Tests for parameters.Beta, checked against closed-form moments
    and numpy's own beta sampler."""
    @classmethod
    def _mean(cls, alpha, beta):
        # Closed-form mean of a Beta(alpha, beta) distribution.
        return alpha / (alpha + beta)
    @classmethod
    def _var(cls, alpha, beta):
        # Closed-form variance of a Beta(alpha, beta) distribution.
        return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init__(self):
        param = iap.Beta(0.5, 0.5)
        assert (
            param.__str__()
            == param.__repr__()
            == "Beta("
               "Deterministic(float 0.50000000), "
               "Deterministic(float 0.50000000)"
               ")"
        )
    def test_draw_sample(self):
        # A single draw is a scalar in [0, 1] (up to float epsilon).
        param = iap.Beta(0.5, 0.5)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
    def test_draw_samples(self):
        param = iap.Beta(0.5, 0.5)
        samples = param.draw_samples((100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(
            np.logical_and(
                0 - _eps(samples) <= samples,
                samples <= 1.0 + _eps(samples)
            )
        )
    def test_via_comparison_to_np_beta(self):
        # Histogram of draws must match numpy's reference beta sampler
        # bin-by-bin within a 5% density tolerance.
        param = iap.Beta(0.5, 0.5)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).beta(
            a=0.5, b=0.5, size=(100, 1000))
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_argument_is_stochastic_parameter(self):
        # With alpha ~ Choice([0.5, 2]), each draw batch should match the
        # mean of one of the two candidate distributions; over 100 batches
        # both should occur roughly equally often.
        param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
        expected_first = self._mean(0.5, 0.5)
        expected_second = self._mean(2, 0.5)
        seen = [0, 0]
        for _ in sm.xrange(100):
            samples = param.draw_samples((10000,))
            observed = np.mean(samples)
            if expected_first - 0.05 < observed < expected_first + 0.05:
                seen[0] += 1
            elif expected_second - 0.05 < observed < expected_second + 0.05:
                seen[1] += 1
            else:
                assert False
        assert 50 - 25 < seen[0] < 50 + 25
        assert 50 - 25 < seen[1] < 50 + 25
    def test_compare_curves_of_different_arguments(self):
        # Beta(2, 2) is more concentrated than Beta(0.5, 0.5); both observed
        # variances must be within +/-10% of the closed form.
        param1 = iap.Beta(2, 2)
        param2 = iap.Beta(0.5, 0.5)
        samples1 = param1.draw_samples((10000,))
        samples2 = param2.draw_samples((10000,))
        expected_first = self._var(2, 2)
        expected_second = self._var(0.5, 0.5)
        assert np.var(samples1) < np.var(samples2)
        assert (
            expected_first - 0.1 * expected_first
            < np.var(samples1) <
            expected_first + 0.1 * expected_first
        )
        assert (
            expected_second - 0.1 * expected_second
            < np.var(samples2) <
            expected_second + 0.1 * expected_second
        )
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical draws.
        param = iap.Beta(0.5, 0.5)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestDeterministic(unittest.TestCase):
    """Tests for parameters.Deterministic (constant-valued parameter)."""
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init__(self):
        # repr encodes the wrapped value's type (int/float) or raw string.
        pairs = [
            (0, "Deterministic(int 0)"),
            (1.0, "Deterministic(float 1.00000000)"),
            ("test", "Deterministic(test)")
        ]
        for value, expected in pairs:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                assert (
                    param.__str__()
                    == param.__repr__()
                    == expected
                )
    def test_samples_same_values_for_same_seeds(self):
        # A constant is trivially reproducible across identical seeds.
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0
        ]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                rs1 = iarandom.RNG(123456)
                rs2 = iarandom.RNG(123456)
                samples1 = param.draw_samples(20, random_state=rs1)
                samples2 = param.draw_samples(20, random_state=rs2)
                assert np.array_equal(samples1, samples2)
    def test_draw_sample_int(self):
        # Repeated scalar draws of an int constant are identical.
        values = [-100, -54, -1, 0, 1, 54, 100]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                sample1 = param.draw_sample()
                sample2 = param.draw_sample()
                assert sample1.shape == tuple()
                assert sample1 == sample2
    def test_draw_sample_float(self):
        # Repeated scalar draws of a float constant are equal up to epsilon.
        values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                sample1 = param.draw_sample()
                sample2 = param.draw_sample()
                assert sample1.shape == tuple()
                assert np.isclose(
                    sample1, sample2, rtol=0, atol=_eps(sample1))
    def test_draw_samples_int(self):
        # Batch draws fill the requested shape with the constant;
        # an int shape argument is treated like a 1-tuple.
        values = [-100, -54, -1, 0, 1, 54, 100]
        shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
        for value, shape in itertools.product(values, shapes):
            with self.subTest(value=value, shape=shape):
                param = iap.Deterministic(value)
                samples = param.draw_samples(shape)
                shape_expected = (
                    shape
                    if isinstance(shape, tuple)
                    else tuple([shape]))
                assert samples.shape == shape_expected
                assert np.all(samples == value)
    def test_draw_samples_float(self):
        values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
        for value, shape in itertools.product(values, shapes):
            with self.subTest(value=value, shape=shape):
                param = iap.Deterministic(value)
                samples = param.draw_samples(shape)
                shape_expected = (
                    shape
                    if isinstance(shape, tuple)
                    else tuple([shape]))
                assert samples.shape == shape_expected
                assert np.allclose(samples, value, rtol=0, atol=_eps(samples))
    def test_argument_is_stochastic_parameter(self):
        # Wrapping a stochastic parameter samples the constant once at
        # construction time; over 200 constructions both choices appear.
        seen = [0, 0]
        for _ in sm.xrange(200):
            param = iap.Deterministic(iap.Choice([0, 1]))
            seen[param.value] += 1
        assert 100 - 50 < seen[0] < 100 + 50
        assert 100 - 50 < seen[1] < 100 + 50
    def test_argument_has_invalid_type(self):
        # Lists are rejected with an explanatory error message.
        with self.assertRaises(Exception) as context:
            _ = iap.Deterministic([1, 2, 3])
        self.assertTrue(
            "Expected StochasticParameter object or number or string"
            in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
    """Tests for parameters.FromLowerResolution.

    The parameter samples ``other_param`` on a coarser grid (configured via
    ``size_px`` or ``size_percent``); the tests below verify -- via
    connected-component counts -- that coarser grids yield fewer, larger
    components in the upsampled output.
    """
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init___size_percent(self):
        param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
                                        size_percent=1, method="nearest")
        assert (
            param.__str__()
            == param.__repr__()
            == "FromLowerResolution("
               "size_percent=Deterministic(int 1), "
               "method=Deterministic(nearest), "
               "other_param=Deterministic(int 0)"
               ")"
        )
    def test___init___size_px(self):
        param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
                                        size_px=1, method="nearest")
        assert (
            param.__str__()
            == param.__repr__()
            == "FromLowerResolution("
               "size_px=Deterministic(int 1), "
               "method=Deterministic(nearest), "
               "other_param=Deterministic(int 0)"
               ")"
        )
    def test_binomial_hwc(self):
        # (H, W, C) request: output contains exactly the binomial's values.
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples = param.draw_samples((8, 8, 1))
        uq = np.unique(samples)
        assert samples.shape == (8, 8, 1)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
    def test_binomial_nhwc(self):
        # (N, H, W, C) requests are supported as well.
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples_nhwc = param.draw_samples((1, 8, 8, 1))
        uq = np.unique(samples_nhwc)
        assert samples_nhwc.shape == (1, 8, 8, 1)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
    def test_draw_samples_with_too_many_dimensions(self):
        # (N, H, W, C, something) causing error
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        with self.assertRaises(Exception) as context:
            _ = param.draw_samples((1, 8, 8, 1, 1))
        self.assertTrue(
            "FromLowerResolution can only generate samples of shape"
            in str(context.exception)
        )
    def test_binomial_hw3(self):
        # C=3
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples = param.draw_samples((8, 8, 3))
        uq = np.unique(samples)
        assert samples.shape == (8, 8, 3)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
    def test_different_size_px_arguments(self):
        # different sizes in px: a 2px grid should yield fewer but larger
        # connected components than a 16px grid.
        param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
        param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(100):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        # Average component size (pixels per component) must be larger
        # for the coarser grid.
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_different_size_px_arguments_with_tuple(self):
        # different sizes in px, one given as tuple (a, b)
        param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
        param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(400):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_different_size_px_argument_with_stochastic_parameters(self):
        # different sizes in px, given as StochasticParameter
        param1 = iap.FromLowerResolution(iap.Binomial(0.5),
                                         size_px=iap.Deterministic(1))
        param2 = iap.FromLowerResolution(iap.Binomial(0.5),
                                         size_px=iap.Choice([8, 16]))
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(100):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_size_px_has_invalid_datatype(self):
        # bad datatype for size_px
        with self.assertRaises(Exception) as context:
            _ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
        self.assertTrue("Expected " in str(context.exception))
    def test_min_size(self):
        # min_size: raises the effective grid resolution of param2 from
        # 1px to 16px, so param1 (2px) ends up coarser.
        param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
        param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
                                         min_size=16)
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(100):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_size_percent(self):
        # different sizes in percent
        param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
        param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(100):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_size_percent_as_stochastic_parameters(self):
        # different sizes in percent, given as StochasticParameter
        param1 = iap.FromLowerResolution(iap.Binomial(0.5),
                                         size_percent=iap.Deterministic(0.01))
        param2 = iap.FromLowerResolution(iap.Binomial(0.5),
                                         size_percent=iap.Choice([0.4, 0.8]))
        seen_components = [0, 0]
        seen_pixels = [0, 0]
        for _ in sm.xrange(100):
            samples1 = param1.draw_samples((16, 16, 1))
            samples2 = param2.draw_samples((16, 16, 1))
            _, num1 = skimage.morphology.label(samples1, connectivity=1,
                                               background=0, return_num=True)
            _, num2 = skimage.morphology.label(samples2, connectivity=1,
                                               background=0, return_num=True)
            seen_components[0] += num1
            seen_components[1] += num2
            seen_pixels[0] += np.sum(samples1 == 1)
            seen_pixels[1] += np.sum(samples2 == 1)
        assert seen_components[0] < seen_components[1]
        assert (
            seen_pixels[0] / seen_components[0]
            > seen_pixels[1] / seen_components[1]
        )
    def test_size_percent_has_invalid_datatype(self):
        # bad datatype for size_percent
        with self.assertRaises(Exception) as context:
            _ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
        self.assertTrue("Expected " in str(context.exception))
    def test_method(self):
        # method given as StochasticParameter: "nearest" keeps hard 0/1
        # edges, "linear" interpolates and produces in-between values.
        param = iap.FromLowerResolution(
            iap.Binomial(0.5), size_px=4,
            method=iap.Choice(["nearest", "linear"]))
        seen = [0, 0]
        for _ in sm.xrange(200):
            samples = param.draw_samples((16, 16, 1))
            nb_in_between = np.sum(
                np.logical_and(0.05 < samples, samples < 0.95))
            if nb_in_between == 0:
                seen[0] += 1
            else:
                seen[1] += 1
        assert 100 - 50 < seen[0] < 100 + 50
        assert 100 - 50 < seen[1] < 100 + 50
    def test_method_has_invalid_datatype(self):
        # bad datatype for method
        with self.assertRaises(Exception) as context:
            _ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4,
                                        method=False)
        self.assertTrue("Expected " in str(context.exception))
    def test_samples_same_values_for_same_seeds(self):
        # multiple calls with same random_state
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
        samples1 = param.draw_samples((10, 5, 1),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5, 1),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestClip(unittest.TestCase):
    """Tests for parameters.Clip (bounding another parameter's samples)."""
    def setUp(self):
        # Re-seed the global RNG so every test is deterministic.
        reseed()
    def _draw(self, param):
        # Helper: one scalar draw followed by a (10, 5) batch draw.
        return param.draw_sample(), param.draw_samples((10, 5))
    def test___init__(self):
        param = iap.Clip(iap.Deterministic(0), -1, 1)
        expected = "Clip(Deterministic(int 0), -1.000000, 1.000000)"
        assert param.__str__() == param.__repr__() == expected
    def test_value_within_bounds(self):
        # A value strictly inside [-1, 1] passes through unchanged.
        sample, samples = self._draw(iap.Clip(iap.Deterministic(0), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 0
        assert np.all(samples == 0)
    def test_value_exactly_at_upper_bound(self):
        sample, samples = self._draw(iap.Clip(iap.Deterministic(1), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_exactly_at_lower_bound(self):
        sample, samples = self._draw(iap.Clip(iap.Deterministic(-1), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)
    def test_value_is_within_bounds_and_float(self):
        sample, samples = self._draw(iap.Clip(iap.Deterministic(0.5), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        tol = _eps(sample)
        assert 0.5 - tol < sample < 0.5 + tol
        assert np.all((0.5 - tol <= samples) & (samples <= 0.5 + tol))
    def test_value_is_above_upper_bound(self):
        # Values beyond the upper bound are clipped down to it.
        sample, samples = self._draw(iap.Clip(iap.Deterministic(2), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_is_below_lower_bound(self):
        # Values below the lower bound are clipped up to it.
        sample, samples = self._draw(iap.Clip(iap.Deterministic(-2), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)
    def test_value_is_sometimes_without_bounds_sometimes_beyond(self):
        # Choice([0, 2]) yields 0 (kept as-is) or 2 (clipped to 1).
        sample, samples = self._draw(iap.Clip(iap.Choice([0, 2]), -1, 1))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1]
        assert np.all((samples == 0) | (samples == 1))
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical draws.
        param = iap.Clip(iap.Choice([0, 2]), -1, 1)
        first = param.draw_samples((10, 5), random_state=iarandom.RNG(1234))
        second = param.draw_samples((10, 5), random_state=iarandom.RNG(1234))
        assert np.array_equal(first, second)
    def test_lower_bound_is_none(self):
        param = iap.Clip(iap.Deterministic(0), None, 1)
        assert param.draw_sample() == 0
        expected = "Clip(Deterministic(int 0), None, 1.000000)"
        assert param.__str__() == param.__repr__() == expected
    def test_upper_bound_is_none(self):
        param = iap.Clip(iap.Deterministic(0), 0, None)
        assert param.draw_sample() == 0
        expected = "Clip(Deterministic(int 0), 0.000000, None)"
        assert param.__str__() == param.__repr__() == expected
    def test_both_bounds_are_none(self):
        # With no bounds at all, Clip degenerates to a pass-through.
        param = iap.Clip(iap.Deterministic(0), None, None)
        assert param.draw_sample() == 0
        expected = "Clip(Deterministic(int 0), None, None)"
        assert param.__str__() == param.__repr__() == expected
class TestDiscretize(unittest.TestCase):
    """Tests for parameters.Discretize (rounding a parameter to integers)."""
    def setUp(self):
        # Re-seed the global RNG so every test is deterministic.
        reseed()
    def test___init__(self):
        param = iap.Discretize(iap.Deterministic(0))
        expected = "Discretize(Deterministic(int 0))"
        assert param.__str__() == param.__repr__() == expected
    def test_applied_to_deterministic(self):
        values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043, 0,
                  0.00043, 0.7, 1.0, 1, 54.3, 100.2]
        for value in values:
            with self.subTest(value=value):
                param = iap.Discretize(iap.Deterministic(value))
                # Reference result: np.round (banker's rounding), then
                # a cast to int32 -- mirrors Discretize's own conversion.
                expected = np.round(np.float64([value])).astype(np.int32)[0]
                sample = param.draw_sample()
                samples = param.draw_samples((10, 5))
                assert sample.shape == tuple()
                assert samples.shape == (10, 5)
                assert sample == expected
                assert np.all(samples == expected)
    # TODO why are these tests applied to DiscreteUniform instead of Uniform?
    def test_applied_to_discrete_uniform(self):
        discretized = iap.Discretize(iap.DiscreteUniform(0, 1))
        sample = discretized.draw_sample()
        samples = discretized.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1]
        assert np.all((samples == 0) | (samples == 1))
    def test_applied_to_discrete_uniform_with_wider_range(self):
        base = iap.DiscreteUniform(0, 2)
        discretized = iap.Discretize(base)
        raw = base.draw_samples((10000,))
        rounded = discretized.draw_samples((10000,))
        # Discretizing an already-discrete distribution should not move
        # the samples far from the underlying distribution.
        assert np.all(np.abs(raw - rounded) < 0.2*(10000/3))
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical draws.
        param = iap.Discretize(iap.DiscreteUniform(0, 2))
        first = param.draw_samples((10, 5), random_state=iarandom.RNG(1234))
        second = param.draw_samples((10, 5), random_state=iarandom.RNG(1234))
        assert np.array_equal(first, second)
class TestMultiply(unittest.TestCase):
    """Tests for parameters.Multiply."""
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init__(self):
        param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_multiply_example_integer_values(self):
        # int * int keeps an integer dtype and matches plain multiplication.
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), v2)
                samples = p.draw_samples((2, 3))
                assert p.draw_sample() == v1 * v2
                assert samples.dtype.kind == "i"
                # NOTE(review): expected array uses int64 here but int32 in
                # the both-deterministic variant below -- confirm whether
                # the dtype difference is intentional.
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 * v2
                )
    def test_multiply_example_integer_values_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
                samples = p.draw_samples((2, 3))
                assert p.draw_sample() == v1 * v2
                # NOTE(review): exact dtype name is asserted here, unlike
                # the kind-based check above -- confirm intent.
                assert samples.dtype.name == "int32"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 * v2
                )
    def test_multiply_example_float_values(self):
        # float * float compared with a small absolute tolerance.
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 * v2
                )
    def test_multiply_example_float_values_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 * v2
                )
    def test_multiply_by_stochastic_parameter(self):
        # Non-elementwise: a single multiplier is drawn and applied to the
        # whole batch, so min and max samples are (nearly) identical.
        param = iap.Multiply(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 1.0 * 2.0 + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_by_stochastic_parameter_elementwise(self):
        # Elementwise: each cell gets its own multiplier, so the samples
        # must NOT all be (nearly) identical.
        param = iap.Multiply(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 1.0 * 2.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_stochastic_parameter_by_fixed_value(self):
        # Left side stochastic: samples spread over [1, 2] either way.
        param = iap.Multiply(iap.Uniform(1.0, 2.0),
                             1.0,
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 2.0 * 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self):
        param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 2.0 * 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestDivide(unittest.TestCase):
    """Tests for parameters.Divide.

    Covers scalar and elementwise division with deterministic and
    stochastic operands, including the divide-by-zero safeguard.
    """
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init__(self):
        param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Divide(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_divide_integers(self):
        # int / int yields a float dtype (true division).
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == (v1 / v2)
                assert samples.dtype.kind == "f"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_integers_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == (v1 / v2)
                assert samples.dtype.kind == "f"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_floats(self):
        # float / float compared with an epsilon-based tolerance.
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    (v1 / v2) - _eps(sample)
                    <= sample <=
                    (v1 / v2) + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_floats_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    (v1 / v2) - _eps(sample)
                    <= sample <=
                    (v1 / v2) + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_by_stochastic_parameter(self):
        # Non-elementwise: one divisor is drawn for the whole batch,
        # so min and max of the samples are (nearly) identical.
        param = iap.Divide(iap.Deterministic(1.0),
                           (1.0, 2.0),
                           elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 2.0) - _eps(samples))
        assert np.all(samples < (1.0 / 1.0) + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_by_stochastic_parameter_elementwise(self):
        # Elementwise: per-cell divisors, so the samples must NOT all be
        # (nearly) identical.
        param = iap.Divide(iap.Deterministic(1.0),
                           (1.0, 2.0),
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 2.0) - _eps(samples))
        assert np.all(samples < (1.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_stochastic_parameter_by_float(self):
        param = iap.Divide(iap.Uniform(1.0, 2.0),
                           1.0,
                           elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 1.0) - _eps(samples))
        assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_stochastic_parameter_by_float_elementwise(self):
        param = iap.Divide(iap.Uniform(1.0, 2.0),
                           1.0,
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 1.0) - _eps(samples))
        assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        # BUGFIX: the chain previously read
        # "... < samples_sorted[-1] < samples_sorted[-1] < ...", comparing
        # the last element with itself. That sub-comparison is always
        # False, so the whole `assert not (...)` was vacuously true and
        # verified nothing. The intended check is that the samples are NOT
        # all (nearly) identical, matching the sibling tests above.
        assert not (
            samples_sorted[0] - _eps(samples_sorted)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted)
        )
    def test_divide_by_stochastic_parameter_that_can_by_zero(self):
        # test division by zero automatically being converted to
        # division by 1: Choice([0, 2]) yields divisors 1 and 2, hence
        # exactly the sample values 2/1=2 and 2/2=1.
        param = iap.Divide(2,
                           iap.Choice([0, 2]),
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_unique = np.sort(np.unique(samples.flatten()))
        assert samples_unique[0] == 1 and samples_unique[1] == 2
    def test_divide_by_zero(self):
        # A deterministic zero divisor is replaced by 1.
        param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
        sample = param.draw_sample()
        assert sample == 1
class TestAdd(unittest.TestCase):
    """Tests for parameters.Add."""
    def setUp(self):
        # Re-seed the global RNG so every test runs deterministically.
        reseed()
    def test___init__(self):
        param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Add(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_add_integers(self):
        # int + int keeps an integer dtype and matches plain addition.
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 + v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 + v2
                )
    def test_add_integers_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 + v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 + v2
                )
    def test_add_floats(self):
        # float + float compared with a small absolute tolerance.
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 + v2
                )
    def test_add_floats_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 + v2
                )
    def test_add_stochastic_parameter(self):
        # Non-elementwise: one summand is drawn and added to the whole
        # batch, so min and max samples are (nearly) identical.
        param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_stochastic_parameter_elementwise(self):
        # Elementwise: per-cell summands, so the samples must NOT all be
        # (nearly) identical.
        param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_to_stochastic_parameter(self):
        # Left side stochastic: samples spread over [2, 3] either way.
        param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_to_stochastic_parameter_elementwise(self):
        param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestSubtract(unittest.TestCase):
    """Tests for ``iap.Subtract`` (left parameter minus right value/param)."""
    def setUp(self):
        reseed()
    def test___init__(self):
        # repr lists: left operand, right operand (auto-wrapped), elementwise.
        param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_subtract_integers(self):
        # Deterministic(v1) - constant v2 must equal plain int subtraction.
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 - v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 - v2
                )
    def test_subtract_integers_both_deterministic(self):
        # Same as above, but the right operand is wrapped explicitly.
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 - v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 - v2
                )
    def test_subtract_floats(self):
        # NOTE(review): 0.1 appears twice in this list; -0.1 was presumably
        # intended (the int list above is symmetric) -- confirm and fix.
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + v1 - v2
                )
    def test_subtract_floats_both_deterministic(self):
        # NOTE(review): same duplicated-0.1 value list as above.
        values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + v1 - v2
                )
    def test_subtract_stochastic_parameter(self):
        # Right operand is a (1.0, 2.0) uniform; results lie in [-1.0, 0.0].
        param = iap.Subtract(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 2.0 - _eps(samples))
        assert np.all(samples < 1.0 - 1.0 + _eps(samples))
        # elementwise=False: one shared draw, so min ~= max.
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_stochastic_parameter_elementwise(self):
        param = iap.Subtract(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 2.0 - _eps(samples))
        assert np.all(samples < 1.0 - 1.0 + _eps(samples))
        # elementwise=True: per-cell draws, so min and max should differ.
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_from_stochastic_parameter(self):
        # Stochastic left operand: samples vary even without elementwise.
        param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 1.0 - _eps(samples))
        assert np.all(samples < 2.0 - 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_from_stochastic_parameter_elementwise(self):
        param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 1.0 - _eps(samples))
        assert np.all(samples < 2.0 - 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestPower(unittest.TestCase):
    """Tests for ``iap.Power``: raises a base parameter to an exponent."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Power(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_pairs(self):
        # base ** exponent over mixed int/float pairs; skips combinations
        # undefined over the reals (negative base with float exponent,
        # zero base with negative exponent).
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
        ]
        exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
        for base, exponent in itertools.product(values, exponents):
            if base < 0 and ia.is_single_float(exponent):
                continue
            if base == 0 and exponent < 0:
                continue
            with self.subTest(base=base, exponent=exponent):
                p = iap.Power(iap.Deterministic(base), exponent)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    base ** exponent - _eps(sample)
                    < sample <
                    base ** exponent + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + base ** exponent
                )
    def test_pairs_both_deterministic(self):
        # Same as test_pairs, but the exponent is wrapped in Deterministic.
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
        ]
        exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
        for base, exponent in itertools.product(values, exponents):
            if base < 0 and ia.is_single_float(exponent):
                continue
            if base == 0 and exponent < 0:
                continue
            with self.subTest(base=base, exponent=exponent):
                p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    base ** exponent - _eps(sample)
                    < sample <
                    base ** exponent + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + base ** exponent
                )
    def test_exponent_is_stochastic_parameter(self):
        # elementwise=False: one exponent drawn for the whole array, so all
        # cells should be (near-)identical.
        param = iap.Power(iap.Deterministic(1.5),
                          (1.0, 2.0),
                          elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_exponent_is_stochastic_parameter_elementwise(self):
        # elementwise=True: exponents drawn per cell, so min and max differ.
        param = iap.Power(iap.Deterministic(1.5),
                          (1.0, 2.0),
                          elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_value_is_uniform(self):
        # Stochastic base, fixed exponent: samples vary without elementwise.
        param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_value_is_uniform_elementwise(self):
        param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestAbsolute(unittest.TestCase):
    """Behavior tests for ``iap.Absolute`` (absolute value of a parameter)."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Absolute(iap.Deterministic(0))
        expected_repr = "Absolute(Deterministic(int 0))"
        assert str(param) == repr(param) == expected_repr
    def test_fixed_values(self):
        for value in [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]:
            with self.subTest(value=value):
                param = iap.Absolute(iap.Deterministic(value))
                single = param.draw_sample()
                grid = param.draw_samples((10, 5))
                assert single.shape == tuple()
                assert grid.shape == (10, 5)
                expected = abs(value)
                if ia.is_single_float(value):
                    # floats: compare with a small tolerance
                    assert expected - _eps(single) < single
                    assert single < expected + _eps(single)
                    assert np.all(expected - _eps(grid) < grid)
                    assert np.all(grid < expected + _eps(grid))
                else:
                    # ints: comparisons are exact
                    assert single == expected
                    assert np.all(grid == expected)
    def test_value_is_stochastic_parameter(self):
        # Choice over {-3, -1, 1, 3} collapses to exactly {1, 3} under abs().
        param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
        single = param.draw_sample()
        grid = param.draw_samples((10, 10))
        uniques = np.sort(np.unique(grid))
        assert single.shape == tuple()
        assert single in [3, 1]
        assert grid.shape == (10, 10)
        assert len(uniques) == 2
        assert uniques[0] == 1
        assert uniques[1] == 3
class TestRandomSign(unittest.TestCase):
    """Behavior tests for ``iap.RandomSign`` (randomly flips sample signs)."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.RandomSign(iap.Deterministic(0), 0.5)
        expected_repr = "RandomSign(Deterministic(int 0), 0.50)"
        assert str(param) == repr(param) == expected_repr
    def test_value_is_deterministic(self):
        param = iap.RandomSign(iap.Deterministic(1))
        drawn = param.draw_samples((1000,))
        n_pos = np.sum(drawn == 1)
        n_neg = np.sum(drawn == -1)
        assert drawn.shape == (1000,)
        assert n_pos + n_neg == 1000
        # roughly balanced signs
        assert 350 < n_pos < 750
    def test_value_is_deterministic_many_samples(self):
        param = iap.RandomSign(iap.Deterministic(1))
        n_neg = 0
        n_pos = 0
        for _ in sm.xrange(1000):
            single = param.draw_sample()
            assert single.shape == tuple()
            if single == 1:
                n_pos += 1
            else:
                n_neg += 1
        assert n_pos + n_neg == 1000
        assert 350 < n_pos < 750
    def test_value_is_stochastic_parameter(self):
        param = iap.RandomSign(iap.Choice([1, 2]))
        drawn = param.draw_samples((4000,))
        counts = [np.sum(drawn == v) for v in [-2, -1, 1, 2]]
        assert np.sum(counts) == 4000
        # each of the four signed values should show up about 1000 times
        assert all(700 < c < 1300 for c in counts)
    def test_samples_same_values_for_same_seeds(self):
        param = iap.RandomSign(iap.Choice([1, 2]))
        first = param.draw_samples((100, 10),
                                   random_state=iarandom.RNG(1234))
        second = param.draw_samples((100, 10),
                                    random_state=iarandom.RNG(1234))
        assert first.shape == (100, 10)
        assert second.shape == (100, 10)
        assert np.array_equal(first, second)
        for value in [-2, -1, 1, 2]:
            assert np.sum(first == value) > 50
class TestForceSign(unittest.TestCase):
    """Behavior tests for ``iap.ForceSign`` (forces samples to one sign)."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
        expected_repr = "ForceSign(Deterministic(int 0), True, invert, 1)"
        assert str(param) == repr(param) == expected_repr
    def test_single_sample_positive(self):
        param = iap.ForceSign(
            iap.Deterministic(1), positive=True, mode="invert")
        single = param.draw_sample()
        assert single.shape == tuple()
        assert single == 1
    def test_single_sample_negative(self):
        param = iap.ForceSign(
            iap.Deterministic(1), positive=False, mode="invert")
        single = param.draw_sample()
        assert single.shape == tuple()
        assert single == -1
    def test_many_samples_positive(self):
        param = iap.ForceSign(
            iap.Deterministic(1), positive=True, mode="invert")
        drawn = param.draw_samples(100)
        assert drawn.shape == (100,)
        assert np.all(drawn == 1)
    def test_many_samples_negative(self):
        param = iap.ForceSign(
            iap.Deterministic(1), positive=False, mode="invert")
        drawn = param.draw_samples(100)
        assert drawn.shape == (100,)
        assert np.all(drawn == -1)
    def test_many_samples_negative_value_to_positive(self):
        param = iap.ForceSign(
            iap.Deterministic(-1), positive=True, mode="invert")
        drawn = param.draw_samples(100)
        assert drawn.shape == (100,)
        assert np.all(drawn == 1)
    def test_many_samples_negative_value_to_negative(self):
        param = iap.ForceSign(
            iap.Deterministic(-1), positive=False, mode="invert")
        drawn = param.draw_samples(100)
        assert drawn.shape == (100,)
        assert np.all(drawn == -1)
    def test_many_samples_stochastic_value_to_positive(self):
        param = iap.ForceSign(
            iap.Choice([-2, 1]), positive=True, mode="invert")
        drawn = param.draw_samples(1000)
        count_two = np.sum(drawn == 2)
        count_one = np.sum(drawn == 1)
        assert drawn.shape == (1000,)
        assert count_two + count_one == 1000
        assert 200 < count_two < 700
        assert 200 < count_one < 700
    def test_many_samples_stochastic_value_to_positive_reroll(self):
        param = iap.ForceSign(
            iap.Choice([-2, 1]), positive=True, mode="reroll")
        drawn = param.draw_samples(1000)
        count_two = np.sum(drawn == 2)
        count_one = np.sum(drawn == 1)
        assert drawn.shape == (1000,)
        assert count_two + count_one == 1000
        assert count_two > 0
        assert count_one > 0
    def test_many_samples_stochastic_value_to_positive_reroll_max_count(self):
        param = iap.ForceSign(
            iap.Choice([-2, 1]), positive=True, mode="reroll",
            reroll_count_max=100)
        drawn = param.draw_samples(100)
        count_two = np.sum(drawn == 2)
        count_one = np.sum(drawn == 1)
        assert drawn.shape == (100,)
        assert count_two + count_one == 100
        # with 100 rerolls allowed, almost every -2 should reroll to 1
        assert count_two < 5
    def test_samples_same_values_for_same_seeds(self):
        param = iap.ForceSign(
            iap.Choice([-2, 1]), positive=True, mode="invert")
        first = param.draw_samples((100, 10),
                                   random_state=iarandom.RNG(1234))
        second = param.draw_samples((100, 10),
                                    random_state=iarandom.RNG(1234))
        assert first.shape == (100, 10)
        assert second.shape == (100, 10)
        assert np.array_equal(first, second)
class TestPositive(unittest.TestCase):
    """Behavior tests for ``iap.Positive``."""
    def setUp(self):
        reseed()
    def test_many_samples_reroll(self):
        # Deterministic(-1) can never reroll to a positive value; the result
        # ends up as +1 (presumably via sign inversion once reroll_count_max
        # is exhausted -- confirm against iap.ForceSign).
        param = iap.Positive(
            iap.Deterministic(-1), mode="reroll", reroll_count_max=1)
        drawn = param.draw_samples((100,))
        assert drawn.shape == (100,)
        assert np.all(drawn == 1)
class TestNegative(unittest.TestCase):
    """Behavior tests for ``iap.Negative``."""
    def setUp(self):
        reseed()
    def test_many_samples_reroll(self):
        # Deterministic(1) can never reroll to a negative value; the result
        # ends up as -1 (presumably via sign inversion once reroll_count_max
        # is exhausted -- confirm against iap.ForceSign).
        param = iap.Negative(
            iap.Deterministic(1), mode="reroll", reroll_count_max=1)
        drawn = param.draw_samples((100,))
        assert drawn.shape == (100,)
        assert np.all(drawn == -1)
class TestIterativeNoiseAggregator(unittest.TestCase):
    """Tests for ``iap.IterativeNoiseAggregator``: draws another parameter
    several times and aggregates the iterations (e.g. min/avg/max)."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.IterativeNoiseAggregator(iap.Deterministic(0),
                                             iterations=(1, 3),
                                             aggregation_method="max")
        assert (
            param.__str__()
            == param.__repr__()
            == (
                "IterativeNoiseAggregator("
                "Deterministic(int 0), "
                "DiscreteUniform(Deterministic(int 1), "
                "Deterministic(int 3)"
                "), "
                "Deterministic(max)"
                ")"
            )
        )
    def test_value_is_deterministic_max_1_iter(self):
        # One iteration of a deterministic value is just that value.
        param = iap.IterativeNoiseAggregator(iap.Deterministic(1),
                                             iterations=1,
                                             aggregation_method="max")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_is_stochastic_avg_200_iter(self):
        # Averaging 200 draws of {0, 50} concentrates around the mean, 25.
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=200,
                                             aggregation_method="avg")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert 25 - 10 < sample < 25 + 10
        assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
    def test_value_is_stochastic_max_100_iter(self):
        # Max over 100 draws of {0, 50} is almost surely 50.
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=100,
                                             aggregation_method="max")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 50
        assert np.all(samples == 50)
    def test_value_is_stochastic_min_100_iter(self):
        # Min over 100 draws of {0, 50} is almost surely 0.
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=100,
                                             aggregation_method="min")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 0
        assert np.all(samples == 0)
    def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self):
        # With a random choice between "avg" and "max", results cluster near
        # 25 (avg) or exactly 50 (max) in roughly equal proportion.
        seen = [0, 0, 0, 0]
        for _ in sm.xrange(100):
            param = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=100,
                aggregation_method=["avg", "max"])
            samples = param.draw_samples((1, 1))
            diff_0 = abs(0 - samples[0, 0])
            diff_25 = abs(25 - samples[0, 0])
            diff_50 = abs(50 - samples[0, 0])
            if diff_25 < 10.0:
                seen[0] += 1
            elif diff_50 < _eps(samples):
                seen[1] += 1
            elif diff_0 < _eps(samples):
                seen[2] += 1
            else:
                seen[3] += 1
        assert seen[2] <= 2  # around 0.0
        assert seen[3] <= 2  # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0
        assert 50 - 20 < seen[0] < 50 + 20
        assert 50 - 20 < seen[1] < 50 + 20
    def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self):
        # iterations as tuple
        # Averaging a variable number of Uniform(-1, 1) draws should mass
        # the samples near 0 (the middle histogram bin dominates).
        param = iap.IterativeNoiseAggregator(
            iap.Uniform(-1.0, 1.0),
            iterations=(1, 100),
            aggregation_method="avg")
        diffs = []
        for _ in sm.xrange(100):
            samples = param.draw_samples((1, 1))
            diff = abs(samples[0, 0] - 0.0)
            diffs.append(diff)
        nb_bins = 3
        hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        assert hist[1] > hist[0]
        assert hist[1] > hist[2]
    def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self):
        # iterations as list
        # iterations uniformly 1 or 100: max is 50 with prob 0.5 (1 iter)
        # or almost surely 50 (100 iters) -> roughly 75% of runs give 50.
        seen = [0, 0]
        for _ in sm.xrange(400):
            param = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=[1, 100],
                aggregation_method=["max"])
            samples = param.draw_samples((1, 1))
            diff_0 = abs(0 - samples[0, 0])
            diff_50 = abs(50 - samples[0, 0])
            if diff_50 < _eps(samples):
                seen[0] += 1
            elif diff_0 < _eps(samples):
                seen[1] += 1
            else:
                assert False
        assert 300 - 50 < seen[0] < 300 + 50
        assert 100 - 50 < seen[1] < 100 + 50
    def test_value_is_stochastic_all_100_iter(self):
        # test ia.ALL as aggregation_method
        # note that each method individually and list of methods are already
        # tested, so no in depth test is needed here
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
        assert isinstance(param.aggregation_method, iap.Choice)
        assert len(param.aggregation_method.a) == 3
        assert [v in param.aggregation_method.a for v in ["min", "avg", "max"]]
    def test_value_is_stochastic_max_2_iter(self):
        # max of two draws of {0, 50} is 0 only when both draws are 0
        # (probability 0.25).
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=2, aggregation_method="max")
        samples = param.draw_samples((2, 1000))
        nb_0 = np.sum(samples == 0)
        nb_50 = np.sum(samples == 50)
        assert nb_0 + nb_50 == 2 * 1000
        assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
    def test_samples_same_values_for_same_seeds(self):
        # Identical explicit RNG seeds must reproduce identical samples.
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.allclose(samples1, samples2)
    def test_stochastic_param_as_aggregation_method(self):
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]),
            iterations=5,
            aggregation_method=iap.Deterministic("max"))
        assert isinstance(param.aggregation_method, iap.Deterministic)
        assert param.aggregation_method.value == "max"
    def test_bad_datatype_for_aggregation_method(self):
        with self.assertRaises(Exception) as context:
            _ = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]), iterations=5, aggregation_method=False)
        self.assertTrue(
            "Expected aggregation_method to be" in str(context.exception))
    def test_bad_datatype_for_iterations(self):
        with self.assertRaises(Exception) as context:
            _ = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=False,
                aggregation_method="max")
        self.assertTrue("Expected iterations to be" in str(context.exception))
class TestSigmoid(unittest.TestCase):
    """Tests for ``iap.Sigmoid``: applies a sigmoid to another parameter's
    samples, optionally only with some probability ("activated")."""
    def setUp(self):
        reseed()
    def test___init__(self):
        param = iap.Sigmoid(
            iap.Deterministic(0),
            threshold=(-10, 10),
            activated=True,
            mul=1,
            add=0)
        assert (
            param.__str__()
            == param.__repr__()
            == (
                "Sigmoid("
                "Deterministic(int 0), "
                "Uniform("
                "Deterministic(int -10), "
                "Deterministic(int 10)"
                "), "
                "Deterministic(int 1), "
                "1, "
                "0)"
            )
        )
    def test_activated_is_true(self):
        # activated=True: output is sigmoid(value * mul + add - threshold).
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
        sample = param.draw_sample()
        samples = param.draw_samples((5, 10))
        assert sample.shape == tuple()
        assert samples.shape == (5, 10)
        assert expected - _eps(sample) < sample < expected + _eps(sample)
        assert np.all(
            np.logical_and(
                expected - _eps(samples) < samples,
                samples < expected + _eps(samples)
            )
        )
    def test_activated_is_false(self):
        # activated=False: the input value passes through unchanged.
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=False)
        expected = 5
        sample = param.draw_sample()
        samples = param.draw_samples((5, 10))
        assert sample.shape == tuple()
        assert samples.shape == (5, 10)
        assert expected - _eps(sample) < sample < expected + _eps(sample)
        assert np.all(
            np.logical_and(
                expected - _eps(sample) < samples,
                samples < expected + _eps(sample)
            )
        )
    def test_activated_is_probabilistic(self):
        # activated=0.5: about half the draws are raw values, half are
        # sigmoid-mapped.
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=0.5)
        expected_first = 5
        expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            sample = param.draw_sample()
            diff_first = abs(sample - expected_first)
            diff_second = abs(sample - expected_second)
            if diff_first < _eps(sample):
                seen[0] += 1
            elif diff_second < _eps(sample):
                seen[1] += 1
            else:
                assert False
        assert 500 - 150 < seen[0] < 500 + 150
        assert 500 - 150 < seen[1] < 500 + 150
    def test_value_is_stochastic_param(self):
        # A Choice input maps to the two corresponding sigmoid outputs,
        # each appearing about half the time.
        param = iap.Sigmoid(
            iap.Choice([1, 10]),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
        expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            sample = param.draw_sample()
            diff_first = abs(sample - expected_first)
            diff_second = abs(sample - expected_second)
            if diff_first < _eps(sample):
                seen[0] += 1
            elif diff_second < _eps(sample):
                seen[1] += 1
            else:
                assert False
        assert 500 - 150 < seen[0] < 500 + 150
        assert 500 - 150 < seen[1] < 500 + 150
    def test_mul_add_threshold_with_various_fixed_values(self):
        # Cross-product of mul/add/value/threshold; compare against the
        # closed-form sigmoid computed in the sample's own dtype.
        muls = [0.1, 1, 10.3]
        adds = [-5.7, -0.0734, 0, 0.0734, 5.7]
        vals = [-1, -0.7, 0, 0.7, 1]
        threshs = [-5.7, -0.0734, 0, 0.0734, 5.7]
        for mul, add, val, thresh in itertools.product(muls, adds, vals,
                                                       threshs):
            with self.subTest(mul=mul, add=add, val=val, threshold=thresh):
                param = iap.Sigmoid(
                    iap.Deterministic(val),
                    add=add,
                    mul=mul,
                    threshold=thresh)
                sample = param.draw_sample()
                samples = param.draw_samples((2, 3))
                dt = sample.dtype
                val_ = np.array([val], dtype=dt)
                mul_ = np.array([mul], dtype=dt)
                add_ = np.array([add], dtype=dt)
                thresh_ = np.array([thresh], dtype=dt)
                expected = (
                    1 / (
                        1 + np.exp(
                            -(val_ * mul_ + add_ - thresh_)
                        )
                    )
                )
                assert sample.shape == tuple()
                assert samples.shape == (2, 3)
                assert (
                    expected - 5*_eps(sample)
                    < sample <
                    expected + 5*_eps(sample)
                )
                assert np.all(
                    np.logical_and(
                        expected - 5*_eps(sample) < samples,
                        samples < expected + 5*_eps(sample)
                    )
                )
    def test_samples_same_values_for_same_seeds(self):
        # Identical explicit RNG seeds must reproduce identical samples.
        param = iap.Sigmoid(
            iap.Choice([1, 10]),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.array_equal(samples1, samples2)
|
[
"numpy.clip",
"numpy.prod",
"imgaug.random.RNG",
"imgaug.parameters.Choice",
"mock.Mock",
"imgaug.parameters.Uniform",
"imgaug.parameters.draw_distributions_grid",
"imgaug.parameters.handle_discrete_param",
"numpy.array",
"six.moves.xrange",
"imgaug.parameters.handle_continuous_param",
"imgaug.parameters.Weibull",
"imgaug.parameters.Discretize",
"numpy.mean",
"numpy.histogram",
"mock.patch",
"imgaug.parameters.TruncatedNormal",
"numpy.float64",
"itertools.product",
"skimage.morphology.label",
"imgaug.parameters.Normal",
"imgaug.is_single_float",
"numpy.exp",
"imgaug.parameters.Poisson",
"imgaug.parameters.Deterministic",
"imgaug.is_np_array",
"imgaug.parameters.ChiSquare",
"numpy.abs",
"numpy.allclose",
"imgaug.parameters.force_np_float_dtype",
"matplotlib.use",
"imgaug.testutils.reseed",
"numpy.any",
"imgaug.parameters.Laplace",
"scipy.special.gamma",
"numpy.std",
"numpy.finfo",
"imgaug.parameters.handle_categorical_string_param",
"imgaug.parameters.DiscreteUniform",
"numpy.unique",
"numpy.isclose",
"numpy.logical_and",
"imgaug.parameters.Binomial",
"imgaug.parameters.both_np_float_if_one_is_float",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.array_equal",
"imgaug.parameters.Beta",
"numpy.all",
"numpy.var",
"imgaug.parameters.handle_probability_param"
] |
[((422, 443), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (436, 443), False, 'import matplotlib\n'), ((789, 808), 'imgaug.is_np_array', 'ia.is_np_array', (['arr'], {}), '(arr)\n', (803, 808), True, 'import imgaug as ia\n'), ((1005, 1112), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test1]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test1]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (1032, 1112), True, 'from imgaug import parameters as iap\n'), ((1265, 1381), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test1b]"""'], {'value_range': '(None, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test1b]', value_range=(None, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (1292, 1381), True, 'from imgaug import parameters as iap\n'), ((1840, 1950), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test3]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test3]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True)\n", (1867, 1950), True, 'from imgaug import parameters as iap\n'), ((2557, 2670), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test5]"""'], {'value_range': '(None, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test5]', value_range=(None, 12),\n tuple_to_uniform=True, list_to_choice=True)\n", (2584, 2670), True, 'from imgaug import parameters as iap\n'), ((3359, 3472), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test7]"""'], {'value_range': '(-1, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test7]', value_range=(-1, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (3386, 3472), True, 
'from imgaug import parameters as iap\n'), ((4546, 4659), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test10]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test10]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (4573, 4659), True, 'from imgaug import parameters as iap\n'), ((4929, 5045), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test11]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test11]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True)\n", (4956, 5045), True, 'from imgaug import parameters as iap\n'), ((6635, 6751), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2, 3]', '"""[test15]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2, 3], '[test15]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (6662, 6751), True, 'from imgaug import parameters as iap\n'), ((8019, 8135), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test18]"""'], {'value_range': '_value_range', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test18]', value_range=_value_range,\n tuple_to_uniform=True, list_to_choice=True)\n", (8046, 8135), True, 'from imgaug import parameters as iap\n'), ((9287, 9412), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test1]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test1]', value_range=None, tuple_to_uniform=\n True, list_to_choice=True, allow_floats=True)\n", (9312, 9412), True, 'from imgaug import parameters as iap\n'), ((9616, 9749), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', 
'"""[test1b]"""'], {'value_range': '(None, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test1b]', value_range=(None, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (9641, 9749), True, 'from imgaug import parameters as iap\n'), ((10259, 10386), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test3]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test3]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (10284, 10386), True, 'from imgaug import parameters as iap\n'), ((10980, 11110), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test5]"""'], {'value_range': '(None, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test5]', value_range=(None, 12),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11005, 11110), True, 'from imgaug import parameters as iap\n'), ((11743, 11873), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test7]"""'], {'value_range': '(-1, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test7]', value_range=(-1, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11768, 11873), True, 'from imgaug import parameters as iap\n'), ((12883, 13013), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test10]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test10]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (12908, 13013), True, 'from imgaug import parameters as iap\n'), ((13257, 13390), 'imgaug.parameters.handle_discrete_param', 
'iap.handle_discrete_param', (['(1, 2)', '"""[test11]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test11]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (13282, 13390), True, 'from imgaug import parameters as iap\n'), ((13678, 13813), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test11b]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(False)'}), "((1, 2), '[test11b]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=False)\n", (13703, 13813), True, 'from imgaug import parameters as iap\n'), ((15323, 15456), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 2, 3]', '"""[test15]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 2, 3], '[test15]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (15348, 15456), True, 'from imgaug import parameters as iap\n'), ((16639, 16753), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test18]"""'], {'value_range': '_value_range', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test18]', value_range=_value_range,\n tuple_to_uniform=True, list_to_choice=True)\n", (16664, 16753), True, 'from imgaug import parameters as iap\n'), ((17413, 17477), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['ia.ALL', '"""foo"""', 'valid_values'], {}), "(ia.ALL, 'foo', valid_values)\n", (17448, 17477), True, 'from imgaug import parameters as iap\n'), ((17675, 17741), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['"""class1"""', '"""foo"""', 'valid_values'], {}), "('class1', 'foo', valid_values)\n", 
(17710, 17741), True, 'from imgaug import parameters as iap\n'), ((18368, 18446), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', 'class3']", '"""foo"""', 'valid_values'], {}), "(['class1', 'class3'], 'foo', valid_values)\n", (18403, 18446), True, 'from imgaug import parameters as iap\n'), ((19620, 19647), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['"""class1"""'], {}), "('class1')\n", (19637, 19647), True, 'from imgaug import parameters as iap\n'), ((19669, 19730), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['param', '"""foo"""', "['class1']"], {}), "(param, 'foo', ['class1'])\n", (19704, 19730), True, 'from imgaug import parameters as iap\n'), ((20885, 20905), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (20902, 20905), True, 'from imgaug import parameters as iap\n'), ((20918, 20962), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['det', '"""[test3]"""'], {}), "(det, '[test3]')\n", (20946, 20962), True, 'from imgaug import parameters as iap\n'), ((22202, 22234), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22210, 22234), True, 'import numpy as np\n'), ((22248, 22280), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float32'}), '((1,), dtype=np.float32)\n', (22256, 22280), True, 'import numpy as np\n'), ((22298, 22339), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (22331, 22339), True, 'from imgaug import parameters as iap\n'), ((22472, 22504), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22480, 22504), True, 'import numpy as np\n'), ((22518, 22548), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (22526, 22548), True, 'import numpy as np\n'), ((22566, 
22607), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (22599, 22607), True, 'from imgaug import parameters as iap\n'), ((22740, 22770), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (22748, 22770), True, 'import numpy as np\n'), ((22784, 22816), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22792, 22816), True, 'import numpy as np\n'), ((22834, 22875), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (22867, 22875), True, 'from imgaug import parameters as iap\n'), ((23006, 23036), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (23014, 23036), True, 'import numpy as np\n'), ((23050, 23080), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.uint8'}), '((1,), dtype=np.uint8)\n', (23058, 23080), True, 'import numpy as np\n'), ((23098, 23139), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (23131, 23139), True, 'from imgaug import parameters as iap\n'), ((23310, 23318), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (23316, 23318), False, 'from imgaug.testutils import reseed\n'), ((23475, 23510), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (23483, 23510), True, 'import numpy as np\n'), ((23582, 23617), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (23590, 23617), True, 'import numpy as np\n'), ((23644, 23655), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23653, 23655), False, 'import mock\n'), ((23694, 23729), 'numpy.zeros', 'np.zeros', (['(4, 3, 2)'], {'dtype': 'np.uint8'}), '((4, 3, 2), dtype=np.uint8)\n', (23702, 23729), True, 'import numpy as np\n'), ((24994, 25015), 'imgaug.parameters.Uniform', 
'iap.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (25005, 25015), True, 'from imgaug import parameters as iap\n'), ((25243, 25286), 'numpy.sum', 'np.sum', (['(graph_img[..., :] > [200, 200, 200])'], {}), '(graph_img[..., :] > [200, 200, 200])\n', (25249, 25286), True, 'import numpy as np\n'), ((25304, 25328), 'numpy.prod', 'np.prod', (['graph_img.shape'], {}), '(graph_img.shape)\n', (25311, 25328), True, 'import numpy as np\n'), ((25880, 25888), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (25886, 25888), False, 'from imgaug.testutils import reseed\n'), ((25937, 25959), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (25948, 25959), True, 'from imgaug import parameters as iap\n'), ((25976, 26003), 'imgaug.parameters.Discretize', 'iap.Discretize', (['other_param'], {}), '(other_param)\n', (25990, 26003), True, 'from imgaug import parameters as iap\n'), ((26344, 26366), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26355, 26366), True, 'from imgaug import parameters as iap\n'), ((26383, 26410), 'imgaug.parameters.Discretize', 'iap.Discretize', (['other_param'], {}), '(other_param)\n', (26397, 26410), True, 'from imgaug import parameters as iap\n'), ((26793, 26801), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (26799, 26801), False, 'from imgaug.testutils import reseed\n'), ((26866, 26882), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (26876, 26882), True, 'from imgaug import parameters as iap\n'), ((26900, 26922), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (26911, 26922), True, 'from imgaug import parameters as iap\n'), ((27163, 27179), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27173, 27179), True, 'from imgaug import parameters as iap\n'), ((27473, 27489), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27483, 27489), 
True, 'from imgaug import parameters as iap\n'), ((27802, 27818), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27812, 27818), True, 'from imgaug import parameters as iap\n'), ((28066, 28082), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28076, 28082), True, 'from imgaug import parameters as iap\n'), ((28336, 28352), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28346, 28352), True, 'from imgaug import parameters as iap\n'), ((28370, 28392), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (28381, 28392), True, 'from imgaug import parameters as iap\n'), ((28627, 28643), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28637, 28643), True, 'from imgaug import parameters as iap\n'), ((28931, 28947), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28941, 28947), True, 'from imgaug import parameters as iap\n'), ((29254, 29270), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29264, 29270), True, 'from imgaug import parameters as iap\n'), ((29514, 29530), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29524, 29530), True, 'from imgaug import parameters as iap\n'), ((29777, 29793), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29787, 29793), True, 'from imgaug import parameters as iap\n'), ((29811, 29833), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (29822, 29833), True, 'from imgaug import parameters as iap\n'), ((30072, 30088), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30082, 30088), True, 'from imgaug import parameters as iap\n'), ((30391, 30407), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30401, 30407), True, 'from imgaug import parameters as iap\n'), ((30673, 30689), 
'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30683, 30689), True, 'from imgaug import parameters as iap\n'), ((31002, 31018), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (31012, 31018), True, 'from imgaug import parameters as iap\n'), ((31287, 31313), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (31306, 31313), True, 'from imgaug import parameters as iap\n'), ((31335, 31353), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (31345, 31353), True, 'from imgaug import parameters as iap\n'), ((31704, 31730), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (31723, 31730), True, 'from imgaug import parameters as iap\n'), ((32138, 32164), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32157, 32164), True, 'from imgaug import parameters as iap\n'), ((32585, 32611), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32604, 32611), True, 'from imgaug import parameters as iap\n'), ((32873, 32899), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32892, 32899), True, 'from imgaug import parameters as iap\n'), ((33124, 33140), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (33134, 33140), True, 'from imgaug import parameters as iap\n'), ((33158, 33180), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (33169, 33180), True, 'from imgaug import parameters as iap\n'), ((33409, 33425), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (33419, 33425), True, 'from imgaug import parameters as iap\n'), ((33707, 33723), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (33717, 33723), True, 'from imgaug import parameters as iap\n'), ((34012, 
34028), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34022, 34028), True, 'from imgaug import parameters as iap\n'), ((34257, 34273), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34267, 34273), True, 'from imgaug import parameters as iap\n'), ((34498, 34514), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34508, 34514), True, 'from imgaug import parameters as iap\n'), ((34532, 34554), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (34543, 34554), True, 'from imgaug import parameters as iap\n'), ((34795, 34811), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34805, 34811), True, 'from imgaug import parameters as iap\n'), ((35105, 35121), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35115, 35121), True, 'from imgaug import parameters as iap\n'), ((35434, 35450), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35444, 35450), True, 'from imgaug import parameters as iap\n'), ((35698, 35714), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35708, 35714), True, 'from imgaug import parameters as iap\n'), ((35943, 35959), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35953, 35959), True, 'from imgaug import parameters as iap\n'), ((35977, 35999), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (35988, 35999), True, 'from imgaug import parameters as iap\n'), ((36240, 36256), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (36250, 36256), True, 'from imgaug import parameters as iap\n'), ((36550, 36566), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (36560, 36566), True, 'from imgaug import parameters as iap\n'), ((36867, 36883), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 
1)\n', (36877, 36883), True, 'from imgaug import parameters as iap\n'), ((37122, 37138), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (37132, 37138), True, 'from imgaug import parameters as iap\n'), ((37369, 37377), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (37375, 37377), False, 'from imgaug.testutils import reseed\n'), ((37434, 37449), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0)'], {}), '(0)\n', (37446, 37449), True, 'from imgaug import parameters as iap\n'), ((37640, 37657), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(1.0)'], {}), '(1.0)\n', (37652, 37657), True, 'from imgaug import parameters as iap\n'), ((37851, 37866), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0)'], {}), '(0)\n', (37863, 37866), True, 'from imgaug import parameters as iap\n'), ((38073, 38093), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (38079, 38093), True, 'import numpy as np\n'), ((38140, 38157), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(1.0)'], {}), '(1.0)\n', (38152, 38157), True, 'from imgaug import parameters as iap\n'), ((38364, 38384), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (38370, 38384), True, 'import numpy as np\n'), ((38438, 38455), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (38450, 38455), True, 'from imgaug import parameters as iap\n'), ((38566, 38604), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (38575, 38604), True, 'import numpy as np\n'), ((39109, 39122), 'six.moves.xrange', 'sm.xrange', (['(10)'], {}), '(10)\n', (39118, 39122), True, 'import six.moves as sm\n'), ((39403, 39427), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.0, 1.0)'], {}), '((0.0, 1.0))\n', (39415, 39427), True, 'from imgaug import parameters as iap\n'), ((39486, 39499), 'six.moves.xrange', 'sm.xrange', (['(30)'], {}), '(30)\n', (39495, 39499), True, 'import six.moves as sm\n'), 
((39848, 39865), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (39860, 39865), True, 'from imgaug import parameters as iap\n'), ((40119, 40153), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (40133, 40153), True, 'import numpy as np\n'), ((40222, 40230), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (40228, 40230), False, 'from imgaug.testutils import reseed\n'), ((40277, 40298), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40287, 40298), True, 'from imgaug import parameters as iap\n'), ((40496, 40517), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40506, 40517), True, 'from imgaug import parameters as iap\n'), ((40953, 40974), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40963, 40974), True, 'from imgaug import parameters as iap\n'), ((41395, 41414), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1, 1]'], {}), '([-1, 1])\n', (41405, 41414), True, 'from imgaug import parameters as iap\n'), ((41739, 41762), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1.2, 1.7]'], {}), '([-1.2, 1.7])\n', (41749, 41762), True, 'from imgaug import parameters as iap\n'), ((42658, 42698), 'imgaug.parameters.Choice', 'iap.Choice', (["['first', 'second', 'third']"], {}), "(['first', 'second', 'third'])\n", (42668, 42698), True, 'from imgaug import parameters as iap\n'), ((43560, 43594), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {'p': '[0.25, 0.75]'}), '([0, 1], p=[0.25, 0.75])\n', (43570, 43594), True, 'from imgaug import parameters as iap\n'), ((43668, 43706), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (43677, 43706), True, 'import numpy as np\n'), ((44177, 44215), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (44186, 44215), True, 'import numpy as np\n'), 
((44582, 44610), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1, 0, 1, 2, 3]'], {}), '([-1, 0, 1, 2, 3])\n', (44592, 44610), True, 'from imgaug import parameters as iap\n'), ((44864, 44898), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (44878, 44898), True, 'import numpy as np\n'), ((45631, 45639), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (45637, 45639), False, 'from imgaug.testutils import reseed\n'), ((45686, 45711), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (45705, 45711), True, 'from imgaug import parameters as iap\n'), ((45929, 45954), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (45948, 45954), True, 'from imgaug import parameters as iap\n'), ((46383, 46408), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (46402, 46408), True, 'from imgaug import parameters as iap\n'), ((46812, 46838), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (46831, 46838), True, 'from imgaug import parameters as iap\n'), ((47257, 47287), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(-1.2)', '(1.2)'], {}), '(-1.2, 1.2)\n', (47276, 47287), True, 'from imgaug import parameters as iap\n'), ((47765, 47791), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(1)', '(-1)'], {}), '(1, -1)\n', (47784, 47791), True, 'from imgaug import parameters as iap\n'), ((48265, 48290), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(1)', '(1)'], {}), '(1, 1)\n', (48284, 48290), True, 'from imgaug import parameters as iap\n'), ((48417, 48437), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (48423, 48437), True, 'import numpy as np\n'), ((48510, 48528), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (48521, 48528), True, 'from imgaug import 
parameters as iap\n'), ((48782, 48816), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (48796, 48816), True, 'import numpy as np\n'), ((48886, 48894), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (48892, 48894), False, 'from imgaug.testutils import reseed\n'), ((48941, 48955), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (48952, 48955), True, 'from imgaug import parameters as iap\n'), ((49139, 49153), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49150, 49153), True, 'from imgaug import parameters as iap\n'), ((49325, 49339), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49336, 49339), True, 'from imgaug import parameters as iap\n'), ((49880, 49894), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49891, 49894), True, 'from imgaug import parameters as iap\n'), ((50148, 50182), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (50162, 50182), True, 'import numpy as np\n'), ((50251, 50259), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (50257, 50259), False, 'from imgaug.testutils import reseed\n'), ((50306, 50322), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50316, 50322), True, 'from imgaug import parameters as iap\n'), ((50537, 50553), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50547, 50553), True, 'from imgaug import parameters as iap\n'), ((50697, 50713), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50707, 50713), True, 'from imgaug import parameters as iap\n'), ((50928, 50951), 'numpy.clip', 'np.clip', (['samples', '(-1)', '(1)'], {}), '(samples, -1, 1)\n', (50935, 50951), True, 'import numpy as np\n'), ((50977, 51007), 'numpy.clip', 'np.clip', (['samples_direct', '(-1)', '(1)'], {}), '(samples_direct, -1, 1)\n', (50984, 51007), True, 'import numpy as np\n'), 
((51047, 51116), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (51059, 51116), True, 'import numpy as np\n'), ((51173, 51249), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (51185, 51249), True, 'import numpy as np\n'), ((51792, 51807), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (51801, 51807), True, 'import six.moves as sm\n'), ((52225, 52241), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (52235, 52241), True, 'from imgaug import parameters as iap\n'), ((52259, 52277), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(100)'], {}), '(0, 100)\n', (52269, 52277), True, 'from imgaug import parameters as iap\n'), ((52553, 52569), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (52563, 52569), True, 'from imgaug import parameters as iap\n'), ((52823, 52854), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (52834, 52854), True, 'import numpy as np\n'), ((52932, 52940), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (52938, 52940), False, 'from imgaug.testutils import reseed\n'), ((52987, 53012), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (53006, 53012), True, 'from imgaug import parameters as iap\n'), ((53435, 53481), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {'low': '(-100)', 'high': '(50.0)'}), '(0, 1, low=-100, high=50.0)\n', (53454, 53481), True, 'from imgaug import parameters as iap\n'), ((53902, 53947), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.5)', '(0)'], {'low': '(-10)', 'high': '(10)'}), '(0.5, 0, low=-10, high=10)\n', (53921, 53947), True, 'from 
imgaug import parameters as iap\n'), ((54008, 54033), 'numpy.allclose', 'np.allclose', (['samples', '(0.5)'], {}), '(samples, 0.5)\n', (54019, 54033), True, 'import numpy as np\n'), ((54078, 54127), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.0)', '(0.1)'], {'low': '(-100)', 'high': '(100)'}), '(0.0, 0.1, low=-100, high=100)\n', (54097, 54127), True, 'from imgaug import parameters as iap\n'), ((54145, 54194), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.0)', '(5.0)'], {'low': '(-100)', 'high': '(100)'}), '(0.0, 5.0, low=-100, high=100)\n', (54164, 54194), True, 'from imgaug import parameters as iap\n'), ((54692, 54706), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (54701, 54706), True, 'import six.moves as sm\n'), ((55058, 55099), 'numpy.isclose', 'np.isclose', (['seen[0]', '(100)'], {'rtol': '(0)', 'atol': '(20)'}), '(seen[0], 100, rtol=0, atol=20)\n', (55068, 55099), True, 'import numpy as np\n'), ((55115, 55156), 'numpy.isclose', 'np.isclose', (['seen[1]', '(100)'], {'rtol': '(0)', 'atol': '(20)'}), '(seen[1], 100, rtol=0, atol=20)\n', (55125, 55156), True, 'import numpy as np\n'), ((55220, 55266), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(10.0)'], {'low': '(-5)', 'high': '(7.5)'}), '(0, 10.0, low=-5, high=7.5)\n', (55239, 55266), True, 'from imgaug import parameters as iap\n'), ((55362, 55394), 'numpy.all', 'np.all', (['(samples >= -5.0 - 0.0001)'], {}), '(samples >= -5.0 - 0.0001)\n', (55368, 55394), True, 'import numpy as np\n'), ((55408, 55439), 'numpy.all', 'np.all', (['(samples <= 7.5 + 0.0001)'], {}), '(samples <= 7.5 + 0.0001)\n', (55414, 55439), True, 'import numpy as np\n'), ((55502, 55525), 'numpy.any', 'np.any', (['(samples <= -4.5)'], {}), '(samples <= -4.5)\n', (55508, 55525), True, 'import numpy as np\n'), ((55541, 55563), 'numpy.any', 'np.any', (['(samples >= 7.0)'], {}), '(samples >= 7.0)\n', (55547, 55563), True, 'import numpy as np\n'), ((55727, 55752), 
'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (55746, 55752), True, 'from imgaug import parameters as iap\n'), ((55902, 55933), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (55913, 55933), True, 'import numpy as np\n'), ((56016, 56041), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (56035, 56041), True, 'from imgaug import parameters as iap\n'), ((56296, 56304), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (56302, 56304), False, 'from imgaug.testutils import reseed\n'), ((56351, 56368), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56362, 56368), True, 'from imgaug import parameters as iap\n'), ((56584, 56601), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56595, 56601), True, 'from imgaug import parameters as iap\n'), ((56746, 56763), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56757, 56763), True, 'from imgaug import parameters as iap\n'), ((57026, 57049), 'numpy.clip', 'np.clip', (['samples', '(-1)', '(1)'], {}), '(samples, -1, 1)\n', (57033, 57049), True, 'import numpy as np\n'), ((57075, 57105), 'numpy.clip', 'np.clip', (['samples_direct', '(-1)', '(1)'], {}), '(samples_direct, -1, 1)\n', (57082, 57105), True, 'import numpy as np\n'), ((57145, 57214), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (57157, 57214), True, 'import numpy as np\n'), ((57271, 57347), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (57283, 57347), True, 'import numpy as np\n'), ((57891, 57906), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', 
(57900, 57906), True, 'import six.moves as sm\n'), ((58325, 58342), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (58336, 58342), True, 'from imgaug import parameters as iap\n'), ((58360, 58379), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(100)'], {}), '(0, 100)\n', (58371, 58379), True, 'from imgaug import parameters as iap\n'), ((58581, 58598), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(1)', '(0)'], {}), '(1, 0)\n', (58592, 58598), True, 'from imgaug import parameters as iap\n'), ((58849, 58866), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (58860, 58866), True, 'from imgaug import parameters as iap\n'), ((59120, 59151), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (59131, 59151), True, 'import numpy as np\n'), ((59223, 59231), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (59229, 59231), False, 'from imgaug.testutils import reseed\n'), ((59278, 59294), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59291, 59294), True, 'from imgaug import parameters as iap\n'), ((59484, 59500), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59497, 59500), True, 'from imgaug import parameters as iap\n'), ((59674, 59690), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59687, 59690), True, 'from imgaug import parameters as iap\n'), ((59943, 59963), 'numpy.all', 'np.all', (['(0 <= samples)'], {}), '(0 <= samples)\n', (59949, 59963), True, 'import numpy as np\n'), ((59983, 60005), 'numpy.clip', 'np.clip', (['samples', '(0)', '(3)'], {}), '(samples, 0, 3)\n', (59990, 60005), True, 'import numpy as np\n'), ((60031, 60060), 'numpy.clip', 'np.clip', (['samples_direct', '(0)', '(3)'], {}), '(samples_direct, 0, 3)\n', (60038, 60060), True, 'import numpy as np\n'), ((60100, 60166), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 3.0)', 'density': 
'(False)'}), '(samples, bins=nb_bins, range=(0, 3.0), density=False)\n', (60112, 60166), True, 'import numpy as np\n'), ((60223, 60296), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 3.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 3.0), density=False)\n', (60235, 60296), True, 'import numpy as np\n'), ((60834, 60849), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (60843, 60849), True, 'import six.moves as sm\n'), ((61291, 61307), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (61304, 61307), True, 'from imgaug import parameters as iap\n'), ((61325, 61342), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(10)'], {}), '(10)\n', (61338, 61342), True, 'from imgaug import parameters as iap\n'), ((61678, 61694), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (61691, 61694), True, 'from imgaug import parameters as iap\n'), ((61948, 61979), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (61959, 61979), True, 'import numpy as np\n'), ((62049, 62057), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (62055, 62057), False, 'from imgaug.testutils import reseed\n'), ((62104, 62118), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62115, 62118), True, 'from imgaug import parameters as iap\n'), ((62305, 62319), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62316, 62319), True, 'from imgaug import parameters as iap\n'), ((62491, 62505), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62502, 62505), True, 'from imgaug import parameters as iap\n'), ((62753, 62773), 'numpy.all', 'np.all', (['(0 <= samples)'], {}), '(0 <= samples)\n', (62759, 62773), True, 'import numpy as np\n'), ((62793, 62815), 'numpy.clip', 'np.clip', (['samples', '(0)', '(2)'], {}), '(samples, 0, 2)\n', (62800, 62815), True, 'import numpy as np\n'), ((62841, 
62870), 'numpy.clip', 'np.clip', (['samples_direct', '(0)', '(2)'], {}), '(samples_direct, 0, 2)\n', (62848, 62870), True, 'import numpy as np\n'), ((62910, 62976), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 2.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0, 2.0), density=False)\n', (62922, 62976), True, 'import numpy as np\n'), ((63033, 63106), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 2.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 2.0), density=False)\n', (63045, 63106), True, 'import numpy as np\n'), ((63635, 63665), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 1)'], {}), '(1 + 1 / 1)\n', (63654, 63665), False, 'import scipy\n'), ((63690, 63722), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 0.5)'], {}), '(1 + 1 / 0.5)\n', (63709, 63722), False, 'import scipy\n'), ((63760, 63774), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (63769, 63774), True, 'import six.moves as sm\n'), ((64549, 64563), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (64560, 64563), True, 'from imgaug import parameters as iap\n'), ((64581, 64597), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(0.5)'], {}), '(0.5)\n', (64592, 64597), True, 'from imgaug import parameters as iap\n'), ((65402, 65416), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (65413, 65416), True, 'from imgaug import parameters as iap\n'), ((65670, 65701), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (65681, 65701), True, 'import numpy as np\n'), ((65771, 65779), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (65777, 65779), False, 'from imgaug.testutils import reseed\n'), ((65826, 65845), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (65837, 65845), True, 'from imgaug import parameters as iap\n'), ((66062, 66081), 
'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66073, 66081), True, 'from imgaug import parameters as iap\n'), ((66272, 66291), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66283, 66291), True, 'from imgaug import parameters as iap\n'), ((66605, 66624), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66616, 66624), True, 'from imgaug import parameters as iap\n'), ((66712, 66780), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0.0, 1.0), density=False)\n', (66724, 66780), True, 'import numpy as np\n'), ((67185, 67207), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (67196, 67207), True, 'from imgaug import parameters as iap\n'), ((67664, 67686), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(-1.0)'], {}), '(1.0, -1.0)\n', (67675, 67686), True, 'from imgaug import parameters as iap\n'), ((68145, 68163), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (68156, 68163), True, 'from imgaug import parameters as iap\n'), ((68623, 68640), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1)', '(1)'], {}), '(1, 1)\n', (68634, 68640), True, 'from imgaug import parameters as iap\n'), ((69109, 69131), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (69120, 69131), True, 'from imgaug import parameters as iap\n'), ((69385, 69416), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (69396, 69416), True, 'import numpy as np\n'), ((69695, 69703), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (69701, 69703), False, 'from imgaug.testutils import reseed\n'), ((69750, 69768), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (69758, 69768), True, 'from imgaug import parameters as 
iap\n'), ((70047, 70065), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70055, 70065), True, 'from imgaug import parameters as iap\n'), ((70256, 70274), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70264, 70274), True, 'from imgaug import parameters as iap\n'), ((70602, 70620), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70610, 70620), True, 'from imgaug import parameters as iap\n'), ((70806, 70872), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0, 1.0), density=False)\n', (70818, 70872), True, 'import numpy as np\n'), ((70929, 71002), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 1.0), density=False)\n', (70941, 71002), True, 'import numpy as np\n'), ((71638, 71652), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (71647, 71652), True, 'import six.moves as sm\n'), ((72164, 72178), 'imgaug.parameters.Beta', 'iap.Beta', (['(2)', '(2)'], {}), '(2, 2)\n', (72172, 72178), True, 'from imgaug import parameters as iap\n'), ((72196, 72214), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (72204, 72214), True, 'from imgaug import parameters as iap\n'), ((72850, 72868), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (72858, 72868), True, 'from imgaug import parameters as iap\n'), ((73122, 73153), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (73133, 73153), True, 'import numpy as np\n'), ((73229, 73237), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (73235, 73237), False, 'from imgaug.testutils import reseed\n'), ((75359, 75392), 'itertools.product', 'itertools.product', (['values', 'shapes'], {}), '(values, shapes)\n', 
(75376, 75392), False, 'import itertools\n'), ((76011, 76044), 'itertools.product', 'itertools.product', (['values', 'shapes'], {}), '(values, shapes)\n', (76028, 76044), False, 'import itertools\n'), ((76583, 76597), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (76592, 76597), True, 'import six.moves as sm\n'), ((77145, 77153), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (77151, 77153), False, 'from imgaug.testutils import reseed\n'), ((78281, 78299), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (78290, 78299), True, 'import numpy as np\n'), ((78592, 78615), 'numpy.unique', 'np.unique', (['samples_nhwc'], {}), '(samples_nhwc)\n', (78601, 78615), True, 'import numpy as np\n'), ((79348, 79366), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (79357, 79366), True, 'import numpy as np\n'), ((79788, 79802), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (79797, 79802), True, 'import six.moves as sm\n'), ((80928, 80942), 'six.moves.xrange', 'sm.xrange', (['(400)'], {}), '(400)\n', (80937, 80942), True, 'import six.moves as sm\n'), ((82199, 82213), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (82208, 82213), True, 'import six.moves as sm\n'), ((83594, 83608), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (83603, 83608), True, 'import six.moves as sm\n'), ((84695, 84709), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (84704, 84709), True, 'import six.moves as sm\n'), ((85971, 85985), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (85980, 85985), True, 'import six.moves as sm\n'), ((87307, 87321), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (87316, 87321), True, 'import six.moves as sm\n'), ((88437, 88468), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (88448, 88468), True, 'import numpy as np\n'), ((88535, 88543), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (88541, 
88543), False, 'from imgaug.testutils import reseed\n'), ((89081, 89101), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (89087, 89101), True, 'import numpy as np\n'), ((89412, 89432), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (89418, 89432), True, 'import numpy as np\n'), ((89745, 89766), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (89751, 89766), True, 'import numpy as np\n'), ((90581, 90601), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (90587, 90601), True, 'import numpy as np\n'), ((90912, 90933), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (90918, 90933), True, 'import numpy as np\n'), ((91680, 91714), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (91694, 91714), True, 'import numpy as np\n'), ((92717, 92725), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (92723, 92725), False, 'from imgaug.testutils import reseed\n'), ((93834, 93859), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (93853, 93859), True, 'from imgaug import parameters as iap\n'), ((93876, 93902), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), '(param_orig)\n', (93890, 93902), True, 'from imgaug import parameters as iap\n'), ((94251, 94276), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (94270, 94276), True, 'from imgaug import parameters as iap\n'), ((94293, 94319), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), '(param_orig)\n', (94307, 94319), True, 'from imgaug import parameters as iap\n'), ((94567, 94592), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (94586, 94592), True, 'from imgaug import parameters as iap\n'), ((94609, 94635), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), 
'(param_orig)\n', (94623, 94635), True, 'from imgaug import parameters as iap\n'), ((94889, 94923), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (94903, 94923), True, 'import numpy as np\n'), ((94994, 95002), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (95000, 95002), False, 'from imgaug.testutils import reseed\n'), ((95398, 95439), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (95415, 95439), False, 'import itertools\n'), ((95997, 96038), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (96014, 96038), False, 'import itertools\n'), ((96623, 96668), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (96640, 96668), False, 'import itertools\n'), ((97309, 97354), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (97326, 97354), False, 'import itertools\n'), ((100415, 100423), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (100421, 100423), False, 'from imgaug.testutils import reseed\n'), ((100799, 100840), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (100816, 100840), False, 'import itertools\n'), ((101466, 101507), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (101483, 101507), False, 'import itertools\n'), ((102157, 102202), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (102174, 102202), False, 'import itertools\n'), ((102972, 103017), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (102989, 103017), False, 'import itertools\n'), ((106837, 106845), 'imgaug.testutils.reseed', 'reseed', ([], 
{}), '()\n', (106843, 106845), False, 'from imgaug.testutils import reseed\n'), ((107212, 107253), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (107229, 107253), False, 'import itertools\n'), ((107819, 107860), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (107836, 107860), False, 'import itertools\n'), ((108449, 108494), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (108466, 108494), False, 'import itertools\n'), ((109111, 109156), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (109128, 109156), False, 'import itertools\n'), ((111979, 111987), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (111985, 111987), False, 'from imgaug.testutils import reseed\n'), ((112369, 112410), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (112386, 112410), False, 'import itertools\n'), ((112986, 113027), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (113003, 113027), False, 'import itertools\n'), ((113626, 113671), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (113643, 113671), False, 'import itertools\n'), ((114308, 114353), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (114325, 114353), False, 'import itertools\n'), ((117340, 117348), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (117346, 117348), False, 'from imgaug.testutils import reseed\n'), ((117854, 117890), 'itertools.product', 'itertools.product', (['values', 'exponents'], {}), '(values, exponents)\n', (117871, 117890), False, 'import itertools\n'), ((118909, 118945), 'itertools.product', 
'itertools.product', (['values', 'exponents'], {}), '(values, exponents)\n', (118926, 118945), False, 'import itertools\n'), ((122196, 122204), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (122202, 122204), False, 'from imgaug.testutils import reseed\n'), ((123891, 123899), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (123897, 123899), False, 'from imgaug.testutils import reseed\n'), ((124296, 124316), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (124302, 124316), True, 'import numpy as np\n'), ((124338, 124359), 'numpy.sum', 'np.sum', (['(samples == -1)'], {}), '(samples == -1)\n', (124344, 124359), True, 'import numpy as np\n'), ((124636, 124651), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (124645, 124651), True, 'import six.moves as sm\n'), ((125160, 125181), 'numpy.sum', 'np.sum', (['(samples == -2)'], {}), '(samples == -2)\n', (125166, 125181), True, 'import numpy as np\n'), ((125200, 125221), 'numpy.sum', 'np.sum', (['(samples == -1)'], {}), '(samples == -1)\n', (125206, 125221), True, 'import numpy as np\n'), ((125240, 125260), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (125246, 125260), True, 'import numpy as np\n'), ((125279, 125299), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (125285, 125299), True, 'import numpy as np\n'), ((125838, 125872), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (125852, 125872), True, 'import numpy as np\n'), ((126114, 126122), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (126120, 126122), False, 'from imgaug.testutils import reseed\n'), ((127154, 127174), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (127160, 127174), True, 'import numpy as np\n'), ((127429, 127450), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (127435, 127450), True, 'import numpy as np\n'), ((127723, 127743), 'numpy.all', 'np.all', 
(['(samples == 1)'], {}), '(samples == 1)\n', (127729, 127743), True, 'import numpy as np\n'), ((128017, 128038), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (128023, 128038), True, 'import numpy as np\n'), ((128274, 128294), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (128280, 128294), True, 'import numpy as np\n'), ((128312, 128332), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (128318, 128332), True, 'import numpy as np\n'), ((128723, 128743), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (128729, 128743), True, 'import numpy as np\n'), ((128761, 128781), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (128767, 128781), True, 'import numpy as np\n'), ((129187, 129207), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (129193, 129207), True, 'import numpy as np\n'), ((129225, 129245), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (129231, 129245), True, 'import numpy as np\n'), ((129890, 129924), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (129904, 129924), True, 'import numpy as np\n'), ((129995, 130003), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130001, 130003), False, 'from imgaug.testutils import reseed\n'), ((130291, 130311), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (130297, 130311), True, 'import numpy as np\n'), ((130382, 130390), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130388, 130390), False, 'from imgaug.testutils import reseed\n'), ((130677, 130698), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (130683, 130698), True, 'import numpy as np\n'), ((130785, 130793), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130791, 130793), False, 'from imgaug.testutils import reseed\n'), ((131860, 131880), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples 
== 1)\n', (131866, 131880), True, 'import numpy as np\n'), ((132870, 132891), 'numpy.all', 'np.all', (['(samples == 50)'], {}), '(samples == 50)\n', (132876, 132891), True, 'import numpy as np\n'), ((133348, 133368), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (133354, 133368), True, 'import numpy as np\n'), ((133491, 133505), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (133500, 133505), True, 'import six.moves as sm\n'), ((134617, 134631), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (134626, 134631), True, 'import six.moves as sm\n'), ((134796, 134863), 'numpy.histogram', 'np.histogram', (['diffs'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(diffs, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (134808, 134863), True, 'import numpy as np\n'), ((135104, 135118), 'six.moves.xrange', 'sm.xrange', (['(400)'], {}), '(400)\n', (135113, 135118), True, 'import six.moves as sm\n'), ((136486, 136506), 'numpy.sum', 'np.sum', (['(samples == 0)'], {}), '(samples == 0)\n', (136492, 136506), True, 'import numpy as np\n'), ((136523, 136544), 'numpy.sum', 'np.sum', (['(samples == 50)'], {}), '(samples == 50)\n', (136529, 136544), True, 'import numpy as np\n'), ((137165, 137196), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (137176, 137196), True, 'import numpy as np\n'), ((138292, 138300), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (138298, 138300), False, 'from imgaug.testutils import reseed\n'), ((140527, 140542), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (140536, 140542), True, 'import six.moves as sm\n'), ((141350, 141365), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (141359, 141365), True, 'import six.moves as sm\n'), ((142071, 142115), 'itertools.product', 'itertools.product', (['muls', 'adds', 'vals', 'threshs'], {}), '(muls, adds, vals, threshs)\n', (142088, 142115), False, 'import 
itertools\n'), ((144020, 144054), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (144034, 144054), True, 'import numpy as np\n'), ((851, 870), 'numpy.finfo', 'np.finfo', (['arr.dtype'], {}), '(arr.dtype)\n', (859, 870), True, 'import numpy as np\n'), ((1599, 1619), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (1616, 1619), True, 'from imgaug import parameters as iap\n'), ((2182, 2292), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test4]"""'], {'value_range': '(2, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test4]', value_range=(2, 12),\n tuple_to_uniform=True, list_to_choice=True)\n", (2209, 2292), True, 'from imgaug import parameters as iap\n'), ((2982, 3094), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test6]"""'], {'value_range': '(None, 0)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test6]', value_range=(None, 0),\n tuple_to_uniform=True, list_to_choice=True)\n", (3009, 3094), True, 'from imgaug import parameters as iap\n'), ((3784, 3896), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test8]"""'], {'value_range': '(2, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test8]', value_range=(2, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (3811, 3896), True, 'from imgaug import parameters as iap\n'), ((4195, 4308), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test9]"""'], {'value_range': 'None', 'tuple_to_uniform': '(False)', 'list_to_choice': '(True)'}), "((1, 2), '[test9]', value_range=None,\n tuple_to_uniform=False, list_to_choice=True)\n", (4222, 4308), True, 'from imgaug import parameters as iap\n'), ((5393, 5511), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', 
'"""[test12]"""'], {'value_range': '(1.5, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test12]', value_range=(1.5, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (5420, 5511), True, 'from imgaug import parameters as iap\n'), ((5872, 5988), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test13]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test13]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (5899, 5988), True, 'from imgaug import parameters as iap\n'), ((6283, 6400), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2, 3]', '"""[test14]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(False)'}), "([1, 2, 3], '[test14]', value_range=None,\n tuple_to_uniform=True, list_to_choice=False)\n", (6310, 6400), True, 'from imgaug import parameters as iap\n'), ((7094, 7212), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2]', '"""[test16]"""'], {'value_range': '(1.5, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2], '[test16]', value_range=(1.5, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (7121, 7212), True, 'from imgaug import parameters as iap\n'), ((7562, 7678), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2]', '"""[test17]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2], '[test17]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (7589, 7678), True, 'from imgaug import parameters as iap\n'), ((8402, 8511), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test19]"""'], {'value_range': '(False)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test19]', value_range=False,\n tuple_to_uniform=True, 
list_to_choice=True)\n", (8429, 8511), True, 'from imgaug import parameters as iap\n'), ((8943, 9070), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1.5)', '"""[test0]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(False)'}), "(1.5, '[test0]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, allow_floats=False)\n", (8968, 9070), True, 'from imgaug import parameters as iap\n'), ((9972, 9992), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (9989, 9992), True, 'from imgaug import parameters as iap\n'), ((10627, 10754), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test4]"""'], {'value_range': '(2, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test4]', value_range=(2, 12),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (10652, 10754), True, 'from imgaug import parameters as iap\n'), ((11388, 11517), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test6]"""'], {'value_range': '(None, 0)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test6]', value_range=(None, 0),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11413, 11517), True, 'from imgaug import parameters as iap\n'), ((12151, 12280), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test8]"""'], {'value_range': '(2, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test8]', value_range=(2, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (12176, 12280), True, 'from imgaug import parameters as iap\n'), ((12547, 12677), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test9]"""'], {'value_range': 'None', 
'tuple_to_uniform': '(False)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test9]', value_range=None,\n tuple_to_uniform=False, list_to_choice=True, allow_floats=True)\n", (12572, 12677), True, 'from imgaug import parameters as iap\n'), ((14145, 14278), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 3)', '"""[test12]"""'], {'value_range': '(2, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 3), '[test12]', value_range=(2, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (14170, 14278), True, 'from imgaug import parameters as iap\n'), ((14607, 14740), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test13]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test13]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (14632, 14740), True, 'from imgaug import parameters as iap\n'), ((14995, 15129), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 2, 3]', '"""[test14]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(False)', 'allow_floats': '(True)'}), "([1, 2, 3], '[test14]', value_range=None,\n tuple_to_uniform=True, list_to_choice=False, allow_floats=True)\n", (15020, 15129), True, 'from imgaug import parameters as iap\n'), ((15765, 15898), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 3]', '"""[test16]"""'], {'value_range': '(2, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 3], '[test16]', value_range=(2, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (15790, 15898), True, 'from imgaug import parameters as iap\n'), ((16213, 16346), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 
2]', '"""[test17]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 2], '[test17]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (16238, 16346), True, 'from imgaug import parameters as iap\n'), ((17020, 17127), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test19]"""'], {'value_range': '(False)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test19]', value_range=False,\n tuple_to_uniform=True, list_to_choice=True)\n", (17045, 17127), True, 'from imgaug import parameters as iap\n'), ((18008, 18074), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['"""class3"""', '"""foo"""', 'valid_values'], {}), "('class3', 'foo', valid_values)\n", (18043, 18074), True, 'from imgaug import parameters as iap\n'), ((18736, 18811), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', False]", '"""foo"""', 'valid_values'], {}), "(['class1', False], 'foo', valid_values)\n", (18771, 18811), True, 'from imgaug import parameters as iap\n'), ((19198, 19276), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', 'class4']", '"""foo"""', 'valid_values'], {}), "(['class1', 'class4'], 'foo', valid_values)\n", (19233, 19276), True, 'from imgaug import parameters as iap\n'), ((19890, 19951), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['(False)', '"""foo"""', "['class1']"], {}), "(False, 'foo', ['class1'])\n", (19925, 19951), True, 'from imgaug import parameters as iap\n'), ((21108, 21155), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['"""test"""', '"""[test4]"""'], {}), "('test', '[test4]')\n", (21136, 21155), True, 'from imgaug import parameters as iap\n'), ((21329, 21375), 
'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['(-0.01)', '"""[test5]"""'], {}), "(-0.01, '[test5]')\n", (21357, 21375), True, 'from imgaug import parameters as iap\n'), ((21495, 21540), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['(1.01)', '"""[test6]"""'], {}), "(1.01, '[test6]')\n", (21523, 21540), True, 'from imgaug import parameters as iap\n'), ((23378, 23389), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23387, 23389), False, 'import mock\n'), ((23391, 23402), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23400, 23402), False, 'import mock\n'), ((23743, 23796), 'mock.patch', 'mock.patch', (['"""imgaug.imgaug.draw_grid"""', 'draw_grid_mock'], {}), "('imgaug.imgaug.draw_grid', draw_grid_mock)\n", (23753, 23796), False, 'import mock\n'), ((23826, 23953), 'imgaug.parameters.draw_distributions_grid', 'iap.draw_distributions_grid', (['params'], {'rows': '(2)', 'cols': '(3)', 'graph_sizes': '(20, 21)', 'sample_sizes': '[(1, 2), (3, 4)]', 'titles': "['A', 'B']"}), "(params, rows=2, cols=3, graph_sizes=(20, 21),\n sample_sizes=[(1, 2), (3, 4)], titles=['A', 'B'])\n", (23853, 23953), True, 'from imgaug import parameters as iap\n'), ((25756, 25798), 'numpy.array_equal', 'np.array_equal', (['graph_img_title', 'graph_img'], {}), '(graph_img_title, graph_img)\n', (25770, 25798), True, 'import numpy as np\n'), ((39066, 39090), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (39076, 39090), True, 'from imgaug import parameters as iap\n'), ((41143, 41163), 'numpy.sum', 'np.sum', (['(samples == v)'], {}), '(samples == v)\n', (41149, 41163), True, 'import numpy as np\n'), ((41634, 41676), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 1)'], {}), '(samples == -1, samples == 1)\n', (41647, 41676), True, 'import numpy as np\n'), ((45012, 45027), 'imgaug.parameters.Choice', 'iap.Choice', (['(123)'], {}), '(123)\n', (45022, 45027), True, 'from imgaug 
import parameters as iap\n'), ((45233, 45258), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {'p': '(123)'}), '([1, 2], p=123)\n', (45243, 45258), True, 'from imgaug import parameters as iap\n'), ((45454, 45479), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {'p': '[1]'}), '([1, 2], p=[1])\n', (45464, 45479), True, 'from imgaug import parameters as iap\n'), ((46577, 46597), 'numpy.sum', 'np.sum', (['(samples == v)'], {}), '(samples == v)\n', (46583, 46597), True, 'import numpy as np\n'), ((49655, 49675), 'numpy.sum', 'np.sum', (['(samples == i)'], {}), '(samples == i)\n', (49661, 49675), True, 'import numpy as np\n'), ((51724, 51747), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (51734, 51747), True, 'from imgaug import parameters as iap\n'), ((51876, 51892), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (51883, 51892), True, 'import numpy as np\n'), ((52391, 52407), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (52397, 52407), True, 'import numpy as np\n'), ((52410, 52426), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (52416, 52426), True, 'import numpy as np\n'), ((52453, 52469), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (52459, 52469), True, 'import numpy as np\n'), ((54306, 54322), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (54312, 54322), True, 'import numpy as np\n'), ((54325, 54341), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (54331, 54341), True, 'import numpy as np\n'), ((54368, 54384), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (54374, 54384), True, 'import numpy as np\n'), ((54436, 54452), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (54442, 54452), True, 'import numpy as np\n'), ((54563, 54586), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (54573, 54586), True, 'from imgaug import parameters as iap\n'), ((54778, 54794), 'numpy.mean', 
'np.mean', (['samples'], {}), '(samples)\n', (54785, 54794), True, 'import numpy as np\n'), ((54816, 54839), 'numpy.abs', 'np.abs', (['(-100 - observed)'], {}), '(-100 - observed)\n', (54822, 54839), True, 'import numpy as np\n'), ((54860, 54882), 'numpy.abs', 'np.abs', (['(100 - observed)'], {}), '(100 - observed)\n', (54866, 54882), True, 'import numpy as np\n'), ((56195, 56226), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (56206, 56226), True, 'import numpy as np\n'), ((57823, 57846), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (57833, 57846), True, 'from imgaug import parameters as iap\n'), ((57975, 57991), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (57982, 57991), True, 'import numpy as np\n'), ((58493, 58509), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (58499, 58509), True, 'import numpy as np\n'), ((58512, 58528), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (58518, 58528), True, 'import numpy as np\n'), ((60773, 60792), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (60783, 60792), True, 'from imgaug import parameters as iap\n'), ((60918, 60934), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (60925, 60934), True, 'import numpy as np\n'), ((61456, 61472), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (61462, 61472), True, 'import numpy as np\n'), ((61475, 61491), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (61481, 61491), True, 'import numpy as np\n'), ((61519, 61535), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (61525, 61535), True, 'import numpy as np\n'), ((61576, 61592), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (61582, 61592), True, 'import numpy as np\n'), ((63587, 63607), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 0.5]'], {}), '([1, 0.5])\n', (63597, 63607), True, 'from imgaug import parameters as iap\n'), ((63850, 
63866), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (63857, 63866), True, 'import numpy as np\n'), ((64736, 64766), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 2 / 1)'], {}), '(1 + 2 / 1)\n', (64755, 64766), False, 'import scipy\n'), ((64863, 64895), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 2 / 0.5)'], {}), '(1 + 2 / 0.5)\n', (64882, 64895), False, 'import scipy\n'), ((64970, 64986), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (64976, 64986), True, 'import numpy as np\n'), ((64989, 65005), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (64995, 65005), True, 'import numpy as np\n'), ((65087, 65103), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (65093, 65103), True, 'import numpy as np\n'), ((65249, 65265), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (65255, 65265), True, 'import numpy as np\n'), ((71480, 71500), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.5, 2]'], {}), '([0.5, 2])\n', (71490, 71500), True, 'from imgaug import parameters as iap\n'), ((71728, 71744), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (71735, 71744), True, 'import numpy as np\n'), ((72418, 72434), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (72424, 72434), True, 'import numpy as np\n'), ((72437, 72453), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (72443, 72453), True, 'import numpy as np\n'), ((72535, 72551), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (72541, 72551), True, 'import numpy as np\n'), ((72697, 72713), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (72703, 72713), True, 'import numpy as np\n'), ((76900, 76928), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (76917, 76928), True, 'from imgaug import parameters as iap\n'), ((78189, 78206), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78201, 78206), True, 'from imgaug import parameters as 
iap\n'), ((78492, 78509), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78504, 78509), True, 'from imgaug import parameters as iap\n'), ((78888, 78905), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78900, 78905), True, 'from imgaug import parameters as iap\n'), ((79256, 79273), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79268, 79273), True, 'from imgaug import parameters as iap\n'), ((79606, 79623), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79618, 79623), True, 'from imgaug import parameters as iap\n'), ((79677, 79694), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79689, 79694), True, 'from imgaug import parameters as iap\n'), ((79938, 80024), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (79962, 80024), False, 'import skimage\n'), ((80089, 80175), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (80113, 80175), False, 'import skimage\n'), ((80326, 80347), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (80332, 80347), True, 'import numpy as np\n'), ((80378, 80399), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (80384, 80399), True, 'import numpy as np\n'), ((80741, 80758), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (80753, 80758), True, 'from imgaug import parameters as iap\n'), ((80812, 80829), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (80824, 80829), True, 'from imgaug import parameters as iap\n'), ((81078, 81164), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], 
{'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (81102, 81164), False, 'import skimage\n'), ((81229, 81315), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (81253, 81315), False, 'import skimage\n'), ((81466, 81487), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (81472, 81487), True, 'import numpy as np\n'), ((81518, 81539), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (81524, 81539), True, 'import numpy as np\n'), ((81899, 81916), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (81911, 81916), True, 'from imgaug import parameters as iap\n'), ((82030, 82047), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (82042, 82047), True, 'from imgaug import parameters as iap\n'), ((82349, 82435), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (82373, 82435), False, 'import skimage\n'), ((82500, 82586), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (82524, 82586), False, 'import skimage\n'), ((82737, 82758), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (82743, 82758), True, 'import numpy as np\n'), ((82789, 82810), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (82795, 82810), True, 'import numpy as np\n'), ((83359, 83376), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83371, 83376), True, 'from imgaug import parameters as iap\n'), 
((83430, 83447), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83442, 83447), True, 'from imgaug import parameters as iap\n'), ((83744, 83830), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (83768, 83830), False, 'import skimage\n'), ((83895, 83981), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (83919, 83981), False, 'import skimage\n'), ((84132, 84153), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (84138, 84153), True, 'import numpy as np\n'), ((84184, 84205), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (84190, 84205), True, 'import numpy as np\n'), ((84499, 84516), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (84511, 84516), True, 'from imgaug import parameters as iap\n'), ((84578, 84595), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (84590, 84595), True, 'from imgaug import parameters as iap\n'), ((84845, 84931), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (84869, 84931), False, 'import skimage\n'), ((84996, 85082), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (85020, 85082), False, 'import skimage\n'), ((85233, 85254), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (85239, 85254), True, 'import numpy as np\n'), ((85285, 85306), 'numpy.sum', 'np.sum', 
(['(samples2 == 1)'], {}), '(samples2 == 1)\n', (85291, 85306), True, 'import numpy as np\n'), ((85655, 85672), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (85667, 85672), True, 'from imgaug import parameters as iap\n'), ((85794, 85811), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (85806, 85811), True, 'from imgaug import parameters as iap\n'), ((86121, 86207), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (86145, 86207), False, 'import skimage\n'), ((86272, 86358), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (86296, 86358), False, 'import skimage\n'), ((86509, 86530), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (86515, 86530), True, 'import numpy as np\n'), ((86561, 86582), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (86567, 86582), True, 'import numpy as np\n'), ((87183, 87200), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (87195, 87200), True, 'from imgaug import parameters as iap\n'), ((88148, 88165), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (88160, 88165), True, 'from imgaug import parameters as iap\n'), ((88599, 88619), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (88616, 88619), True, 'from imgaug import parameters as iap\n'), ((88846, 88866), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (88863, 88866), True, 'from imgaug import parameters as iap\n'), ((89177, 89197), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (89194, 89197), True, 'from imgaug import 
parameters as iap\n'), ((89508, 89529), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (89525, 89529), True, 'from imgaug import parameters as iap\n'), ((89846, 89868), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0.5)'], {}), '(0.5)\n', (89863, 89868), True, 'from imgaug import parameters as iap\n'), ((90346, 90366), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(2)'], {}), '(2)\n', (90363, 90366), True, 'from imgaug import parameters as iap\n'), ((90675, 90696), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-2)'], {}), '(-2)\n', (90692, 90696), True, 'from imgaug import parameters as iap\n'), ((91031, 91049), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (91041, 91049), True, 'from imgaug import parameters as iap\n'), ((91276, 91317), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (91289, 91317), True, 'import numpy as np\n'), ((91400, 91418), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (91410, 91418), True, 'from imgaug import parameters as iap\n'), ((91781, 91801), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (91798, 91801), True, 'from imgaug import parameters as iap\n'), ((92091, 92111), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92108, 92111), True, 'from imgaug import parameters as iap\n'), ((92402, 92422), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92419, 92422), True, 'from imgaug import parameters as iap\n'), ((92787, 92807), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92804, 92807), True, 'from imgaug import parameters as iap\n'), ((94121, 94162), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (94134, 94162), True, 'import numpy as np\n'), 
((95062, 95082), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (95079, 95082), True, 'from imgaug import parameters as iap\n'), ((97927, 97949), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (97944, 97949), True, 'from imgaug import parameters as iap\n'), ((98569, 98591), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (98586, 98591), True, 'from imgaug import parameters as iap\n'), ((99214, 99235), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (99225, 99235), True, 'from imgaug import parameters as iap\n'), ((99863, 99884), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (99874, 99884), True, 'from imgaug import parameters as iap\n'), ((100481, 100501), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (100498, 100501), True, 'from imgaug import parameters as iap\n'), ((103730, 103752), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (103747, 103752), True, 'from imgaug import parameters as iap\n'), ((104348, 104370), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (104365, 104370), True, 'from imgaug import parameters as iap\n'), ((104963, 104984), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (104974, 104984), True, 'from imgaug import parameters as iap\n'), ((105583, 105604), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (105594, 105604), True, 'from imgaug import parameters as iap\n'), ((106357, 106375), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (106367, 106375), True, 'from imgaug import parameters as iap\n'), ((106662, 106682), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (106679, 106682), True, 'from imgaug import parameters as 
iap\n'), ((106900, 106920), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (106917, 106920), True, 'from imgaug import parameters as iap\n'), ((109711, 109733), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (109728, 109733), True, 'from imgaug import parameters as iap\n'), ((110284, 110306), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (110301, 110306), True, 'from imgaug import parameters as iap\n'), ((110851, 110872), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (110862, 110872), True, 'from imgaug import parameters as iap\n'), ((111423, 111444), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (111434, 111444), True, 'from imgaug import parameters as iap\n'), ((112047, 112067), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (112064, 112067), True, 'from imgaug import parameters as iap\n'), ((114933, 114955), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (114950, 114955), True, 'from imgaug import parameters as iap\n'), ((115572, 115594), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (115589, 115594), True, 'from imgaug import parameters as iap\n'), ((116207, 116228), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (116218, 116228), True, 'from imgaug import parameters as iap\n'), ((116789, 116810), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (116800, 116810), True, 'from imgaug import parameters as iap\n'), ((117405, 117425), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (117422, 117425), True, 'from imgaug import parameters as iap\n'), ((119800, 119822), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.5)'], {}), '(1.5)\n', (119817, 119822), 
True, 'from imgaug import parameters as iap\n'), ((120443, 120465), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.5)'], {}), '(1.5)\n', (120460, 120465), True, 'from imgaug import parameters as iap\n'), ((121061, 121082), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (121072, 121082), True, 'from imgaug import parameters as iap\n'), ((121632, 121653), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (121643, 121653), True, 'from imgaug import parameters as iap\n'), ((122264, 122284), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (122281, 122284), True, 'from imgaug import parameters as iap\n'), ((123451, 123477), 'imgaug.parameters.Choice', 'iap.Choice', (['[-3, -1, 1, 3]'], {}), '([-3, -1, 1, 3])\n', (123461, 123477), True, 'from imgaug import parameters as iap\n'), ((123593, 123611), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (123602, 123611), True, 'import numpy as np\n'), ((123961, 123981), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (123978, 123981), True, 'from imgaug import parameters as iap\n'), ((124206, 124226), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (124223, 124226), True, 'from imgaug import parameters as iap\n'), ((124574, 124594), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (124591, 124594), True, 'from imgaug import parameters as iap\n'), ((125047, 125065), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (125057, 125065), True, 'from imgaug import parameters as iap\n'), ((125316, 125328), 'numpy.sum', 'np.sum', (['seen'], {}), '(seen)\n', (125322, 125328), True, 'import numpy as np\n'), ((125475, 125493), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (125485, 125493), True, 'from imgaug import parameters as iap\n'), ((125888, 125910), 
'numpy.sum', 'np.sum', (['(samples1 == -2)'], {}), '(samples1 == -2)\n', (125894, 125910), True, 'import numpy as np\n'), ((125931, 125953), 'numpy.sum', 'np.sum', (['(samples1 == -1)'], {}), '(samples1 == -1)\n', (125937, 125953), True, 'import numpy as np\n'), ((125974, 125995), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (125980, 125995), True, 'import numpy as np\n'), ((126016, 126037), 'numpy.sum', 'np.sum', (['(samples1 == 2)'], {}), '(samples1 == 2)\n', (126022, 126037), True, 'import numpy as np\n'), ((126183, 126203), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (126200, 126203), True, 'from imgaug import parameters as iap\n'), ((126451, 126471), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126468, 126471), True, 'from imgaug import parameters as iap\n'), ((126712, 126732), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126729, 126732), True, 'from imgaug import parameters as iap\n'), ((126974, 126994), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126991, 126994), True, 'from imgaug import parameters as iap\n'), ((127248, 127268), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (127265, 127268), True, 'from imgaug import parameters as iap\n'), ((127542, 127563), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (127559, 127563), True, 'from imgaug import parameters as iap\n'), ((127835, 127856), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (127852, 127856), True, 'from imgaug import parameters as iap\n'), ((128132, 128151), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (128142, 128151), True, 'from imgaug import parameters as iap\n'), ((128581, 128600), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (128591, 128600), True, 'from 
imgaug import parameters as iap\n'), ((129024, 129043), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (129034, 129043), True, 'from imgaug import parameters as iap\n'), ((129436, 129455), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (129446, 129455), True, 'from imgaug import parameters as iap\n'), ((130074, 130095), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (130091, 130095), True, 'from imgaug import parameters as iap\n'), ((130461, 130481), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (130478, 130481), True, 'from imgaug import parameters as iap\n'), ((130869, 130889), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (130886, 130889), True, 'from imgaug import parameters as iap\n'), ((131504, 131524), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (131521, 131524), True, 'from imgaug import parameters as iap\n'), ((131980, 131999), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (131990, 131999), True, 'from imgaug import parameters as iap\n'), ((132359, 132411), 'numpy.logical_and', 'np.logical_and', (['(25 - 10 < samples)', '(samples < 25 + 10)'], {}), '(25 - 10 < samples, samples < 25 + 10)\n', (132373, 132411), True, 'import numpy as np\n'), ((132512, 132531), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (132522, 132531), True, 'from imgaug import parameters as iap\n'), ((132991, 133010), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (133001, 133010), True, 'from imgaug import parameters as iap\n'), ((134485, 134507), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (134496, 134507), True, 'from imgaug import parameters as iap\n'), ((135990, 136009), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136000, 
136009), True, 'from imgaug import parameters as iap\n'), ((136361, 136380), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136371, 136380), True, 'from imgaug import parameters as iap\n'), ((136761, 136780), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136771, 136780), True, 'from imgaug import parameters as iap\n'), ((137315, 137334), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (137325, 137334), True, 'from imgaug import parameters as iap\n'), ((138372, 138392), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (138389, 138392), True, 'from imgaug import parameters as iap\n'), ((138975, 138995), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (138992, 138995), True, 'from imgaug import parameters as iap\n'), ((139643, 139663), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (139660, 139663), True, 'from imgaug import parameters as iap\n'), ((140283, 140303), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (140300, 140303), True, 'from imgaug import parameters as iap\n'), ((141070, 141089), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (141080, 141089), True, 'from imgaug import parameters as iap\n'), ((143563, 143582), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (143573, 143582), True, 'from imgaug import parameters as iap\n'), ((20285, 20329), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['val', '"""[test1]"""'], {}), "(val, '[test1]')\n", (20313, 20329), True, 'from imgaug import parameters as iap\n'), ((20605, 20649), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['val', '"""[test2]"""'], {}), "(val, '[test2]')\n", (20633, 20649), True, 'from imgaug import parameters as iap\n'), ((21947, 21977), 'numpy.zeros', 
'np.zeros', (['(1,)'], {'dtype': 'dtype_in'}), '((1,), dtype=dtype_in)\n', (21955, 21977), True, 'import numpy as np\n'), ((39190, 39205), 'numpy.sum', 'np.sum', (['samples'], {}), '(samples)\n', (39196, 39205), True, 'import numpy as np\n'), ((39965, 39983), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (39977, 39983), True, 'import imgaug.random as iarandom\n'), ((40083, 40101), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (40095, 40101), True, 'import imgaug.random as iarandom\n'), ((40783, 40824), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (40796, 40824), True, 'import numpy as np\n'), ((42983, 43037), 'numpy.logical_or', 'np.logical_or', (["(samples == 'first')", "(samples == 'second')"], {}), "(samples == 'first', samples == 'second')\n", (42996, 43037), True, 'import numpy as np\n'), ((43347, 43361), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (43356, 43361), True, 'import six.moves as sm\n'), ((44080, 44098), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {}), '([0, 1])\n', (44090, 44098), True, 'from imgaug import parameters as iap\n'), ((44710, 44728), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (44722, 44728), True, 'import imgaug.random as iarandom\n'), ((44828, 44846), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (44840, 44846), True, 'import imgaug.random as iarandom\n'), ((46220, 46261), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (46233, 46261), True, 'import numpy as np\n'), ((47105, 47147), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (47118, 47147), True, 'import numpy as np\n'), ((47554, 47596), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (47567, 
47596), True, 'import numpy as np\n'), ((48058, 48100), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (48071, 48100), True, 'import numpy as np\n'), ((48628, 48646), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (48640, 48646), True, 'import imgaug.random as iarandom\n'), ((48746, 48764), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (48758, 48764), True, 'import imgaug.random as iarandom\n'), ((49416, 49434), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (49428, 49434), True, 'import imgaug.random as iarandom\n'), ((49606, 49633), 'numpy.sum', 'np.sum', (['(samples_direct == i)'], {}), '(samples_direct == i)\n', (49612, 49633), True, 'import numpy as np\n'), ((49994, 50012), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50006, 50012), True, 'import imgaug.random as iarandom\n'), ((50112, 50130), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50124, 50130), True, 'import imgaug.random as iarandom\n'), ((50790, 50808), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50802, 50808), True, 'import imgaug.random as iarandom\n'), ((52669, 52687), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (52681, 52687), True, 'import imgaug.random as iarandom\n'), ((52787, 52805), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (52799, 52805), True, 'import imgaug.random as iarandom\n'), ((55632, 55647), 'numpy.abs', 'np.abs', (['samples'], {}), '(samples)\n', (55638, 55647), True, 'import numpy as np\n'), ((56840, 56858), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (56852, 56858), True, 'import imgaug.random as iarandom\n'), ((58966, 58984), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (58978, 58984), True, 'import imgaug.random as iarandom\n'), ((59084, 59102), 'imgaug.random.RNG', 'iarandom.RNG', 
(['(1234)'], {}), '(1234)\n', (59096, 59102), True, 'import imgaug.random as iarandom\n'), ((59767, 59785), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (59779, 59785), True, 'import imgaug.random as iarandom\n'), ((61794, 61812), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (61806, 61812), True, 'import imgaug.random as iarandom\n'), ((61912, 61930), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (61924, 61930), True, 'import imgaug.random as iarandom\n'), ((62582, 62600), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (62594, 62600), True, 'import imgaug.random as iarandom\n'), ((64780, 64810), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 1)'], {}), '(1 + 1 / 1)\n', (64799, 64810), False, 'import scipy\n'), ((64909, 64941), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 0.5)'], {}), '(1 + 1 / 0.5)\n', (64928, 64941), False, 'import scipy\n'), ((65516, 65534), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (65528, 65534), True, 'import imgaug.random as iarandom\n'), ((65634, 65652), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (65646, 65652), True, 'import imgaug.random as iarandom\n'), ((69231, 69249), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (69243, 69249), True, 'import imgaug.random as iarandom\n'), ((69349, 69367), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (69361, 69367), True, 'import imgaug.random as iarandom\n'), ((70697, 70715), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (70709, 70715), True, 'import imgaug.random as iarandom\n'), ((72968, 72986), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (72980, 72986), True, 'import imgaug.random as iarandom\n'), ((73086, 73104), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (73098, 73104), True, 'import imgaug.random as iarandom\n'), ((73541, 73565), 
'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (73558, 73565), True, 'from imgaug import parameters as iap\n'), ((74006, 74030), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74023, 74030), True, 'from imgaug import parameters as iap\n'), ((74054, 74074), 'imgaug.random.RNG', 'iarandom.RNG', (['(123456)'], {}), '(123456)\n', (74066, 74074), True, 'import imgaug.random as iarandom\n'), ((74097, 74117), 'imgaug.random.RNG', 'iarandom.RNG', (['(123456)'], {}), '(123456)\n', (74109, 74117), True, 'import imgaug.random as iarandom\n'), ((74279, 74313), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (74293, 74313), True, 'import numpy as np\n'), ((74496, 74520), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74513, 74520), True, 'from imgaug import parameters as iap\n'), ((74913, 74937), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74930, 74937), True, 'from imgaug import parameters as iap\n'), ((75475, 75499), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (75492, 75499), True, 'from imgaug import parameters as iap\n'), ((75782, 75806), 'numpy.all', 'np.all', (['(samples == value)'], {}), '(samples == value)\n', (75788, 75806), True, 'import numpy as np\n'), ((76127, 76151), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (76144, 76151), True, 'from imgaug import parameters as iap\n'), ((76637, 76655), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {}), '([0, 1])\n', (76647, 76655), True, 'from imgaug import parameters as iap\n'), ((77249, 77269), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (77266, 77269), True, 'from imgaug import parameters as iap\n'), ((77732, 77752), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), 
'(0)\n', (77749, 77752), True, 'from imgaug import parameters as iap\n'), ((81967, 81987), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (81984, 81987), True, 'from imgaug import parameters as iap\n'), ((82098, 82117), 'imgaug.parameters.Choice', 'iap.Choice', (['[8, 16]'], {}), '([8, 16])\n', (82108, 82117), True, 'from imgaug import parameters as iap\n'), ((83171, 83188), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83183, 83188), True, 'from imgaug import parameters as iap\n'), ((85728, 85751), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0.01)'], {}), '(0.01)\n', (85745, 85751), True, 'from imgaug import parameters as iap\n'), ((85867, 85889), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.4, 0.8]'], {}), '([0.4, 0.8])\n', (85877, 85889), True, 'from imgaug import parameters as iap\n'), ((86953, 86970), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (86965, 86970), True, 'from imgaug import parameters as iap\n'), ((87232, 87265), 'imgaug.parameters.Choice', 'iap.Choice', (["['nearest', 'linear']"], {}), "(['nearest', 'linear'])\n", (87242, 87265), True, 'from imgaug import parameters as iap\n'), ((87429, 87475), 'numpy.logical_and', 'np.logical_and', (['(0.05 < samples)', '(samples < 0.95)'], {}), '(0.05 < samples, samples < 0.95)\n', (87443, 87475), True, 'import numpy as np\n'), ((87856, 87873), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (87868, 87873), True, 'from imgaug import parameters as iap\n'), ((88280, 88298), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (88292, 88298), True, 'import imgaug.random as iarandom\n'), ((88401, 88419), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (88413, 88419), True, 'import imgaug.random as iarandom\n'), ((91526, 91544), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (91538, 91544), True, 'import imgaug.random as 
iarandom\n'), ((91644, 91662), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (91656, 91662), True, 'import imgaug.random as iarandom\n'), ((93652, 93685), 'numpy.all', 'np.all', (['(samples == value_expected)'], {}), '(samples == value_expected)\n', (93658, 93685), True, 'import numpy as np\n'), ((94445, 94472), 'numpy.abs', 'np.abs', (['(samples1 - samples2)'], {}), '(samples1 - samples2)\n', (94451, 94472), True, 'import numpy as np\n'), ((94735, 94753), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (94747, 94753), True, 'import imgaug.random as iarandom\n'), ((94853, 94871), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (94865, 94871), True, 'import imgaug.random as iarandom\n'), ((96895, 96942), 'numpy.isclose', 'np.isclose', (['sample', '(v1 * v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 * v2, atol=0.001, rtol=0)\n', (96905, 96942), True, 'import numpy as np\n'), ((97600, 97647), 'numpy.isclose', 'np.isclose', (['sample', '(v1 * v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 * v2, atol=0.001, rtol=0)\n', (97610, 97647), True, 'import numpy as np\n'), ((108716, 108763), 'numpy.isclose', 'np.isclose', (['sample', '(v1 + v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 + v2, atol=0.001, rtol=0)\n', (108726, 108763), True, 'import numpy as np\n'), ((109397, 109444), 'numpy.isclose', 'np.isclose', (['sample', '(v1 + v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 + v2, atol=0.001, rtol=0)\n', (109407, 109444), True, 'import numpy as np\n'), ((117920, 117948), 'imgaug.is_single_float', 'ia.is_single_float', (['exponent'], {}), '(exponent)\n', (117938, 117948), True, 'import imgaug as ia\n'), ((118975, 119003), 'imgaug.is_single_float', 'ia.is_single_float', (['exponent'], {}), '(exponent)\n', (118993, 119003), True, 'import imgaug as ia\n'), ((122887, 122912), 'imgaug.is_single_float', 'ia.is_single_float', (['value'], {}), '(value)\n', (122905, 122912), True, 
'import imgaug as ia\n'), ((125596, 125614), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (125608, 125614), True, 'import imgaug.random as iarandom\n'), ((125716, 125734), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (125728, 125734), True, 'import imgaug.random as iarandom\n'), ((129648, 129666), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (129660, 129666), True, 'import imgaug.random as iarandom\n'), ((129768, 129786), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (129780, 129786), True, 'import imgaug.random as iarandom\n'), ((133573, 133592), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (133583, 133592), True, 'from imgaug import parameters as iap\n'), ((135186, 135205), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (135196, 135205), True, 'from imgaug import parameters as iap\n'), ((136923, 136941), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (136935, 136941), True, 'import imgaug.random as iarandom\n'), ((137043, 137061), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (137055, 137061), True, 'import imgaug.random as iarandom\n'), ((137393, 137417), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['"""max"""'], {}), "('max')\n", (137410, 137417), True, 'from imgaug import parameters as iap\n'), ((137719, 137738), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (137729, 137738), True, 'from imgaug import parameters as iap\n'), ((138046, 138065), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (138056, 138065), True, 'from imgaug import parameters as iap\n'), ((139119, 139145), 'numpy.exp', 'np.exp', (['(-(5 * 1 + 0 - 0.5))'], {}), '(-(5 * 1 + 0 - 0.5))\n', (139125, 139145), True, 'import numpy as np\n'), ((140460, 140486), 'numpy.exp', 'np.exp', (['(-(5 * 1 + 0 - 0.5))'], {}), '(-(5 * 1 + 0 - 0.5))\n', 
(140466, 140486), True, 'import numpy as np\n'), ((141219, 141245), 'numpy.exp', 'np.exp', (['(-(1 * 1 + 0 - 0.5))'], {}), '(-(1 * 1 + 0 - 0.5))\n', (141225, 141245), True, 'import numpy as np\n'), ((141282, 141309), 'numpy.exp', 'np.exp', (['(-(10 * 1 + 0 - 0.5))'], {}), '(-(10 * 1 + 0 - 0.5))\n', (141288, 141309), True, 'import numpy as np\n'), ((142581, 142606), 'numpy.array', 'np.array', (['[val]'], {'dtype': 'dt'}), '([val], dtype=dt)\n', (142589, 142606), True, 'import numpy as np\n'), ((142630, 142655), 'numpy.array', 'np.array', (['[mul]'], {'dtype': 'dt'}), '([mul], dtype=dt)\n', (142638, 142655), True, 'import numpy as np\n'), ((142679, 142704), 'numpy.array', 'np.array', (['[add]'], {'dtype': 'dt'}), '([add], dtype=dt)\n', (142687, 142704), True, 'import numpy as np\n'), ((142731, 142759), 'numpy.array', 'np.array', (['[thresh]'], {'dtype': 'dt'}), '([thresh], dtype=dt)\n', (142739, 142759), True, 'import numpy as np\n'), ((143778, 143796), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (143790, 143796), True, 'import imgaug.random as iarandom\n'), ((143898, 143916), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (143910, 143916), True, 'import imgaug.random as iarandom\n'), ((22005, 22034), 'imgaug.parameters.force_np_float_dtype', 'iap.force_np_float_dtype', (['arr'], {}), '(arr)\n', (22029, 22034), True, 'from imgaug import parameters as iap\n'), ((43243, 43257), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (43252, 43257), True, 'import six.moves as sm\n'), ((93238, 93262), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (93255, 93262), True, 'from imgaug import parameters as iap\n'), ((95524, 95545), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (95541, 95545), True, 'from imgaug import parameters as iap\n'), ((96123, 96144), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (96140, 96144), 
True, 'from imgaug import parameters as iap\n'), ((96146, 96167), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (96163, 96167), True, 'from imgaug import parameters as iap\n'), ((96753, 96774), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (96770, 96774), True, 'from imgaug import parameters as iap\n'), ((97439, 97460), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (97456, 97460), True, 'from imgaug import parameters as iap\n'), ((97462, 97483), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (97479, 97483), True, 'from imgaug import parameters as iap\n'), ((100971, 100992), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (100988, 100992), True, 'from imgaug import parameters as iap\n'), ((101638, 101659), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (101655, 101659), True, 'from imgaug import parameters as iap\n'), ((101661, 101682), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (101678, 101682), True, 'from imgaug import parameters as iap\n'), ((102333, 102354), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (102350, 102354), True, 'from imgaug import parameters as iap\n'), ((103148, 103169), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (103165, 103169), True, 'from imgaug import parameters as iap\n'), ((103171, 103192), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (103188, 103192), True, 'from imgaug import parameters as iap\n'), ((107333, 107354), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (107350, 107354), True, 'from imgaug import parameters as iap\n'), ((107940, 107961), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (107957, 107961), True, 'from imgaug 
import parameters as iap\n'), ((107963, 107984), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (107980, 107984), True, 'from imgaug import parameters as iap\n'), ((108574, 108595), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (108591, 108595), True, 'from imgaug import parameters as iap\n'), ((109236, 109257), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (109253, 109257), True, 'from imgaug import parameters as iap\n'), ((109259, 109280), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (109276, 109280), True, 'from imgaug import parameters as iap\n'), ((112495, 112516), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (112512, 112516), True, 'from imgaug import parameters as iap\n'), ((113112, 113133), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (113129, 113133), True, 'from imgaug import parameters as iap\n'), ((113135, 113156), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (113152, 113156), True, 'from imgaug import parameters as iap\n'), ((113756, 113777), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (113773, 113777), True, 'from imgaug import parameters as iap\n'), ((114438, 114459), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (114455, 114459), True, 'from imgaug import parameters as iap\n'), ((114461, 114482), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (114478, 114482), True, 'from imgaug import parameters as iap\n'), ((118135, 118158), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['base'], {}), '(base)\n', (118152, 118158), True, 'from imgaug import parameters as iap\n'), ((119190, 119213), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['base'], {}), '(base)\n', (119207, 119213), True, 'from imgaug 
import parameters as iap\n'), ((119215, 119242), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['exponent'], {}), '(exponent)\n', (119232, 119242), True, 'from imgaug import parameters as iap\n'), ((122646, 122670), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (122663, 122670), True, 'from imgaug import parameters as iap\n'), ((142305, 142327), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['val'], {}), '(val)\n', (142322, 142327), True, 'from imgaug import parameters as iap\n'), ((39567, 39582), 'numpy.sum', 'np.sum', (['samples'], {}), '(samples)\n', (39573, 39582), True, 'import numpy as np\n'), ((95789, 95821), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (95797, 95821), True, 'import numpy as np\n'), ((96411, 96443), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (96419, 96443), True, 'import numpy as np\n'), ((97076, 97110), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (97084, 97110), True, 'import numpy as np\n'), ((97781, 97815), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (97789, 97815), True, 'import numpy as np\n'), ((101270, 101304), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (101278, 101304), True, 'import numpy as np\n'), ((101956, 101990), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (101964, 101990), True, 'import numpy as np\n'), ((102752, 102786), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (102760, 102786), True, 'import numpy as np\n'), ((103586, 103620), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (103594, 103620), True, 'import numpy as np\n'), ((118574, 118608), 
'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (118582, 118608), True, 'import numpy as np\n'), ((119648, 119682), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (119656, 119682), True, 'import numpy as np\n'), ((142843, 142882), 'numpy.exp', 'np.exp', (['(-(val_ * mul_ + add_ - thresh_))'], {}), '(-(val_ * mul_ + add_ - thresh_))\n', (142849, 142882), True, 'import numpy as np\n'), ((107630, 107662), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (107638, 107662), True, 'import numpy as np\n'), ((108256, 108288), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (108264, 108288), True, 'import numpy as np\n'), ((108897, 108931), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (108905, 108931), True, 'import numpy as np\n'), ((109578, 109612), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (109586, 109612), True, 'import numpy as np\n'), ((112792, 112824), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (112800, 112824), True, 'import numpy as np\n'), ((113428, 113460), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (113436, 113460), True, 'import numpy as np\n'), ((114089, 114123), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (114097, 114123), True, 'import numpy as np\n'), ((114790, 114824), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (114798, 114824), True, 'import numpy as np\n'), ((93327, 93346), 'numpy.float64', 'np.float64', (['[value]'], {}), '([value])\n', (93337, 93346), True, 'import numpy as np\n')]
|
import os
import databases
import sqlalchemy
# Database connection settings, each sourced from the environment.
DB_CONNECTOR = os.getenv('APP_DB_CONNECTOR')
DB_USERNAME = os.getenv('APP_DB_USERNAME')
DB_PASSWORD = os.getenv('APP_DB_PASSWORD')
DB_HOST = os.getenv('APP_DB_HOST')
DB_PORT = os.getenv('APP_DB_PORT')
DB_DATABASE = os.getenv('APP_DB_DATABASE')

# SQLAlchemy-style connection URL assembled from the pieces above,
# e.g. "postgresql://user:pass@host:port/dbname".
DB_URL = '{}://{}:{}@{}:{}/{}'.format(
    DB_CONNECTOR, DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_DATABASE
)

# Shared async database handle and SQLAlchemy metadata registry.
db: databases.Database = databases.Database(DB_URL)
metadata: sqlalchemy.MetaData = sqlalchemy.MetaData()
|
[
"sqlalchemy.MetaData",
"databases.Database",
"os.getenv"
] |
[((62, 91), 'os.getenv', 'os.getenv', (['"""APP_DB_CONNECTOR"""'], {}), "('APP_DB_CONNECTOR')\n", (71, 91), False, 'import os\n'), ((106, 134), 'os.getenv', 'os.getenv', (['"""APP_DB_USERNAME"""'], {}), "('APP_DB_USERNAME')\n", (115, 134), False, 'import os\n'), ((149, 177), 'os.getenv', 'os.getenv', (['"""APP_DB_PASSWORD"""'], {}), "('APP_DB_PASSWORD')\n", (158, 177), False, 'import os\n'), ((188, 212), 'os.getenv', 'os.getenv', (['"""APP_DB_HOST"""'], {}), "('APP_DB_HOST')\n", (197, 212), False, 'import os\n'), ((223, 247), 'os.getenv', 'os.getenv', (['"""APP_DB_PORT"""'], {}), "('APP_DB_PORT')\n", (232, 247), False, 'import os\n'), ((262, 290), 'os.getenv', 'os.getenv', (['"""APP_DB_DATABASE"""'], {}), "('APP_DB_DATABASE')\n", (271, 290), False, 'import os\n'), ((409, 435), 'databases.Database', 'databases.Database', (['DB_URL'], {}), '(DB_URL)\n', (427, 435), False, 'import databases\n'), ((468, 489), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', ([], {}), '()\n', (487, 489), False, 'import sqlalchemy\n')]
|
import os
import time
import argparse
import torchvision
import torch
import torch.nn as nn
from util import AverageMeter, TwoAugUnsupervisedDataset
from encoder import SmallAlexNet
from align_uniform import align_loss, uniform_loss
import json
def parse_option():
    """Parse command-line options for STL-10 alignment/uniformity training.

    Returns the populated :class:`argparse.Namespace` with derived fields:
    ``lr`` defaults to linear scaling (0.12 per 256 samples of batch size),
    ``gpus`` is converted from indices to ``torch.device`` objects, and
    ``save_folder`` (``result_folder``/``<experiment name>``) is created.
    """
    parser = argparse.ArgumentParser('STL-10 Representation Learning with Alignment and Uniformity Losses')
    parser.add_argument('--align_w', type=float, default=1, help='Alignment loss weight')
    parser.add_argument('--unif_w', type=float, default=1, help='Uniformity loss weight')
    parser.add_argument('--align_alpha', type=float, default=2, help='alpha in alignment loss')
    parser.add_argument('--unif_t', type=float, default=2, help='t in uniformity loss')
    parser.add_argument('--batch_size', type=int, default=768, help='Batch size')
    parser.add_argument('--epochs', type=int, default=200, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=None,
                        help='Learning rate. Default is linear scaling 0.12 per 256 batch size')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='Learning rate decay rate')
    parser.add_argument('--lr_decay_epochs', default=[155, 170, 185], nargs='*', type=int,
                        help='When to decay learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='L2 weight decay')
    parser.add_argument('--feat_dim', type=int, default=128, help='Feature dimensionality')
    parser.add_argument('--num_workers', type=int, default=20, help='Number of data loader workers to use')
    parser.add_argument('--log_interval', type=int, default=40, help='Number of iterations between logs')
    parser.add_argument('--gpus', default=[0], nargs='*', type=int,
                        help='List of GPU indices to use, e.g., --gpus 0 1 2 3')
    parser.add_argument('--data_folder', type=str, default='./data', help='Path to data')
    parser.add_argument('--result_folder', type=str, default='./results', help='Base directory to save model')
    parser.add_argument('--suffix', type=str, default='info', help='Name Suffix')
    opt = parser.parse_args()
    # BUG FIX: --data_folder and --result_folder were previously overwritten
    # unconditionally with hard-coded cluster paths right after parsing,
    # which made both CLI flags dead. The overrides are removed so the
    # user-supplied (or default) values are honored.
    if opt.lr is None:
        # Linear scaling rule: 0.12 learning rate per 256 samples of batch size.
        opt.lr = 0.12 * (opt.batch_size / 256)
    print(json.dumps(vars(opt), indent=2, default=lambda o: o.__dict__))
    opt.gpus = list(map(lambda x: torch.device('cuda', x), opt.gpus))
    # Experiment name encodes loss weights and hyperparameters, plus suffix.
    exp_name = f"align{opt.align_w:g}alpha{opt.align_alpha:g}_unif{opt.unif_w:g}t{opt.unif_t:g}"
    if len(opt.suffix) > 0:
        exp_name += f'_{opt.suffix}'
    opt.save_folder = os.path.join(
        opt.result_folder,
        exp_name,
    )
    os.makedirs(opt.save_folder, exist_ok=True)
    return opt
def get_data_loader(opt):
    """Build a DataLoader over STL-10 (train+unlabeled) that yields two
    augmented crops per image, together with their crop-box info."""
    from util import RandomResizedCropWithBox, TwoAugUnsupervisedDatasetWithBox

    # Crop transform reports the sampled box; the rest is applied after cropping.
    crop = RandomResizedCropWithBox(64, scale=(0.08, 1))
    post_crop = torchvision.transforms.Compose([
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
        torchvision.transforms.RandomGrayscale(p=0.2),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            (0.44087801806139126, 0.42790631331699347, 0.3867879370752931),
            (0.26826768628079806, 0.2610450402318512, 0.26866836876860795),
        ),
    ])

    stl10 = torchvision.datasets.STL10(opt.data_folder, 'train+unlabeled', download=True)
    dataset = TwoAugUnsupervisedDatasetWithBox(stl10, crop, post_crop)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        num_workers=opt.num_workers,
        shuffle=True,
        pin_memory=True,
    )
def get_rate(x):
    """Return the share of truthy entries in *x* as a percentage."""
    truthy = sum(x)
    total = len(x)
    return truthy / total * 100
def main():
    """Train the SmallAlexNet encoder on STL-10 with alignment + uniformity
    losses, plus an extra term computed on "info"/"non-info" subsets of each
    batch, checkpointing periodically and at the end."""
    opt = parse_option()
    print(f'Optimize: {opt.align_w:g} * loss_align(alpha={opt.align_alpha:g}) + {opt.unif_w:g} * loss_uniform(t={opt.unif_t:g})')
    # Pin the primary GPU; request deterministic cuDNN kernels (benchmark mode on).
    torch.cuda.set_device(opt.gpus[0])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    # Data-parallel encoder replicated across all requested GPUs.
    encoder = nn.DataParallel(SmallAlexNet(feat_dim=opt.feat_dim).to(opt.gpus[0]), opt.gpus)
    optim = torch.optim.SGD(encoder.parameters(), lr=opt.lr,
                    momentum=opt.momentum, weight_decay=opt.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, gamma=opt.lr_decay_rate,
                                         milestones=opt.lr_decay_epochs)
    loader = get_data_loader(opt)
    # Running statistics, reset each epoch, used only for logging.
    align_meter = AverageMeter('align_loss')
    unif_meter = AverageMeter('uniform_loss')
    loss_meter = AverageMeter('total_loss')
    it_time_meter = AverageMeter('iter_time')
    info_rate_meter = AverageMeter('info_rate')
    noni_rate_meter = AverageMeter('noni_rate')
    for epoch in range(opt.epochs):
        align_meter.reset()
        unif_meter.reset()
        loss_meter.reset()
        it_time_meter.reset()
        t0 = time.time()
        for ii, (im_x, info_x, im_y, info_y) in enumerate(loader):
            optim.zero_grad()
            # Encode both augmented views in one forward pass, then split back.
            x, y = encoder(torch.cat([im_x.to(opt.gpus[0]), im_y.to(opt.gpus[0])])).chunk(2)
            # Base objective: alignment between views + mean uniformity of each view.
            align_loss_val = align_loss(x, y, alpha=opt.align_alpha)
            unif_loss_val = (uniform_loss(x, t=opt.unif_t) + uniform_loss(y, t=opt.unif_t)) / 2
            loss = align_loss_val * opt.align_w + unif_loss_val * opt.unif_w
            # Partition samples by their per-view score: >0.5 counts as "info",
            # <0.2 as "non-info" (presumably an informativeness measure from the
            # dataset — scores in [0.2, 0.5] fall into neither bucket).
            info_x, info_y = info_x.to(opt.gpus[0]), info_y.to(opt.gpus[0])
            info_x_idx, noni_x_idx = info_x > 0.5, info_x < 0.2
            info_y_idx, noni_y_idx = info_y > 0.5, info_y < 0.2
            info_pair_idx = info_x_idx & info_y_idx
            # Additional alignment only on pairs where both views are "info".
            if info_pair_idx.any():
                align_loss_info = align_loss(x[info_pair_idx], y[info_pair_idx], alpha=opt.align_alpha)
            else:
                align_loss_info = 0
            # Additional uniformity only on "non-info" samples, averaged over views.
            uniform_loss_noninfo = 0
            if noni_x_idx.any():
                uniform_loss_noninfo += uniform_loss(x[noni_x_idx], t=opt.unif_t)
            if noni_y_idx.any():
                uniform_loss_noninfo += uniform_loss(y[noni_y_idx], t=opt.unif_t)
            uniform_loss_noninfo /= 2
            loss_info = align_loss_info * opt.align_w + uniform_loss_noninfo * opt.unif_w
            loss = loss + loss_info
            align_meter.update(align_loss_val, x.shape[0])
            unif_meter.update(unif_loss_val)
            loss_meter.update(loss, x.shape[0])
            info_rate_meter.update((get_rate(info_x_idx)+get_rate(info_y_idx))/2)
            noni_rate_meter.update((get_rate(noni_x_idx)+get_rate(noni_y_idx))/2)
            loss.backward()
            optim.step()
            it_time_meter.update(time.time() - t0)
            if ii % opt.log_interval == 0:
                print(f"Epoch {epoch}/{opt.epochs}\tIt {ii}/{len(loader)}\t" +
                      f"{align_meter}\t{unif_meter}\t{loss_meter}\t{it_time_meter}\t{info_rate_meter}\t{noni_rate_meter}")
            t0 = time.time()
        scheduler.step()
        # Periodic checkpoint every 40 epochs (including epoch 0).
        if epoch % 40 == 0:
            ckpt_file = os.path.join(opt.save_folder, f'encoder-ep{epoch}.pth')
            torch.save(encoder.module.state_dict(), ckpt_file)
    # Final checkpoint of the bare module (unwrapped from DataParallel).
    ckpt_file = os.path.join(opt.save_folder, 'encoder.pth')
    torch.save(encoder.module.state_dict(), ckpt_file)
    print(f'Saved to {ckpt_file}')


if __name__ == '__main__':
    main()
|
[
"torch.optim.lr_scheduler.MultiStepLR",
"torchvision.transforms.ColorJitter",
"argparse.ArgumentParser",
"torchvision.datasets.STL10",
"align_uniform.uniform_loss",
"util.RandomResizedCropWithBox",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Normalize",
"util.AverageMeter",
"time.time",
"torch.cuda.set_device",
"torch.device",
"os.makedirs",
"align_uniform.align_loss",
"torchvision.transforms.RandomGrayscale",
"os.path.join",
"encoder.SmallAlexNet",
"torch.utils.data.DataLoader"
] |
[((282, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""STL-10 Representation Learning with Alignment and Uniformity Losses"""'], {}), "(\n 'STL-10 Representation Learning with Alignment and Uniformity Losses')\n", (305, 381), False, 'import argparse\n'), ((2851, 2892), 'os.path.join', 'os.path.join', (['opt.result_folder', 'exp_name'], {}), '(opt.result_folder, exp_name)\n', (2863, 2892), False, 'import os\n'), ((2920, 2963), 'os.makedirs', 'os.makedirs', (['opt.save_folder'], {'exist_ok': '(True)'}), '(opt.save_folder, exist_ok=True)\n', (2931, 2963), False, 'import os\n'), ((3109, 3154), 'util.RandomResizedCropWithBox', 'RandomResizedCropWithBox', (['(64)'], {'scale': '(0.08, 1)'}), '(64, scale=(0.08, 1))\n', (3133, 3154), False, 'from util import RandomResizedCropWithBox, TwoAugUnsupervisedDatasetWithBox\n'), ((3820, 3948), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'opt.batch_size', 'num_workers': 'opt.num_workers', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset, batch_size=opt.batch_size, num_workers\n =opt.num_workers, shuffle=True, pin_memory=True)\n', (3847, 3948), False, 'import torch\n'), ((4209, 4243), 'torch.cuda.set_device', 'torch.cuda.set_device', (['opt.gpus[0]'], {}), '(opt.gpus[0])\n', (4230, 4243), False, 'import torch\n'), ((4586, 4690), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optim'], {'gamma': 'opt.lr_decay_rate', 'milestones': 'opt.lr_decay_epochs'}), '(optim, gamma=opt.lr_decay_rate,\n milestones=opt.lr_decay_epochs)\n', (4622, 4690), False, 'import torch\n'), ((4794, 4820), 'util.AverageMeter', 'AverageMeter', (['"""align_loss"""'], {}), "('align_loss')\n", (4806, 4820), False, 'from util import AverageMeter, TwoAugUnsupervisedDataset\n'), ((4838, 4866), 'util.AverageMeter', 'AverageMeter', (['"""uniform_loss"""'], {}), "('uniform_loss')\n", (4850, 4866), False, 'from util import AverageMeter, 
TwoAugUnsupervisedDataset\n'), ((4884, 4910), 'util.AverageMeter', 'AverageMeter', (['"""total_loss"""'], {}), "('total_loss')\n", (4896, 4910), False, 'from util import AverageMeter, TwoAugUnsupervisedDataset\n'), ((4931, 4956), 'util.AverageMeter', 'AverageMeter', (['"""iter_time"""'], {}), "('iter_time')\n", (4943, 4956), False, 'from util import AverageMeter, TwoAugUnsupervisedDataset\n'), ((4979, 5004), 'util.AverageMeter', 'AverageMeter', (['"""info_rate"""'], {}), "('info_rate')\n", (4991, 5004), False, 'from util import AverageMeter, TwoAugUnsupervisedDataset\n'), ((5027, 5052), 'util.AverageMeter', 'AverageMeter', (['"""noni_rate"""'], {}), "('noni_rate')\n", (5039, 5052), False, 'from util import AverageMeter, TwoAugUnsupervisedDataset\n'), ((7456, 7500), 'os.path.join', 'os.path.join', (['opt.save_folder', '"""encoder.pth"""'], {}), "(opt.save_folder, 'encoder.pth')\n", (7468, 7500), False, 'import os\n'), ((3696, 3773), 'torchvision.datasets.STL10', 'torchvision.datasets.STL10', (['opt.data_folder', '"""train+unlabeled"""'], {'download': '(True)'}), "(opt.data_folder, 'train+unlabeled', download=True)\n", (3722, 3773), False, 'import torchvision\n'), ((5215, 5226), 'time.time', 'time.time', ([], {}), '()\n', (5224, 5226), False, 'import time\n'), ((3219, 3264), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ([], {}), '()\n', (3262, 3264), False, 'import torchvision\n'), ((3274, 3328), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4, 0.4)\n', (3308, 3328), False, 'import torchvision\n'), ((3338, 3383), 'torchvision.transforms.RandomGrayscale', 'torchvision.transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (3376, 3383), False, 'import torchvision\n'), ((3393, 3426), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (3424, 3426), False, 'import torchvision\n'), ((3436, 
3605), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.44087801806139126, 0.42790631331699347, 0.3867879370752931)', '(0.26826768628079806, 0.2610450402318512, 0.26866836876860795)'], {}), '((0.44087801806139126, 0.42790631331699347,\n 0.3867879370752931), (0.26826768628079806, 0.2610450402318512, \n 0.26866836876860795))\n', (3468, 3605), False, 'import torchvision\n'), ((5446, 5485), 'align_uniform.align_loss', 'align_loss', (['x', 'y'], {'alpha': 'opt.align_alpha'}), '(x, y, alpha=opt.align_alpha)\n', (5456, 5485), False, 'from align_uniform import align_loss, uniform_loss\n'), ((7231, 7242), 'time.time', 'time.time', ([], {}), '()\n', (7240, 7242), False, 'import time\n'), ((7321, 7376), 'os.path.join', 'os.path.join', (['opt.save_folder', 'f"""encoder-ep{epoch}.pth"""'], {}), "(opt.save_folder, f'encoder-ep{epoch}.pth')\n", (7333, 7376), False, 'import os\n'), ((2629, 2652), 'torch.device', 'torch.device', (['"""cuda"""', 'x'], {}), "('cuda', x)\n", (2641, 2652), False, 'import torch\n'), ((4363, 4398), 'encoder.SmallAlexNet', 'SmallAlexNet', ([], {'feat_dim': 'opt.feat_dim'}), '(feat_dim=opt.feat_dim)\n', (4375, 4398), False, 'from encoder import SmallAlexNet\n'), ((5989, 6058), 'align_uniform.align_loss', 'align_loss', (['x[info_pair_idx]', 'y[info_pair_idx]'], {'alpha': 'opt.align_alpha'}), '(x[info_pair_idx], y[info_pair_idx], alpha=opt.align_alpha)\n', (5999, 6058), False, 'from align_uniform import align_loss, uniform_loss\n'), ((6224, 6265), 'align_uniform.uniform_loss', 'uniform_loss', (['x[noni_x_idx]'], {'t': 'opt.unif_t'}), '(x[noni_x_idx], t=opt.unif_t)\n', (6236, 6265), False, 'from align_uniform import align_loss, uniform_loss\n'), ((6339, 6380), 'align_uniform.uniform_loss', 'uniform_loss', (['y[noni_y_idx]'], {'t': 'opt.unif_t'}), '(y[noni_y_idx], t=opt.unif_t)\n', (6351, 6380), False, 'from align_uniform import align_loss, uniform_loss\n'), ((5515, 5544), 'align_uniform.uniform_loss', 'uniform_loss', (['x'], {'t': 
'opt.unif_t'}), '(x, t=opt.unif_t)\n', (5527, 5544), False, 'from align_uniform import align_loss, uniform_loss\n'), ((5547, 5576), 'align_uniform.uniform_loss', 'uniform_loss', (['y'], {'t': 'opt.unif_t'}), '(y, t=opt.unif_t)\n', (5559, 5576), False, 'from align_uniform import align_loss, uniform_loss\n'), ((6951, 6962), 'time.time', 'time.time', ([], {}), '()\n', (6960, 6962), False, 'import time\n')]
|
# SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2021 Open Networking Foundation
import time
from ipaddress import IPv4Address
from pprint import pprint
from trex_test import TrexTest
from grpc_test import *
from trex_stl_lib.api import (
STLVM,
STLPktBuilder,
STLStream,
STLTXCont,
)
import ptf.testutils as testutils
# Destination MAC for generated frames — presumably the BESS-UPF port MAC; TODO confirm
UPF_DEST_MAC = "0c:c4:7a:19:6d:ca"
# Port setup
TREX_SENDER_PORT = 0
TREX_RECEIVER_PORT = 1
BESS_SENDER_PORT = 2
BESS_RECEIVER_PORT = 3
# Test specs
DURATION = 10  # seconds of generated traffic
RATE = 100_000 # 100 Kpps
UE_COUNT = 10_000 # 10k UEs
GTPU_PORT = 2152  # standard GTP-U UDP port
PKT_SIZE = 64  # length (bytes) passed to simple_udp_packet as pktlen
class PerFlowQosMetricsTest(TrexTest, GrpcTest):
    """
    Generates 100 Kpps downlink traffic for 10k dest UE IP addresses. Uses
    BESS-UPF QoS metrics to verify baseline packet loss, latency, and jitter
    results.
    """
    @autocleanup
    def runTest(self):
        # Plan: program one PDR/FAR/QER triple per UE, generate randomized
        # downlink traffic with TRex, then assert on the per-flow QoS stats.
        n3TEID = 0
        startIP = IPv4Address('172.16.17.32')
        endIP = startIP + UE_COUNT - 1
        accessIP = IPv4Address('10.128.13.29')
        enbIP = IPv4Address('10.27.19.99') # arbitrary ip for non-existent eNodeB for gtpu encap
        # program UPF for downlink traffic by installing PDRs and FARs
        print("Installing PDRs and FARs...")
        for i in range(UE_COUNT):
            # install N6 DL PDR to match UE dst IP
            pdrDown = self.createPDR(
                srcIface = CORE,
                dstIP = int(startIP + i),
                srcIfaceMask = 0xFF,
                dstIPMask = 0xFFFFFFFF,
                precedence = 255,
                fseID = n3TEID + i + 1, # start from 1
                ctrID = 0,
                farID = i,
                qerIDList = [N6, 1],
                needDecap = 0,
            )
            self.addPDR(pdrDown)
            # install N6 DL FAR for encap
            farDown = self.createFAR(
                farID = i,
                fseID = n3TEID + i + 1, # start from 1
                applyAction = ACTION_FORWARD,
                dstIntf = DST_ACCESS,
                tunnelType = 0x1,
                tunnelIP4Src = int(accessIP),
                tunnelIP4Dst = int(enbIP), # only one eNB to send to downlink
                tunnelTEID = 0,
                tunnelPort = GTPU_PORT,
            )
            self.addFAR(farDown)
            # install N6 DL/UL application QER
            qer = self.createQER(
                gate = GATE_UNMETER,
                qerID = N6,
                fseID = n3TEID + i + 1, # start from 1
                qfi = 9,
                ulGbr = 0,
                ulMbr = 0,
                dlGbr = 0,
                dlMbr = 0,
                burstDurationMs = 10,
            )
            self.addApplicationQER(qer)
        # set up trex to send traffic thru UPF
        print("Setting up TRex client...")
        vm = STLVM()
        # randomize dst IP over the whole UE range, one address per packet
        vm.var(
            name="dst",
            min_value=str(startIP),
            max_value=str(endIP),
            size=4,
            op="random",
        )
        vm.write(fv_name="dst", pkt_offset="IP.dst")
        vm.fix_chksum()
        pkt = testutils.simple_udp_packet(
            pktlen=PKT_SIZE,
            eth_dst=UPF_DEST_MAC,
            with_udp_chksum=False,
        )
        stream = STLStream(
            packet=STLPktBuilder(pkt=pkt, vm=vm),
            mode=STLTXCont(pps=RATE),
        )
        self.trex_client.add_streams(stream, ports=[BESS_SENDER_PORT])
        print("Running traffic...")
        s_time = time.time()
        self.trex_client.start(
            ports=[BESS_SENDER_PORT], mult="1", duration=DURATION
        )
        # FIXME: pull QoS metrics at end instead of while traffic running
        time.sleep(DURATION - 5)
        if self.trex_client.is_traffic_active():
            stats = self.getSessionStats(q=[90, 99, 99.9], quiet=True)
            # NOTE(review): preQos and postUlQos are currently unused; postDlQos
            # is only bound when traffic is still active at this point — if the
            # run finishes early the loop below raises NameError. Verify timing.
            preQos = stats["preQos"]
            postDlQos = stats["postDlQos"]
            postUlQos = stats["postUlQos"]
        self.trex_client.wait_on_traffic(ports=[BESS_SENDER_PORT])
        print(f"Duration was {time.time() - s_time}")
        trex_stats = self.trex_client.get_stats()
        sent_packets = trex_stats['total']['opackets']
        recv_packets = trex_stats['total']['ipackets']
        # 0% packet loss
        self.assertEqual(
            sent_packets,
            recv_packets,
            f"Didn't receive all packets; sent {sent_packets}, received {recv_packets}",
        )
        for fseid in postDlQos:
            lat = fseid['latency']['percentileValuesNs']
            jitter = fseid['jitter']['percentileValuesNs']
            # 99th %ile latency < 100 us
            self.assertLessEqual(
                int(lat[1]) / 1000,
                100,
                f"99th %ile latency was higher than 100 us! Was {int(lat[1]) / 1000} us"
            )
            # 99.9th %ile latency < 200 us
            self.assertLessEqual(
                int(lat[2]) / 1000,
                200,
                f"99.9th %ile latency was higher than 200 us! Was {int(lat[2]) / 1000} us"
            )
            # 99th% jitter < 100 us
            self.assertLessEqual(
                int(jitter[1]) / 1000,
                100,
                f"99th %ile jitter was higher than 100 us! Was {int(jitter[1]) / 1000} us"
            )
        return
|
[
"ptf.testutils.simple_udp_packet",
"ipaddress.IPv4Address",
"trex_stl_lib.api.STLTXCont",
"time.sleep",
"trex_stl_lib.api.STLVM",
"time.time",
"trex_stl_lib.api.STLPktBuilder"
] |
[((901, 928), 'ipaddress.IPv4Address', 'IPv4Address', (['"""172.16.17.32"""'], {}), "('172.16.17.32')\n", (912, 928), False, 'from ipaddress import IPv4Address\n'), ((988, 1015), 'ipaddress.IPv4Address', 'IPv4Address', (['"""10.128.13.29"""'], {}), "('10.128.13.29')\n", (999, 1015), False, 'from ipaddress import IPv4Address\n'), ((1032, 1058), 'ipaddress.IPv4Address', 'IPv4Address', (['"""10.27.19.99"""'], {}), "('10.27.19.99')\n", (1043, 1058), False, 'from ipaddress import IPv4Address\n'), ((2818, 2825), 'trex_stl_lib.api.STLVM', 'STLVM', ([], {}), '()\n', (2823, 2825), False, 'from trex_stl_lib.api import STLVM, STLPktBuilder, STLStream, STLTXCont\n'), ((3083, 3176), 'ptf.testutils.simple_udp_packet', 'testutils.simple_udp_packet', ([], {'pktlen': 'PKT_SIZE', 'eth_dst': 'UPF_DEST_MAC', 'with_udp_chksum': '(False)'}), '(pktlen=PKT_SIZE, eth_dst=UPF_DEST_MAC,\n with_udp_chksum=False)\n', (3110, 3176), True, 'import ptf.testutils as testutils\n'), ((3471, 3482), 'time.time', 'time.time', ([], {}), '()\n', (3480, 3482), False, 'import time\n'), ((3674, 3698), 'time.sleep', 'time.sleep', (['(DURATION - 5)'], {}), '(DURATION - 5)\n', (3684, 3698), False, 'import time\n'), ((3267, 3296), 'trex_stl_lib.api.STLPktBuilder', 'STLPktBuilder', ([], {'pkt': 'pkt', 'vm': 'vm'}), '(pkt=pkt, vm=vm)\n', (3280, 3296), False, 'from trex_stl_lib.api import STLVM, STLPktBuilder, STLStream, STLTXCont\n'), ((3315, 3334), 'trex_stl_lib.api.STLTXCont', 'STLTXCont', ([], {'pps': 'RATE'}), '(pps=RATE)\n', (3324, 3334), False, 'from trex_stl_lib.api import STLVM, STLPktBuilder, STLStream, STLTXCont\n'), ((4041, 4052), 'time.time', 'time.time', ([], {}), '()\n', (4050, 4052), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2017 Science and Technology Facilities Council
#
# All rights reserved.
#
# Modifications made as part of the fparser project are distributed
# under the following license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
# Modified M.Hambley, UK Met Office
##############################################################################
'''
Test battery associated with fparser.common.base_classes package.
'''
import re
import pytest
import fparser.common.base_classes
import fparser.common.readfortran
import fparser.common.sourceinfo
import fparser.common.utils
from fparser import api
def test_statement_logging(log, monkeypatch):
    '''
    Tests the Statement class' logging methods.
    '''
    class DummyParser(object):
        '''
        Null parser harness.
        '''
        def __init__(self, reader):
            self.reader = reader
    parser = DummyParser(
        fparser.common.readfortran.FortranStringReader("dummy = 1"))
    monkeypatch.setattr(fparser.common.base_classes.Statement,
                        'process_item', lambda x: None, raising=False)
    statement = fparser.common.base_classes.Statement(parser, None)
    def expected(level, text):
        '''
        Builds the full messages dict with *text* recorded at *level* only.
        '''
        messages = {'critical': [], 'debug': [], 'error': [],
                    'info': [], 'warning': []}
        messages[level] = [text]
        return messages
    statement.error('Scary biscuits')
    assert log.messages == expected('error', 'Scary biscuits')
    log.reset()
    statement.warning('Trepidacious Cetations')
    assert log.messages == expected('warning', 'Trepidacious Cetations')
    log.reset()
    statement.info('Hilarious Ontologies')
    assert log.messages == expected('info', 'Hilarious Ontologies')
def test_log_comment_mix(log):
    '''
    Tests that unexpected Fortran 90 comment in fixed format source is logged.
    '''
    class EndDummy(fparser.common.base_classes.EndStatement):
        '''
        Dummy EndStatement.
        '''
        match = re.compile(r'\s*end(\s*thing\s*\w*|)\s*\Z', re.I).match
    class BeginHarness(fparser.common.base_classes.BeginStatement):
        '''
        Dummy BeginStatement.
        '''
        end_stmt_cls = EndDummy
        classes = []
        match = re.compile(r'\s*thing\s+(\w*)\s*\Z', re.I).match
        def get_classes(self):
            '''
            Returns an empty list of contained statements.
            '''
            return []
    code = ' x=1 ! Cheese'
    parent = fparser.common.readfortran.FortranStringReader(
        code, ignore_comments=False)
    # FortranFormat(False, True): presumably fixed-format, strict — TODO confirm
    parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
    item = fparser.common.readfortran.Line(code, (1, 1), None, None, parent)
    # Analysis must fail: '!' inline comments are not Fortran 77 / fixed form,
    # and the resulting warning is checked below.
    with pytest.raises(fparser.common.utils.AnalyzeError):
        __ = BeginHarness(parent, item)
    expected = ' 1: x=1 ! Cheese <== ' \
               + 'no parse pattern found for "x=1 ! cheese" ' \
               + "in 'BeginHarness' block, " \
               + 'trying to remove inline comment (not in Fortran 77).'
    # Only the second line of the multi-line warning carries the message.
    result = log.messages['warning'][0].split('\n')[1]
    assert result == expected
def test_log_unexpected(log):
    '''
    Tests that an unexpected thing between begin and end statements logs an
    event.
    '''
    class EndThing(fparser.common.base_classes.EndStatement):
        '''
        Dummy EndStatement class.
        '''
        isvalid = True
        match = re.compile(r'\s*end(\s+thing(\s+\w+)?)?\s*$', re.I).match
    class BeginThing(fparser.common.base_classes.BeginStatement):
        '''
        Dummy BeginStatement class.
        '''
        end_stmt_cls = EndThing
        classes = []
        match = re.compile(r'\s*thing\s+(\w+)?\s*$', re.I).match
        def get_classes(self):
            '''
            Returns an empty list of contained classes.
            '''
            return []
    # 'jumper' matches neither BeginThing's pattern nor any contained class,
    # so analysis of the block must fail and log a warning.
    code = [' jumper', ' end thing']
    parent = fparser.common.readfortran.FortranStringReader('\n'.join(code))
    # FortranFormat(False, True): presumably fixed-format, strict — TODO confirm
    parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
    item = fparser.common.readfortran.Line(code[0], (1, 1), None, None, parent)
    with pytest.raises(fparser.common.utils.AnalyzeError):
        __ = BeginThing(parent, item)
    expected = ' 1: jumper <== no parse pattern found for "jumper" ' \
               "in 'BeginThing' block."
    # Only the second line of the multi-line warning carries the message.
    result = log.messages['warning'][0].split('\n')[1]
    assert result == expected
def test_space_after_enddo():
    '''Make sure that there is no space after an 'END DO' without name,
    but there is a space if there is a name after 'END DO'.
    '''
    # Unnamed loop: the regenerated source must not contain "END DO "
    # with a trailing space.
    unnamed_source = '''\
    subroutine foo
    integer i, r
    do i = 1,100
      r = r + 1
    end do
    end subroutine foo
    '''
    regenerated = api.parse(unnamed_source, isfree=True,
                            isstrict=False).tofortran()
    assert "END DO " not in regenerated
    # Named loop: the loop name must follow "END DO", separated by a space.
    named_source = '''\
    subroutine foo
    integer i, r
    loop1: do i = 1,100
      r = r + 1
    end do loop1
    end subroutine foo
    '''
    regenerated = api.parse(named_source, isfree=True,
                            isstrict=False).tofortran()
    assert "END DO loop1" in regenerated
|
[
"pytest.raises",
"fparser.api.parse",
"re.compile"
] |
[((6763, 6813), 'fparser.api.parse', 'api.parse', (['source_str'], {'isfree': '(True)', 'isstrict': '(False)'}), '(source_str, isfree=True, isstrict=False)\n', (6772, 6813), False, 'from fparser import api\n'), ((7035, 7085), 'fparser.api.parse', 'api.parse', (['source_str'], {'isfree': '(True)', 'isstrict': '(False)'}), '(source_str, isfree=True, isstrict=False)\n', (7044, 7085), False, 'from fparser import api\n'), ((4698, 4746), 'pytest.raises', 'pytest.raises', (['fparser.common.utils.AnalyzeError'], {}), '(fparser.common.utils.AnalyzeError)\n', (4711, 4746), False, 'import pytest\n'), ((6134, 6182), 'pytest.raises', 'pytest.raises', (['fparser.common.utils.AnalyzeError'], {}), '(fparser.common.utils.AnalyzeError)\n', (6147, 6182), False, 'import pytest\n'), ((3963, 4017), 're.compile', 're.compile', (['"""\\\\s*end(\\\\s*thing\\\\s*\\\\w*|)\\\\s*\\\\Z"""', 're.I'], {}), "('\\\\s*end(\\\\s*thing\\\\s*\\\\w*|)\\\\s*\\\\Z', re.I)\n", (3973, 4017), False, 'import re\n'), ((4211, 4257), 're.compile', 're.compile', (['"""\\\\s*thing\\\\s+(\\\\w*)\\\\s*\\\\Z"""', 're.I'], {}), "('\\\\s*thing\\\\s+(\\\\w*)\\\\s*\\\\Z', re.I)\n", (4221, 4257), False, 'import re\n'), ((5399, 5454), 're.compile', 're.compile', (['"""\\\\s*end(\\\\s+thing(\\\\s+\\\\w+)?)?\\\\s*$"""', 're.I'], {}), "('\\\\s*end(\\\\s+thing(\\\\s+\\\\w+)?)?\\\\s*$', re.I)\n", (5409, 5454), False, 'import re\n'), ((5653, 5698), 're.compile', 're.compile', (['"""\\\\s*thing\\\\s+(\\\\w+)?\\\\s*$"""', 're.I'], {}), "('\\\\s*thing\\\\s+(\\\\w+)?\\\\s*$', re.I)\n", (5663, 5698), False, 'import re\n')]
|
import curtsies.events as ev
import sys
DELIMITERS = ' .'
WHITESPACE = ' '
def print_console(txt, npadding=0, newline=False, flush=True):
    """
    Prints txt without newline, cursor positioned at the end.
    :param txt: The text to print
    :param npadding: Number of trailing spaces appended; used to overwrite
        leftover characters of a previously printed, longer line
    :param newline: If True, a newline character will be appended
    :param flush: If True, stdout is flushed so the text appears immediately
    :return:
    """
    # '\r' returns to the start of the line so txt overwrites it in place.
    sys.stdout.write('\r{0}{1}'.format(txt, WHITESPACE * npadding))
    if newline:
        sys.stdout.write('\n')
    if flush:
        sys.stdout.flush()
def move_next_line():
    """Move the console cursor to the beginning of the next line."""
    stream = sys.stdout
    stream.write('\n')
    stream.flush()
def find_next_in_list(lst, what, start=0, reverse=False):
    """
    Finds the next occurrence of what in lst starting at start.
    :param lst: The list to search
    :param what: The item to find, should be an iterable
    :param start: The starting position in the list
    :param reverse: Set this to True in order to traverse the list towards 0
    :return: False if no occurrence found, index otherwise
    """
    # Out-of-bounds start positions never match anything.
    if not 0 <= start < len(lst):
        return False
    if reverse:
        indices = range(start, -1, -1)
    else:
        indices = range(start, len(lst))
    for idx in indices:
        if lst[idx] in what:
            return idx
    return False
class InputHandler:
    """
    Console line editor: maintains the current input buffer and cursor
    position, dispatches curtsies keystrokes/events to editing actions,
    and supports history navigation, highlighting and tab completion.
    """
    def __init__(self, history):
        """
        :param history: history object used for up/down navigation and for
            committing finished lines
        """
        self._input = []        # current line as a list of single characters
        self._position = 0      # cursor index into self._input
        self._handlers = {}     # keystroke -> list of registered callbacks
        self._highlight = None  # optional callable colorizing the line before drawing
        self._max_length = 0    # longest line drawn so far (used for padding)
        self._complete = None   # optional tab-completion callable
        self._history = history
        self._prefix = ''       # prompt text drawn before the input line
    def process_input(self, c):
        """
        Processes the input captured by curtsies.
        :param c: the input, either a curtsies keystroke or an event
        :return: False if program should stop, the current line otherwise
        """
        if isinstance(c, ev.Event):
            return self._process_event(c)
        else:
            return self._process_char(c)
    def register_handler(self, key, handler):
        """
        Registers a callback to be invoked whenever *key* is processed.
        :param key: the curtsies keystroke string to react to
        :param handler: callable receiving the current (or just-finished) line
        """
        if key not in self._handlers:
            self._handlers[key] = []
        self._handlers[key].append(handler)
    def set_highlighter(self, highlight):
        """Sets the callable used to highlight the line before drawing."""
        self._highlight = highlight
    def set_completer(self, complete):
        """Sets the callable used for tab completion."""
        self._complete = complete
    def set_prefix(self, prefix):
        """Sets the prompt prefix drawn before the input line."""
        self._prefix = prefix
    def _process_char(self, c):
        """
        Processes keystrokes internally, may call handlers as well.
        :param c: The curtsies keystroke
        :return: The current line
        """
        # Single characters are literal input; everything else is a named key.
        if len(c) == 1:
            self._insert(c)
        elif c == '<LEFT>':
            self._left()
        elif c == '<RIGHT>':
            self._right()
        elif c == '<UP>':
            self._hist_up()
        elif c == '<DOWN>':
            self._hist_down()
        elif c == '<SPACE>':
            self._insert(' ')
        elif c == '<TAB>':
            # Tab tries completion first; a plain space is the fallback.
            if not self._tab_completion():
                self._insert(' ')
        elif c == '<BACKSPACE>':
            self._back()
        elif c == '<Ctrl-w>':
            self._delete_last_word()
        elif c == '<DELETE>':
            self._delete()
        elif c == '<HOME>' or c == '<Ctrl-a>':
            self._home()
        elif c == '<END>' or c == '<Ctrl-e>':
            self._end()
        elif c == '<Ctrl-u>':
            self._delete_before()
        elif c == '<Ctrl-k>':
            self._delete_after()
        elif c == '<Esc+f>':
            self._move_word_forwards()
        elif c == '<Esc+b>':
            self._move_word_backwards()
        elif c == '<Ctrl-r>':
            pass # history search mode
        elif c == '<ESC>':
            pass # history search mode
        elif c == '<Ctrl-j>':
            # Enter: commit the line and hand the finished line to handlers.
            old_line = self._newline()
            if c in self._handlers:
                for handler in self._handlers[c]:
                    handler(old_line)
        elif c == '<Ctrl-c>' or c == '<Ctrl-d>':
            return False
        # new lines are handled differently
        if c in self._handlers and c != '<Ctrl-j>':
            # call handlers if necessary
            for handler in self._handlers[c]:
                handler(self._curline())
        return self._curline()
    def _process_event(self, e):
        """
        Processes events internally.
        :param e: The event
        :return: False in case of SigInt, the input otherwise
        """
        if isinstance(e, ev.SigIntEvent):
            return False
        elif isinstance(e, ev.PasteEvent):
            # A paste is replayed as the sequence of its individual keystrokes.
            for c in e.events:
                self.process_input(c)
        return self._curline()
    def _line_changed(self):
        """Notifies the history object that the working line was edited."""
        self._history.edit(self._curline())
    def _hist_up(self):
        """
        Moves up in the history object.
        :return:
        """
        self._input = list(self._history.move_up())
        self._position = len(self._input)
        self.draw()
    def _hist_down(self):
        """
        Moves down in the history object.
        :return:
        """
        self._input = list(self._history.move_down())
        self._position = len(self._input)
        self.draw()
    def _curline(self):
        """
        Returns the current line.
        :return: current line
        """
        return ''.join(self._input)
    def _insert(self, c):
        """
        Inserts a character at current position, moves cursor forward and redraws.
        :param c: character
        :return:
        """
        if len(c) > 1:
            # only insert single characters
            for cc in c:
                self._insert(cc)
            return
        self._input.insert(self._position, c)
        self._position += 1
        self._line_changed()
        self.draw()
    def _left(self):
        """
        Moves cursor back and redraws.
        :return:
        """
        if self._position > 0:
            self._position -= 1
        self.draw()
    def _home(self):
        """
        Moves cursor home and redraws.
        :return:
        """
        self._position = 0
        self.draw()
    def _right(self):
        """
        Moves cursor forward and redraws.
        :return:
        """
        if self._position < len(self._input):
            self._position += 1
        self.draw()
    def _end(self):
        """
        Moves cursor to end and redraws.
        :return:
        """
        self._position = len(self._input)
        self.draw()
    def _move_word_forwards(self):
        """
        Moves cursor towards the next delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position+1)
        if next_del is False:
            self._end()
        else:
            self._position = next_del
            self.draw()
    def _move_word_backwards(self):
        """
        Moves cursor towards the next delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position-2, reverse=True)
        if next_del is False:
            self._home()
        else:
            # Stop just after the delimiter, i.e. at the word's first char.
            self._position = next_del + 1
            self.draw()
    def _delete_last_word(self):
        """
        Deletes until last delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position - 2, reverse=True)
        if next_del is False:
            next_del = 0
        else:
            next_del += 1
        del self._input[next_del:self._position]
        self._position = next_del
        self._line_changed()
        self.draw()
    def _back(self):
        """
        Removes element in front of cursor, moves cursor back and redraws.
        :return:
        """
        if self._position > 0:
            del self._input[self._position - 1]
            self._position -= 1
            self._line_changed()
        self.draw()
    def _delete(self):
        """
        Removes element behind cursor and redraws.
        :return:
        """
        if self._position < len(self._input):
            del self._input[self._position]
            self._line_changed()
        self.draw()
    def _delete_before(self):
        """
        Deletes everything in front of the cursor.
        :return:
        """
        self._input = self._input[self._position:]
        self._position = 0
        self._line_changed()
        self.draw()
    def _delete_after(self):
        """
        Deletes everything after the cursor.
        :return:
        """
        self._input = self._input[:self._position]
        self._line_changed()
        self.draw()
    def _newline(self):
        """
        Creates a new line and returns the old one.
        :return: old line
        """
        self._history.commit()
        old_line = self._curline()
        self._position = 0
        self._max_length = 0
        self._input = []
        move_next_line()
        return old_line
    def draw(self):
        """
        Draws input with cursor at right position.
        :return:
        """
        whole_line = self._curline()
        cursor_line = whole_line[:self._position]
        # add prefix
        whole_line = self._prefix + whole_line
        cursor_line = self._prefix + cursor_line
        self._max_length = max(len(whole_line), self._max_length)
        # highlight texts
        if self._highlight is not None:
            whole_line_h = self._highlight(whole_line).strip()
            cursor_line_h = self._highlight(cursor_line).strip()
        else:
            whole_line_h = whole_line
            cursor_line_h = cursor_line
        # first print whole line
        # (padded so leftovers of a previously longer line are erased)
        npadding = max(0, self._max_length - len(whole_line))
        print_console(whole_line_h, npadding=npadding, flush=False)
        # then print for cursor position
        print_console(cursor_line_h)
    def _tab_completion(self):
        """
        Calls completion function. If possible insert completion.
        :return: True if completion was successful
        """
        if self._complete is not None:
            # try completing
            completion = self._complete(self._curline()[:self._position])
            if completion is not False:
                # if successful, insert the completion
                for c in completion:
                    self._insert(c)
                return True
        return False
|
[
"sys.stdout.flush",
"sys.stdout.write"
] |
[((589, 611), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (605, 611), False, 'import sys\n'), ((616, 634), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (632, 634), False, 'import sys\n'), ((497, 519), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (513, 519), False, 'import sys\n'), ((542, 560), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (558, 560), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows Registry Module 'module.reg'
:platform: Windows
:maturity: develop
:codeauthor: <NAME> <https://github.com/damon-atkins>
versionadded:: 2016.11.0
'''
# Import Python future libs
from __future__ import absolute_import
from __future__ import unicode_literals
# Import Python Libs
import sys
import time
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.modules.reg as win_mod_reg
from salt.ext import six
try:
    # winreg only imports on Windows; its absence drives the skipIf below.
    from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module
    NO_WINDOWS_MODULES = False
except ImportError:
    NO_WINDOWS_MODULES = True
PY2 = sys.version_info[0] == 2
# The following used to make sure we are not
# testing already existing data
# Note strftime returns a str, so we need to make it unicode
TIMEINT = int(time.time())
if PY2:
    TIME_INT_UNICODE = six.text_type(TIMEINT)
    TIMESTR = time.strftime('%X %x %Z').decode('utf-8')
else:
    TIMESTR = time.strftime('%X %x %Z')
    TIME_INT_UNICODE = str(TIMEINT) # pylint: disable=R0204
# we do not need to prefix this with u, as we are
# using from __future__ import unicode_literals
# Unique (timestamped) test fixtures so the tests never collide with
# pre-existing registry data.
UNICODETEST_WITH_SIGNS = 'Testing Unicode \N{COPYRIGHT SIGN},\N{TRADE MARK SIGN},\N{REGISTERED SIGN} '+TIMESTR
UNICODETEST_WITHOUT_SIGNS = 'Testing Unicode'+TIMESTR
UNICODE_TEST_KEY = 'UnicodeKey \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
UNICODE_TEST_KEY_DEL = 'Delete Me \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
@skipIf(NO_WINDOWS_MODULES, 'requires Windows OS to test Windows registry')
class RegWinTestCase(TestCase):
    '''
    Test cases for salt.modules.reg
    '''
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_read_reg_plain(self):
        '''
        Test - Read a registry value from a subkey using Python 2 Strings or
        Python 3 Bytes
        '''
        # NOTE(review): the skip message is misleading — this test is skipped
        # on Python 3, i.e. it is only valid for Python 2.
        if not PY2:
            self.skipTest('Invalid for Python Version 2')
        subkey = b'Software\\Microsoft\\Windows NT\\CurrentVersion'
        vname = b'PathName'
        handle = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            subkey,
            0,
            _winreg.KEY_ALL_ACCESS
            )
        (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
        _winreg.CloseKey(handle)
        test_vdata = win_mod_reg.read_value(b'HKEY_LOCAL_MACHINE', subkey, vname)[b'vdata']
        self.assertEqual(
            test_vdata, current_vdata)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_read_reg_unicode(self):
        '''
        Test - Read a registry value from a subkey using Python 2 Unicode
        or Python 3 Str i.e. Unicode
        '''
        subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
        vname = 'PathName'
        handle = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            subkey,
            0,
            _winreg.KEY_ALL_ACCESS
            )
        (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
        _winreg.CloseKey(handle)
        test_vdata = win_mod_reg.read_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname)['vdata']
        self.assertEqual(test_vdata, current_vdata)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_keys_fail(self):
        '''
        Test - Read list the keys under a subkey which does not exist.
        '''
        subkey = 'ThisIsJunkItDoesNotExistIhope'
        test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
        # returns a tuple with first item false, and second item a reason
        test = isinstance(test_list, tuple) and (not test_list[0])
        self.assertTrue(test)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_keys(self):
        '''
        Test - Read list the keys under a subkey
        '''
        subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
        test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
        test = len(test_list) > 5 # Their should be a lot more than 5 items
        self.assertTrue(test)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_values_fail(self):
        '''
        Test - List the values under a subkey which does not exist.
        '''
        subkey = 'ThisIsJunkItDoesNotExistIhope'
        test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
        # returns a tuple with first item false, and second item a reason
        test = isinstance(test_list, tuple) and (not test_list[0])
        self.assertTrue(test)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_values(self):
        '''
        Test - List the values under a subkey.
        '''
        subkey = r'Software\Microsoft\Windows NT\CurrentVersion'
        test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
        test = len(test_list) > 5 # There should be a lot more than 5 items
        self.assertTrue(test)
    # Not considering this destructive as it's writing to a private space
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_set_value_unicode(self):
        '''
        Test - set a registry plain text subkey name to a unicode string value
        '''
        vname = 'TestUniccodeString'
        subkey = 'Software\\SaltStackTest'
        test1_success = False
        test2_success = False
        test1_success = win_mod_reg.set_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname,
            UNICODETEST_WITH_SIGNS
            )
        # Now use _winreg direct to see if it worked as expected
        if test1_success:
            handle = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
                )
            (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
            _winreg.CloseKey(handle)
            test2_success = (current_vdata == UNICODETEST_WITH_SIGNS)
        self.assertTrue(test1_success and test2_success)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_set_value_unicode_key(self):
        '''
        Test - set a registry Unicode subkey name with unicode characters within
        to an integer
        '''
        test_success = win_mod_reg.set_value(
            'HKEY_LOCAL_MACHINE',
            'Software\\SaltStackTest',
            UNICODE_TEST_KEY,
            TIMEINT,
            'REG_DWORD'
            )
        self.assertTrue(test_success)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_del_value(self):
        '''
        Test - Create Directly and Delete with salt a registry value
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted'
        # Create the value directly via winreg, then delete it via salt.
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
                )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15) # delays for 15 seconds
        test_success = win_mod_reg.delete_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname
            )
        self.assertTrue(test_success)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_del_key_recursive_user(self):
        '''
        Test - Create directly key/value pair and Delete recursively with salt
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted recursive'
        # Create the value directly via winreg, then delete the whole key
        # recursively via salt.
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_CURRENT_USER,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
                )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_CURRENT_USER,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15) # delays for 15 seconds so you can run regedit & watch it happen
        test_success = win_mod_reg.delete_key_recursive('HKEY_CURRENT_USER', subkey)
        self.assertTrue(test_success)
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    @destructiveTest
    def test_del_key_recursive_machine(self):
        '''
        This is a DESTRUCTIVE TEST it creates a new registry entry.
        And then destroys the registry entry recursively , however it is completed in its own space
        within the registry. We mark this as destructiveTest as it has the potential
        to destroy a machine if salt reg code has a large error in it.
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted recursive'
        # Create the value directly via winreg, then delete the whole key
        # recursively via salt.
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
                )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
                )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15) # delays for 15 seconds so you can run regedit and watch it happen
        test_success = win_mod_reg.delete_key_recursive('HKEY_LOCAL_MACHINE', subkey)
        self.assertTrue(test_success)
# pylint: disable=W0511
# TODO: Test other hives, other than HKEY_LOCAL_MACHINE and HKEY_CURRENT_USER
|
[
"salt.ext.six.moves.winreg.QueryValueEx",
"salt.modules.reg.read_value",
"salt.ext.six.moves.winreg.CreateKeyEx",
"salt.modules.reg.delete_value",
"salt.modules.reg.set_value",
"salt.ext.six.moves.winreg.CloseKey",
"time.strftime",
"salt.ext.six.moves.winreg.OpenKey",
"salt.modules.reg.list_keys",
"sys.platform.startswith",
"salt.modules.reg.list_values",
"salt.ext.six.text_type",
"time.time",
"salt.ext.six.moves.winreg.SetValueEx",
"tests.support.unit.skipIf",
"salt.modules.reg.delete_key_recursive"
] |
[((1607, 1681), 'tests.support.unit.skipIf', 'skipIf', (['NO_WINDOWS_MODULES', '"""requires Windows OS to test Windows registry"""'], {}), "(NO_WINDOWS_MODULES, 'requires Windows OS to test Windows registry')\n", (1613, 1681), False, 'from tests.support.unit import TestCase, skipIf\n'), ((965, 976), 'time.time', 'time.time', ([], {}), '()\n', (974, 976), False, 'import time\n'), ((1010, 1032), 'salt.ext.six.text_type', 'six.text_type', (['TIMEINT'], {}), '(TIMEINT)\n', (1023, 1032), False, 'from salt.ext import six\n'), ((1109, 1134), 'time.strftime', 'time.strftime', (['"""%X %x %Z"""'], {}), "('%X %x %Z')\n", (1122, 1134), False, 'import time\n'), ((2189, 2267), 'salt.ext.six.moves.winreg.OpenKey', '_winreg.OpenKey', (['_winreg.HKEY_LOCAL_MACHINE', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_LOCAL_MACHINE, subkey, 0, _winreg.KEY_ALL_ACCESS)\n', (2204, 2267), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((2417, 2452), 'salt.ext.six.moves.winreg.QueryValueEx', '_winreg.QueryValueEx', (['handle', 'vname'], {}), '(handle, vname)\n', (2437, 2452), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((2461, 2485), 'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (2477, 2485), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((2999, 3077), 'salt.ext.six.moves.winreg.OpenKey', '_winreg.OpenKey', (['_winreg.HKEY_LOCAL_MACHINE', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_LOCAL_MACHINE, subkey, 0, _winreg.KEY_ALL_ACCESS)\n', (3014, 3077), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((3227, 3262), 'salt.ext.six.moves.winreg.QueryValueEx', '_winreg.QueryValueEx', (['handle', 'vname'], {}), '(handle, vname)\n', (3247, 3262), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((3271, 3295), 'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (3287, 3295), True, 'from salt.ext.six.moves import 
winreg as _winreg\n'), ((3783, 3834), 'salt.modules.reg.list_keys', 'win_mod_reg.list_keys', (['"""HKEY_LOCAL_MACHINE"""', 'subkey'], {}), "('HKEY_LOCAL_MACHINE', subkey)\n", (3804, 3834), True, 'import salt.modules.reg as win_mod_reg\n'), ((4268, 4319), 'salt.modules.reg.list_keys', 'win_mod_reg.list_keys', (['"""HKEY_LOCAL_MACHINE"""', 'subkey'], {}), "('HKEY_LOCAL_MACHINE', subkey)\n", (4289, 4319), True, 'import salt.modules.reg as win_mod_reg\n'), ((4697, 4750), 'salt.modules.reg.list_values', 'win_mod_reg.list_values', (['"""HKEY_LOCAL_MACHINE"""', 'subkey'], {}), "('HKEY_LOCAL_MACHINE', subkey)\n", (4720, 4750), True, 'import salt.modules.reg as win_mod_reg\n'), ((5182, 5235), 'salt.modules.reg.list_values', 'win_mod_reg.list_values', (['"""HKEY_LOCAL_MACHINE"""', 'subkey'], {}), "('HKEY_LOCAL_MACHINE', subkey)\n", (5205, 5235), True, 'import salt.modules.reg as win_mod_reg\n'), ((5793, 5879), 'salt.modules.reg.set_value', 'win_mod_reg.set_value', (['"""HKEY_LOCAL_MACHINE"""', 'subkey', 'vname', 'UNICODETEST_WITH_SIGNS'], {}), "('HKEY_LOCAL_MACHINE', subkey, vname,\n UNICODETEST_WITH_SIGNS)\n", (5814, 5879), True, 'import salt.modules.reg as win_mod_reg\n'), ((6841, 6955), 'salt.modules.reg.set_value', 'win_mod_reg.set_value', (['"""HKEY_LOCAL_MACHINE"""', '"""Software\\\\SaltStackTest"""', 'UNICODE_TEST_KEY', 'TIMEINT', '"""REG_DWORD"""'], {}), "('HKEY_LOCAL_MACHINE', 'Software\\\\SaltStackTest',\n UNICODE_TEST_KEY, TIMEINT, 'REG_DWORD')\n", (6862, 6955), True, 'import salt.modules.reg as win_mod_reg\n'), ((8224, 8248), 'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (8240, 8248), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((8321, 8382), 'salt.modules.reg.delete_value', 'win_mod_reg.delete_value', (['"""HKEY_LOCAL_MACHINE"""', 'subkey', 'vname'], {}), "('HKEY_LOCAL_MACHINE', subkey, vname)\n", (8345, 8382), True, 'import salt.modules.reg as win_mod_reg\n'), ((9636, 9660), 
'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (9652, 9660), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((9774, 9835), 'salt.modules.reg.delete_key_recursive', 'win_mod_reg.delete_key_recursive', (['"""HKEY_CURRENT_USER"""', 'subkey'], {}), "('HKEY_CURRENT_USER', subkey)\n", (9806, 9835), True, 'import salt.modules.reg as win_mod_reg\n'), ((11310, 11334), 'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (11326, 11334), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((11450, 11512), 'salt.modules.reg.delete_key_recursive', 'win_mod_reg.delete_key_recursive', (['"""HKEY_LOCAL_MACHINE"""', 'subkey'], {}), "('HKEY_LOCAL_MACHINE', subkey)\n", (11482, 11512), True, 'import salt.modules.reg as win_mod_reg\n'), ((1047, 1072), 'time.strftime', 'time.strftime', (['"""%X %x %Z"""'], {}), "('%X %x %Z')\n", (1060, 1072), False, 'import time\n'), ((2508, 2568), 'salt.modules.reg.read_value', 'win_mod_reg.read_value', (["b'HKEY_LOCAL_MACHINE'", 'subkey', 'vname'], {}), "(b'HKEY_LOCAL_MACHINE', subkey, vname)\n", (2530, 2568), True, 'import salt.modules.reg as win_mod_reg\n'), ((1783, 1813), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (1806, 1813), False, 'import sys\n'), ((3318, 3377), 'salt.modules.reg.read_value', 'win_mod_reg.read_value', (['"""HKEY_LOCAL_MACHINE"""', 'subkey', 'vname'], {}), "('HKEY_LOCAL_MACHINE', subkey, vname)\n", (3340, 3377), True, 'import salt.modules.reg as win_mod_reg\n'), ((2661, 2691), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (2684, 2691), False, 'import sys\n'), ((3529, 3559), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (3552, 3559), False, 'import sys\n'), ((4023, 4053), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (4046, 4053), False, 'import sys\n'), 
((4444, 4474), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (4467, 4474), False, 'import sys\n'), ((4939, 4969), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (4962, 4969), False, 'import sys\n'), ((6130, 6208), 'salt.ext.six.moves.winreg.OpenKey', '_winreg.OpenKey', (['_winreg.HKEY_LOCAL_MACHINE', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_LOCAL_MACHINE, subkey, 0, _winreg.KEY_ALL_ACCESS)\n', (6145, 6208), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((6382, 6417), 'salt.ext.six.moves.winreg.QueryValueEx', '_winreg.QueryValueEx', (['handle', 'vname'], {}), '(handle, vname)\n', (6402, 6417), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((6430, 6454), 'salt.ext.six.moves.winreg.CloseKey', '_winreg.CloseKey', (['handle'], {}), '(handle)\n', (6446, 6454), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((5433, 5463), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (5456, 5463), False, 'import sys\n'), ((6595, 6625), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (6618, 6625), False, 'import sys\n'), ((7939, 8026), 'salt.ext.six.moves.winreg.CreateKeyEx', '_winreg.CreateKeyEx', (['_winreg.HKEY_LOCAL_MACHINE', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_LOCAL_MACHINE, subkey, 0, _winreg.\n KEY_ALL_ACCESS)\n', (7958, 8026), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((8156, 8215), 'salt.ext.six.moves.winreg.SetValueEx', '_winreg.SetValueEx', (['handle', 'vname', '(0)', '_winreg.REG_SZ', 'vdata'], {}), '(handle, vname, 0, _winreg.REG_SZ, vdata)\n', (8174, 8215), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((7153, 7183), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (7176, 7183), False, 'import sys\n'), ((9352, 9438), 
'salt.ext.six.moves.winreg.CreateKeyEx', '_winreg.CreateKeyEx', (['_winreg.HKEY_CURRENT_USER', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_CURRENT_USER, subkey, 0, _winreg.\n KEY_ALL_ACCESS)\n', (9371, 9438), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((9568, 9627), 'salt.ext.six.moves.winreg.SetValueEx', '_winreg.SetValueEx', (['handle', 'vname', '(0)', '_winreg.REG_SZ', 'vdata'], {}), '(handle, vname, 0, _winreg.REG_SZ, vdata)\n', (9586, 9627), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((8536, 8566), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (8559, 8566), False, 'import sys\n'), ((11025, 11112), 'salt.ext.six.moves.winreg.CreateKeyEx', '_winreg.CreateKeyEx', (['_winreg.HKEY_LOCAL_MACHINE', 'subkey', '(0)', '_winreg.KEY_ALL_ACCESS'], {}), '(_winreg.HKEY_LOCAL_MACHINE, subkey, 0, _winreg.\n KEY_ALL_ACCESS)\n', (11044, 11112), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((11242, 11301), 'salt.ext.six.moves.winreg.SetValueEx', '_winreg.SetValueEx', (['handle', 'vname', '(0)', '_winreg.REG_SZ', 'vdata'], {}), '(handle, vname, 0, _winreg.REG_SZ, vdata)\n', (11260, 11301), True, 'from salt.ext.six.moves import winreg as _winreg\n'), ((9891, 9921), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (9914, 9921), False, 'import sys\n')]
|
from blaze import compute, resource, symbol, discover
from blaze.utils import example
# One-element mutable cell shared with the test below; records whether
# ``mymap`` was ever invoked.
flag = [False]


def mymap(func, *args):
    """Drop-in replacement for ``map`` that records that it was called."""
    flag[0] = True
    return map(func, *args)
def test_map_called_on_resource_star():
    """Passing ``map=`` to ``compute`` must route mapping through it.

    The custom ``mymap`` flips the module-level ``flag`` when invoked, so
    observing the flag proves the user-supplied map was actually used.
    """
    res = resource(example('accounts_*.csv'))
    sym = symbol('s', discover(res))

    flag[0] = False
    baseline = compute(sym.count(), res)
    instrumented = compute(sym.count(), res, map=mymap)

    # Same answer either way, and our map implementation was exercised.
    assert baseline == instrumented
    assert flag[0]
|
[
"blaze.utils.example",
"blaze.discover"
] |
[((234, 259), 'blaze.utils.example', 'example', (['"""accounts_*.csv"""'], {}), "('accounts_*.csv')\n", (241, 259), False, 'from blaze.utils import example\n'), ((281, 292), 'blaze.discover', 'discover', (['r'], {}), '(r)\n', (289, 292), False, 'from blaze import compute, resource, symbol, discover\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mwtab.mwschema
~~~~~~~~~~~~~~
This module provides schema definitions for different sections of the
``mwTab`` Metabolomics Workbench format.
"""
import sys
from schema import Schema, Optional, Or
if sys.version_info.major == 2:
str = unicode
# Top "METABOLOMICS WORKBENCH" section: file-level bookkeeping fields.
_metabolomics_workbench_spec = {
    "VERSION": str,
    "CREATED_ON": str,
}
_metabolomics_workbench_spec.update(
    (Optional(field), str)
    for field in ("STUDY_ID", "ANALYSIS_ID", "PROJECT_ID", "HEADER", "DATATRACK_ID")
)
metabolomics_workbench_schema = Schema(_metabolomics_workbench_spec)
# "PROJECT" section: project title/summary plus submitter contact details.
_project_spec = {
    "PROJECT_TITLE": str,
    "PROJECT_SUMMARY": str,
    "INSTITUTE": str,
    "LAST_NAME": str,
    "FIRST_NAME": str,
    "ADDRESS": str,
    "EMAIL": str,
    "PHONE": str,
}
_project_spec.update(
    (Optional(field), str)
    for field in (
        "PROJECT_TYPE", "DEPARTMENT", "LABORATORY", "FUNDING_SOURCE",
        "PROJECT_COMMENTS", "PUBLICATIONS", "CONTRIBUTORS", "DOI",
    )
)
project_schema = Schema(_project_spec)
# "STUDY" section: study title/summary, contact details, and cohort counts.
_study_spec = {
    "STUDY_TITLE": str,
    "STUDY_SUMMARY": str,
    "INSTITUTE": str,
    "LAST_NAME": str,
    "FIRST_NAME": str,
    "ADDRESS": str,
    "EMAIL": str,
    "PHONE": str,
}
# PUBLICATIONS and SUBMIT_DATE are assumed optional (not in the written spec).
_study_spec.update(
    (Optional(field), str)
    for field in (
        "STUDY_TYPE", "DEPARTMENT", "LABORATORY", "NUM_GROUPS",
        "TOTAL_SUBJECTS", "NUM_MALES", "NUM_FEMALES", "STUDY_COMMENTS",
        "PUBLICATIONS", "SUBMIT_DATE",
    )
)
study_schema = Schema(_study_spec)
# "SUBJECT" section: species plus optional human/animal/cell-line metadata.
_subject_spec = {
    "SUBJECT_TYPE": str,
    "SUBJECT_SPECIES": str,
}
_subject_spec.update(
    (Optional(field), str)
    for field in (
        "TAXONOMY_ID", "GENOTYPE_STRAIN", "AGE_OR_AGE_RANGE",
        "WEIGHT_OR_WEIGHT_RANGE", "HEIGHT_OR_HEIGHT_RANGE", "GENDER",
        "HUMAN_RACE", "HUMAN_ETHNICITY", "HUMAN_TRIAL_TYPE",
        "HUMAN_LIFESTYLE_FACTORS", "HUMAN_MEDICATIONS",
        "HUMAN_PRESCRIPTION_OTC", "HUMAN_SMOKING_STATUS",
        "HUMAN_ALCOHOL_DRUG_USE", "HUMAN_NUTRITION",
        "HUMAN_INCLUSION_CRITERIA", "HUMAN_EXCLUSION_CRITERIA",
        "ANIMAL_ANIMAL_SUPPLIER", "ANIMAL_HOUSING", "ANIMAL_LIGHT_CYCLE",
        "ANIMAL_FEED", "ANIMAL_WATER", "ANIMAL_INCLUSION_CRITERIA",
        "CELL_BIOSOURCE_OR_SUPPLIER", "CELL_STRAIN_DETAILS",
        "SUBJECT_COMMENTS", "CELL_PRIMARY_IMMORTALIZED",
        "CELL_PASSAGE_NUMBER", "CELL_COUNTS", "SPECIES_GROUP",
    )
)
subject_schema = Schema(_subject_spec)
# "SUBJECT_SAMPLE_FACTORS" section: a list of per-sample factor records.
# "Additional sample data", when present, maps arbitrary string keys to
# string values (RAW_FILE_NAME being the one explicitly named key).
_additional_sample_data_spec = {
    Optional("RAW_FILE_NAME"): str,
    Optional(str): str,
}
subject_sample_factors_schema = Schema(
    [
        {
            "Subject ID": str,
            "Sample ID": str,
            "Factors": dict,
            Optional("Additional sample data"): _additional_sample_data_spec,
        }
    ]
)
# "COLLECTION" section: how and where samples were collected and stored.
_collection_spec = {
    "COLLECTION_SUMMARY": str,
}
# SAMPLE_TYPE is assumed optional: many deposited files omit it.
_collection_spec.update(
    (Optional(field), str)
    for field in (
        "COLLECTION_PROTOCOL_ID", "COLLECTION_PROTOCOL_FILENAME",
        "COLLECTION_PROTOCOL_COMMENTS", "SAMPLE_TYPE", "COLLECTION_METHOD",
        "COLLECTION_LOCATION", "COLLECTION_FREQUENCY", "COLLECTION_DURATION",
        "COLLECTION_TIME", "VOLUMEORAMOUNT_COLLECTED", "STORAGE_CONDITIONS",
        "COLLECTION_VIALS", "STORAGE_VIALS", "COLLECTION_TUBE_TEMP",
        "ADDITIVES", "BLOOD_SERUM_OR_PLASMA", "TISSUE_CELL_IDENTIFICATION",
        "TISSUE_CELL_QUANTITY_TAKEN",
    )
)
collection_schema = Schema(_collection_spec)
# "TREATMENT" section: treatment protocol plus optional animal/human/cell/
# plant experimental-condition fields.
_treatment_spec = {
    "TREATMENT_SUMMARY": str,
}
_treatment_spec.update(
    (Optional(field), str)
    for field in (
        "TREATMENT_PROTOCOL_ID", "TREATMENT_PROTOCOL_FILENAME",
        "TREATMENT_PROTOCOL_COMMENTS", "TREATMENT", "TREATMENT_COMPOUND",
        "TREATMENT_ROUTE", "TREATMENT_DOSE", "TREATMENT_DOSEVOLUME",
        "TREATMENT_DOSEDURATION", "TREATMENT_VEHICLE",
        "ANIMAL_VET_TREATMENTS", "ANIMAL_ANESTHESIA",
        "ANIMAL_ACCLIMATION_DURATION", "ANIMAL_FASTING",
        "ANIMAL_ENDP_EUTHANASIA", "ANIMAL_ENDP_TISSUE_COLL_LIST",
        "ANIMAL_ENDP_TISSUE_PROC_METHOD", "ANIMAL_ENDP_CLINICAL_SIGNS",
        "HUMAN_FASTING", "HUMAN_ENDP_CLINICAL_SIGNS", "CELL_STORAGE",
        "CELL_GROWTH_CONTAINER", "CELL_GROWTH_CONFIG", "CELL_GROWTH_RATE",
        "CELL_INOC_PROC", "CELL_MEDIA", "CELL_ENVIR_COND", "CELL_HARVESTING",
        "PLANT_GROWTH_SUPPORT", "PLANT_GROWTH_LOCATION", "PLANT_PLOT_DESIGN",
        "PLANT_LIGHT_PERIOD", "PLANT_HUMIDITY", "PLANT_TEMP",
        "PLANT_WATERING_REGIME", "PLANT_NUTRITIONAL_REGIME",
        "PLANT_ESTAB_DATE", "PLANT_HARVEST_DATE", "PLANT_GROWTH_STAGE",
        "PLANT_METAB_QUENCH_METHOD", "PLANT_HARVEST_METHOD", "PLANT_STORAGE",
        "CELL_PCT_CONFLUENCE", "CELL_MEDIA_LASTCHANGED",
    )
)
treatment_schema = Schema(_treatment_spec)
# "SAMPLEPREP" section: extraction/processing steps applied to samples.
_sampleprep_spec = {
    "SAMPLEPREP_SUMMARY": str,
}
_sampleprep_spec.update(
    (Optional(field), str)
    for field in (
        "SAMPLEPREP_PROTOCOL_ID", "SAMPLEPREP_PROTOCOL_FILENAME",
        "SAMPLEPREP_PROTOCOL_COMMENTS", "PROCESSING_METHOD",
        "PROCESSING_STORAGE_CONDITIONS", "EXTRACTION_METHOD",
        "EXTRACT_CONCENTRATION_DILUTION", "EXTRACT_ENRICHMENT",
        "EXTRACT_CLEANUP", "EXTRACT_STORAGE", "SAMPLE_RESUSPENSION",
        "SAMPLE_DERIVATIZATION", "SAMPLE_SPIKING", "ORGAN",
        "ORGAN_SPECIFICATION", "CELL_TYPE", "SUBCELLULAR_LOCATION",
    )
)
sampleprep_schema = Schema(_sampleprep_spec)
# "CHROMATOGRAPHY" section: instrument/column identity (required) plus a
# long tail of optional acquisition parameters.
_chromatography_spec = {
    "CHROMATOGRAPHY_TYPE": str,
    "INSTRUMENT_NAME": str,
    "COLUMN_NAME": str,
}
_chromatography_spec.update(
    (Optional(field), str)
    for field in (
        "CHROMATOGRAPHY_SUMMARY", "FLOW_GRADIENT", "FLOW_RATE",
        "COLUMN_TEMPERATURE", "METHODS_FILENAME", "SOLVENT_A", "SOLVENT_B",
        "METHODS_ID", "COLUMN_PRESSURE", "INJECTION_TEMPERATURE",
        "INTERNAL_STANDARD", "INTERNAL_STANDARD_MT", "RETENTION_INDEX",
        "RETENTION_TIME", "SAMPLE_INJECTION", "SAMPLING_CONE",
        "ANALYTICAL_TIME", "CAPILLARY_VOLTAGE", "MIGRATION_TIME",
        "OVEN_TEMPERATURE", "PRECONDITIONING", "RUNNING_BUFFER",
        "RUNNING_VOLTAGE", "SHEATH_LIQUID", "TIME_PROGRAM",
        "TRANSFERLINE_TEMPERATURE", "WASHING_BUFFER",
        "WEAK_WASH_SOLVENT_NAME", "WEAK_WASH_VOLUME",
        "STRONG_WASH_SOLVENT_NAME", "STRONG_WASH_VOLUME",
        "TARGET_SAMPLE_TEMPERATURE", "SAMPLE_LOOP_SIZE",
        "SAMPLE_SYRINGE_SIZE", "RANDOMIZATION_ORDER",
        "CHROMATOGRAPHY_COMMENTS",
    )
)
chromatography_schema = Schema(_chromatography_spec)
# "ANALYSIS" section: only the analysis type is required.
_analysis_spec = {
    "ANALYSIS_TYPE": str,
}
# Fields from ACQUISITION_ID onward are not in the written mwTab
# specification and are assumed optional.
_analysis_spec.update(
    (Optional(field), str)
    for field in (
        "LABORATORY_NAME", "OPERATOR_NAME", "DETECTOR_TYPE",
        "SOFTWARE_VERSION", "ACQUISITION_DATE", "ANALYSIS_PROTOCOL_FILE",
        "ACQUISITION_PARAMETERS_FILE", "PROCESSING_PARAMETERS_FILE",
        "DATA_FORMAT", "ACQUISITION_ID", "ACQUISITION_TIME",
        "ANALYSIS_COMMENTS", "ANALYSIS_DISPLAY", "INSTRUMENT_NAME",
        "INSTRUMENT_PARAMETERS_FILE", "NUM_FACTORS", "NUM_METABOLITES",
        "PROCESSED_FILE", "RANDOMIZATION_ORDER", "RAW_FILE",
    )
)
analysis_schema = Schema(_analysis_spec)
# "MS" section: mass-spectrometry instrument description.
_ms_spec = {
    "INSTRUMENT_NAME": str,
    "INSTRUMENT_TYPE": str,
    "MS_TYPE": str,
    "ION_MODE": str,
    # MS_COMMENTS was promoted to required.
    "MS_COMMENTS": str,
    # MS_RESULTS_FILE may be a bare filename or an already-parsed mapping.
    Optional("MS_RESULTS_FILE"): Or(str, dict),
}
_ms_spec.update(
    (Optional(field), str)
    for field in (
        "CAPILLARY_TEMPERATURE", "CAPILLARY_VOLTAGE", "COLLISION_ENERGY",
        "COLLISION_GAS", "DRY_GAS_FLOW", "DRY_GAS_TEMP", "FRAGMENT_VOLTAGE",
        "FRAGMENTATION_METHOD", "GAS_PRESSURE", "HELIUM_FLOW",
        "ION_SOURCE_TEMPERATURE", "ION_SPRAY_VOLTAGE", "IONIZATION",
        "IONIZATION_ENERGY", "IONIZATION_POTENTIAL", "MASS_ACCURACY",
        "PRECURSOR_TYPE", "REAGENT_GAS", "SOURCE_TEMPERATURE",
        "SPRAY_VOLTAGE", "ACTIVATION_PARAMETER", "ACTIVATION_TIME",
        "ATOM_GUN_CURRENT", "AUTOMATIC_GAIN_CONTROL", "BOMBARDMENT",
        "CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE", "CDL_TEMPERATURE", "DATAFORMAT",
        "DESOLVATION_GAS_FLOW", "DESOLVATION_TEMPERATURE",
        "INTERFACE_VOLTAGE", "IT_SIDE_OCTOPOLES_BIAS_VOLTAGE", "LASER",
        "MATRIX", "NEBULIZER", "OCTPOLE_VOLTAGE", "PROBE_TIP",
        "RESOLUTION_SETTING", "SAMPLE_DRIPPING", "SCAN_RANGE_MOVERZ",
        "SCANNING", "SCANNING_CYCLE", "SCANNING_RANGE", "SKIMMER_VOLTAGE",
        "TUBE_LENS_VOLTAGE",
    )
)
ms_schema = Schema(_ms_spec)
# "NM" (NMR) section: spectrometer description and acquisition parameters.
_nmr_spec = {
    "INSTRUMENT_NAME": str,
    "INSTRUMENT_TYPE": str,
    "NMR_EXPERIMENT_TYPE": str,
    "SPECTROMETER_FREQUENCY": str,
}
_nmr_spec.update(
    (Optional(field), str)
    for field in (
        "NMR_COMMENTS", "FIELD_FREQUENCY_LOCK", "STANDARD_CONCENTRATION",
        "NMR_PROBE", "NMR_SOLVENT", "NMR_TUBE_SIZE", "SHIMMING_METHOD",
        "PULSE_SEQUENCE", "WATER_SUPPRESSION", "PULSE_WIDTH", "POWER_LEVEL",
        "RECEIVER_GAIN", "OFFSET_FREQUENCY", "PRESATURATION_POWER_LEVEL",
        "CHEMICAL_SHIFT_REF_CPD", "TEMPERATURE", "NUMBER_OF_SCANS",
        "DUMMY_SCANS", "ACQUISITION_TIME", "RELAXATION_DELAY",
        "SPECTRAL_WIDTH", "NUM_DATA_POINTS_ACQUIRED", "REAL_DATA_POINTS",
        "LINE_BROADENING", "ZERO_FILLING", "APODIZATION",
        "BASELINE_CORRECTION_METHOD", "CHEMICAL_SHIFT_REF_STD",
        "BINNED_INCREMENT", "BINNED_DATA_NORMALIZATION_METHOD",
        "BINNED_DATA_PROTOCOL_FILE", "BINNED_DATA_CHEMICAL_SHIFT_RANGE",
        "BINNED_DATA_EXCLUDED_RANGE",
    )
)
nmr_schema = Schema(_nmr_spec)
# One data row: keyed by exactly one of "Metabolite" or "Bin range(ppm)";
# every further key (per-sample column) maps to a string value.
data_schema = Schema(
    [{
        Or("Metabolite", "Bin range(ppm)", only_one=True): str,
        Optional(str): str,
    }]
)
# Extended data rows: metabolite name and sample id are required; any
# number of additional string-valued columns may follow.
extended_schema = Schema(
    [{
        "Metabolite": str,
        "sample_id": str,
        Optional(str): str,
    }]
)
# Metabolite data block (shared by MS and NMR): units plus tabular rows.
_ms_metabolite_data_spec = {
    "Units": str,
    "Data": data_schema,
    "Metabolites": data_schema,
    Optional("Extended"): extended_schema,
}
ms_metabolite_data_schema = Schema(_ms_metabolite_data_spec)
# NMR binned data block: units plus the tabular binned rows.
nmr_binned_data_schema = Schema({"Units": str, "Data": data_schema})
# Dispatch table from mwTab section name to the schema that validates it.
# NOTE: the NMR instrument section is keyed "NM" in mwTab files, and
# "MS_METABOLITE_DATA" / "NMR_METABOLITE_DATA" share one schema.
section_schema_mapping = {
    "METABOLOMICS WORKBENCH": metabolomics_workbench_schema,
    "PROJECT": project_schema,
    "STUDY": study_schema,
    "ANALYSIS": analysis_schema,
    "SUBJECT": subject_schema,
    "SUBJECT_SAMPLE_FACTORS": subject_sample_factors_schema,
    "COLLECTION": collection_schema,
    "TREATMENT": treatment_schema,
    "SAMPLEPREP": sampleprep_schema,
    "CHROMATOGRAPHY": chromatography_schema,
    "MS": ms_schema,
    "NM": nmr_schema,
    "MS_METABOLITE_DATA": ms_metabolite_data_schema,
    "NMR_METABOLITE_DATA": ms_metabolite_data_schema,
    "NMR_BINNED_DATA": nmr_binned_data_schema,
}
|
[
"schema.Optional",
"schema.Schema",
"schema.Or"
] |
[((14489, 14532), 'schema.Schema', 'Schema', (["{'Units': str, 'Data': data_schema}"], {}), "({'Units': str, 'Data': data_schema})\n", (14495, 14532), False, 'from schema import Schema, Optional, Or\n'), ((409, 429), 'schema.Optional', 'Optional', (['"""STUDY_ID"""'], {}), "('STUDY_ID')\n", (417, 429), False, 'from schema import Schema, Optional, Or\n'), ((444, 467), 'schema.Optional', 'Optional', (['"""ANALYSIS_ID"""'], {}), "('ANALYSIS_ID')\n", (452, 467), False, 'from schema import Schema, Optional, Or\n'), ((482, 504), 'schema.Optional', 'Optional', (['"""PROJECT_ID"""'], {}), "('PROJECT_ID')\n", (490, 504), False, 'from schema import Schema, Optional, Or\n'), ((519, 537), 'schema.Optional', 'Optional', (['"""HEADER"""'], {}), "('HEADER')\n", (527, 537), False, 'from schema import Schema, Optional, Or\n'), ((552, 576), 'schema.Optional', 'Optional', (['"""DATATRACK_ID"""'], {}), "('DATATRACK_ID')\n", (560, 576), False, 'from schema import Schema, Optional, Or\n'), ((660, 684), 'schema.Optional', 'Optional', (['"""PROJECT_TYPE"""'], {}), "('PROJECT_TYPE')\n", (668, 684), False, 'from schema import Schema, Optional, Or\n'), ((757, 779), 'schema.Optional', 'Optional', (['"""DEPARTMENT"""'], {}), "('DEPARTMENT')\n", (765, 779), False, 'from schema import Schema, Optional, Or\n'), ((794, 816), 'schema.Optional', 'Optional', (['"""LABORATORY"""'], {}), "('LABORATORY')\n", (802, 816), False, 'from schema import Schema, Optional, Or\n'), ((952, 978), 'schema.Optional', 'Optional', (['"""FUNDING_SOURCE"""'], {}), "('FUNDING_SOURCE')\n", (960, 978), False, 'from schema import Schema, Optional, Or\n'), ((993, 1021), 'schema.Optional', 'Optional', (['"""PROJECT_COMMENTS"""'], {}), "('PROJECT_COMMENTS')\n", (1001, 1021), False, 'from schema import Schema, Optional, Or\n'), ((1036, 1060), 'schema.Optional', 'Optional', (['"""PUBLICATIONS"""'], {}), "('PUBLICATIONS')\n", (1044, 1060), False, 'from schema import Schema, Optional, Or\n'), ((1075, 1099), 'schema.Optional', 
'Optional', (['"""CONTRIBUTORS"""'], {}), "('CONTRIBUTORS')\n", (1083, 1099), False, 'from schema import Schema, Optional, Or\n'), ((1114, 1129), 'schema.Optional', 'Optional', (['"""DOI"""'], {}), "('DOI')\n", (1122, 1129), False, 'from schema import Schema, Optional, Or\n'), ((1209, 1231), 'schema.Optional', 'Optional', (['"""STUDY_TYPE"""'], {}), "('STUDY_TYPE')\n", (1217, 1231), False, 'from schema import Schema, Optional, Or\n'), ((1302, 1324), 'schema.Optional', 'Optional', (['"""DEPARTMENT"""'], {}), "('DEPARTMENT')\n", (1310, 1324), False, 'from schema import Schema, Optional, Or\n'), ((1339, 1361), 'schema.Optional', 'Optional', (['"""LABORATORY"""'], {}), "('LABORATORY')\n", (1347, 1361), False, 'from schema import Schema, Optional, Or\n'), ((1497, 1519), 'schema.Optional', 'Optional', (['"""NUM_GROUPS"""'], {}), "('NUM_GROUPS')\n", (1505, 1519), False, 'from schema import Schema, Optional, Or\n'), ((1534, 1560), 'schema.Optional', 'Optional', (['"""TOTAL_SUBJECTS"""'], {}), "('TOTAL_SUBJECTS')\n", (1542, 1560), False, 'from schema import Schema, Optional, Or\n'), ((1575, 1596), 'schema.Optional', 'Optional', (['"""NUM_MALES"""'], {}), "('NUM_MALES')\n", (1583, 1596), False, 'from schema import Schema, Optional, Or\n'), ((1611, 1634), 'schema.Optional', 'Optional', (['"""NUM_FEMALES"""'], {}), "('NUM_FEMALES')\n", (1619, 1634), False, 'from schema import Schema, Optional, Or\n'), ((1649, 1675), 'schema.Optional', 'Optional', (['"""STUDY_COMMENTS"""'], {}), "('STUDY_COMMENTS')\n", (1657, 1675), False, 'from schema import Schema, Optional, Or\n'), ((1690, 1714), 'schema.Optional', 'Optional', (['"""PUBLICATIONS"""'], {}), "('PUBLICATIONS')\n", (1698, 1714), False, 'from schema import Schema, Optional, Or\n'), ((1740, 1763), 'schema.Optional', 'Optional', (['"""SUBMIT_DATE"""'], {}), "('SUBMIT_DATE')\n", (1748, 1763), False, 'from schema import Schema, Optional, Or\n'), ((1889, 1912), 'schema.Optional', 'Optional', (['"""TAXONOMY_ID"""'], {}), 
"('TAXONOMY_ID')\n", (1897, 1912), False, 'from schema import Schema, Optional, Or\n'), ((1927, 1954), 'schema.Optional', 'Optional', (['"""GENOTYPE_STRAIN"""'], {}), "('GENOTYPE_STRAIN')\n", (1935, 1954), False, 'from schema import Schema, Optional, Or\n'), ((1969, 1997), 'schema.Optional', 'Optional', (['"""AGE_OR_AGE_RANGE"""'], {}), "('AGE_OR_AGE_RANGE')\n", (1977, 1997), False, 'from schema import Schema, Optional, Or\n'), ((2012, 2046), 'schema.Optional', 'Optional', (['"""WEIGHT_OR_WEIGHT_RANGE"""'], {}), "('WEIGHT_OR_WEIGHT_RANGE')\n", (2020, 2046), False, 'from schema import Schema, Optional, Or\n'), ((2061, 2095), 'schema.Optional', 'Optional', (['"""HEIGHT_OR_HEIGHT_RANGE"""'], {}), "('HEIGHT_OR_HEIGHT_RANGE')\n", (2069, 2095), False, 'from schema import Schema, Optional, Or\n'), ((2110, 2128), 'schema.Optional', 'Optional', (['"""GENDER"""'], {}), "('GENDER')\n", (2118, 2128), False, 'from schema import Schema, Optional, Or\n'), ((2143, 2165), 'schema.Optional', 'Optional', (['"""HUMAN_RACE"""'], {}), "('HUMAN_RACE')\n", (2151, 2165), False, 'from schema import Schema, Optional, Or\n'), ((2180, 2207), 'schema.Optional', 'Optional', (['"""HUMAN_ETHNICITY"""'], {}), "('HUMAN_ETHNICITY')\n", (2188, 2207), False, 'from schema import Schema, Optional, Or\n'), ((2222, 2250), 'schema.Optional', 'Optional', (['"""HUMAN_TRIAL_TYPE"""'], {}), "('HUMAN_TRIAL_TYPE')\n", (2230, 2250), False, 'from schema import Schema, Optional, Or\n'), ((2265, 2300), 'schema.Optional', 'Optional', (['"""HUMAN_LIFESTYLE_FACTORS"""'], {}), "('HUMAN_LIFESTYLE_FACTORS')\n", (2273, 2300), False, 'from schema import Schema, Optional, Or\n'), ((2315, 2344), 'schema.Optional', 'Optional', (['"""HUMAN_MEDICATIONS"""'], {}), "('HUMAN_MEDICATIONS')\n", (2323, 2344), False, 'from schema import Schema, Optional, Or\n'), ((2359, 2393), 'schema.Optional', 'Optional', (['"""HUMAN_PRESCRIPTION_OTC"""'], {}), "('HUMAN_PRESCRIPTION_OTC')\n", (2367, 2393), False, 'from schema import Schema, Optional, 
Or\n'), ((2408, 2440), 'schema.Optional', 'Optional', (['"""HUMAN_SMOKING_STATUS"""'], {}), "('HUMAN_SMOKING_STATUS')\n", (2416, 2440), False, 'from schema import Schema, Optional, Or\n'), ((2455, 2489), 'schema.Optional', 'Optional', (['"""HUMAN_ALCOHOL_DRUG_USE"""'], {}), "('HUMAN_ALCOHOL_DRUG_USE')\n", (2463, 2489), False, 'from schema import Schema, Optional, Or\n'), ((2504, 2531), 'schema.Optional', 'Optional', (['"""HUMAN_NUTRITION"""'], {}), "('HUMAN_NUTRITION')\n", (2512, 2531), False, 'from schema import Schema, Optional, Or\n'), ((2546, 2582), 'schema.Optional', 'Optional', (['"""HUMAN_INCLUSION_CRITERIA"""'], {}), "('HUMAN_INCLUSION_CRITERIA')\n", (2554, 2582), False, 'from schema import Schema, Optional, Or\n'), ((2597, 2633), 'schema.Optional', 'Optional', (['"""HUMAN_EXCLUSION_CRITERIA"""'], {}), "('HUMAN_EXCLUSION_CRITERIA')\n", (2605, 2633), False, 'from schema import Schema, Optional, Or\n'), ((2648, 2682), 'schema.Optional', 'Optional', (['"""ANIMAL_ANIMAL_SUPPLIER"""'], {}), "('ANIMAL_ANIMAL_SUPPLIER')\n", (2656, 2682), False, 'from schema import Schema, Optional, Or\n'), ((2697, 2723), 'schema.Optional', 'Optional', (['"""ANIMAL_HOUSING"""'], {}), "('ANIMAL_HOUSING')\n", (2705, 2723), False, 'from schema import Schema, Optional, Or\n'), ((2738, 2768), 'schema.Optional', 'Optional', (['"""ANIMAL_LIGHT_CYCLE"""'], {}), "('ANIMAL_LIGHT_CYCLE')\n", (2746, 2768), False, 'from schema import Schema, Optional, Or\n'), ((2783, 2806), 'schema.Optional', 'Optional', (['"""ANIMAL_FEED"""'], {}), "('ANIMAL_FEED')\n", (2791, 2806), False, 'from schema import Schema, Optional, Or\n'), ((2821, 2845), 'schema.Optional', 'Optional', (['"""ANIMAL_WATER"""'], {}), "('ANIMAL_WATER')\n", (2829, 2845), False, 'from schema import Schema, Optional, Or\n'), ((2860, 2897), 'schema.Optional', 'Optional', (['"""ANIMAL_INCLUSION_CRITERIA"""'], {}), "('ANIMAL_INCLUSION_CRITERIA')\n", (2868, 2897), False, 'from schema import Schema, Optional, Or\n'), ((2912, 2950), 
'schema.Optional', 'Optional', (['"""CELL_BIOSOURCE_OR_SUPPLIER"""'], {}), "('CELL_BIOSOURCE_OR_SUPPLIER')\n", (2920, 2950), False, 'from schema import Schema, Optional, Or\n'), ((2965, 2996), 'schema.Optional', 'Optional', (['"""CELL_STRAIN_DETAILS"""'], {}), "('CELL_STRAIN_DETAILS')\n", (2973, 2996), False, 'from schema import Schema, Optional, Or\n'), ((3011, 3039), 'schema.Optional', 'Optional', (['"""SUBJECT_COMMENTS"""'], {}), "('SUBJECT_COMMENTS')\n", (3019, 3039), False, 'from schema import Schema, Optional, Or\n'), ((3054, 3091), 'schema.Optional', 'Optional', (['"""CELL_PRIMARY_IMMORTALIZED"""'], {}), "('CELL_PRIMARY_IMMORTALIZED')\n", (3062, 3091), False, 'from schema import Schema, Optional, Or\n'), ((3106, 3137), 'schema.Optional', 'Optional', (['"""CELL_PASSAGE_NUMBER"""'], {}), "('CELL_PASSAGE_NUMBER')\n", (3114, 3137), False, 'from schema import Schema, Optional, Or\n'), ((3152, 3175), 'schema.Optional', 'Optional', (['"""CELL_COUNTS"""'], {}), "('CELL_COUNTS')\n", (3160, 3175), False, 'from schema import Schema, Optional, Or\n'), ((3190, 3215), 'schema.Optional', 'Optional', (['"""SPECIES_GROUP"""'], {}), "('SPECIES_GROUP')\n", (3198, 3215), False, 'from schema import Schema, Optional, Or\n'), ((3619, 3653), 'schema.Optional', 'Optional', (['"""COLLECTION_PROTOCOL_ID"""'], {}), "('COLLECTION_PROTOCOL_ID')\n", (3627, 3653), False, 'from schema import Schema, Optional, Or\n'), ((3668, 3708), 'schema.Optional', 'Optional', (['"""COLLECTION_PROTOCOL_FILENAME"""'], {}), "('COLLECTION_PROTOCOL_FILENAME')\n", (3676, 3708), False, 'from schema import Schema, Optional, Or\n'), ((3723, 3763), 'schema.Optional', 'Optional', (['"""COLLECTION_PROTOCOL_COMMENTS"""'], {}), "('COLLECTION_PROTOCOL_COMMENTS')\n", (3731, 3763), False, 'from schema import Schema, Optional, Or\n'), ((3778, 3801), 'schema.Optional', 'Optional', (['"""SAMPLE_TYPE"""'], {}), "('SAMPLE_TYPE')\n", (3786, 3801), False, 'from schema import Schema, Optional, Or\n'), ((3873, 3902), 
'schema.Optional', 'Optional', (['"""COLLECTION_METHOD"""'], {}), "('COLLECTION_METHOD')\n", (3881, 3902), False, 'from schema import Schema, Optional, Or\n'), ((3917, 3948), 'schema.Optional', 'Optional', (['"""COLLECTION_LOCATION"""'], {}), "('COLLECTION_LOCATION')\n", (3925, 3948), False, 'from schema import Schema, Optional, Or\n'), ((3963, 3995), 'schema.Optional', 'Optional', (['"""COLLECTION_FREQUENCY"""'], {}), "('COLLECTION_FREQUENCY')\n", (3971, 3995), False, 'from schema import Schema, Optional, Or\n'), ((4010, 4041), 'schema.Optional', 'Optional', (['"""COLLECTION_DURATION"""'], {}), "('COLLECTION_DURATION')\n", (4018, 4041), False, 'from schema import Schema, Optional, Or\n'), ((4056, 4083), 'schema.Optional', 'Optional', (['"""COLLECTION_TIME"""'], {}), "('COLLECTION_TIME')\n", (4064, 4083), False, 'from schema import Schema, Optional, Or\n'), ((4098, 4134), 'schema.Optional', 'Optional', (['"""VOLUMEORAMOUNT_COLLECTED"""'], {}), "('VOLUMEORAMOUNT_COLLECTED')\n", (4106, 4134), False, 'from schema import Schema, Optional, Or\n'), ((4149, 4179), 'schema.Optional', 'Optional', (['"""STORAGE_CONDITIONS"""'], {}), "('STORAGE_CONDITIONS')\n", (4157, 4179), False, 'from schema import Schema, Optional, Or\n'), ((4194, 4222), 'schema.Optional', 'Optional', (['"""COLLECTION_VIALS"""'], {}), "('COLLECTION_VIALS')\n", (4202, 4222), False, 'from schema import Schema, Optional, Or\n'), ((4237, 4262), 'schema.Optional', 'Optional', (['"""STORAGE_VIALS"""'], {}), "('STORAGE_VIALS')\n", (4245, 4262), False, 'from schema import Schema, Optional, Or\n'), ((4277, 4309), 'schema.Optional', 'Optional', (['"""COLLECTION_TUBE_TEMP"""'], {}), "('COLLECTION_TUBE_TEMP')\n", (4285, 4309), False, 'from schema import Schema, Optional, Or\n'), ((4324, 4345), 'schema.Optional', 'Optional', (['"""ADDITIVES"""'], {}), "('ADDITIVES')\n", (4332, 4345), False, 'from schema import Schema, Optional, Or\n'), ((4360, 4393), 'schema.Optional', 'Optional', (['"""BLOOD_SERUM_OR_PLASMA"""'], 
{}), "('BLOOD_SERUM_OR_PLASMA')\n", (4368, 4393), False, 'from schema import Schema, Optional, Or\n'), ((4408, 4446), 'schema.Optional', 'Optional', (['"""TISSUE_CELL_IDENTIFICATION"""'], {}), "('TISSUE_CELL_IDENTIFICATION')\n", (4416, 4446), False, 'from schema import Schema, Optional, Or\n'), ((4461, 4499), 'schema.Optional', 'Optional', (['"""TISSUE_CELL_QUANTITY_TAKEN"""'], {}), "('TISSUE_CELL_QUANTITY_TAKEN')\n", (4469, 4499), False, 'from schema import Schema, Optional, Or\n'), ((4589, 4622), 'schema.Optional', 'Optional', (['"""TREATMENT_PROTOCOL_ID"""'], {}), "('TREATMENT_PROTOCOL_ID')\n", (4597, 4622), False, 'from schema import Schema, Optional, Or\n'), ((4637, 4676), 'schema.Optional', 'Optional', (['"""TREATMENT_PROTOCOL_FILENAME"""'], {}), "('TREATMENT_PROTOCOL_FILENAME')\n", (4645, 4676), False, 'from schema import Schema, Optional, Or\n'), ((4691, 4730), 'schema.Optional', 'Optional', (['"""TREATMENT_PROTOCOL_COMMENTS"""'], {}), "('TREATMENT_PROTOCOL_COMMENTS')\n", (4699, 4730), False, 'from schema import Schema, Optional, Or\n'), ((4745, 4766), 'schema.Optional', 'Optional', (['"""TREATMENT"""'], {}), "('TREATMENT')\n", (4753, 4766), False, 'from schema import Schema, Optional, Or\n'), ((4781, 4811), 'schema.Optional', 'Optional', (['"""TREATMENT_COMPOUND"""'], {}), "('TREATMENT_COMPOUND')\n", (4789, 4811), False, 'from schema import Schema, Optional, Or\n'), ((4826, 4853), 'schema.Optional', 'Optional', (['"""TREATMENT_ROUTE"""'], {}), "('TREATMENT_ROUTE')\n", (4834, 4853), False, 'from schema import Schema, Optional, Or\n'), ((4868, 4894), 'schema.Optional', 'Optional', (['"""TREATMENT_DOSE"""'], {}), "('TREATMENT_DOSE')\n", (4876, 4894), False, 'from schema import Schema, Optional, Or\n'), ((4909, 4941), 'schema.Optional', 'Optional', (['"""TREATMENT_DOSEVOLUME"""'], {}), "('TREATMENT_DOSEVOLUME')\n", (4917, 4941), False, 'from schema import Schema, Optional, Or\n'), ((4956, 4990), 'schema.Optional', 'Optional', (['"""TREATMENT_DOSEDURATION"""'], 
{}), "('TREATMENT_DOSEDURATION')\n", (4964, 4990), False, 'from schema import Schema, Optional, Or\n'), ((5005, 5034), 'schema.Optional', 'Optional', (['"""TREATMENT_VEHICLE"""'], {}), "('TREATMENT_VEHICLE')\n", (5013, 5034), False, 'from schema import Schema, Optional, Or\n'), ((5049, 5082), 'schema.Optional', 'Optional', (['"""ANIMAL_VET_TREATMENTS"""'], {}), "('ANIMAL_VET_TREATMENTS')\n", (5057, 5082), False, 'from schema import Schema, Optional, Or\n'), ((5097, 5126), 'schema.Optional', 'Optional', (['"""ANIMAL_ANESTHESIA"""'], {}), "('ANIMAL_ANESTHESIA')\n", (5105, 5126), False, 'from schema import Schema, Optional, Or\n'), ((5141, 5180), 'schema.Optional', 'Optional', (['"""ANIMAL_ACCLIMATION_DURATION"""'], {}), "('ANIMAL_ACCLIMATION_DURATION')\n", (5149, 5180), False, 'from schema import Schema, Optional, Or\n'), ((5195, 5221), 'schema.Optional', 'Optional', (['"""ANIMAL_FASTING"""'], {}), "('ANIMAL_FASTING')\n", (5203, 5221), False, 'from schema import Schema, Optional, Or\n'), ((5236, 5270), 'schema.Optional', 'Optional', (['"""ANIMAL_ENDP_EUTHANASIA"""'], {}), "('ANIMAL_ENDP_EUTHANASIA')\n", (5244, 5270), False, 'from schema import Schema, Optional, Or\n'), ((5285, 5325), 'schema.Optional', 'Optional', (['"""ANIMAL_ENDP_TISSUE_COLL_LIST"""'], {}), "('ANIMAL_ENDP_TISSUE_COLL_LIST')\n", (5293, 5325), False, 'from schema import Schema, Optional, Or\n'), ((5340, 5382), 'schema.Optional', 'Optional', (['"""ANIMAL_ENDP_TISSUE_PROC_METHOD"""'], {}), "('ANIMAL_ENDP_TISSUE_PROC_METHOD')\n", (5348, 5382), False, 'from schema import Schema, Optional, Or\n'), ((5397, 5435), 'schema.Optional', 'Optional', (['"""ANIMAL_ENDP_CLINICAL_SIGNS"""'], {}), "('ANIMAL_ENDP_CLINICAL_SIGNS')\n", (5405, 5435), False, 'from schema import Schema, Optional, Or\n'), ((5450, 5475), 'schema.Optional', 'Optional', (['"""HUMAN_FASTING"""'], {}), "('HUMAN_FASTING')\n", (5458, 5475), False, 'from schema import Schema, Optional, Or\n'), ((5490, 5527), 'schema.Optional', 'Optional', 
(['"""HUMAN_ENDP_CLINICAL_SIGNS"""'], {}), "('HUMAN_ENDP_CLINICAL_SIGNS')\n", (5498, 5527), False, 'from schema import Schema, Optional, Or\n'), ((5542, 5566), 'schema.Optional', 'Optional', (['"""CELL_STORAGE"""'], {}), "('CELL_STORAGE')\n", (5550, 5566), False, 'from schema import Schema, Optional, Or\n'), ((5581, 5614), 'schema.Optional', 'Optional', (['"""CELL_GROWTH_CONTAINER"""'], {}), "('CELL_GROWTH_CONTAINER')\n", (5589, 5614), False, 'from schema import Schema, Optional, Or\n'), ((5629, 5659), 'schema.Optional', 'Optional', (['"""CELL_GROWTH_CONFIG"""'], {}), "('CELL_GROWTH_CONFIG')\n", (5637, 5659), False, 'from schema import Schema, Optional, Or\n'), ((5674, 5702), 'schema.Optional', 'Optional', (['"""CELL_GROWTH_RATE"""'], {}), "('CELL_GROWTH_RATE')\n", (5682, 5702), False, 'from schema import Schema, Optional, Or\n'), ((5717, 5743), 'schema.Optional', 'Optional', (['"""CELL_INOC_PROC"""'], {}), "('CELL_INOC_PROC')\n", (5725, 5743), False, 'from schema import Schema, Optional, Or\n'), ((5758, 5780), 'schema.Optional', 'Optional', (['"""CELL_MEDIA"""'], {}), "('CELL_MEDIA')\n", (5766, 5780), False, 'from schema import Schema, Optional, Or\n'), ((5795, 5822), 'schema.Optional', 'Optional', (['"""CELL_ENVIR_COND"""'], {}), "('CELL_ENVIR_COND')\n", (5803, 5822), False, 'from schema import Schema, Optional, Or\n'), ((5837, 5864), 'schema.Optional', 'Optional', (['"""CELL_HARVESTING"""'], {}), "('CELL_HARVESTING')\n", (5845, 5864), False, 'from schema import Schema, Optional, Or\n'), ((5879, 5911), 'schema.Optional', 'Optional', (['"""PLANT_GROWTH_SUPPORT"""'], {}), "('PLANT_GROWTH_SUPPORT')\n", (5887, 5911), False, 'from schema import Schema, Optional, Or\n'), ((5926, 5959), 'schema.Optional', 'Optional', (['"""PLANT_GROWTH_LOCATION"""'], {}), "('PLANT_GROWTH_LOCATION')\n", (5934, 5959), False, 'from schema import Schema, Optional, Or\n'), ((5974, 6003), 'schema.Optional', 'Optional', (['"""PLANT_PLOT_DESIGN"""'], {}), "('PLANT_PLOT_DESIGN')\n", (5982, 
6003), False, 'from schema import Schema, Optional, Or\n'), ((6018, 6048), 'schema.Optional', 'Optional', (['"""PLANT_LIGHT_PERIOD"""'], {}), "('PLANT_LIGHT_PERIOD')\n", (6026, 6048), False, 'from schema import Schema, Optional, Or\n'), ((6063, 6089), 'schema.Optional', 'Optional', (['"""PLANT_HUMIDITY"""'], {}), "('PLANT_HUMIDITY')\n", (6071, 6089), False, 'from schema import Schema, Optional, Or\n'), ((6104, 6126), 'schema.Optional', 'Optional', (['"""PLANT_TEMP"""'], {}), "('PLANT_TEMP')\n", (6112, 6126), False, 'from schema import Schema, Optional, Or\n'), ((6141, 6174), 'schema.Optional', 'Optional', (['"""PLANT_WATERING_REGIME"""'], {}), "('PLANT_WATERING_REGIME')\n", (6149, 6174), False, 'from schema import Schema, Optional, Or\n'), ((6189, 6225), 'schema.Optional', 'Optional', (['"""PLANT_NUTRITIONAL_REGIME"""'], {}), "('PLANT_NUTRITIONAL_REGIME')\n", (6197, 6225), False, 'from schema import Schema, Optional, Or\n'), ((6240, 6268), 'schema.Optional', 'Optional', (['"""PLANT_ESTAB_DATE"""'], {}), "('PLANT_ESTAB_DATE')\n", (6248, 6268), False, 'from schema import Schema, Optional, Or\n'), ((6283, 6313), 'schema.Optional', 'Optional', (['"""PLANT_HARVEST_DATE"""'], {}), "('PLANT_HARVEST_DATE')\n", (6291, 6313), False, 'from schema import Schema, Optional, Or\n'), ((6328, 6358), 'schema.Optional', 'Optional', (['"""PLANT_GROWTH_STAGE"""'], {}), "('PLANT_GROWTH_STAGE')\n", (6336, 6358), False, 'from schema import Schema, Optional, Or\n'), ((6373, 6410), 'schema.Optional', 'Optional', (['"""PLANT_METAB_QUENCH_METHOD"""'], {}), "('PLANT_METAB_QUENCH_METHOD')\n", (6381, 6410), False, 'from schema import Schema, Optional, Or\n'), ((6425, 6457), 'schema.Optional', 'Optional', (['"""PLANT_HARVEST_METHOD"""'], {}), "('PLANT_HARVEST_METHOD')\n", (6433, 6457), False, 'from schema import Schema, Optional, Or\n'), ((6472, 6497), 'schema.Optional', 'Optional', (['"""PLANT_STORAGE"""'], {}), "('PLANT_STORAGE')\n", (6480, 6497), False, 'from schema import Schema, Optional, 
Or\n'), ((6512, 6543), 'schema.Optional', 'Optional', (['"""CELL_PCT_CONFLUENCE"""'], {}), "('CELL_PCT_CONFLUENCE')\n", (6520, 6543), False, 'from schema import Schema, Optional, Or\n'), ((6558, 6592), 'schema.Optional', 'Optional', (['"""CELL_MEDIA_LASTCHANGED"""'], {}), "('CELL_MEDIA_LASTCHANGED')\n", (6566, 6592), False, 'from schema import Schema, Optional, Or\n'), ((6684, 6718), 'schema.Optional', 'Optional', (['"""SAMPLEPREP_PROTOCOL_ID"""'], {}), "('SAMPLEPREP_PROTOCOL_ID')\n", (6692, 6718), False, 'from schema import Schema, Optional, Or\n'), ((6733, 6773), 'schema.Optional', 'Optional', (['"""SAMPLEPREP_PROTOCOL_FILENAME"""'], {}), "('SAMPLEPREP_PROTOCOL_FILENAME')\n", (6741, 6773), False, 'from schema import Schema, Optional, Or\n'), ((6788, 6828), 'schema.Optional', 'Optional', (['"""SAMPLEPREP_PROTOCOL_COMMENTS"""'], {}), "('SAMPLEPREP_PROTOCOL_COMMENTS')\n", (6796, 6828), False, 'from schema import Schema, Optional, Or\n'), ((6843, 6872), 'schema.Optional', 'Optional', (['"""PROCESSING_METHOD"""'], {}), "('PROCESSING_METHOD')\n", (6851, 6872), False, 'from schema import Schema, Optional, Or\n'), ((6887, 6928), 'schema.Optional', 'Optional', (['"""PROCESSING_STORAGE_CONDITIONS"""'], {}), "('PROCESSING_STORAGE_CONDITIONS')\n", (6895, 6928), False, 'from schema import Schema, Optional, Or\n'), ((6943, 6972), 'schema.Optional', 'Optional', (['"""EXTRACTION_METHOD"""'], {}), "('EXTRACTION_METHOD')\n", (6951, 6972), False, 'from schema import Schema, Optional, Or\n'), ((6987, 7029), 'schema.Optional', 'Optional', (['"""EXTRACT_CONCENTRATION_DILUTION"""'], {}), "('EXTRACT_CONCENTRATION_DILUTION')\n", (6995, 7029), False, 'from schema import Schema, Optional, Or\n'), ((7044, 7074), 'schema.Optional', 'Optional', (['"""EXTRACT_ENRICHMENT"""'], {}), "('EXTRACT_ENRICHMENT')\n", (7052, 7074), False, 'from schema import Schema, Optional, Or\n'), ((7089, 7116), 'schema.Optional', 'Optional', (['"""EXTRACT_CLEANUP"""'], {}), "('EXTRACT_CLEANUP')\n", (7097, 7116), 
False, 'from schema import Schema, Optional, Or\n'), ((7131, 7158), 'schema.Optional', 'Optional', (['"""EXTRACT_STORAGE"""'], {}), "('EXTRACT_STORAGE')\n", (7139, 7158), False, 'from schema import Schema, Optional, Or\n'), ((7173, 7204), 'schema.Optional', 'Optional', (['"""SAMPLE_RESUSPENSION"""'], {}), "('SAMPLE_RESUSPENSION')\n", (7181, 7204), False, 'from schema import Schema, Optional, Or\n'), ((7219, 7252), 'schema.Optional', 'Optional', (['"""SAMPLE_DERIVATIZATION"""'], {}), "('SAMPLE_DERIVATIZATION')\n", (7227, 7252), False, 'from schema import Schema, Optional, Or\n'), ((7267, 7293), 'schema.Optional', 'Optional', (['"""SAMPLE_SPIKING"""'], {}), "('SAMPLE_SPIKING')\n", (7275, 7293), False, 'from schema import Schema, Optional, Or\n'), ((7308, 7325), 'schema.Optional', 'Optional', (['"""ORGAN"""'], {}), "('ORGAN')\n", (7316, 7325), False, 'from schema import Schema, Optional, Or\n'), ((7340, 7371), 'schema.Optional', 'Optional', (['"""ORGAN_SPECIFICATION"""'], {}), "('ORGAN_SPECIFICATION')\n", (7348, 7371), False, 'from schema import Schema, Optional, Or\n'), ((7386, 7407), 'schema.Optional', 'Optional', (['"""CELL_TYPE"""'], {}), "('CELL_TYPE')\n", (7394, 7407), False, 'from schema import Schema, Optional, Or\n'), ((7422, 7454), 'schema.Optional', 'Optional', (['"""SUBCELLULAR_LOCATION"""'], {}), "('SUBCELLULAR_LOCATION')\n", (7430, 7454), False, 'from schema import Schema, Optional, Or\n'), ((7515, 7549), 'schema.Optional', 'Optional', (['"""CHROMATOGRAPHY_SUMMARY"""'], {}), "('CHROMATOGRAPHY_SUMMARY')\n", (7523, 7549), False, 'from schema import Schema, Optional, Or\n'), ((7660, 7685), 'schema.Optional', 'Optional', (['"""FLOW_GRADIENT"""'], {}), "('FLOW_GRADIENT')\n", (7668, 7685), False, 'from schema import Schema, Optional, Or\n'), ((7700, 7721), 'schema.Optional', 'Optional', (['"""FLOW_RATE"""'], {}), "('FLOW_RATE')\n", (7708, 7721), False, 'from schema import Schema, Optional, Or\n'), ((7736, 7766), 'schema.Optional', 'Optional', 
(['"""COLUMN_TEMPERATURE"""'], {}), "('COLUMN_TEMPERATURE')\n", (7744, 7766), False, 'from schema import Schema, Optional, Or\n'), ((7781, 7809), 'schema.Optional', 'Optional', (['"""METHODS_FILENAME"""'], {}), "('METHODS_FILENAME')\n", (7789, 7809), False, 'from schema import Schema, Optional, Or\n'), ((7824, 7845), 'schema.Optional', 'Optional', (['"""SOLVENT_A"""'], {}), "('SOLVENT_A')\n", (7832, 7845), False, 'from schema import Schema, Optional, Or\n'), ((7860, 7881), 'schema.Optional', 'Optional', (['"""SOLVENT_B"""'], {}), "('SOLVENT_B')\n", (7868, 7881), False, 'from schema import Schema, Optional, Or\n'), ((7896, 7918), 'schema.Optional', 'Optional', (['"""METHODS_ID"""'], {}), "('METHODS_ID')\n", (7904, 7918), False, 'from schema import Schema, Optional, Or\n'), ((7933, 7960), 'schema.Optional', 'Optional', (['"""COLUMN_PRESSURE"""'], {}), "('COLUMN_PRESSURE')\n", (7941, 7960), False, 'from schema import Schema, Optional, Or\n'), ((7975, 8008), 'schema.Optional', 'Optional', (['"""INJECTION_TEMPERATURE"""'], {}), "('INJECTION_TEMPERATURE')\n", (7983, 8008), False, 'from schema import Schema, Optional, Or\n'), ((8023, 8052), 'schema.Optional', 'Optional', (['"""INTERNAL_STANDARD"""'], {}), "('INTERNAL_STANDARD')\n", (8031, 8052), False, 'from schema import Schema, Optional, Or\n'), ((8067, 8099), 'schema.Optional', 'Optional', (['"""INTERNAL_STANDARD_MT"""'], {}), "('INTERNAL_STANDARD_MT')\n", (8075, 8099), False, 'from schema import Schema, Optional, Or\n'), ((8114, 8141), 'schema.Optional', 'Optional', (['"""RETENTION_INDEX"""'], {}), "('RETENTION_INDEX')\n", (8122, 8141), False, 'from schema import Schema, Optional, Or\n'), ((8156, 8182), 'schema.Optional', 'Optional', (['"""RETENTION_TIME"""'], {}), "('RETENTION_TIME')\n", (8164, 8182), False, 'from schema import Schema, Optional, Or\n'), ((8197, 8225), 'schema.Optional', 'Optional', (['"""SAMPLE_INJECTION"""'], {}), "('SAMPLE_INJECTION')\n", (8205, 8225), False, 'from schema import Schema, Optional, 
Or\n'), ((8240, 8265), 'schema.Optional', 'Optional', (['"""SAMPLING_CONE"""'], {}), "('SAMPLING_CONE')\n", (8248, 8265), False, 'from schema import Schema, Optional, Or\n'), ((8280, 8307), 'schema.Optional', 'Optional', (['"""ANALYTICAL_TIME"""'], {}), "('ANALYTICAL_TIME')\n", (8288, 8307), False, 'from schema import Schema, Optional, Or\n'), ((8322, 8351), 'schema.Optional', 'Optional', (['"""CAPILLARY_VOLTAGE"""'], {}), "('CAPILLARY_VOLTAGE')\n", (8330, 8351), False, 'from schema import Schema, Optional, Or\n'), ((8366, 8392), 'schema.Optional', 'Optional', (['"""MIGRATION_TIME"""'], {}), "('MIGRATION_TIME')\n", (8374, 8392), False, 'from schema import Schema, Optional, Or\n'), ((8407, 8435), 'schema.Optional', 'Optional', (['"""OVEN_TEMPERATURE"""'], {}), "('OVEN_TEMPERATURE')\n", (8415, 8435), False, 'from schema import Schema, Optional, Or\n'), ((8450, 8477), 'schema.Optional', 'Optional', (['"""PRECONDITIONING"""'], {}), "('PRECONDITIONING')\n", (8458, 8477), False, 'from schema import Schema, Optional, Or\n'), ((8492, 8518), 'schema.Optional', 'Optional', (['"""RUNNING_BUFFER"""'], {}), "('RUNNING_BUFFER')\n", (8500, 8518), False, 'from schema import Schema, Optional, Or\n'), ((8533, 8560), 'schema.Optional', 'Optional', (['"""RUNNING_VOLTAGE"""'], {}), "('RUNNING_VOLTAGE')\n", (8541, 8560), False, 'from schema import Schema, Optional, Or\n'), ((8575, 8600), 'schema.Optional', 'Optional', (['"""SHEATH_LIQUID"""'], {}), "('SHEATH_LIQUID')\n", (8583, 8600), False, 'from schema import Schema, Optional, Or\n'), ((8615, 8639), 'schema.Optional', 'Optional', (['"""TIME_PROGRAM"""'], {}), "('TIME_PROGRAM')\n", (8623, 8639), False, 'from schema import Schema, Optional, Or\n'), ((8654, 8690), 'schema.Optional', 'Optional', (['"""TRANSFERLINE_TEMPERATURE"""'], {}), "('TRANSFERLINE_TEMPERATURE')\n", (8662, 8690), False, 'from schema import Schema, Optional, Or\n'), ((8705, 8731), 'schema.Optional', 'Optional', (['"""WASHING_BUFFER"""'], {}), "('WASHING_BUFFER')\n", 
(8713, 8731), False, 'from schema import Schema, Optional, Or\n'), ((8746, 8780), 'schema.Optional', 'Optional', (['"""WEAK_WASH_SOLVENT_NAME"""'], {}), "('WEAK_WASH_SOLVENT_NAME')\n", (8754, 8780), False, 'from schema import Schema, Optional, Or\n'), ((8795, 8823), 'schema.Optional', 'Optional', (['"""WEAK_WASH_VOLUME"""'], {}), "('WEAK_WASH_VOLUME')\n", (8803, 8823), False, 'from schema import Schema, Optional, Or\n'), ((8838, 8874), 'schema.Optional', 'Optional', (['"""STRONG_WASH_SOLVENT_NAME"""'], {}), "('STRONG_WASH_SOLVENT_NAME')\n", (8846, 8874), False, 'from schema import Schema, Optional, Or\n'), ((8889, 8919), 'schema.Optional', 'Optional', (['"""STRONG_WASH_VOLUME"""'], {}), "('STRONG_WASH_VOLUME')\n", (8897, 8919), False, 'from schema import Schema, Optional, Or\n'), ((8934, 8971), 'schema.Optional', 'Optional', (['"""TARGET_SAMPLE_TEMPERATURE"""'], {}), "('TARGET_SAMPLE_TEMPERATURE')\n", (8942, 8971), False, 'from schema import Schema, Optional, Or\n'), ((8986, 9014), 'schema.Optional', 'Optional', (['"""SAMPLE_LOOP_SIZE"""'], {}), "('SAMPLE_LOOP_SIZE')\n", (8994, 9014), False, 'from schema import Schema, Optional, Or\n'), ((9029, 9060), 'schema.Optional', 'Optional', (['"""SAMPLE_SYRINGE_SIZE"""'], {}), "('SAMPLE_SYRINGE_SIZE')\n", (9037, 9060), False, 'from schema import Schema, Optional, Or\n'), ((9075, 9106), 'schema.Optional', 'Optional', (['"""RANDOMIZATION_ORDER"""'], {}), "('RANDOMIZATION_ORDER')\n", (9083, 9106), False, 'from schema import Schema, Optional, Or\n'), ((9121, 9156), 'schema.Optional', 'Optional', (['"""CHROMATOGRAPHY_COMMENTS"""'], {}), "('CHROMATOGRAPHY_COMMENTS')\n", (9129, 9156), False, 'from schema import Schema, Optional, Or\n'), ((9241, 9268), 'schema.Optional', 'Optional', (['"""LABORATORY_NAME"""'], {}), "('LABORATORY_NAME')\n", (9249, 9268), False, 'from schema import Schema, Optional, Or\n'), ((9283, 9308), 'schema.Optional', 'Optional', (['"""OPERATOR_NAME"""'], {}), "('OPERATOR_NAME')\n", (9291, 9308), False, 'from 
schema import Schema, Optional, Or\n'), ((9323, 9348), 'schema.Optional', 'Optional', (['"""DETECTOR_TYPE"""'], {}), "('DETECTOR_TYPE')\n", (9331, 9348), False, 'from schema import Schema, Optional, Or\n'), ((9363, 9391), 'schema.Optional', 'Optional', (['"""SOFTWARE_VERSION"""'], {}), "('SOFTWARE_VERSION')\n", (9371, 9391), False, 'from schema import Schema, Optional, Or\n'), ((9406, 9434), 'schema.Optional', 'Optional', (['"""ACQUISITION_DATE"""'], {}), "('ACQUISITION_DATE')\n", (9414, 9434), False, 'from schema import Schema, Optional, Or\n'), ((9449, 9483), 'schema.Optional', 'Optional', (['"""ANALYSIS_PROTOCOL_FILE"""'], {}), "('ANALYSIS_PROTOCOL_FILE')\n", (9457, 9483), False, 'from schema import Schema, Optional, Or\n'), ((9498, 9537), 'schema.Optional', 'Optional', (['"""ACQUISITION_PARAMETERS_FILE"""'], {}), "('ACQUISITION_PARAMETERS_FILE')\n", (9506, 9537), False, 'from schema import Schema, Optional, Or\n'), ((9552, 9590), 'schema.Optional', 'Optional', (['"""PROCESSING_PARAMETERS_FILE"""'], {}), "('PROCESSING_PARAMETERS_FILE')\n", (9560, 9590), False, 'from schema import Schema, Optional, Or\n'), ((9605, 9628), 'schema.Optional', 'Optional', (['"""DATA_FORMAT"""'], {}), "('DATA_FORMAT')\n", (9613, 9628), False, 'from schema import Schema, Optional, Or\n'), ((9701, 9727), 'schema.Optional', 'Optional', (['"""ACQUISITION_ID"""'], {}), "('ACQUISITION_ID')\n", (9709, 9727), False, 'from schema import Schema, Optional, Or\n'), ((9742, 9770), 'schema.Optional', 'Optional', (['"""ACQUISITION_TIME"""'], {}), "('ACQUISITION_TIME')\n", (9750, 9770), False, 'from schema import Schema, Optional, Or\n'), ((9785, 9814), 'schema.Optional', 'Optional', (['"""ANALYSIS_COMMENTS"""'], {}), "('ANALYSIS_COMMENTS')\n", (9793, 9814), False, 'from schema import Schema, Optional, Or\n'), ((9829, 9857), 'schema.Optional', 'Optional', (['"""ANALYSIS_DISPLAY"""'], {}), "('ANALYSIS_DISPLAY')\n", (9837, 9857), False, 'from schema import Schema, Optional, Or\n'), ((9872, 9899), 
'schema.Optional', 'Optional', (['"""INSTRUMENT_NAME"""'], {}), "('INSTRUMENT_NAME')\n", (9880, 9899), False, 'from schema import Schema, Optional, Or\n'), ((9914, 9952), 'schema.Optional', 'Optional', (['"""INSTRUMENT_PARAMETERS_FILE"""'], {}), "('INSTRUMENT_PARAMETERS_FILE')\n", (9922, 9952), False, 'from schema import Schema, Optional, Or\n'), ((9967, 9990), 'schema.Optional', 'Optional', (['"""NUM_FACTORS"""'], {}), "('NUM_FACTORS')\n", (9975, 9990), False, 'from schema import Schema, Optional, Or\n'), ((10005, 10032), 'schema.Optional', 'Optional', (['"""NUM_METABOLITES"""'], {}), "('NUM_METABOLITES')\n", (10013, 10032), False, 'from schema import Schema, Optional, Or\n'), ((10047, 10073), 'schema.Optional', 'Optional', (['"""PROCESSED_FILE"""'], {}), "('PROCESSED_FILE')\n", (10055, 10073), False, 'from schema import Schema, Optional, Or\n'), ((10088, 10119), 'schema.Optional', 'Optional', (['"""RANDOMIZATION_ORDER"""'], {}), "('RANDOMIZATION_ORDER')\n", (10096, 10119), False, 'from schema import Schema, Optional, Or\n'), ((10134, 10154), 'schema.Optional', 'Optional', (['"""RAW_FILE"""'], {}), "('RAW_FILE')\n", (10142, 10154), False, 'from schema import Schema, Optional, Or\n'), ((10368, 10401), 'schema.Optional', 'Optional', (['"""CAPILLARY_TEMPERATURE"""'], {}), "('CAPILLARY_TEMPERATURE')\n", (10376, 10401), False, 'from schema import Schema, Optional, Or\n'), ((10416, 10445), 'schema.Optional', 'Optional', (['"""CAPILLARY_VOLTAGE"""'], {}), "('CAPILLARY_VOLTAGE')\n", (10424, 10445), False, 'from schema import Schema, Optional, Or\n'), ((10460, 10488), 'schema.Optional', 'Optional', (['"""COLLISION_ENERGY"""'], {}), "('COLLISION_ENERGY')\n", (10468, 10488), False, 'from schema import Schema, Optional, Or\n'), ((10503, 10528), 'schema.Optional', 'Optional', (['"""COLLISION_GAS"""'], {}), "('COLLISION_GAS')\n", (10511, 10528), False, 'from schema import Schema, Optional, Or\n'), ((10543, 10567), 'schema.Optional', 'Optional', (['"""DRY_GAS_FLOW"""'], {}), 
"('DRY_GAS_FLOW')\n", (10551, 10567), False, 'from schema import Schema, Optional, Or\n'), ((10582, 10606), 'schema.Optional', 'Optional', (['"""DRY_GAS_TEMP"""'], {}), "('DRY_GAS_TEMP')\n", (10590, 10606), False, 'from schema import Schema, Optional, Or\n'), ((10621, 10649), 'schema.Optional', 'Optional', (['"""FRAGMENT_VOLTAGE"""'], {}), "('FRAGMENT_VOLTAGE')\n", (10629, 10649), False, 'from schema import Schema, Optional, Or\n'), ((10664, 10696), 'schema.Optional', 'Optional', (['"""FRAGMENTATION_METHOD"""'], {}), "('FRAGMENTATION_METHOD')\n", (10672, 10696), False, 'from schema import Schema, Optional, Or\n'), ((10711, 10735), 'schema.Optional', 'Optional', (['"""GAS_PRESSURE"""'], {}), "('GAS_PRESSURE')\n", (10719, 10735), False, 'from schema import Schema, Optional, Or\n'), ((10750, 10773), 'schema.Optional', 'Optional', (['"""HELIUM_FLOW"""'], {}), "('HELIUM_FLOW')\n", (10758, 10773), False, 'from schema import Schema, Optional, Or\n'), ((10788, 10822), 'schema.Optional', 'Optional', (['"""ION_SOURCE_TEMPERATURE"""'], {}), "('ION_SOURCE_TEMPERATURE')\n", (10796, 10822), False, 'from schema import Schema, Optional, Or\n'), ((10837, 10866), 'schema.Optional', 'Optional', (['"""ION_SPRAY_VOLTAGE"""'], {}), "('ION_SPRAY_VOLTAGE')\n", (10845, 10866), False, 'from schema import Schema, Optional, Or\n'), ((10881, 10903), 'schema.Optional', 'Optional', (['"""IONIZATION"""'], {}), "('IONIZATION')\n", (10889, 10903), False, 'from schema import Schema, Optional, Or\n'), ((10918, 10947), 'schema.Optional', 'Optional', (['"""IONIZATION_ENERGY"""'], {}), "('IONIZATION_ENERGY')\n", (10926, 10947), False, 'from schema import Schema, Optional, Or\n'), ((10962, 10994), 'schema.Optional', 'Optional', (['"""IONIZATION_POTENTIAL"""'], {}), "('IONIZATION_POTENTIAL')\n", (10970, 10994), False, 'from schema import Schema, Optional, Or\n'), ((11009, 11034), 'schema.Optional', 'Optional', (['"""MASS_ACCURACY"""'], {}), "('MASS_ACCURACY')\n", (11017, 11034), False, 'from schema import 
Schema, Optional, Or\n'), ((11049, 11075), 'schema.Optional', 'Optional', (['"""PRECURSOR_TYPE"""'], {}), "('PRECURSOR_TYPE')\n", (11057, 11075), False, 'from schema import Schema, Optional, Or\n'), ((11090, 11113), 'schema.Optional', 'Optional', (['"""REAGENT_GAS"""'], {}), "('REAGENT_GAS')\n", (11098, 11113), False, 'from schema import Schema, Optional, Or\n'), ((11128, 11158), 'schema.Optional', 'Optional', (['"""SOURCE_TEMPERATURE"""'], {}), "('SOURCE_TEMPERATURE')\n", (11136, 11158), False, 'from schema import Schema, Optional, Or\n'), ((11173, 11198), 'schema.Optional', 'Optional', (['"""SPRAY_VOLTAGE"""'], {}), "('SPRAY_VOLTAGE')\n", (11181, 11198), False, 'from schema import Schema, Optional, Or\n'), ((11213, 11245), 'schema.Optional', 'Optional', (['"""ACTIVATION_PARAMETER"""'], {}), "('ACTIVATION_PARAMETER')\n", (11221, 11245), False, 'from schema import Schema, Optional, Or\n'), ((11260, 11287), 'schema.Optional', 'Optional', (['"""ACTIVATION_TIME"""'], {}), "('ACTIVATION_TIME')\n", (11268, 11287), False, 'from schema import Schema, Optional, Or\n'), ((11302, 11330), 'schema.Optional', 'Optional', (['"""ATOM_GUN_CURRENT"""'], {}), "('ATOM_GUN_CURRENT')\n", (11310, 11330), False, 'from schema import Schema, Optional, Or\n'), ((11345, 11379), 'schema.Optional', 'Optional', (['"""AUTOMATIC_GAIN_CONTROL"""'], {}), "('AUTOMATIC_GAIN_CONTROL')\n", (11353, 11379), False, 'from schema import Schema, Optional, Or\n'), ((11394, 11417), 'schema.Optional', 'Optional', (['"""BOMBARDMENT"""'], {}), "('BOMBARDMENT')\n", (11402, 11417), False, 'from schema import Schema, Optional, Or\n'), ((11432, 11475), 'schema.Optional', 'Optional', (['"""CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE"""'], {}), "('CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE')\n", (11440, 11475), False, 'from schema import Schema, Optional, Or\n'), ((11490, 11517), 'schema.Optional', 'Optional', (['"""CDL_TEMPERATURE"""'], {}), "('CDL_TEMPERATURE')\n", (11498, 11517), False, 'from schema import Schema, Optional, Or\n'), 
((11532, 11554), 'schema.Optional', 'Optional', (['"""DATAFORMAT"""'], {}), "('DATAFORMAT')\n", (11540, 11554), False, 'from schema import Schema, Optional, Or\n'), ((11569, 11601), 'schema.Optional', 'Optional', (['"""DESOLVATION_GAS_FLOW"""'], {}), "('DESOLVATION_GAS_FLOW')\n", (11577, 11601), False, 'from schema import Schema, Optional, Or\n'), ((11616, 11651), 'schema.Optional', 'Optional', (['"""DESOLVATION_TEMPERATURE"""'], {}), "('DESOLVATION_TEMPERATURE')\n", (11624, 11651), False, 'from schema import Schema, Optional, Or\n'), ((11666, 11695), 'schema.Optional', 'Optional', (['"""INTERFACE_VOLTAGE"""'], {}), "('INTERFACE_VOLTAGE')\n", (11674, 11695), False, 'from schema import Schema, Optional, Or\n'), ((11710, 11752), 'schema.Optional', 'Optional', (['"""IT_SIDE_OCTOPOLES_BIAS_VOLTAGE"""'], {}), "('IT_SIDE_OCTOPOLES_BIAS_VOLTAGE')\n", (11718, 11752), False, 'from schema import Schema, Optional, Or\n'), ((11767, 11784), 'schema.Optional', 'Optional', (['"""LASER"""'], {}), "('LASER')\n", (11775, 11784), False, 'from schema import Schema, Optional, Or\n'), ((11799, 11817), 'schema.Optional', 'Optional', (['"""MATRIX"""'], {}), "('MATRIX')\n", (11807, 11817), False, 'from schema import Schema, Optional, Or\n'), ((11832, 11853), 'schema.Optional', 'Optional', (['"""NEBULIZER"""'], {}), "('NEBULIZER')\n", (11840, 11853), False, 'from schema import Schema, Optional, Or\n'), ((11868, 11895), 'schema.Optional', 'Optional', (['"""OCTPOLE_VOLTAGE"""'], {}), "('OCTPOLE_VOLTAGE')\n", (11876, 11895), False, 'from schema import Schema, Optional, Or\n'), ((11910, 11931), 'schema.Optional', 'Optional', (['"""PROBE_TIP"""'], {}), "('PROBE_TIP')\n", (11918, 11931), False, 'from schema import Schema, Optional, Or\n'), ((11946, 11976), 'schema.Optional', 'Optional', (['"""RESOLUTION_SETTING"""'], {}), "('RESOLUTION_SETTING')\n", (11954, 11976), False, 'from schema import Schema, Optional, Or\n'), ((11991, 12018), 'schema.Optional', 'Optional', (['"""SAMPLE_DRIPPING"""'], {}), 
"('SAMPLE_DRIPPING')\n", (11999, 12018), False, 'from schema import Schema, Optional, Or\n'), ((12033, 12062), 'schema.Optional', 'Optional', (['"""SCAN_RANGE_MOVERZ"""'], {}), "('SCAN_RANGE_MOVERZ')\n", (12041, 12062), False, 'from schema import Schema, Optional, Or\n'), ((12077, 12097), 'schema.Optional', 'Optional', (['"""SCANNING"""'], {}), "('SCANNING')\n", (12085, 12097), False, 'from schema import Schema, Optional, Or\n'), ((12112, 12138), 'schema.Optional', 'Optional', (['"""SCANNING_CYCLE"""'], {}), "('SCANNING_CYCLE')\n", (12120, 12138), False, 'from schema import Schema, Optional, Or\n'), ((12153, 12179), 'schema.Optional', 'Optional', (['"""SCANNING_RANGE"""'], {}), "('SCANNING_RANGE')\n", (12161, 12179), False, 'from schema import Schema, Optional, Or\n'), ((12194, 12221), 'schema.Optional', 'Optional', (['"""SKIMMER_VOLTAGE"""'], {}), "('SKIMMER_VOLTAGE')\n", (12202, 12221), False, 'from schema import Schema, Optional, Or\n'), ((12236, 12265), 'schema.Optional', 'Optional', (['"""TUBE_LENS_VOLTAGE"""'], {}), "('TUBE_LENS_VOLTAGE')\n", (12244, 12265), False, 'from schema import Schema, Optional, Or\n'), ((12280, 12307), 'schema.Optional', 'Optional', (['"""MS_RESULTS_FILE"""'], {}), "('MS_RESULTS_FILE')\n", (12288, 12307), False, 'from schema import Schema, Optional, Or\n'), ((12309, 12322), 'schema.Or', 'Or', (['str', 'dict'], {}), '(str, dict)\n', (12311, 12322), False, 'from schema import Schema, Optional, Or\n'), ((12467, 12491), 'schema.Optional', 'Optional', (['"""NMR_COMMENTS"""'], {}), "('NMR_COMMENTS')\n", (12475, 12491), False, 'from schema import Schema, Optional, Or\n'), ((12506, 12538), 'schema.Optional', 'Optional', (['"""FIELD_FREQUENCY_LOCK"""'], {}), "('FIELD_FREQUENCY_LOCK')\n", (12514, 12538), False, 'from schema import Schema, Optional, Or\n'), ((12553, 12587), 'schema.Optional', 'Optional', (['"""STANDARD_CONCENTRATION"""'], {}), "('STANDARD_CONCENTRATION')\n", (12561, 12587), False, 'from schema import Schema, Optional, Or\n'), 
((12641, 12662), 'schema.Optional', 'Optional', (['"""NMR_PROBE"""'], {}), "('NMR_PROBE')\n", (12649, 12662), False, 'from schema import Schema, Optional, Or\n'), ((12677, 12700), 'schema.Optional', 'Optional', (['"""NMR_SOLVENT"""'], {}), "('NMR_SOLVENT')\n", (12685, 12700), False, 'from schema import Schema, Optional, Or\n'), ((12715, 12740), 'schema.Optional', 'Optional', (['"""NMR_TUBE_SIZE"""'], {}), "('NMR_TUBE_SIZE')\n", (12723, 12740), False, 'from schema import Schema, Optional, Or\n'), ((12755, 12782), 'schema.Optional', 'Optional', (['"""SHIMMING_METHOD"""'], {}), "('SHIMMING_METHOD')\n", (12763, 12782), False, 'from schema import Schema, Optional, Or\n'), ((12797, 12823), 'schema.Optional', 'Optional', (['"""PULSE_SEQUENCE"""'], {}), "('PULSE_SEQUENCE')\n", (12805, 12823), False, 'from schema import Schema, Optional, Or\n'), ((12838, 12867), 'schema.Optional', 'Optional', (['"""WATER_SUPPRESSION"""'], {}), "('WATER_SUPPRESSION')\n", (12846, 12867), False, 'from schema import Schema, Optional, Or\n'), ((12882, 12905), 'schema.Optional', 'Optional', (['"""PULSE_WIDTH"""'], {}), "('PULSE_WIDTH')\n", (12890, 12905), False, 'from schema import Schema, Optional, Or\n'), ((12920, 12943), 'schema.Optional', 'Optional', (['"""POWER_LEVEL"""'], {}), "('POWER_LEVEL')\n", (12928, 12943), False, 'from schema import Schema, Optional, Or\n'), ((12958, 12983), 'schema.Optional', 'Optional', (['"""RECEIVER_GAIN"""'], {}), "('RECEIVER_GAIN')\n", (12966, 12983), False, 'from schema import Schema, Optional, Or\n'), ((12998, 13026), 'schema.Optional', 'Optional', (['"""OFFSET_FREQUENCY"""'], {}), "('OFFSET_FREQUENCY')\n", (13006, 13026), False, 'from schema import Schema, Optional, Or\n'), ((13041, 13078), 'schema.Optional', 'Optional', (['"""PRESATURATION_POWER_LEVEL"""'], {}), "('PRESATURATION_POWER_LEVEL')\n", (13049, 13078), False, 'from schema import Schema, Optional, Or\n'), ((13093, 13127), 'schema.Optional', 'Optional', (['"""CHEMICAL_SHIFT_REF_CPD"""'], {}), 
"('CHEMICAL_SHIFT_REF_CPD')\n", (13101, 13127), False, 'from schema import Schema, Optional, Or\n'), ((13142, 13165), 'schema.Optional', 'Optional', (['"""TEMPERATURE"""'], {}), "('TEMPERATURE')\n", (13150, 13165), False, 'from schema import Schema, Optional, Or\n'), ((13180, 13207), 'schema.Optional', 'Optional', (['"""NUMBER_OF_SCANS"""'], {}), "('NUMBER_OF_SCANS')\n", (13188, 13207), False, 'from schema import Schema, Optional, Or\n'), ((13222, 13245), 'schema.Optional', 'Optional', (['"""DUMMY_SCANS"""'], {}), "('DUMMY_SCANS')\n", (13230, 13245), False, 'from schema import Schema, Optional, Or\n'), ((13260, 13288), 'schema.Optional', 'Optional', (['"""ACQUISITION_TIME"""'], {}), "('ACQUISITION_TIME')\n", (13268, 13288), False, 'from schema import Schema, Optional, Or\n'), ((13303, 13331), 'schema.Optional', 'Optional', (['"""RELAXATION_DELAY"""'], {}), "('RELAXATION_DELAY')\n", (13311, 13331), False, 'from schema import Schema, Optional, Or\n'), ((13346, 13372), 'schema.Optional', 'Optional', (['"""SPECTRAL_WIDTH"""'], {}), "('SPECTRAL_WIDTH')\n", (13354, 13372), False, 'from schema import Schema, Optional, Or\n'), ((13387, 13423), 'schema.Optional', 'Optional', (['"""NUM_DATA_POINTS_ACQUIRED"""'], {}), "('NUM_DATA_POINTS_ACQUIRED')\n", (13395, 13423), False, 'from schema import Schema, Optional, Or\n'), ((13438, 13466), 'schema.Optional', 'Optional', (['"""REAL_DATA_POINTS"""'], {}), "('REAL_DATA_POINTS')\n", (13446, 13466), False, 'from schema import Schema, Optional, Or\n'), ((13481, 13508), 'schema.Optional', 'Optional', (['"""LINE_BROADENING"""'], {}), "('LINE_BROADENING')\n", (13489, 13508), False, 'from schema import Schema, Optional, Or\n'), ((13523, 13547), 'schema.Optional', 'Optional', (['"""ZERO_FILLING"""'], {}), "('ZERO_FILLING')\n", (13531, 13547), False, 'from schema import Schema, Optional, Or\n'), ((13562, 13585), 'schema.Optional', 'Optional', (['"""APODIZATION"""'], {}), "('APODIZATION')\n", (13570, 13585), False, 'from schema import Schema, 
Optional, Or\n'), ((13600, 13638), 'schema.Optional', 'Optional', (['"""BASELINE_CORRECTION_METHOD"""'], {}), "('BASELINE_CORRECTION_METHOD')\n", (13608, 13638), False, 'from schema import Schema, Optional, Or\n'), ((13653, 13687), 'schema.Optional', 'Optional', (['"""CHEMICAL_SHIFT_REF_STD"""'], {}), "('CHEMICAL_SHIFT_REF_STD')\n", (13661, 13687), False, 'from schema import Schema, Optional, Or\n'), ((13702, 13730), 'schema.Optional', 'Optional', (['"""BINNED_INCREMENT"""'], {}), "('BINNED_INCREMENT')\n", (13710, 13730), False, 'from schema import Schema, Optional, Or\n'), ((13745, 13789), 'schema.Optional', 'Optional', (['"""BINNED_DATA_NORMALIZATION_METHOD"""'], {}), "('BINNED_DATA_NORMALIZATION_METHOD')\n", (13753, 13789), False, 'from schema import Schema, Optional, Or\n'), ((13804, 13841), 'schema.Optional', 'Optional', (['"""BINNED_DATA_PROTOCOL_FILE"""'], {}), "('BINNED_DATA_PROTOCOL_FILE')\n", (13812, 13841), False, 'from schema import Schema, Optional, Or\n'), ((13856, 13900), 'schema.Optional', 'Optional', (['"""BINNED_DATA_CHEMICAL_SHIFT_RANGE"""'], {}), "('BINNED_DATA_CHEMICAL_SHIFT_RANGE')\n", (13864, 13900), False, 'from schema import Schema, Optional, Or\n'), ((13915, 13953), 'schema.Optional', 'Optional', (['"""BINNED_DATA_EXCLUDED_RANGE"""'], {}), "('BINNED_DATA_EXCLUDED_RANGE')\n", (13923, 13953), False, 'from schema import Schema, Optional, Or\n'), ((14417, 14437), 'schema.Optional', 'Optional', (['"""Extended"""'], {}), "('Extended')\n", (14425, 14437), False, 'from schema import Schema, Optional, Or\n'), ((3388, 3422), 'schema.Optional', 'Optional', (['"""Additional sample data"""'], {}), "('Additional sample data')\n", (3396, 3422), False, 'from schema import Schema, Optional, Or\n'), ((14018, 14067), 'schema.Or', 'Or', (['"""Metabolite"""', '"""Bin range(ppm)"""'], {'only_one': '(True)'}), "('Metabolite', 'Bin range(ppm)', only_one=True)\n", (14020, 14067), False, 'from schema import Schema, Optional, Or\n'), ((14086, 14099), 
'schema.Optional', 'Optional', (['str'], {}), '(str)\n', (14094, 14099), False, 'from schema import Schema, Optional, Or\n'), ((14211, 14224), 'schema.Optional', 'Optional', (['str'], {}), '(str)\n', (14219, 14224), False, 'from schema import Schema, Optional, Or\n'), ((3442, 3467), 'schema.Optional', 'Optional', (['"""RAW_FILE_NAME"""'], {}), "('RAW_FILE_NAME')\n", (3450, 3467), False, 'from schema import Schema, Optional, Or\n'), ((3490, 3503), 'schema.Optional', 'Optional', (['str'], {}), '(str)\n', (3498, 3503), False, 'from schema import Schema, Optional, Or\n')]
|
"""
flux related class and functions
"""
from scipy.integrate import quad
import pandas as pd
from .helper import LinearInterp, polar_to_cartesian, lorentz_boost, lorentz_matrix
from .oscillation import survival_solar
from .parameters import *
def _invs(ev):
return 1/ev**2
class FluxBaseContinuous:
    """Continuous flux tabulated on an energy grid.

    Evaluation linearly interpolates between tabulated points; integration
    uses the trapezoidal rule with per-bin areas cached for each weight
    function object (the key ``None`` holds the unweighted areas).
    NOTE(review): the cache is keyed on the weight-function object itself,
    so passing a fresh lambda on every call defeats the cache.
    """
    def __init__(self, ev, flux, norm=1):
        """
        :param ev: sorted 1-D array of energies
        :param flux: flux values tabulated at ``ev``
        :param norm: overall normalization applied on evaluation/integration
        """
        self.norm = norm
        self.ev = ev
        self.fx = flux
        self.ev_min = self.ev[0]
        self.ev_max = self.ev[-1]
        self.binw = self.ev[1:] - self.ev[:-1]  # width of each energy bin
        # per-bin trapezoid areas, cached keyed by weight function
        self.precalc = {None: self.binw*(self.fx[1:]+self.fx[:-1])/2}
    def __call__(self, ev):
        """Linearly interpolated flux at ``ev`` times norm; 0 outside the grid."""
        if ev == self.ev_min:
            return self.fx[0] * self.norm
        if ev == self.ev_max:
            return self.fx[-1] * self.norm
        if self.ev_min < ev < self.ev_max:
            idx = self.ev.searchsorted(ev)
            l1 = ev - self.ev[idx-1]
            l2 = self.ev[idx] - ev
            h1 = self.fx[idx-1]
            h2 = self.fx[idx]
            return (l1*h2 + l2*h1) / (l1 + l2) * self.norm
        return 0
    def integrate(self, ea, eb, weight_function=None):
        """Trapezoidal integral of flux (optionally times ``weight_function``)
        over [ea, eb], clipped to the tabulated range.

        :param ea: lower integration bound
        :param eb: upper integration bound
        :param weight_function: optional vectorizable weight w(ev); results
            are cached per function object
        :return: integral value scaled by ``self.norm``
        """
        if eb <= ea:
            return 0
        res = 0
        if weight_function not in self.precalc:
            # cache per-bin trapezoid areas of weight(ev)*flux(ev)
            weighted = weight_function(self.ev)*self.fx
            self.precalc[weight_function] = self.binw * (weighted[1:]+weighted[:-1]) / 2
        eb = min(eb, self.ev_max)
        ea = max(ea, self.ev_min)
        idxmin = self.ev.searchsorted(ea, side='right')
        idxmax = self.ev.searchsorted(eb, side='left')
        if idxmin == idxmax:
            # both endpoints fall inside a single bin: one trapezoid
            l1 = ea - self.ev[idxmin - 1]
            l2 = self.ev[idxmin] - ea
            h1 = self.fx[idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
                if weight_function is not None else self.fx[idxmin - 1]
            h2 = self.fx[idxmin] * weight_function(self.ev[idxmin]) \
                if weight_function is not None else self.fx[idxmin]
            ha = (l1*h2+l2*h1)/(l1+l2)
            l1 = eb - self.ev[idxmax - 1]
            l2 = self.ev[idxmax] - eb
            hb = (l1*h2+l2*h1)/(l1+l2)
            return (ha + hb) * (eb - ea) / 2 * self.norm
        # sum of all full interior bins
        res += np.sum(self.precalc[weight_function][idxmin:idxmax-1])
        # left partial bin: [ea, ev[idxmin]]
        l1 = ea - self.ev[idxmin-1]
        l2 = self.ev[idxmin] - ea
        h1 = self.fx[idxmin-1]*weight_function(self.ev[idxmin-1]) \
            if weight_function is not None else self.fx[idxmin-1]
        h2 = self.fx[idxmin]*weight_function(self.ev[idxmin]) \
            if weight_function is not None else self.fx[idxmin]
        res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
        # right partial bin: [ev[idxmax-1], eb]
        l1 = eb - self.ev[idxmax - 1]
        l2 = self.ev[idxmax] - eb
        h1 = self.fx[idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
            if weight_function is not None else self.fx[idxmax-1]
        h2 = self.fx[idxmax] * weight_function(self.ev[idxmax]) \
            if weight_function is not None else self.fx[idxmax]
        res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
        return res * self.norm
class Flux:
    """
    Neutrino flux at the source.

    Supports built-in named sources ('reactor', 'sns'/'prompt'/'delayed',
    solar components) and user-provided tabulated fluxes (7-column file
    or ndarray). Energies are in MeV throughout.
    """
    def __init__(self, fl_name, delimiter=',', fl_unc=0):
        """
        initializing flux, can take in user provided flux
        restrictions: user provided data must have 7 columns,
        first column is neutrino energy in MeV,
        other columns are neutrino flux in cm^2/s/MeV, they are enu, munu, taunu, enubar, munubar, taunubar
        :param fl_name: name of the flux or path to the file or array of neutrino flux
        :param delimiter: delimiter of the input file, default is ','
        :param fl_unc: uncertainty of flux
        """
        if isinstance(fl_name, str):
            self.fl_name = fl_name.lower()
        else:
            # non-string input (e.g. ndarray) is treated as a user flux table
            self.fl_name = 'default'
        if self.fl_name == 'reactor':
            self.evMin = 0.0
            self.evMax = 30  # MeV
            self.flUn = 0.02
            fpers = 3.0921 * (10 ** 16)  # antineutrinos per fission
            nuperf = 6.14102
            # normalization of the flux at 1 m from the core
            self.__nuflux1m = nuperf * fpers / (4 * np.pi) * (meter_by_mev ** 2)
        elif self.fl_name in ['sns', 'prompt', 'delayed']:
            self.evMin = 0
            self.evMax = 52  # MeV (muon decay-at-rest endpoint)
            self.flUn = 0.1
            self.__norm = 1.13 * (10 ** 11) * (meter_by_mev ** 2)
        elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
            # tabulated solar component shipped with the package
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + self.fl_name + '.csv'), delimiter=',')
            self.flUn = 0
            self.evMin = f[0, 0]
            self.evMax = f[-1, 0]
            self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
        else:
            # user-provided flux: ndarray or file path with 7 columns
            if isinstance(fl_name, np.ndarray):
                f = fl_name
            else:
                f = np.genfromtxt(fl_name, delimiter=delimiter)
            self.evMin = np.amin(f[:, 0])
            self.evMax = np.amax(f[:, 0])
            self.flUn = fl_unc
            self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
            self.__numu = LinearInterp(f[:, 0], f[:, 2] * ((100 * meter_by_mev) ** 2))
            self.__nutau = LinearInterp(f[:, 0], f[:, 3] * ((100 * meter_by_mev) ** 2))
            self.__nuebar = LinearInterp(f[:, 0], f[:, 4] * ((100 * meter_by_mev) ** 2))
            self.__numubar = LinearInterp(f[:, 0], f[:, 5] * ((100 * meter_by_mev) ** 2))
            self.__nutaubar = LinearInterp(f[:, 0], f[:, 6] * ((100 * meter_by_mev) ** 2))
    def flux(self, ev, flavor='e', f=None, **kwargs):
        """
        differential neutrino flux at the detector, unit MeV^-3*s^-1
        :param ev: nuetrino energy
        :param flavor: nuetrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
                the first argument must be neutrino energy,
                the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: neutrino flux
        """
        if self.fl_name == 'reactor':
            # Phys.Rev.D39, 11 Vogel
            # 5.323608902707208 = Integrate[Exp[.870 - .16*e - .091*e^2], {e, 0, 10}]
            # reactor neutrino is actually anti-neutrino, this may cause problem when doing electron scattering
            if flavor == 'ebar':
                if f is not None:
                    return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
                           f(ev, nui='ebar', nuf=flavor, **kwargs)
                return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * self.__nuflux1m
            elif flavor[-1] == 'r':
                # other antineutrino flavors only appear through oscillation
                if f is not None:
                    return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
                           f(ev, nui='ebar', nuf=flavor, **kwargs)
                return 0
            else:
                return 0
        elif self.fl_name in ['sns', 'delayed']:
            # delayed flux: Michel-like spectra from mu+ decay at rest
            if flavor[-1] != 'r':
                if f is not None:
                    return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm * \
                           f(ev, nui='e', nuf=flavor, **kwargs)
                return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm \
                    if flavor == 'e' else 0
            else:
                if f is not None:
                    return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm * \
                           f(ev, nui='mubar', nuf=flavor, **kwargs)
                return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm if flavor == 'mubar' else 0
        elif self.fl_name == 'prompt':
            # prompt flux is monochromatic (29 MeV); the delta contribution is
            # added in the fint* integrals, so the continuous part is 0
            return 0
        elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
            if flavor[-1] != 'r':
                if f is None:
                    f = survival_solar
                return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs)
            return 0
        else:
            # user-provided flux: all 6 flavor columns are available
            if flavor[-1] != 'r':
                if f is None:
                    if flavor == 'e':
                        return self.__nue(ev)
                    elif flavor == 'mu':
                        return self.__numu(ev)
                    elif flavor == 'tau':
                        return self.__nutau(ev)
                    else:
                        return 0
                return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs) + \
                    self.__numu(ev) * f(ev, nui='mu', nuf=flavor, **kwargs) + \
                    self.__nutau(ev) * f(ev, nui='tau', nuf=flavor, **kwargs)
            else:
                if f is None:
                    if flavor == 'ebar':
                        return self.__nuebar(ev)
                    elif flavor == 'mubar':
                        return self.__numubar(ev)
                    elif flavor == 'taubar':
                        return self.__nutaubar(ev)
                    else:
                        return 0
                return self.__nuebar(ev) * f(ev, nui='ebar', nuf=flavor, **kwargs) + \
                    self.__numubar(ev) * f(ev, nui='mubar', nuf=flavor, **kwargs) + \
                    self.__nutaubar(ev) * f(ev, nui='taubar', nuf=flavor, **kwargs)
    def fint(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
                the first argument must be neutrino energy,
                the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        # kinematic threshold: minimum neutrino energy producing recoil er
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def fx(ev):
            """integrand: plain flux"""
            return self.flux(ev, flavor, f, **kwargs)
        if not isinstance(emin, np.ndarray):
            res = quad(fx, emin, self.evMax)[0]  # no need to check range, because outside evMin and evMax are 0
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(fx, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
                        if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
                        if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin[i] <= 29 else 0
        return res
    def fintinv(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux/ev integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
                the first argument must be neutrino energy,
                the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def finv(ev):
            """
            flux/ev
            """
            return self.flux(ev, flavor, f, **kwargs) / ev
        if not isinstance(emin, np.ndarray):
            res = quad(finv, emin, self.evMax)[0]
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439 \
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613 \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm / 29 if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(finv, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
                              1.439 if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
                              0.8613 if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm / 29 if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) \
                            if emin[i] <= 29 else 0
        return res
    def fintinvs(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux/ev^2 integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
                the first argument must be neutrino energy,
                the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def finvs(ev):
            """
            flux/ev^2
            """
            return self.flux(ev, flavor, f, **kwargs) / (ev ** 2)
        if not isinstance(emin, np.ndarray):
            res = quad(finvs, emin, self.evMax)[0]
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439**2\
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613**2 \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm / 29**2 if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(finvs, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
                              1.439**2 if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
                              0.8613**2 if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm / 29**2 if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) \
                            if emin[i] <= 29 else 0
        return res
class NeutrinoFluxFactory:
    """Factory building :class:`NeutrinoFlux` objects for known sources.

    Call :meth:`get` with one of the names in ``flux_list``.
    """
    def __init__(self):
        # Names accepted by get(). Bug fixes: 'jsns' is now listed so the
        # JSNS^2 branch of get() is reachable, and get() now matches the
        # listed names 'solar_pep'/'solar_be7' (it previously compared
        # against 'pep'/'be7', which could never pass this gate, so those
        # listed names silently returned None).
        self.flux_list = ['solar', 'solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp',
                          'solar_pep', 'solar_be7', 'coherent', 'coherent_prompt', 'coherent_delayed',
                          'far_beam_nu', 'far_beam_nubar', 'atmospheric', 'jsns', 'jsns_prompt', 'jsns_delayed',
                          'jsns_prompt_continuous', 'near_beam_nu', 'near_beam_nubar', ]
    def print_available(self):
        """Print the list of supported flux names."""
        print(self.flux_list)
    def interp_flux(self, nrg, data):
        """Linearly interpolate a tabulated flux.

        :param nrg: energy (scalar or array) at which to evaluate
        :param data: 2-column array; energies in column 0, flux in column 1
        :return: interpolated flux value(s)
        """
        return np.interp(nrg, data[:, 0], data[:, 1])
    def get(self, flux_name, **kwargs):
        """Construct and return the named neutrino flux.

        :param flux_name: one of the names in ``self.flux_list``
        :param kwargs: extra options ('npoints' for 'coherent_delayed',
                       'zenith' for 'atmospheric')
        :return: a NeutrinoFlux instance
        :raises Exception: if the name is unknown or unhandled
        """
        if flux_name not in self.flux_list:
            print('flux name not in current list: ', self.flux_list)
            raise Exception('flux not found.')
        if flux_name in ['solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp']:
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name[6:] + '.csv'), delimiter=',')
            return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]})
        if flux_name == 'solar':
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name + '.csv'), delimiter=',')
            # continuous spectrum plus the pep and be7 delta lines
            return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]}, delta_fluxes={'e': [(1.439, 1.44e8), (0.8613, 5e9)]})
        if flux_name in ('solar_pep', 'pep'):
            # fixed: previously tested only the legacy name 'pep'
            return NeutrinoFlux(delta_fluxes={'e': [(1.439, 1.44e8), ]})
        if flux_name in ('solar_be7', 'be7'):
            # fixed: previously tested only the legacy name 'be7'
            return NeutrinoFlux(delta_fluxes={'e': [(0.8613, 5e9), ]})
        if flux_name == 'coherent':
            def de(evv):
                # nu_e spectrum from mu+ decay at rest (normalized shape)
                return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
            def dmubar(evv):
                # nubar_mu (Michel) spectrum from mu+ decay at rest
                return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
            ev = np.linspace(0.001, 52, 100)
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)},
                                delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7))  ## default unit is /(cm^2*s)
        if flux_name == 'coherent_delayed':
            def de(evv):
                return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
            def dmubar(evv):
                return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
            ev = np.linspace(0.001, 52, kwargs['npoints'] if 'npoints' in kwargs else 100)
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)}, norm=1.13 * (10 ** 7))
        if flux_name == 'coherent_prompt':
            # monochromatic 29 MeV nu_mu from pi+ decay at rest
            return NeutrinoFlux(delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7))
        if flux_name == 'jsns':
            nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
            nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
            nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
            # normalize each tabulated spectrum to unit area on [0, 300] MeV
            norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
            norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
            norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
            def numuPDF(energy):
                return self.interp_flux(energy, nu_mu) / norm_nu_mu
            def nuePDF(energy):
                return self.interp_flux(energy, nu_e) / norm_nu_e
            def nubarmuPDF(energy):
                return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2  # bin centers
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev), 'mu': numuPDF(ev)},
                                delta_fluxes={'mu': [(29, 1), (236, 0.013)]}, norm=4.9 * (10 ** 7))  ## default unit is /(cm^2*s)
        if flux_name == 'jsns_delayed':
            nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
            nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
            norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
            norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
            def nuePDF(energy):
                return self.interp_flux(energy, nu_e) / norm_nu_e
            def nubarmuPDF(energy):
                return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev)}, norm=3 * (10 ** 7))
        if flux_name == 'jsns_prompt':
            return NeutrinoFlux(delta_fluxes={'mu': [(29, 1), (236, 0.013)]}, norm=1.85 * (10 ** 7))
        if flux_name == 'jsns_prompt_continuous':
            nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
            norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
            def numuPDF(energy):
                return self.interp_flux(energy, nu_mu) / norm_nu_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'mu': numuPDF(ev)},
                                norm=1.85 * (10 ** 4))
        if flux_name == 'far_beam_nu':
            far_beam_txt = 'data/dune_beam_fd_nu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'far_beam_nubar':
            far_beam_txt = 'data/dune_beam_fd_antinu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'near_beam_nu':
            # NOTE(review): near-detector files are read without delimiter
            # (whitespace-separated), unlike the far-detector CSVs — confirm
            # this matches the shipped data format.
            far_beam_txt = 'data/dune_beam_nd_nu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'near_beam_nubar':
            far_beam_txt = 'data/dune_beam_nd_antinu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'atmospheric':
            if 'zenith' not in kwargs:
                raise Exception('please specify zenith angle')
            zen = np.round(kwargs['zenith'], decimals=3)
            zen_list = np.round(np.linspace(-0.975, 0.975, 40), decimals=3)
            if zen not in zen_list:
                print('available choice of zenith angle: ', zen_list)
                raise Exception('zenith angle not available')
            idx = (0.975 - zen) / 0.05 * 61  # each zenith block spans 61 rows
            f_atmos = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/atmos.txt'), delimiter=',')
            nu = {'ev': f_atmos[int(round(idx)):int(round(idx))+61, 0],
                  'e': f_atmos[int(round(idx)):int(round(idx))+61, 2],
                  'mu': f_atmos[int(round(idx)):int(round(idx))+61, 3],
                  'ebar': f_atmos[int(round(idx)):int(round(idx))+61, 5],
                  'mubar': f_atmos[int(round(idx)):int(round(idx))+61, 6]}
            return NeutrinoFlux(continuous_fluxes=nu)
        # defensive: every listed name should have been handled above
        raise Exception('flux %s is listed but not handled.' % flux_name)
class NeutrinoFlux:
    """Neutrino flux composed of continuous spectra and/or delta (line) fluxes.

    Continuous fluxes are tabulated per flavor on a shared energy grid and
    evaluated by linear interpolation; delta fluxes are (energy, weight)
    lines added during integration.  ``norm`` is converted internally to
    natural units via ``(100 * meter_by_mev) ** 2``.
    """
    def __init__(self, continuous_fluxes=None, delta_fluxes=None, norm=1):
        """
        :param continuous_fluxes: dict with key 'ev' (energy grid) and
            per-flavor arrays ('e', 'mu', 'tau', 'ebar', 'mubar', 'taubar')
        :param delta_fluxes: dict mapping flavor to a list of
            (energy, weight) tuples
        :param norm: overall normalization, default unit /(cm^2*s)
        """
        self.norm = norm * ((100 * meter_by_mev) ** 2)
        self.ev_min = None
        self.ev_max = None
        if continuous_fluxes is None:
            self.nu = None
        elif isinstance(continuous_fluxes, dict):
            self.ev = continuous_fluxes['ev']
            sorted_idx = np.argsort(self.ev)  # sort the grid ascending
            self.ev = self.ev[sorted_idx]
            self.ev_min = self.ev[0]
            self.ev_max = self.ev[-1]
            if self.ev_min == 0:
                raise Exception('flux with neutrino energy equal to zeros is not supported. '
                                'please consider using a small value for your lower bound.')
            # per-flavor flux arrays, reordered to match the sorted grid
            self.nu = {'e': continuous_fluxes['e'][sorted_idx] if 'e' in continuous_fluxes else None,
                       'mu': continuous_fluxes['mu'][sorted_idx] if 'mu' in continuous_fluxes else None,
                       'tau': continuous_fluxes['tau'][sorted_idx] if 'tau' in continuous_fluxes else None,
                       'ebar': continuous_fluxes['ebar'][sorted_idx] if 'ebar' in continuous_fluxes else None,
                       'mubar': continuous_fluxes['mubar'][sorted_idx] if 'mubar' in continuous_fluxes else None,
                       'taubar': continuous_fluxes['taubar'][sorted_idx] if 'taubar' in continuous_fluxes else None}
            self.binw = self.ev[1:] - self.ev[:-1]  # bin widths
            # per-bin trapezoid areas per flavor, cached by weight function
            self.precalc = {None: {flr: self.binw*(flx[1:]+flx[:-1])/2 if flx is not None else None for flr, flx in self.nu.items()}}
        else:
            raise Exception('only support dict as input.')
        if delta_fluxes is None:
            self.delta_nu = None
        elif isinstance(delta_fluxes, dict):
            self.delta_nu = {'e': delta_fluxes['e'] if 'e' in delta_fluxes else None,
                             'mu': delta_fluxes['mu'] if 'mu' in delta_fluxes else None,
                             'tau': delta_fluxes['tau'] if 'tau' in delta_fluxes else None,
                             'ebar': delta_fluxes['ebar'] if 'ebar' in delta_fluxes else None,
                             'mubar': delta_fluxes['mubar'] if 'mubar' in delta_fluxes else None,
                             'taubar': delta_fluxes['taubar'] if 'taubar' in delta_fluxes else None}
            for flavor in self.delta_nu:  # grab the maximum energy of the delta fluxes
                if self.delta_nu[flavor] is None:
                    continue
                energies = [self.delta_nu[flavor][i][0] for i in range(len(self.delta_nu[flavor]))]
                if self.ev_max is None or max(energies) > self.ev_max:
                    self.ev_max = max(energies)
        else:
            raise Exception("'delta_fluxes' must be a dictionary of a list of tuples! e.g. {'e': [(12, 4), (14, 15)], ...}")
    def __call__(self, ev, flavor):
        """Interpolated continuous flux of ``flavor`` at ``ev`` times norm.

        Delta lines are NOT included here; they only enter integrate().
        """
        if self.nu is None or self.nu[flavor] is None:
            return 0
        if ev == self.ev_min:
            return self.nu[flavor][0] * self.norm
        if ev == self.ev_max:
            return self.nu[flavor][-1] * self.norm
        if self.ev_min < ev < self.ev_max:
            idx = self.ev.searchsorted(ev)
            l1 = ev - self.ev[idx - 1]
            l2 = self.ev[idx] - ev
            h1 = self.nu[flavor][idx - 1]
            h2 = self.nu[flavor][idx]
            return (l1*h2+l2*h1)/(l1+l2) * self.norm
        return 0
    def integrate(self, ea, eb, flavor, weight_function=None):
        """
        Please avoid using lambda as your weight_function!!!
        :param ea: lower integration bound
        :param eb: upper integration bound
        :param flavor: neutrino flavor key ('e', 'mu', ..., 'taubar')
        :param weight_function: optional vectorizable weight w(ev); results
            are cached keyed on the function object
        :return: integral (delta lines in (ea, eb] plus trapezoidal integral
            of the continuous part), scaled by ``self.norm``
        """
        if eb <= ea:
            return 0
        res = 0
        if self.delta_nu is not None and self.delta_nu[flavor] is not None:
            for deltas in self.delta_nu[flavor]:
                if ea < deltas[0] <= eb:  # self.ev_max should be included with <=
                    res += deltas[1] if weight_function is None else deltas[1]*weight_function(deltas[0])
        if self.nu is not None and self.nu[flavor] is not None:
            if weight_function not in self.precalc:
                # cache per-bin trapezoid areas of weight(ev)*flux(ev) per flavor
                weight = weight_function(self.ev)
                self.precalc[weight_function] = {flr: self.binw*((flx*weight)[1:]+(flx*weight)[:-1])/2
                                                 if flx is not None else None for flr, flx in self.nu.items()}
            eb = min(eb, self.ev_max)
            ea = max(ea, self.ev_min)
            idxmin = self.ev.searchsorted(ea, side='right')
            idxmax = self.ev.searchsorted(eb, side='left')
            if idxmin == idxmax:
                # both endpoints inside a single bin: one trapezoid
                l1 = ea - self.ev[idxmin - 1]
                l2 = self.ev[idxmin] - ea
                h1 = self.nu[flavor][idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
                    if weight_function is not None else self.nu[flavor][idxmin - 1]
                h2 = self.nu[flavor][idxmin] * weight_function(self.ev[idxmin]) \
                    if weight_function is not None else self.nu[flavor][idxmin]
                ha = (l1*h2+l2*h1)/(l1+l2)
                l1 = eb - self.ev[idxmax - 1]
                l2 = self.ev[idxmax] - eb
                hb = (l1*h2+l2*h1)/(l1+l2)
                return (ha + hb) * (eb - ea) / 2 * self.norm
            # full interior bins
            res += np.sum(self.precalc[weight_function][flavor][idxmin:idxmax-1])
            # left partial bin: [ea, ev[idxmin]]
            l1 = ea - self.ev[idxmin-1]
            l2 = self.ev[idxmin] - ea
            h1 = self.nu[flavor][idxmin-1]*weight_function(self.ev[idxmin-1]) \
                if weight_function is not None else self.nu[flavor][idxmin-1]
            h2 = self.nu[flavor][idxmin]*weight_function(self.ev[idxmin]) \
                if weight_function is not None else self.nu[flavor][idxmin]
            res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
            # right partial bin: [ev[idxmax-1], eb]
            l1 = eb - self.ev[idxmax - 1]
            l2 = self.ev[idxmax] - eb
            h1 = self.nu[flavor][idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
                if weight_function is not None else self.nu[flavor][idxmax-1]
            h2 = self.nu[flavor][idxmax] * weight_function(self.ev[idxmax]) \
                if weight_function is not None else self.nu[flavor][idxmax]
            res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
        return res * self.norm
    def change_parameters(self):
        # placeholder for future parameter updates
        pass
class DMFlux:
"""
Dark matter flux at COHERENT
"""
def __init__(self, dark_photon_mass, life_time, coupling_quark, dark_matter_mass,
detector_distance=19.3, pot_mu=0.75, pot_sigma=0.25, size=100000, mono_energy=None):
"""
initialize and generate flux
:param dark_photon_mass: dark photon mass
:param life_time: life time of dark photon in rest frame, unit in micro second
:param coupling_quark: dark photon coupling to quarks
:param dark_matter_mass: mass of dark matter, unit in MeV
:param detector_distance: distance from the detector to the Hg target
:param pot_mu: mean of guassian distribution of proton on target, unit in micro second
:param pot_sigma: std of guassian distribution of proton on target, unit in micro second
:param size: size of sampling dark photons
"""
self.dp_m = dark_photon_mass
self.dm_m = dark_matter_mass
self.epsi_quark = coupling_quark
self.det_dist = detector_distance / meter_by_mev
self.dp_life = life_time * 1e-6 * c_light / meter_by_mev
self.pot_mu = pot_mu * 1e-6 * c_light / meter_by_mev
self.pot_sigma = pot_sigma * 1e-6 * c_light / meter_by_mev
if mono_energy is None:
self.timing, self.energy = self._generate(size)
else:
self.timing, self.energy = self._mono_flux(mono_energy, pot_mu)
self.ed_min = self.energy.min()
self.ed_max = self.energy.max()
self.dm_norm = self.epsi_quark**2*0.23*1e20 / (4*np.pi*(detector_distance**2)*24*3600) * (meter_by_mev**2) * \
self.timing.shape[0] * 2 / size
def _generate(self, size=1000000):
"""
generate dark matter flux at COHERENT
:param size: size of sampling dark photons
:return: time and energy histogram of dark matter
"""
dp_m = self.dp_m
dp_e = ((massofpi+massofp)**2 - massofn**2 + dp_m**2)/(2*(massofpi+massofp))
dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
dp_v = dp_p / dp_e
gamma = dp_e / dp_m
tau = self.dp_life * gamma
tf = np.random.normal(self.pot_mu, self.pot_sigma, size) # POT
t = np.random.exponential(tau, size) # life time of each dark photon
cs = np.random.uniform(-1, 1, size) # direction of each dark photon
# in rest frame
estar = dp_m / 2
pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
pstarx = pstar * cs
pstary = pstar * np.sqrt(1 - cs ** 2)
# boost to lab frame
elab = gamma * (estar + dp_v * pstarx)
plabx = gamma * (pstarx + dp_v * estar)
plaby = pstary
vx = plabx / elab
vy = plaby / elab
timing = []
energy = []
for i in range(size):
a = vx[i] ** 2 + vy[i] ** 2
b = 2 * vx[i] * t[i] * dp_v
cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
if b ** 2 - 4 * a * cc >= 0:
if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
return np.array(timing) / c_light * meter_by_mev * 1e6, np.array(energy)
def _mono_flux(self, e_chi, t_trig, size=1000):
return np.random.normal(loc=t_trig, scale=0.01*t_trig, size=size), np.random.normal(loc=e_chi, scale=0.005*e_chi, size=size)
def flux(self, ev):
"""
dark matter flux
:param ev: dark matter energy
:return: dark matter flux
"""
return 1/(self.ed_max-self.ed_min)*self.dm_norm if self.ed_min <= ev <= self.ed_max else 0
def fint(self, er, m, **kwargs):
"""
flux/(ex^2-mx^2) integration
:param er: recoil energy in MeV
:param m: target nucleus mass in MeV
:param kwargs: other argument
:return: flux/(ex^2-mx^2) integration
"""
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
emin = 0.0 * emin
def integrand(ex):
return self.flux(ex)/(ex**2 - self.dm_m**2)
if not isinstance(emin, np.ndarray):
res = quad(integrand, emin, self.ed_max)[0]
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(integrand, emin[i], self.ed_max)[0]
return res
def fint1(self, er, m, **kwargs):
"""
flux*ex/(ex^2-mx^2) integration
:param er: recoil energy in MeV
:param m: target nucleus mass in MeV
:param kwargs: other argument
:return: flux*ex/(ex^2-mx^2) integration
"""
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
emin = 0.0 * emin
def integrand(ex):
return self.flux(ex) * ex / (ex ** 2 - self.dm_m ** 2)
if not isinstance(emin, np.ndarray):
res = quad(integrand, emin, self.ed_max)[0]
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(integrand, emin[i], self.ed_max)[0]
return res
    def fint2(self, er, m, **kwargs):
        """
        flux*ex^2/(ex^2-mx^2) integration
        :param er: recoil energy in MeV
        :param m: target nucleus mass in MeV
        :param kwargs: other argument
        :return: flux*ex^2/(ex^2-mx^2) integration
        """
        # Kinematic minimum dark-matter energy for a recoil er off a nucleus of mass m.
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        # NOTE(review): threshold zeroed out, as in fint -- confirm intentional.
        emin = 0.0 * emin
        def integrand(ex):
            # Flux weighted by ex^2/(ex^2 - m_chi^2).
            return self.flux(ex) * ex**2 / (ex ** 2 - self.dm_m ** 2)
        if not isinstance(emin, np.ndarray):
            res = quad(integrand, emin, self.ed_max)[0]
        else:
            # Array input: one quadrature per threshold entry.
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(integrand, emin[i], self.ed_max)[0]
        return res
class DMFluxIsoPhoton(FluxBaseContinuous):
    """Dark matter flux from dark photons sourced by an isotropic photon
    distribution.

    Each (energy, count) entry of ``photon_distribution`` is Monte-Carlo
    sampled into dark photons that decay in flight to dark matter pairs;
    the arrival time and energy of each dark matter particle at the
    detector are recorded and binned into an energy histogram.
    """
    def __init__(self, photon_distribution, dark_photon_mass, coupling, dark_matter_mass, life_time=0.001,
                 detector_distance=19.3, pot_rate=5e20, pot_sample=100000, brem_suppress=True,
                 pot_mu=0.7, pot_sigma=0.15, sampling_size=100, nbins=20, verbose=False):
        """
        :param photon_distribution: iterable of (photon energy, counts) pairs
        :param dark_photon_mass: dark photon mass in MeV
        :param coupling: coupling constant epsilon
        :param dark_matter_mass: dark matter mass in MeV
        :param life_time: dark photon rest-frame lifetime, microseconds
        :param detector_distance: target-detector distance, meters
        :param pot_rate: protons on target per day
        :param pot_sample: number of POT represented by photon_distribution
        :param brem_suppress: apply bremsstrahlung phase-space suppression
        :param pot_mu: mean of the POT timing gaussian, microseconds
        :param pot_sigma: std of the POT timing gaussian, microseconds
        :param sampling_size: Monte-Carlo samples drawn per photon entry
        :param nbins: number of energy histogram bins
        :param verbose: print progress information
        """
        self.nbins = nbins
        self.photon_flux = photon_distribution
        self.dp_m = dark_photon_mass
        self.dm_m = dark_matter_mass
        self.epsilon = coupling
        self.life_time = life_time  # input in mus, internal in s
        self.det_dist = detector_distance  # meters
        self.pot_rate = pot_rate  # the number of POT/day in the experiment
        self.pot_mu = pot_mu
        self.pot_sigma = pot_sigma
        self.pot_sample = pot_sample  # the number of POT in photon_distribution
        self.time = []
        self.energy = []
        self.weight = []
        self.norm = 1
        self.sampling_size = sampling_size
        self.supp = brem_suppress  # add phase space suppression
        self.verbose = verbose
        # Sample every photon entry; weights are accumulated un-normalized first.
        for photon_events in photon_distribution:
            if self.verbose:
                print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
            self._generate_single(photon_events, self.sampling_size)
        # Per-event normalization: coupling^2 x POT scaling / detector sphere area,
        # converted to natural units via meter_by_mev.
        normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
                        / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
        self.norm = normalization
        self.weight = [x * self.norm for x in self.weight]
        self.timing = np.array(self.time) * 1e6  # seconds -> microseconds
        hist, bin_edges = np.histogram(self.energy, bins=nbins, weights=self.weight, density=True)
        super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
    def getScaledWeights(self):
        """Return event weights rescaled to per-day counts (undoes the
        1/(24*3600) and meter_by_mev^2 factors of the normalization)."""
        wgt = self.weight
        wgt = [x * self.norm * 24 * 3600 / (meter_by_mev**2) for x in wgt]
        return wgt
    def simulate(self):
        """Re-run the Monte Carlo from the stored photon distribution and
        rebuild the binned energy spectrum."""
        self.time = []
        self.energy = []
        self.weight = []
        normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
                        / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
        self.norm = normalization
        for photon_events in self.photon_flux:
            if self.verbose:
                print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
            self._generate_single(photon_events, self.sampling_size)
        self.weight = [x * self.norm for x in self.weight]
        self.timing = np.array(self.time) * 1e6  # seconds -> microseconds
        hist, bin_edges = np.histogram(self.energy, bins=self.nbins, weights=self.weight, density=True)
        super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
    def _generate_single(self, photon_events, nsamples):
        """Monte-Carlo sample one (photon energy, counts) entry into dark
        matter arrival times/energies at the detector.

        Appends to self.time (seconds), self.energy (MeV) and self.weight
        (un-normalized per-event weight).
        """
        # Initiate photon position, energy and momentum.
        if photon_events[0]**2 < self.dp_m**2:
            # Below threshold: the photon cannot convert to a dark photon.
            return
        dp_m = self.dp_m
        dp_e = photon_events[0]
        dp_p = np.sqrt(dp_e ** 2 - self.dp_m ** 2)
        dp_momentum = np.array([dp_e, 0, 0, dp_p])
        # dark photon to dark matter
        dm_m = self.dm_m
        dm_e = self.dp_m / 2
        dm_p = np.sqrt(dm_e ** 2 - dm_m ** 2)
        # Directional sampling.
        dp_wgt = photon_events[1] / nsamples  # Event weight
        # Brem suppression
        if self.supp == True:
            el_e = 1.0773*dp_e + 13.716  # most likely electron energy that produced this dark photon
            supp_fact = min(1, 1154 * np.exp(-24.42 * np.power(dp_m/el_e, 0.3174)))
            dp_wgt *= supp_fact
        ## optimize
        #pos = np.zeros(3) ## optimize
        # Production time (gaussian POT pulse) plus boosted dark photon lifetime.
        t = np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6, nsamples)
        t_dp = np.random.exponential(1e-6 * self.life_time * dp_momentum[0] / dp_m, nsamples)
        t += t_dp
        # Isotropic decay direction in the dark photon rest frame.
        csd = np.random.uniform(-1, 1, nsamples)
        phid = np.random.uniform(0, 2 * np.pi, nsamples)
        boost_matr = lorentz_matrix(np.array([-dp_momentum[1] / dp_momentum[0],
                                              -dp_momentum[2] / dp_momentum[0],
                                              -dp_momentum[3] / dp_momentum[0]]))
        pos_z = c_light * t_dp * dp_momentum[3] / dp_momentum[0]  # position is along z by construction
        for i in range(nsamples):
            dm_momentum = np.array([dm_e, dm_p * np.sqrt(1 - csd[i] ** 2) * np.cos(phid[i]),
                                      dm_p * np.sqrt(1 - csd[i] ** 2) * np.sin(phid[i]), dm_p * csd[i]])
            dm_momentum = boost_matr @ dm_momentum
            # dark matter arrives at detector, assuming azimuthal symmetric
            # append the time and energy spectrum of the DM.
            # DM particle 1
            v = dm_momentum[1:] / dm_momentum[0] * c_light
            # Solve |pos + v*t_dm| = det_dist for t_dm (quadratic in t_dm).
            a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2] #np.sum(v ** 2)
            b = 2*v[2]*pos_z[i] # dot product is along z by construction
            c = pos_z[i]**2 - self.det_dist ** 2
            if b ** 2 - 4 * a * c >= 0:
                t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append(dm_momentum[0])
                    self.weight.append(dp_wgt)
                t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append(dm_momentum[0])
                    self.weight.append(dp_wgt)
            # DM particle 2 (recoiling partner carries the remaining 4-momentum)
            v = (dp_momentum - dm_momentum)[1:] / (dp_momentum - dm_momentum)[0] * c_light
            a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2] #np.sum(v ** 2)
            # NOTE(review): the duplicated `b = b =` below is redundant but harmless.
            b = b = 2*v[2]*pos_z[i]
            c = pos_z[i]**2 - self.det_dist ** 2
            if b ** 2 - 4 * a * c >= 0:
                t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append((dp_momentum - dm_momentum)[0])
                    self.weight.append(dp_wgt)
                t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append((dp_momentum - dm_momentum)[0])
                    self.weight.append(dp_wgt)
    def fint(self, er, m):
        """Integrate flux/(ev^2 - m_chi^2) above the kinematic threshold.

        NOTE(review): self.ev_max is not assigned in this class; presumably
        provided by FluxBaseContinuous -- verify.
        """
        if np.isscalar(m):
            m = np.array([m])
        # Kinematic minimum dark-matter energy for recoil er off mass m.
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
        return res
    def fint1(self, er, m):
        """Integrate flux*ev/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
        return res
    def fint2(self, er, m):
        """Integrate flux*ev^2/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
        return res
    def f0(self, ev):
        # Propagator weight 1/(ev^2 - m_chi^2).
        return 1/(ev**2 - self.dm_m**2)
    def f1(self, ev):
        # Propagator weight ev/(ev^2 - m_chi^2).
        return ev/(ev**2 - self.dm_m**2)
    def f2(self, ev):
        # Propagator weight ev^2/(ev^2 - m_chi^2).
        return ev**2 / (ev**2 - self.dm_m**2)
class DMFluxFromPiMinusAbsorption:
    r"""
    Dark matter flux from pi^- + p -> A^\prime + n -> \chi + \chi + n
    """
    def __init__(self, dark_photon_mass, coupling_quark, dark_matter_mass, life_time=0.001,
                 detector_distance=19.3, pot_rate=5e20, pot_mu=0.7, pot_sigma=0.15, pion_rate=18324/500000,
                 sampling_size=100000):
        """
        initialize and generate flux
        default values are COHERENT experiment values
        :param dark_photon_mass: dark photon mass
        :param life_time: life time of dark photon in rest frame, unit in micro second
        :param coupling_quark: dark photon coupling to quarks divided by electron charge
        :param dark_matter_mass: mass of dark matter, unit in MeV
        :param detector_distance: distance from the detector to the target
        :param pot_rate: proton on target rate, unit POT/day
        :param pot_mu: mean of guassian distribution of proton on target, unit in micro second
        :param pot_sigma: std of guassian distribution of proton on target, unit in micro second
        :param pion_rate: pi^- production rate
        :param sampling_size: size of sampling dark photons
        """
        self.dp_m = dark_photon_mass
        self.dm_m = dark_matter_mass
        self.epsi_quark = coupling_quark
        self.det_dist = detector_distance / meter_by_mev
        self.life_time = life_time  # input in mus, internal in s
        self.pot_mu = pot_mu
        self.pot_sigma = pot_sigma
        self.pot_rate = pot_rate
        self.pion_rate = pion_rate
        self.sampling_size = sampling_size
        self.timing = []
        self.energy = []
        self.ed_min = None
        self.ed_max = None
        self.norm = None
        self.simulate()
        self.ev_min = self.ed_min
        self.ev_max = self.ed_max
    def get_lifetime(self, g, m):
        """Rest-frame dark photon lifetime for coupling g and mass m,
        converted via mev_per_hz."""
        return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
    def simulate(self):
        """
        generate dark matter flux
        """
        # First check that the dp mass is less than the pi- mass.
        if self.dp_m > massofpi:
            self.norm = 0.0
            return
        dp_m = self.dp_m
        # Two-body kinematics of pi^- + p -> A' + n at rest.
        dp_e = ((massofpi + massofp) ** 2 - massofn ** 2 + dp_m ** 2) / (2 * (massofpi + massofp))
        dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
        dp_v = dp_p / dp_e
        gamma = dp_e / dp_m
        # Boosted lifetime in natural (MeV^-1) units.
        tau = (self.life_time * 1e-6 * c_light / meter_by_mev) * gamma
        tf = np.random.normal(self.pot_mu * 1e-6 * c_light / meter_by_mev,
                              self.pot_sigma * 1e-6 * c_light / meter_by_mev,
                              self.sampling_size)  # POT
        t = np.random.exponential(tau, self.sampling_size)  # life time of each dark photon
        cs = np.random.uniform(-1, 1, self.sampling_size)  # direction of each dark photon
        # in rest frame
        estar = dp_m / 2
        pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
        pstarx = pstar * cs
        pstary = pstar * np.sqrt(1 - cs ** 2)
        # boost to lab frame
        elab = gamma * (estar + dp_v * pstarx)
        plabx = gamma * (pstarx + dp_v * estar)
        plaby = pstary
        vx = plabx / elab
        vy = plaby / elab
        timing = []
        energy = []
        for i in range(self.sampling_size):
            # Intersect the dark matter trajectory with the detector sphere:
            # quadratic a*t^2 + b*t + cc = 0 in the time of flight.
            a = vx[i] ** 2 + vy[i] ** 2
            b = 2 * vx[i] * t[i] * dp_v
            cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
            if b ** 2 - 4 * a * cc >= 0:
                if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
                    timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
                    energy.append(elab[i])
                if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
                    timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
                    energy.append(elab[i])
        self.timing = np.array(timing) / c_light * meter_by_mev * 1e6  # -> microseconds
        self.energy = np.array(energy)
        self.ed_min = min(energy)
        self.ed_max = max(energy)
        self.ev_min = self.ed_min
        self.ev_max = self.ed_max
        self.norm = self.epsi_quark ** 2 * self.pot_rate * self.pion_rate / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * \
                    self.timing.shape[0] * 2 / self.sampling_size
    def __call__(self, ev):
        """
        dark matter flux, the spectrum is flat because of isotropic
        :param ev: dark matter energy
        :return: dark matter flux
        """
        return 1 / (self.ed_max - self.ed_min) * self.norm if self.ed_min <= ev <= self.ed_max else 0
    def integrate(self, ea, eb, weight_function=None):
        """
        adaptive quadrature can achieve almost linear time on simple weight function, no need to do precalculation
        :param ea: lowerbound
        :param eb: upperbound
        :param weight_function: weight function
        :return: integration of the flux, weighted by the weight function
        """
        if eb <= ea:
            return 0
        eb = min(eb, self.ed_max)
        ea = max(ea, self.ed_min)
        if weight_function is None:
            return (eb - ea) / (self.ed_max - self.ed_min) * self.norm
        return quad(weight_function, ea, eb, epsrel=1e-3)[0] / (self.ed_max - self.ed_min) * self.norm
    def change_parameters(self, dark_photon_mass=None, life_time=None, coupling_quark=None, dark_matter_mass=None,
                          detector_distance=None, pot_rate=None, pot_mu=None, pot_sigma=None, pion_rate=None, sampling_size=None):
        """
        Update any subset of the model parameters and re-run the simulation.

        All parameters use the same units and conventions as __init__;
        passing None keeps the current value.
        """
        self.dp_m = dark_photon_mass if dark_photon_mass is not None else self.dp_m
        # BUG FIX: previously wrote to the nonexistent attribute `dp_life`
        # (raising AttributeError when life_time was None, and never affecting
        # the simulation otherwise). simulate() reads `life_time` in
        # microseconds and converts internally, matching __init__.
        self.life_time = life_time if life_time is not None else self.life_time
        self.epsi_quark = coupling_quark if coupling_quark is not None else self.epsi_quark
        self.dm_m = dark_matter_mass if dark_matter_mass is not None else self.dm_m
        self.det_dist = detector_distance / meter_by_mev if detector_distance is not None else self.det_dist
        self.pot_rate = pot_rate if pot_rate is not None else self.pot_rate
        # BUG FIX: pot_mu/pot_sigma are stored in microseconds (as in __init__);
        # simulate() performs the unit conversion, so converting here would
        # have applied the conversion twice.
        self.pot_mu = pot_mu if pot_mu is not None else self.pot_mu
        self.pot_sigma = pot_sigma if pot_sigma is not None else self.pot_sigma
        # BUG FIX: previously `self.pion_rate if pion_rate is not None ...`,
        # a no-op that silently ignored the new value.
        self.pion_rate = pion_rate if pion_rate is not None else self.pion_rate
        self.sampling_size = sampling_size if sampling_size is not None else self.sampling_size
        self.simulate()
    def fint(self, er, m):
        """Integrate flux/(ev^2 - m_chi^2) above the kinematic threshold
        for recoil energy er off a nucleus of mass m (MeV)."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
        return res
    def fint1(self, er, m):
        """Integrate flux*ev/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
        return res
    def fint2(self, er, m):
        """Integrate flux*ev^2/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
        return res
    def f0(self, ev):
        # Propagator weight 1/(ev^2 - m_chi^2).
        return 1/(ev**2 - self.dm_m**2)
    def f1(self, ev):
        # Propagator weight ev/(ev^2 - m_chi^2).
        return ev/(ev**2 - self.dm_m**2)
    def f2(self, ev):
        # Propagator weight ev^2/(ev^2 - m_chi^2).
        return ev**2 / (ev**2 - self.dm_m**2)
class DMFluxFromPi0Decay(FluxBaseContinuous):
    """
    z direction is the direction of the beam

    Dark matter flux from neutral meson decay (pi0 -> gamma + A',
    A' -> chi chi), Monte-Carlo sampled from a pi0 production
    distribution and binned into an energy histogram.
    """
    def __init__(self, pi0_distribution, dark_photon_mass, coupling_quark, dark_matter_mass, meson_mass=massofpi0, life_time=0.001,
                 detector_distance=19.3, detector_direction=0, detector_width=0.1, pot_rate=5e20, pot_mu=0.7,
                 pot_sigma=0.15, pion_rate=52935/500000, nbins=20):
        """
        :param pi0_distribution: iterable of [azimuth, cos(zenith), kinetic energy] entries
        :param dark_photon_mass: dark photon mass in MeV
        :param coupling_quark: dark photon coupling to quarks
        :param dark_matter_mass: dark matter mass in MeV
        :param meson_mass: decaying meson mass (default pi0)
        :param life_time: dark photon rest-frame lifetime, microseconds
        :param detector_distance: target-detector distance, meters
        :param detector_direction: cos(zenith) of the detector center
        :param detector_width: angular acceptance around detector_direction
        :param pot_rate: protons on target per day
        :param pot_mu: mean of the POT timing gaussian, microseconds
        :param pot_sigma: std of the POT timing gaussian, microseconds
        :param pion_rate: pi0 production rate per POT
        :param nbins: number of energy histogram bins
        """
        self.pi0_distribution = pi0_distribution
        self.dp_m = dark_photon_mass
        self.life_time = life_time
        self.epsilon = coupling_quark  # input in mus, internal in s
        self.dm_m = dark_matter_mass
        self.meson_mass = meson_mass
        self.det_dist = detector_distance
        self.det_direc = detector_direction
        self.det_width = detector_width
        self.pot_rate = pot_rate
        self.pot_mu = pot_mu
        self.pot_sigma = pot_sigma
        self.pion_rate = pion_rate
        self.time = []
        self.energy = []
        self.nbins = nbins
        self.dm_m = dark_matter_mass
        for pi0_events in pi0_distribution:  # must be in the form [azimuth, cos(zenith), kinetic energy]
            self._generate_single(pi0_events)
        self.timing = np.array(self.time)*1e6  # seconds -> microseconds
        hist, bin_edges = np.histogram(self.energy, bins=nbins, density=True)
        # Branching-ratio-like phase-space factor; heaviside kills the flux
        # when the dark photon is heavier than the meson.
        ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
        super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist,
                         norm=ps_factor*pot_rate*pion_rate*len(self.time)/len(pi0_distribution)/
                              (2*np.pi*(min(1.0, detector_direction+detector_width/2)-max(-1.0, detector_direction-detector_width/2))*detector_distance**2*24*3600)
                              *(meter_by_mev**2))
    def get_lifetime(self, g, m):
        """Rest-frame dark photon lifetime for coupling g and mass m,
        converted via mev_per_hz."""
        return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
    def simulate(self):
        """Re-run the Monte Carlo from the stored pi0 distribution and
        rebuild the binned energy spectrum."""
        self.time = []
        self.energy = []
        for pi0_events in self.pi0_distribution:  # must be in the form [azimuth, cos(zenith), kinetic energy]
            self._generate_single(pi0_events)
        self.timing = np.array(self.time)*1e6  # seconds -> microseconds
        hist, bin_edges = np.histogram(self.energy, bins=self.nbins, density=True)
        ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
        norm = ps_factor * self.pot_rate * self.pion_rate * \
               len(self.time)/len(self.pi0_distribution)/ \
               (2*np.pi*(min(1.0, self.det_direc+self.det_width/2)-max(-1.0, self.det_direc-self.det_width/2))*self.det_dist**2*24*3600)*(meter_by_mev**2)
        super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist, norm=norm)
    def _generate_single(self, pi0_events):
        """Monte-Carlo sample one pi0 entry ([azimuth, cos(zenith), kinetic
        energy]) into dark matter arrival times/energies at the detector."""
        if self.dp_m > self.meson_mass:
            # Kinematically forbidden: meson cannot decay to the dark photon.
            return
        pos = np.zeros(3)
        t = 0
        # Production time from the gaussian POT pulse.
        t += np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6)
        pi_e = self.meson_mass + pi0_events[2]
        pi_p = np.sqrt(pi_e**2 - self.meson_mass**2)
        pi_v = pi_p / pi_e
        # Boosted pi0 lifetime (rest-frame lifetime 8.4e-17 s).
        t_pi = np.random.exponential(8.4e-17*pi_e/self.meson_mass)
        pos += pi_v * polar_to_cartesian(pi0_events[:2]) * t_pi * c_light
        t += t_pi
        # pi0 to dark photon
        dp_m = self.dp_m
        dp_e = (self.meson_mass**2 + dp_m**2)/(2*self.meson_mass)
        dp_p = (self.meson_mass**2 - dp_m**2)/(2*self.meson_mass)
        # Isotropic decay direction in the pi0 rest frame.
        cs = np.random.uniform(-1, 1)
        phi = np.random.uniform(0, 2*np.pi)
        dp_momentum = np.array([dp_e, dp_p*np.sqrt(1-cs**2)*np.cos(phi), dp_p*np.sqrt(1-cs**2)*np.sin(phi), dp_p*cs])
        dp_momentum = lorentz_boost(dp_momentum, -pi_v*polar_to_cartesian(pi0_events[:2]))
        # Dark photon decay vertex after its boosted lifetime.
        t_dp = np.random.exponential((self.life_time*1e-6)*dp_momentum[0]/dp_m)
        pos += c_light*t_dp*np.array([dp_momentum[1]/dp_momentum[0], dp_momentum[2]/dp_momentum[0], dp_momentum[3]/dp_momentum[0]])
        t += t_dp
        # dark photon to dark matter
        dm_m = self.dm_m
        dm_e = dp_m / 2
        dm_p = np.sqrt(dm_e**2 - dm_m**2)
        csd = np.random.uniform(-1, 1)
        phid = np.random.uniform(0, 2*np.pi)
        dm_momentum = np.array([dm_e, dm_p*np.sqrt(1-csd**2)*np.cos(phid), dm_p*np.sqrt(1-csd**2)*np.sin(phid), dm_p*csd])
        dm_momentum = lorentz_boost(dm_momentum, np.array([-dp_momentum[1]/dp_momentum[0],
                                                             -dp_momentum[2]/dp_momentum[0],
                                                             -dp_momentum[3]/dp_momentum[0]]))
        # dark matter arrives at detector, assuming azimuthal symmetric
        # Intersect the trajectory with the detector sphere: a*t^2 + b*t + c = 0.
        v = dm_momentum[1:]/dm_momentum[0]*c_light
        a = np.sum(v**2)
        b = 2*np.sum(v*pos) #2 * v[2] * (c_light * dp_p / dp_e) * t_dp
        c = np.sum(pos**2) - self.det_dist**2
        if b**2 - 4*a*c >= 0:
            t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
                self.time.append(t+t_dm)
                self.energy.append(dm_momentum[0])
            t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
                self.time.append(t+t_dm)
                self.energy.append(dm_momentum[0])
        # Second dark matter particle (the recoiling decay partner).
        v = (dp_momentum-dm_momentum)[1:]/(dp_momentum-dm_momentum)[0]*c_light
        a = np.sum(v**2)
        b = 2*np.sum(v*pos)
        c = np.sum(pos**2) - self.det_dist**2
        if b**2 - 4*a*c >= 0:
            t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
                self.time.append(t+t_dm)
                self.energy.append((dp_momentum-dm_momentum)[0])
            t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
                self.time.append(t+t_dm)
                self.energy.append((dp_momentum-dm_momentum)[0])
    def to_pandas(self):
        """Return the sampled (time, energy) pairs as a pandas DataFrame."""
        return pd.DataFrame({'time': self.time, 'energy': self.energy})
    def fint(self, er, m):
        """Integrate flux/(ev^2 - m_chi^2) above the kinematic threshold.

        NOTE(review): self.ev_max is not assigned in this class; presumably
        provided by FluxBaseContinuous -- verify.
        """
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
        return res
    def fint1(self, er, m):
        """Integrate flux*ev/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
        return res
    def fint2(self, er, m):
        """Integrate flux*ev^2/(ev^2 - m_chi^2) above the kinematic threshold."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
        return res
    def f0(self, ev):
        # Propagator weight 1/(ev^2 - m_chi^2).
        return 1/(ev**2 - self.dm_m**2)
    def f1(self, ev):
        # Propagator weight ev/(ev^2 - m_chi^2).
        return ev/(ev**2 - self.dm_m**2)
    def f2(self, ev):
        # Propagator weight ev^2/(ev^2 - m_chi^2).
        return ev**2 / (ev**2 - self.dm_m**2)
|
[
"pandas.DataFrame",
"scipy.integrate.quad"
] |
[((61687, 61743), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': self.time, 'energy': self.energy}"], {}), "({'time': self.time, 'energy': self.energy})\n", (61699, 61743), True, 'import pandas as pd\n'), ((10282, 10308), 'scipy.integrate.quad', 'quad', (['fx', 'emin', 'self.evMax'], {}), '(fx, emin, self.evMax)\n', (10286, 10308), False, 'from scipy.integrate import quad\n'), ((13162, 13190), 'scipy.integrate.quad', 'quad', (['finv', 'emin', 'self.evMax'], {}), '(finv, emin, self.evMax)\n', (13166, 13190), False, 'from scipy.integrate import quad\n'), ((16078, 16107), 'scipy.integrate.quad', 'quad', (['finvs', 'emin', 'self.evMax'], {}), '(finvs, emin, self.evMax)\n', (16082, 16107), False, 'from scipy.integrate import quad\n'), ((21265, 21309), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nu_e,)'}), '(self.interp_flux, 0, 300, args=(nu_e,))\n', (21269, 21309), False, 'from scipy.integrate import quad\n'), ((21338, 21383), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nu_mu,)'}), '(self.interp_flux, 0, 300, args=(nu_mu,))\n', (21342, 21383), False, 'from scipy.integrate import quad\n'), ((21415, 21463), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nubar_mu,)'}), '(self.interp_flux, 0, 300, args=(nubar_mu,))\n', (21419, 21463), False, 'from scipy.integrate import quad\n'), ((22461, 22505), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nu_e,)'}), '(self.interp_flux, 0, 300, args=(nu_e,))\n', (22465, 22505), False, 'from scipy.integrate import quad\n'), ((22537, 22585), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nubar_mu,)'}), '(self.interp_flux, 0, 300, args=(nubar_mu,))\n', (22541, 22585), False, 'from scipy.integrate import quad\n'), ((23387, 23432), 'scipy.integrate.quad', 'quad', (['self.interp_flux', '(0)', '(300)'], {'args': '(nu_mu,)'}), '(self.interp_flux, 0, 300, 
args=(nu_mu,))\n', (23391, 23432), False, 'from scipy.integrate import quad\n'), ((37603, 37637), 'scipy.integrate.quad', 'quad', (['integrand', 'emin', 'self.ed_max'], {}), '(integrand, emin, self.ed_max)\n', (37607, 37637), False, 'from scipy.integrate import quad\n'), ((38375, 38409), 'scipy.integrate.quad', 'quad', (['integrand', 'emin', 'self.ed_max'], {}), '(integrand, emin, self.ed_max)\n', (38379, 38409), False, 'from scipy.integrate import quad\n'), ((39154, 39188), 'scipy.integrate.quad', 'quad', (['integrand', 'emin', 'self.ed_max'], {}), '(integrand, emin, self.ed_max)\n', (39158, 39188), False, 'from scipy.integrate import quad\n'), ((11300, 11329), 'scipy.integrate.quad', 'quad', (['fx', 'emin[i]', 'self.evMax'], {}), '(fx, emin[i], self.evMax)\n', (11304, 11329), False, 'from scipy.integrate import quad\n'), ((14144, 14175), 'scipy.integrate.quad', 'quad', (['finv', 'emin[i]', 'self.evMax'], {}), '(finv, emin[i], self.evMax)\n', (14148, 14175), False, 'from scipy.integrate import quad\n'), ((17072, 17104), 'scipy.integrate.quad', 'quad', (['finvs', 'emin[i]', 'self.evMax'], {}), '(finvs, emin[i], self.evMax)\n', (17076, 17104), False, 'from scipy.integrate import quad\n'), ((37761, 37798), 'scipy.integrate.quad', 'quad', (['integrand', 'emin[i]', 'self.ed_max'], {}), '(integrand, emin[i], self.ed_max)\n', (37765, 37798), False, 'from scipy.integrate import quad\n'), ((38533, 38570), 'scipy.integrate.quad', 'quad', (['integrand', 'emin[i]', 'self.ed_max'], {}), '(integrand, emin[i], self.ed_max)\n', (38537, 38570), False, 'from scipy.integrate import quad\n'), ((39312, 39349), 'scipy.integrate.quad', 'quad', (['integrand', 'emin[i]', 'self.ed_max'], {}), '(integrand, emin[i], self.ed_max)\n', (39316, 39349), False, 'from scipy.integrate import quad\n'), ((52749, 52792), 'scipy.integrate.quad', 'quad', (['weight_function', 'ea', 'eb'], {'epsrel': '(0.001)'}), '(weight_function, ea, eb, epsrel=0.001)\n', (52753, 52792), False, 'from scipy.integrate 
import quad\n')]
|
# Author : <NAME> "blackdaemon"
# Email : <EMAIL>
#
# Copyright (c) 2010, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.minimessages
#
# ----------------------------------------------------------------------------
"""
An Enso plugin that makes all mini-messages related commands available.
Commands:
hide mini messages
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from xml.sax.saxutils import escape as xml_escape
import enso.messages
from enso.commands import CommandManager, CommandObject
from enso.commands.factories import ArbitraryPostfixFactory
from enso.contrib.scriptotron.ensoapi import EnsoApi
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.messages import MessageManager, TimedMiniMessage
ensoapi = EnsoApi()
# ----------------------------------------------------------------------------
# The 'hide mini messages' command
# ---------------------------------------------------------------------------
class HideMiniMessagesCommand(CommandObject):
    """Implements the 'hide mini messages' command, which dismisses every
    mini message currently on screen."""

    NAME = "hide mini messages"
    DESCRIPTION = "Hides all mini messages."

    def __init__(self):
        super(HideMiniMessagesCommand, self).__init__()
        # Register the human-readable metadata with the command framework.
        self.setName(self.NAME)
        self.setDescription(self.DESCRIPTION)

    @safetyNetted
    def run(self):
        # Ask the message manager to finish (dismiss) everything it shows.
        manager = MessageManager.get()
        manager.finishMessages()
# ----------------------------------------------------------------------------
# The 'show mini message' testing command
# ---------------------------------------------------------------------------
class ShowMiniMessageCommand(CommandObject):
    """
    The 'show mini message {text}' command.

    The postfix may be either plain text, or "timeout,text" where the
    timeout is an integer number of seconds. An empty postfix displays a
    random lorem-ipsum snippet.
    """
    LOREMIPSUM = u"Lorem ipsum dolor sit amet, consectetur adipiscing elit. "\
        "Nunc fringilla ipsum dapibus mi porta et laoreet turpis porta. Class aptent "\
        "taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. "\
        "Duis commodo massa nec arcu mollis auctor. Nunc et orci quis lacus suscipit "\
        "dictum eu vitae est. Donec neque massa, pretium sed venenatis sed, consequat "\
        "quis est. Proin auctor consequat euismod. Praesent iaculis placerat libero eu "\
        "gravida. Curabitur ullamcorper velit sit amet tortor fermentum fringilla. "\
        "Pellentesque non lectus mauris, a iaculis ipsum. Cum sociis natoque penatibus "\
        "et magnis dis parturient montes, nascetur ridiculus mus. Vivamus mauris nibh, "\
        "ultrices in accumsan in, bibendum sed mi. Ut ut nunc a mi vestibulum luctus. "\
        "Sed ornare euismod justo a condimentum."
    def __init__(self, postfix):
        super(ShowMiniMessageCommand, self).__init__()
        self._postfix = postfix
        self._msgmanager = MessageManager.get()
    @safetyNetted
    def run(self):
        import random
        text = self._postfix
        if text and "," in text:
            # BUG FIX: split at most once so message text may itself contain
            # commas (previously "5,a,b" raised ValueError on unpacking).
            timeout, text = text.split(",", 1)
            # Negative timeouts are clamped to 0 (no timed caption).
            timeout = max(int(timeout), 0)
        else:
            timeout = None
        if not text:
            # No text given: show a random lorem-ipsum excerpt of 5-10 words.
            pos = random.randint(0, self.LOREMIPSUM.count(" ") - 10 + 1)
            cnt = random.randint(5, 10)
            words = self.LOREMIPSUM.split()
            text = " ".join(words[pos:pos + cnt])
            # Add ellipses when the excerpt starts/ends mid-sentence.
            if text[0].upper() != text[0]:
                text = "..." + text
            if text[-1] != ".":
                text = text + "..."
        if timeout:
            caption = "test message (timed %ds)" % timeout
        else:
            caption = "test message"
        # Escape both strings before embedding them in the message XML.
        msg = xml_escape(text)
        caption = xml_escape(caption)
        if caption:
            xmltext = u"<p>%s</p><caption>%s</caption>" % (msg, caption)
        else:
            xmltext = u"<p>%s</p>" % (msg)
        msg = TimedMiniMessage(
            primaryXml=None,
            miniXml=xmltext,
            waitTime=timeout
        )
        self._msgmanager.newMessage(msg)
class ShowMiniMessageFactory(ArbitraryPostfixFactory):
    """Factory that builds 'show mini message {timeout,text}' commands
    from arbitrary postfix text typed by the user."""

    PREFIX = "show mini message "
    DESCRIPTION = "Show mini message with given timeout and text, both optional."
    HELP_TEXT = "{timeout,text}"
    NAME = PREFIX + HELP_TEXT

    def _generateCommandObj(self, postfix):
        # Wrap the user's postfix in a command object and attach the
        # metadata the command framework expects.
        command = ShowMiniMessageCommand(postfix)
        command.setName(self.NAME)
        command.setDescription(self.DESCRIPTION)
        command.setHelp(self.HELP_TEXT)
        return command
class ShowRecentMessageCommand(CommandObject):
    """Implements the 'show recent message' command, which redisplays the
    most recent Enso message if one exists."""

    NAME = "show recent message"
    DESCRIPTION = "Show recent message."

    def __init__(self):
        super(ShowRecentMessageCommand, self).__init__()
        # Register the human-readable metadata with the command framework.
        self.setName(self.NAME)
        self.setDescription(self.DESCRIPTION)

    @safetyNetted
    def run(self):
        shown = enso.messages.displayRecentMessage()
        if not shown:
            # History is empty; tell the user rather than failing silently.
            ensoapi.display_message(u"No recent messages.")
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
    """Register all mini-message commands with Enso's command manager."""
    manager = CommandManager.get()
    manager.registerCommand(
        HideMiniMessagesCommand.NAME, HideMiniMessagesCommand())
    manager.registerCommand(
        ShowMiniMessageFactory.NAME, ShowMiniMessageFactory())
    manager.registerCommand(
        ShowRecentMessageCommand.NAME, ShowRecentMessageCommand())
# vim:set tabstop=4 shiftwidth=4 expandtab:
|
[
"random.randint",
"enso.messages.MessageManager.get",
"enso.commands.CommandManager.get",
"enso.messages.TimedMiniMessage",
"enso.contrib.scriptotron.ensoapi.EnsoApi",
"xml.sax.saxutils.escape"
] |
[((2409, 2418), 'enso.contrib.scriptotron.ensoapi.EnsoApi', 'EnsoApi', ([], {}), '()\n', (2416, 2418), False, 'from enso.contrib.scriptotron.ensoapi import EnsoApi\n'), ((6830, 6850), 'enso.commands.CommandManager.get', 'CommandManager.get', ([], {}), '()\n', (6848, 6850), False, 'from enso.commands import CommandManager, CommandObject\n'), ((4422, 4442), 'enso.messages.MessageManager.get', 'MessageManager.get', ([], {}), '()\n', (4440, 4442), False, 'from enso.messages import MessageManager, TimedMiniMessage\n'), ((5217, 5233), 'xml.sax.saxutils.escape', 'xml_escape', (['text'], {}), '(text)\n', (5227, 5233), True, 'from xml.sax.saxutils import escape as xml_escape\n'), ((5252, 5271), 'xml.sax.saxutils.escape', 'xml_escape', (['caption'], {}), '(caption)\n', (5262, 5271), True, 'from xml.sax.saxutils import escape as xml_escape\n'), ((5438, 5506), 'enso.messages.TimedMiniMessage', 'TimedMiniMessage', ([], {'primaryXml': 'None', 'miniXml': 'xmltext', 'waitTime': 'timeout'}), '(primaryXml=None, miniXml=xmltext, waitTime=timeout)\n', (5454, 5506), False, 'from enso.messages import MessageManager, TimedMiniMessage\n'), ((4808, 4829), 'random.randint', 'random.randint', (['(5)', '(10)'], {}), '(5, 10)\n', (4822, 4829), False, 'import random\n'), ((2997, 3017), 'enso.messages.MessageManager.get', 'MessageManager.get', ([], {}), '()\n', (3015, 3017), False, 'from enso.messages import MessageManager, TimedMiniMessage\n')]
|
import unittest
class PerlinTestCase(unittest.TestCase):
    """Sanity checks for the 1D/2D/3D classic Perlin noise bindings:
    all outputs must stay in [-1, 1] and the `base` argument must
    reseed the noise function."""

    def test_perlin_1d_range(self):
        from noise import pnoise1
        for step in range(-10000, 10000):
            coord = step * 0.49
            value = pnoise1(coord)
            self.assertTrue(-1.0 <= value <= 1.0, (coord, value))

    def test_perlin_1d_octaves_range(self):
        from noise import pnoise1
        for step in range(-1000, 1000):
            for octave in range(1, 11):
                coord = step * 0.49
                value = pnoise1(coord, octaves=octave)
                self.assertTrue(-1.0 <= value <= 1.0, (coord, value))

    def test_perlin_1d_base(self):
        from noise import pnoise1
        # Identical base reproduces the value; different bases differ.
        self.assertEqual(pnoise1(0.5), pnoise1(0.5, base=0))
        self.assertNotEqual(pnoise1(0.5), pnoise1(0.5, base=5))
        self.assertNotEqual(pnoise1(0.5, base=5), pnoise1(0.5, base=1))

    def test_perlin_2d_range(self):
        from noise import pnoise2
        for step in range(-10000, 10000):
            x, y = step * 0.49, -step * 0.67
            value = pnoise2(x, y)
            self.assertTrue(-1.0 <= value <= 1.0, (x, y, value))

    def test_perlin_2d_octaves_range(self):
        from noise import pnoise2
        for step in range(-1000, 1000):
            for octave in range(1, 11):
                x, y = -step * 0.49, step * 0.67
                value = pnoise2(x, y, octaves=octave)
                self.assertTrue(-1.0 <= value <= 1.0, (x, value))

    def test_perlin_2d_base(self):
        from noise import pnoise2
        x, y = 0.73, 0.27
        self.assertEqual(pnoise2(x, y), pnoise2(x, y, base=0))
        self.assertNotEqual(pnoise2(x, y), pnoise2(x, y, base=5))
        self.assertNotEqual(pnoise2(x, y, base=5), pnoise2(x, y, base=1))

    def test_perlin_3d_range(self):
        from noise import pnoise3
        for step in range(-10000, 10000):
            x, y, z = -step * 0.49, step * 0.67, -step * 0.727
            value = pnoise3(x, y, z)
            self.assertTrue(-1.0 <= value <= 1.0, (x, y, z, value))

    def test_perlin_3d_octaves_range(self):
        from noise import pnoise3
        for step in range(-1000, 1000):
            x, y, z = step * 0.22, -step * 0.77, -step * 0.17
            for octave in range(1, 11):
                value = pnoise3(x, y, z, octaves=octave)
                self.assertTrue(-1.0 <= value <= 1.0, (x, y, z, value))

    def test_perlin_3d_base(self):
        from noise import pnoise3
        x, y, z = 0.1, 0.7, 0.33
        self.assertEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=0))
        self.assertNotEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=5))
        self.assertNotEqual(pnoise3(x, y, z, base=5), pnoise3(x, y, z, base=1))
class SimplexTestCase(unittest.TestCase):
def test_randomize(self):
from noise import randomize
self.assertTrue(randomize(4096,23490))
def test_simplex_2d_range(self):
from noise import snoise2
for i in range(-10000, 10000):
x = i * 0.49
y = -i * 0.67
n = snoise2(x, y)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, n))
def test_simplex_2d_octaves_range(self):
from noise import snoise2
for i in range(-1000, 1000):
for o in range(10):
x = -i * 0.49
y = i * 0.67
n = snoise2(x, y, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, n))
def test_simplex_3d_range(self):
from noise import snoise3
for i in range(-10000, 10000):
x = i * 0.31
y = -i * 0.7
z = i * 0.19
n = snoise3(x, y, z)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))
def test_simplex_3d_octaves_range(self):
from noise import snoise3
for i in range(-1000, 1000):
x = -i * 0.12
y = i * 0.55
z = i * 0.34
for o in range(10):
n = snoise3(x, y, z, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, o+1, n))
def test_simplex_4d_range(self):
from noise import snoise4
for i in range(-10000, 10000):
x = i * 0.88
y = -i * 0.11
z = -i * 0.57
w = i * 0.666
n = snoise4(x, y, z, w)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, n))
def test_simplex_4d_octaves_range(self):
from noise import snoise4
for i in range(-1000, 1000):
x = -i * 0.12
y = i * 0.55
z = i * 0.34
w = i * 0.21
for o in range(10):
n = snoise4(x, y, z, w, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, o+1, n))
if __name__ == '__main__':
unittest.main()
|
[
"noise.snoise3",
"noise.snoise4",
"noise.pnoise1",
"noise.snoise2",
"noise.pnoise3",
"unittest.main",
"noise.pnoise2",
"noise.randomize"
] |
[((4699, 4714), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4712, 4714), False, 'import unittest\n'), ((210, 220), 'noise.pnoise1', 'pnoise1', (['x'], {}), '(x)\n', (217, 220), False, 'from noise import pnoise1\n'), ((651, 663), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {}), '(0.5)\n', (658, 663), False, 'from noise import pnoise1\n'), ((665, 685), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {'base': '(0)'}), '(0.5, base=0)\n', (672, 685), False, 'from noise import pnoise1\n'), ((715, 727), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {}), '(0.5)\n', (722, 727), False, 'from noise import pnoise1\n'), ((729, 749), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {'base': '(5)'}), '(0.5, base=5)\n', (736, 749), False, 'from noise import pnoise1\n'), ((779, 799), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {'base': '(5)'}), '(0.5, base=5)\n', (786, 799), False, 'from noise import pnoise1\n'), ((801, 821), 'noise.pnoise1', 'pnoise1', (['(0.5)'], {'base': '(1)'}), '(0.5, base=1)\n', (808, 821), False, 'from noise import pnoise1\n'), ((1000, 1013), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {}), '(x, y)\n', (1007, 1013), False, 'from noise import pnoise2\n'), ((1506, 1519), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {}), '(x, y)\n', (1513, 1519), False, 'from noise import pnoise2\n'), ((1521, 1542), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {'base': '(0)'}), '(x, y, base=0)\n', (1528, 1542), False, 'from noise import pnoise2\n'), ((1572, 1585), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {}), '(x, y)\n', (1579, 1585), False, 'from noise import pnoise2\n'), ((1587, 1608), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {'base': '(5)'}), '(x, y, base=5)\n', (1594, 1608), False, 'from noise import pnoise2\n'), ((1638, 1659), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {'base': '(5)'}), '(x, y, base=5)\n', (1645, 1659), False, 'from noise import pnoise2\n'), ((1661, 1682), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {'base': '(1)'}), '(x, y, base=1)\n', (1668, 1682), False, 'from noise import pnoise2\n'), 
((1888, 1904), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1895, 1904), False, 'from noise import pnoise3\n'), ((2434, 2450), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2441, 2450), False, 'from noise import pnoise3\n'), ((2452, 2476), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {'base': '(0)'}), '(x, y, z, base=0)\n', (2459, 2476), False, 'from noise import pnoise3\n'), ((2506, 2522), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2513, 2522), False, 'from noise import pnoise3\n'), ((2524, 2548), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {'base': '(5)'}), '(x, y, z, base=5)\n', (2531, 2548), False, 'from noise import pnoise3\n'), ((2578, 2602), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {'base': '(5)'}), '(x, y, z, base=5)\n', (2585, 2602), False, 'from noise import pnoise3\n'), ((2604, 2628), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {'base': '(1)'}), '(x, y, z, base=1)\n', (2611, 2628), False, 'from noise import pnoise3\n'), ((2764, 2786), 'noise.randomize', 'randomize', (['(4096)', '(23490)'], {}), '(4096, 23490)\n', (2773, 2786), False, 'from noise import randomize\n'), ((2965, 2978), 'noise.snoise2', 'snoise2', (['x', 'y'], {}), '(x, y)\n', (2972, 2978), False, 'from noise import snoise2\n'), ((3553, 3569), 'noise.snoise3', 'snoise3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3560, 3569), False, 'from noise import snoise3\n'), ((4206, 4225), 'noise.snoise4', 'snoise4', (['x', 'y', 'z', 'w'], {}), '(x, y, z, w)\n', (4213, 4225), False, 'from noise import snoise4\n'), ((472, 497), 'noise.pnoise1', 'pnoise1', (['x'], {'octaves': '(o + 1)'}), '(x, octaves=o + 1)\n', (479, 497), False, 'from noise import pnoise1\n'), ((1298, 1326), 'noise.pnoise2', 'pnoise2', (['x', 'y'], {'octaves': '(o + 1)'}), '(x, y, octaves=o + 1)\n', (1305, 1326), False, 'from noise import pnoise2\n'), ((2210, 2241), 'noise.pnoise3', 'pnoise3', (['x', 'y', 'z'], {'octaves': '(o + 1)'}), '(x, y, z, octaves=o + 1)\n', 
(2217, 2241), False, 'from noise import pnoise3\n'), ((3264, 3292), 'noise.snoise2', 'snoise2', (['x', 'y'], {'octaves': '(o + 1)'}), '(x, y, octaves=o + 1)\n', (3271, 3292), False, 'from noise import snoise2\n'), ((3875, 3906), 'noise.snoise3', 'snoise3', (['x', 'y', 'z'], {'octaves': '(o + 1)'}), '(x, y, z, octaves=o + 1)\n', (3882, 3906), False, 'from noise import snoise3\n'), ((4559, 4593), 'noise.snoise4', 'snoise4', (['x', 'y', 'z', 'w'], {'octaves': '(o + 1)'}), '(x, y, z, w, octaves=o + 1)\n', (4566, 4593), False, 'from noise import snoise4\n')]
|
#!/usr/bin/env python3
import os
import shutil
import sys
import pathlib
import logging
# I will NEVER EVER use subproccess again
# At least not for something like Popen
try:
from sh import wget
except Exception:
print('[!] Just install sh right now!(pip install --user sh)')
sys.exit(0)
# Dumb Python2 support
if sys.version_info[0] == 2:
input = raw_input
# Path where this python script is located when it's run
curr_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
# The URL
url = input('[$] Url(none for ema.perfact.de): ')
url = url if url else 'ema.perfact.de'
print('[*] Url: {}\n'.format(url))
# Get name of the directory where the whole page should be saved
dir_name = input('[$] Directory name for the page(none for "1337"): ')
dir_name = dir_name if dir_name else '1337'
page_dir = curr_dir / dir_name
if page_dir.is_dir():
print('[!] {} is already a directory and will be overwritten!'.format(page_dir))
choice = input('[!] Continue?(y/n):').lower()
if choice != 'y':
sys.exit(0)
print('[*] Directory to save the page: {}\n'.format(dir_name))
# Get name of directory where the files will be saved we actually want to save
save_name = input('[$] Directory name to save findings(none for "saved"): ')
save_name = save_name if save_name else 'saved'
save_dir = curr_dir / save_name
if save_dir.is_dir():
print('[!] {} is already a directory!'.format(save_dir))
choice = input('[!] Delete it?(y/n): '.format(save_dir)).lower()
if choice == 'y':
shutil.rmtree(save_dir.absolute().as_posix())
else:
sys.exit(0)
os.makedirs(save_dir.absolute().as_posix())
print('[*] Directory to save findings: {}\n'.format(save_name))
# The searchterm (which files we want to copy)
print('[*] Everything with the following substring will be copied')
search_term = input('[$] Files to copy to that directory(none for ".png"): ')
search_term = search_term if search_term else '.png'
print('[*] Searchterm: {}\n'.format(search_term))
input('\n[$] Press any key to continue...')
# We will give these exit_codes to the wget call later
# to disabled every exit/error message (will look horribly else)
exit_codes = (i for i in range(0, 9))
# Sets off the wget -m <url> -P <directory> commande
# It's written so weird, so we can see the output of the program
try:
for line in wget('-m', url, '-P', dir_name, _iter=True, _err_to_out=True,
_out_bufsize=1, _ok_code=exit_codes):
print(line)
except Exception:
pass
# Copying the files we want to save
try:
# Get every file with the correct searchterm from the folder where the webpage is saved
files = list(page_dir.glob("**/*{}".format(search_term)))
if not files:
print("[!] No matching files found")
else:
print("[*] Copying {} *{} files...".format(len(files), search_term))
for f in files:
shutil.copy(f.absolute().as_posix(), save_dir.absolute().as_posix())
except Exception as e:
print('[!] Something went wrong while copying data')
print(e)
# Deleting the saved webpage, cause we don't need it anymore
print('\n[*] Cleaning up...\n')
if page_dir.is_dir():
shutil.rmtree(page_dir.absolute().as_posix())
print('[*] All done!')
|
[
"os.path.abspath",
"sh.wget",
"sys.exit"
] |
[((2357, 2460), 'sh.wget', 'wget', (['"""-m"""', 'url', '"""-P"""', 'dir_name'], {'_iter': '(True)', '_err_to_out': '(True)', '_out_bufsize': '(1)', '_ok_code': 'exit_codes'}), "('-m', url, '-P', dir_name, _iter=True, _err_to_out=True, _out_bufsize=\n 1, _ok_code=exit_codes)\n", (2361, 2460), False, 'from sh import wget\n'), ((289, 300), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (297, 300), False, 'import sys\n'), ((475, 500), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (490, 500), False, 'import os\n'), ((1037, 1048), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1045, 1048), False, 'import sys\n'), ((1595, 1606), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1603, 1606), False, 'import sys\n')]
|
import ase
from ase import Atoms
from ase.atom import Atom
import sys
from ase.visualize import view
import pickle
f = open(sys.argv[1],'r') #The .amc file
p = pickle.load(f)
positions = p['atomspositions']
atms = Atoms()
for p0 in positions:
a = Atom('Au',position=p0)
atms.append(a)
atms.center(vacuum=2)
view(atms)
|
[
"ase.Atoms",
"pickle.load",
"ase.visualize.view",
"ase.atom.Atom"
] |
[((160, 174), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (171, 174), False, 'import pickle\n'), ((215, 222), 'ase.Atoms', 'Atoms', ([], {}), '()\n', (220, 222), False, 'from ase import Atoms\n'), ((312, 322), 'ase.visualize.view', 'view', (['atms'], {}), '(atms)\n', (316, 322), False, 'from ase.visualize import view\n'), ((250, 273), 'ase.atom.Atom', 'Atom', (['"""Au"""'], {'position': 'p0'}), "('Au', position=p0)\n", (254, 273), False, 'from ase.atom import Atom\n')]
|
import json
def Config(config_path):
with open(config_path) as config_file:
return json.load(config_file)
|
[
"json.load"
] |
[((96, 118), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (105, 118), False, 'import json\n')]
|
"""编译go语言模块."""
import warnings
from typing import List, Optional
from pathlib import Path
from pmfp.utils.run_command_utils import run
def _build_grpc(includes: str, flag: str, to: str, target: str, cwd: Path) -> None:
command = f"protoc {includes} {flag} --go_out={to} --go-grpc_out={to} {target}"
try:
run(command, cwd=cwd, visible=True)
except Exception as e:
warnings.warn(f"""根据模板构造grpc项目失败
{str(e)}
编译为go语言依赖如下插件,请检查是否安装:
"google.golang.org/protobuf/cmd/protoc-gen-go"
"google.golang.org/grpc/cmd/protoc-gen-go-grpc"
""")
else:
print(f"编译grpc项目 {target} 为go语言模块完成!")
def build_pb_go(serv_file: str, includes: List[str], to: str,
source_relative: bool, cwd: Path, files: Optional[List[str]] = None, **kwargs: str) -> None:
"""编译grpc的protobuffer定义文件为go语言模块.
Args:
serv_file (str): 定义grpc service的目标proto文件
includes (List[str]): 待编译的protobuffer文件所在的文件夹
to (str): 编译成的模块文件放到的路径
source_relative (bool): 是否使用路径作为包名,只针对go语言
cwd (Path): 执行目录.
files (Optional[List[str]]): 其他待编译的protobuffer文件
"""
includes_str = " ".join([f"-I {include}" for include in includes])
target_str = serv_file
if files:
target_str += " " + " ".join(files)
flag_str = ""
if source_relative:
flag_str += " --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative"
if kwargs:
if flag_str:
flag_str += " "
flag_str += " ".join([f"{k}={v}" for k, v in kwargs.items()])
_build_grpc(includes_str, flag_str, to, target_str, cwd)
|
[
"pmfp.utils.run_command_utils.run"
] |
[((323, 358), 'pmfp.utils.run_command_utils.run', 'run', (['command'], {'cwd': 'cwd', 'visible': '(True)'}), '(command, cwd=cwd, visible=True)\n', (326, 358), False, 'from pmfp.utils.run_command_utils import run\n')]
|
import json
import logging
from flask import jsonify, make_response, request
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from http import HTTPStatus
from marshmallow import ValidationError, Schema
from werkzeug.security import generate_password_hash
from app.models import db
from app.models.user import User, user_schema
from app.api.utils import get_url
from app.utils.exceptions import ApiException
logger = logging.getLogger(__name__)
class RequestSchema:
class PostUsers(Schema):
name = type(user_schema.fields['name'])(
required=True, validate=user_schema.fields['name'].validate)
email = type(user_schema.fields['email'])(
required=True, validate=user_schema.fields['email'].validate)
password = type(user_schema.fields['password'])(
required=True, validate=user_schema.fields['password'].validate)
role_id = type(user_schema.fields['role_id'])(
required=True, validate=user_schema.fields['role_id'].validate)
class ResponseSchema:
class GetUser(Schema):
id = type(user_schema.fields['id'])(
required=True, validate=user_schema.fields['name'].validate)
name = type(user_schema.fields['name'])(
required=True, validate=user_schema.fields['name'].validate)
email = type(user_schema.fields['email'])(
required=True, validate=user_schema.fields['email'].validate)
class UserListApi(Resource):
"""
GET: Return all users.
POST: Create new user account.
PUT: N/A
DELETE: N/A
"""
def post(self):
"""Sign up"""
status = HTTPStatus.CREATED
ret = {}
error_msg = {}
try:
data = request.get_json()
if data is None:
raise ApiException('Request is empty.', status=HTTPStatus.BAD_REQUEST)
errors = RequestSchema.PostUsers().validate(data)
if errors:
raise ValidationError(errors)
data = RequestSchema.PostUsers().dump(data)
if User.query.filter_by(name=data['name']).count() > 0:
raise ApiException(
f"Username:{data['name']} is already used.", status=HTTPStatus.CONFLICT)
if User.query.filter_by(email=data['email']).count() > 0:
raise ApiException(
f"Email:{data['email']} is already used.", status=HTTPStatus.CONFLICT)
data['password'] = generate_password_hash(data['password'])
user = User(**data)
db.session.add(user)
db.session.commit()
ret['link'] = {'self': get_url(tail_url=user.id)}
except ValidationError as e:
status = HTTPStatus.BAD_REQUEST
error_msg = e.normalized_messages()
except ApiException as e:
status = e.status
error_msg = str(e)
except Exception as e:
error_msg = f'{type(e)} : {str(e)} '
if status == HTTPStatus.CREATED:
status = HTTPStatus.INTERNAL_SERVER_ERROR
error_msg = f'Signup failed due to internal server error. ' + error_msg
finally:
if status != HTTPStatus.CREATED:
db.session.rollback()
ret = { 'error': { 'message': error_msg } }
logger.error(ret)
return make_response(jsonify(ret), status)
class UserApi(Resource):
"""
GET: Return user.
POST: N/A
PUT: Update user data.
DELETE: Delete user account.
"""
@jwt_required
def get(self, id):
"""Return user."""
status = HTTPStatus.OK
ret = {}
error_msg = ''
try:
query = User.query.filter_by(id=id)
user = query.first()
if not user:
raise ApiException(
f'User ID:{id} was not found.', status=HTTPStatus.NOT_FOUND)
ret = ResponseSchema.GetUser().dump(user)
ret['link'] = {'self': get_url(tail_url='')}
except ApiException as e:
status = e.status
error_msg = str(e)
except Exception as e:
status.e = HTTPStatus.INTERNAL_SERVER_ERROR
error_msg = str(e)
finally:
if error_msg != '':
ret = { 'error': { 'message': error_msg } }
logger.error(ret)
return make_response(jsonify(ret), status)
|
[
"logging.getLogger",
"app.models.user.User",
"app.models.user.User.query.filter_by",
"marshmallow.ValidationError",
"app.api.utils.get_url",
"flask.request.get_json",
"werkzeug.security.generate_password_hash",
"app.models.db.session.rollback",
"app.models.db.session.commit",
"app.models.db.session.add",
"app.utils.exceptions.ApiException",
"flask.jsonify"
] |
[((449, 476), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (466, 476), False, 'import logging\n'), ((1646, 1664), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1662, 1664), False, 'from flask import jsonify, make_response, request\n'), ((2302, 2342), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (["data['password']"], {}), "(data['password'])\n", (2324, 2342), False, 'from werkzeug.security import generate_password_hash\n'), ((2356, 2368), 'app.models.user.User', 'User', ([], {}), '(**data)\n', (2360, 2368), False, 'from app.models.user import User, user_schema\n'), ((2375, 2395), 'app.models.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2389, 2395), False, 'from app.models import db\n'), ((2402, 2421), 'app.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2419, 2421), False, 'from app.models import db\n'), ((3096, 3108), 'flask.jsonify', 'jsonify', (['ret'], {}), '(ret)\n', (3103, 3108), False, 'from flask import jsonify, make_response, request\n'), ((3388, 3415), 'app.models.user.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (3408, 3415), False, 'from app.models.user import User, user_schema\n'), ((3985, 3997), 'flask.jsonify', 'jsonify', (['ret'], {}), '(ret)\n', (3992, 3997), False, 'from flask import jsonify, make_response, request\n'), ((1702, 1766), 'app.utils.exceptions.ApiException', 'ApiException', (['"""Request is empty."""'], {'status': 'HTTPStatus.BAD_REQUEST'}), "('Request is empty.', status=HTTPStatus.BAD_REQUEST)\n", (1714, 1766), False, 'from app.utils.exceptions import ApiException\n'), ((1854, 1877), 'marshmallow.ValidationError', 'ValidationError', (['errors'], {}), '(errors)\n', (1869, 1877), False, 'from marshmallow import ValidationError, Schema\n'), ((2005, 2095), 'app.utils.exceptions.ApiException', 'ApiException', (['f"""Username:{data[\'name\']} is already used."""'], {'status': 
'HTTPStatus.CONFLICT'}), '(f"Username:{data[\'name\']} is already used.", status=HTTPStatus\n .CONFLICT)\n', (2017, 2095), False, 'from app.utils.exceptions import ApiException\n'), ((2181, 2269), 'app.utils.exceptions.ApiException', 'ApiException', (['f"""Email:{data[\'email\']} is already used."""'], {'status': 'HTTPStatus.CONFLICT'}), '(f"Email:{data[\'email\']} is already used.", status=HTTPStatus.\n CONFLICT)\n', (2193, 2269), False, 'from app.utils.exceptions import ApiException\n'), ((2452, 2477), 'app.api.utils.get_url', 'get_url', ([], {'tail_url': 'user.id'}), '(tail_url=user.id)\n', (2459, 2477), False, 'from app.api.utils import get_url\n'), ((2970, 2991), 'app.models.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (2989, 2991), False, 'from app.models import db\n'), ((3476, 3549), 'app.utils.exceptions.ApiException', 'ApiException', (['f"""User ID:{id} was not found."""'], {'status': 'HTTPStatus.NOT_FOUND'}), "(f'User ID:{id} was not found.', status=HTTPStatus.NOT_FOUND)\n", (3488, 3549), False, 'from app.utils.exceptions import ApiException\n'), ((3639, 3659), 'app.api.utils.get_url', 'get_url', ([], {'tail_url': '""""""'}), "(tail_url='')\n", (3646, 3659), False, 'from app.api.utils import get_url\n'), ((1938, 1977), 'app.models.user.User.query.filter_by', 'User.query.filter_by', ([], {'name': "data['name']"}), "(name=data['name'])\n", (1958, 1977), False, 'from app.models.user import User, user_schema\n'), ((2112, 2153), 'app.models.user.User.query.filter_by', 'User.query.filter_by', ([], {'email': "data['email']"}), "(email=data['email'])\n", (2132, 2153), False, 'from app.models.user import User, user_schema\n')]
|
import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
def __init__(self,
policy_language_model,
reward_language_model,
goal_dim,
one_hot_encoder,
params):
self.policy_language_model = policy_language_model
self.reward_language_model = reward_language_model
self.goal_dim = goal_dim
self.params = params
self.nb_feedbacks = 0
self.nb_positive_feedbacks = 0
self.nb_negative_feedbacks = 0
self.feedback2id = dict()
self.id2feedback = dict()
self.id2oracleid = dict()
self.feedback2one_hot = dict()
self.id2one_hot = dict()
self.feedback_memory = dict(memory_id=[],
string=[],
iter_discovery=[],
target_counter=[],
reached_counter=[],
oracle_id=[],
f1_score=[],
policy_encoding=[],
reward_encoding=[],
imagined=[],
)
self.imagined_goals = dict(string=[],
competence=[],
lp=[])
self.one_hot_encoder = one_hot_encoder
self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
params['test_descriptions'],
sentences=None,
method=params['conditions']['imagination_method'])
self.nb_discovered_goals = 0
self.score_target_goals = None
self.perceived_learning_progress = None
self.perceived_competence = None
self.feedback_stats = None
self.rank = MPI.COMM_WORLD.Get_rank()
self.num_cpus = params['experiment_params']['n_cpus']
self.rollout_batch_size = params['experiment_params']['rollout_batch_size']
self.not_imagined_goal_ids = np.array([])
self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
# embeddings must be updated when the language model is udpated
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test 50 goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
Sample targets for all cpus and all batch, then scatter to the different cpus
"""
# Decide whether to exploit or not
exploit = True if np.random.random() < 0.1 else False
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
    """Serves goal descriptions for evaluation rollouts.

    Goals are taken from the training descriptions either round-robin
    ('robin', the default, advancing an internal cursor) or uniformly at
    random ('random'). Every rollout in a batch receives the same goal.
    """

    def __init__(self, policy_language_model, one_hot_encoder, params):
        # one_hot_encoder is accepted for interface compatibility but not used here.
        self.descriptions = params['train_descriptions']
        self.nb_descriptions = len(self.descriptions)
        self.count = 0  # round-robin cursor over self.descriptions
        self.policy_language_model = policy_language_model
        self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
        self.params = params

    def reset(self):
        """Restart the round-robin cursor at the first description."""
        self.count = 0

    def sample(self, method='robin'):
        """Return (True, goal strings, policy encodings, goal ids) for one batch.

        method: 'robin' cycles through descriptions in order; 'random'
        picks one index uniformly. Raises NotImplementedError otherwise.
        """
        if method == 'robin':
            ind = self.count
        elif method == 'random':
            ind = np.random.randint(self.nb_descriptions)
        else:
            raise NotImplementedError
        chosen = self.descriptions[ind]
        goals_str = [chosen for _ in range(self.rollout_batch_size)]
        # One encode call per rollout, mirroring one encoding per goal string.
        goals_encodings = [
            self.policy_language_model.encode(g_str).flatten() for g_str in goals_str
        ]
        goals_ids = [ind] * self.rollout_batch_size
        self.count += 1
        return True, goals_str, goals_encodings, goals_ids
|
[
"numpy.random.normal",
"numpy.tile",
"src.logger.info",
"numpy.repeat",
"mpi4py.MPI.COMM_WORLD.bcast",
"numpy.random.choice",
"numpy.random.random",
"numpy.where",
"src.imagine.goal_generator.simple_sentence_generator.SentenceGeneratorHeuristic",
"numpy.array",
"mpi4py.MPI.COMM_WORLD.scatter",
"numpy.random.randint",
"mpi4py.MPI.COMM_WORLD.Get_rank"
] |
[((1608, 1770), 'src.imagine.goal_generator.simple_sentence_generator.SentenceGeneratorHeuristic', 'SentenceGeneratorHeuristic', (["params['train_descriptions']", "params['test_descriptions']"], {'sentences': 'None', 'method': "params['conditions']['imagination_method']"}), "(params['train_descriptions'], params[\n 'test_descriptions'], sentences=None, method=params['conditions'][\n 'imagination_method'])\n", (1634, 1770), False, 'from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic\n'), ((2152, 2177), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (2175, 2177), False, 'from mpi4py import MPI\n'), ((2362, 2374), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2370, 2374), True, 'import numpy as np\n'), ((2408, 2420), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2416, 2420), True, 'import numpy as np\n'), ((9881, 9931), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback_memory'], {'root': '(0)'}), '(self.feedback_memory, root=0)\n', (9901, 9931), False, 'from mpi4py import MPI\n'), ((9959, 10005), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback2id'], {'root': '(0)'}), '(self.feedback2id, root=0)\n', (9979, 10005), False, 'from mpi4py import MPI\n'), ((10033, 10079), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.id2oracleid'], {'root': '(0)'}), '(self.id2oracleid, root=0)\n', (10053, 10079), False, 'from mpi4py import MPI\n'), ((10107, 10153), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.id2feedback'], {'root': '(0)'}), '(self.id2feedback, root=0)\n', (10127, 10153), False, 'from mpi4py import MPI\n'), ((10186, 10237), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback2one_hot'], {'root': '(0)'}), '(self.feedback2one_hot, root=0)\n', (10206, 10237), False, 'from mpi4py import MPI\n'), ((10273, 10327), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.nb_discovered_goals'], 
{'root': '(0)'}), '(self.nb_discovered_goals, root=0)\n', (10293, 10327), False, 'from mpi4py import MPI\n'), ((10358, 10407), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.imagined_goals'], {'root': '(0)'}), '(self.imagined_goals, root=0)\n', (10378, 10407), False, 'from mpi4py import MPI\n'), ((10439, 10489), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.one_hot_encoder'], {'root': '(0)'}), '(self.one_hot_encoder, root=0)\n', (10459, 10489), False, 'from mpi4py import MPI\n'), ((12720, 12765), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_str'], {'root': '(0)'}), '(all_goals_str, root=0)\n', (12742, 12765), False, 'from mpi4py import MPI\n'), ((12792, 12843), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_encodings'], {'root': '(0)'}), '(all_goals_encodings, root=0)\n', (12814, 12843), False, 'from mpi4py import MPI\n'), ((12864, 12909), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_ids'], {'root': '(0)'}), '(all_goals_ids, root=0)\n', (12886, 12909), False, 'from mpi4py import MPI\n'), ((7304, 7347), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (7312, 7347), True, 'import numpy as np\n'), ((7400, 7443), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (7408, 7443), True, 'import numpy as np\n'), ((8634, 8682), 'numpy.array', 'np.array', (["[ep['obs'][-1] for ep in all_episodes]"], {}), "([ep['obs'][-1] for ep in all_episodes])\n", (8642, 8682), True, 'import numpy as np\n'), ((8762, 8805), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (8770, 8805), True, 'import numpy as np\n'), ((9066, 9135), 'numpy.random.choice', 'np.random.choice', (['discovered_goal_ids'], {'size': 'n_attempts', 'replace': '(False)'}), '(discovered_goal_ids, size=n_attempts, 
replace=False)\n', (9082, 9135), True, 'import numpy as np\n'), ((9154, 9194), 'numpy.repeat', 'np.repeat', (['final_obs', 'n_attempts'], {'axis': '(0)'}), '(final_obs, n_attempts, axis=0)\n', (9163, 9194), True, 'import numpy as np\n'), ((9215, 9256), 'numpy.tile', 'np.tile', (['goals_to_try', 'final_obs.shape[0]'], {}), '(goals_to_try, final_obs.shape[0])\n', (9222, 9256), True, 'import numpy as np\n'), ((10709, 10727), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10725, 10727), True, 'import numpy as np\n'), ((13712, 13751), 'numpy.random.randint', 'np.random.randint', (['self.nb_descriptions'], {}), '(self.nb_descriptions)\n', (13729, 13751), True, 'import numpy as np\n'), ((6186, 6226), 'numpy.array', 'np.array', (["self.feedback_memory['string']"], {}), "(self.feedback_memory['string'])\n", (6194, 6226), True, 'import numpy as np\n'), ((11033, 11051), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (11049, 11051), True, 'import numpy as np\n'), ((5194, 5241), 'src.logger.info', 'logger.info', (['"""Goal already imagined:"""', 'goal_str'], {}), "('Goal already imagined:', goal_str)\n", (5205, 5241), False, 'from src import logger\n'), ((7109, 7151), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (7117, 7151), True, 'import numpy as np\n'), ((7208, 7250), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (7216, 7250), True, 'import numpy as np\n'), ((6082, 6124), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (6090, 6124), True, 'import numpy as np\n'), ((8849, 8891), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (8857, 8891), True, 'import numpy as np\n'), ((9427, 9486), 'numpy.where', 'np.where', (['(rewards[i * n_attempts:(i + 1) * n_attempts] == 0)'], {}), '(rewards[i * 
n_attempts:(i + 1) * n_attempts] == 0)\n', (9435, 9486), True, 'import numpy as np\n'), ((9591, 9651), 'numpy.where', 'np.where', (['(rewards[i * n_attempts:(i + 1) * n_attempts] == -1)'], {}), '(rewards[i * n_attempts:(i + 1) * n_attempts] == -1)\n', (9599, 9651), True, 'import numpy as np\n'), ((11648, 11684), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.goal_dim'}), '(size=self.goal_dim)\n', (11664, 11684), True, 'import numpy as np\n'), ((11985, 12025), 'numpy.random.choice', 'np.random.choice', (['self.imagined_goal_ids'], {}), '(self.imagined_goal_ids)\n', (12001, 12025), True, 'import numpy as np\n'), ((12098, 12142), 'numpy.random.choice', 'np.random.choice', (['self.not_imagined_goal_ids'], {}), '(self.not_imagined_goal_ids)\n', (12114, 12142), True, 'import numpy as np\n')]
|
from Qt import QtWidgets, QtGui
from .categories import (
CategoryState,
SystemWidget,
ProjectWidget
)
from .widgets import ShadowWidget
from .. import style
class MainWidget(QtWidgets.QWidget):
    """Top-level OpenPype settings window holding the System and Project tabs."""

    widget_width = 1000
    widget_height = 600

    def __init__(self, user_role, parent=None):
        super(MainWidget, self).__init__(parent)

        self.setObjectName("MainWidget")
        self.setWindowTitle("OpenPype Settings")
        self.resize(self.widget_width, self.widget_height)
        self.setStyleSheet(style.load_stylesheet())
        self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))

        tabs = QtWidgets.QTabWidget(parent=self)
        studio_widget = SystemWidget(user_role, tabs)
        project_widget = ProjectWidget(user_role, tabs)
        tabs.addTab(studio_widget, "System")
        tabs.addTab(project_widget, "Project")

        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(5, 5, 5, 5)
        main_layout.setSpacing(0)
        main_layout.addWidget(tabs)
        self.setLayout(main_layout)

        # Overlay shown while any tab is busy saving/loading.
        self._shadow_widget = ShadowWidget("Working...", self)

        tab_widgets = [studio_widget, project_widget]
        for tab_widget in tab_widgets:
            tab_widget.saved.connect(self._on_tab_save)
            tab_widget.state_changed.connect(self._on_state_change)
        self.tab_widgets = tab_widgets

    def _on_tab_save(self, source_widget):
        """Propagate a save event from one tab to every tab."""
        for tab_widget in self.tab_widgets:
            tab_widget.on_saved(source_widget)

    def _on_state_change(self):
        """Show or hide the 'Working...' overlay to match the tabs' states."""
        is_working = any(
            widget.state is CategoryState.Working for widget in self.tab_widgets
        )
        # Nothing to do when the overlay already matches the working state.
        if is_working == self._shadow_widget.isVisible():
            return
        self._shadow_widget.setVisible(is_working)
        # Process events so the visibility change is painted immediately.
        app = QtWidgets.QApplication.instance()
        if app:
            app.processEvents()

    def reset(self):
        """Reset every settings tab to its saved state."""
        for tab_widget in self.tab_widgets:
            tab_widget.reset()
|
[
"Qt.QtWidgets.QTabWidget",
"Qt.QtWidgets.QApplication.instance",
"Qt.QtWidgets.QVBoxLayout"
] |
[((682, 715), 'Qt.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', ([], {'parent': 'self'}), '(parent=self)\n', (702, 715), False, 'from Qt import QtWidgets, QtGui\n'), ((1079, 1106), 'Qt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self'], {}), '(self)\n', (1100, 1106), False, 'from Qt import QtWidgets, QtGui\n'), ((2179, 2212), 'Qt.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (2210, 2212), False, 'from Qt import QtWidgets, QtGui\n')]
|
import uuid
from mongoengine import Document, StringField, ListField, UUIDField
from django.conf import settings
from cripts.core.cripts_mongoengine import CriptsBaseAttributes, CriptsSourceDocument
from cripts.core.cripts_mongoengine import CriptsActionsDocument
class UserName(CriptsBaseAttributes, CriptsSourceDocument, CriptsActionsDocument,
               Document):
    """
    UserName class.

    MongoDB document for a username observed in one or more datasets.
    Stored in the settings.COL_USERNAMES collection and keyed by a
    surrogate UUID (username_id) rather than the raw, user-supplied name.
    """

    # MongoEngine/CRIPTs metadata: collection binding, schema documentation,
    # and jTable listing/display options.
    meta = {
        "collection": settings.COL_USERNAMES,
        "cripts_type": 'UserName',
        "latest_schema_version": 1,
        "schema_doc": {
            'name': 'The actual username',
            'username_id': 'An ID corresponding to the username since using the raw username as the key can run into little bobby tables issues',
            # Fixed: previously said "e-mail address" (copy-paste from the
            # EmailAddress document); this field describes the username.
            'description': 'Description of the username',
            'datasets': ('List [] of datasets this username'
                        ' appeared in'),
            'source': ('List [] of sources who provided information about this'
                      ' username'),
        },
        "jtable_opts": {
            'details_url': 'cripts.usernames.views.username_detail',
            'details_url_key': 'username_id',
            'default_sort': "name",
            'searchurl': 'cripts.usernames.views.usernames_listing',
            'fields': [ "name", "created",
                       "source", "id", "username_id"],
            'jtopts_fields': [ "name",
                              "created",
                              "source",
                              "favorite",
                              "id", "username_id"],
            'hidden_fields': ["username_id", "id"],
            'linked_fields': ["source", ],
            'details_link': 'name',
            'no_sort': []
        }
    }

    # The raw username value (displayed and searched on).
    name = StringField(required=True)
    # Free-form description of the username.
    description = StringField(required=True)
    # Surrogate key used in URLs instead of the raw username.
    username_id = UUIDField(binary=True, required=True, default=uuid.uuid4)
    # Datasets this username appeared in.
    datasets = ListField(required=False)
|
[
"mongoengine.ListField",
"mongoengine.UUIDField",
"mongoengine.StringField"
] |
[((1994, 2020), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (2005, 2020), False, 'from mongoengine import Document, StringField, ListField, UUIDField\n'), ((2039, 2065), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (2050, 2065), False, 'from mongoengine import Document, StringField, ListField, UUIDField\n'), ((2084, 2141), 'mongoengine.UUIDField', 'UUIDField', ([], {'binary': '(True)', 'required': '(True)', 'default': 'uuid.uuid4'}), '(binary=True, required=True, default=uuid.uuid4)\n', (2093, 2141), False, 'from mongoengine import Document, StringField, ListField, UUIDField\n'), ((2157, 2182), 'mongoengine.ListField', 'ListField', ([], {'required': '(False)'}), '(required=False)\n', (2166, 2182), False, 'from mongoengine import Document, StringField, ListField, UUIDField\n')]
|
import pytest
import operator as op
from sweetpea import fully_cross_block
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window
from sweetpea.encoding_diagram import __generate_encoding_diagram
# Two binary stimulus factors: ink color and written word.
color = Factor("color", ["red", "blue"])
text = Factor("text", ["red", "blue"])
# Congruency is derived per trial from whether color and text match.
con_level = DerivedLevel("con", WithinTrial(op.eq, [color, text]))
inc_level = DerivedLevel("inc", WithinTrial(op.ne, [color, text]))
con_factor = Factor("congruent?", [con_level, inc_level])
# Transition factors compare consecutive trials, so they have no value
# on the first trial of a sequence.
color_repeats_factor = Factor("color repeats?", [
    DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [color])),
    DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [color]))
])
text_repeats_factor = Factor("text repeats?", [
    DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [text])),
    DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [text]))
])
# Baseline block crossing color x text, shared by the first test below.
design = [color, text, con_factor]
crossing = [color, text]
blk = fully_cross_block(design, crossing, [])
def test_generate_encoding_diagram():
    """Basic design: variables are numbered sequentially within each trial."""
    assert __generate_encoding_diagram(blk) == "\
----------------------------------------------\n\
| Trial | color | text | congruent? |\n\
| # | red blue | red blue | con inc |\n\
----------------------------------------------\n\
| 1 | 1 2 | 3 4 | 5 6 |\n\
| 2 | 7 8 | 9 10 | 11 12 |\n\
| 3 | 13 14 | 15 16 | 17 18 |\n\
| 4 | 19 20 | 21 22 | 23 24 |\n\
----------------------------------------------\n"
def test_generate_encoding_diagram_with_transition():
    """Transition factor: its columns are blank on trial 1 and numbered after
    all within-trial variables (17+)."""
    block = fully_cross_block([color, text, color_repeats_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
--------------------------------------------------\n\
| Trial | color | text | color repeats? |\n\
| # | red blue | red blue | yes no |\n\
--------------------------------------------------\n\
| 1 | 1 2 | 3 4 | |\n\
| 2 | 5 6 | 7 8 | 17 18 |\n\
| 3 | 9 10 | 11 12 | 19 20 |\n\
| 4 | 13 14 | 15 16 | 21 22 |\n\
--------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions():
    """Two transition factors: each gets its own variable range (25+, 31+)
    and both are blank on trial 1."""
    block = fully_cross_block([color, text, con_factor, color_repeats_factor, text_repeats_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial | color | text | congruent? | color repeats? | text repeats? |\n\
| # | red blue | red blue | con inc | yes no | yes no |\n\
-------------------------------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | 5 6 | | |\n\
| 2 | 7 8 | 9 10 | 11 12 | 25 26 | 31 32 |\n\
| 3 | 13 14 | 15 16 | 17 18 | 27 28 | 33 34 |\n\
| 4 | 19 20 | 21 22 | 23 24 | 29 30 | 35 36 |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions_in_different_order():
    """Same design as above with the factors listed in a different order:
    column order follows the design, variable numbering is unchanged."""
    block = fully_cross_block([text_repeats_factor, color, color_repeats_factor, text, con_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial | text repeats? | color | color repeats? | text | congruent? |\n\
| # | yes no | red blue | yes no | red blue | con inc |\n\
-------------------------------------------------------------------------------\n\
| 1 | | 1 2 | | 3 4 | 5 6 |\n\
| 2 | 25 26 | 7 8 | 31 32 | 9 10 | 11 12 |\n\
| 3 | 27 28 | 13 14 | 33 34 | 15 16 | 17 18 |\n\
| 4 | 29 30 | 19 20 | 35 36 | 21 22 | 23 24 |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_windows():
    """Width-3 window: the derived factor is blank for the first two trials
    and numbered from 31 onward."""
    color3 = Factor("color3", ["red", "blue", "green"])
    yes_fn = lambda colors: colors[0] == colors[1] == colors[2]
    no_fn = lambda colors: not yes_fn(colors)
    color3_repeats_factor = Factor("color3 repeats?", [
        DerivedLevel("yes", Window(yes_fn, [color3], 3, 1)),
        DerivedLevel("no", Window(no_fn, [color3], 3, 1))
    ])
    block = fully_cross_block([color3_repeats_factor, color3, text], [color3, text], [])
    assert __generate_encoding_diagram(block) == "\
---------------------------------------------------------\n\
| Trial | color3 repeats? | color3 | text |\n\
| # | yes no | red blue green | red blue |\n\
---------------------------------------------------------\n\
| 1 | | 1 2 3 | 4 5 |\n\
| 2 | | 6 7 8 | 9 10 |\n\
| 3 | 31 32 | 11 12 13 | 14 15 |\n\
| 4 | 33 34 | 16 17 18 | 19 20 |\n\
| 5 | 35 36 | 21 22 23 | 24 25 |\n\
| 6 | 37 38 | 26 27 28 | 29 30 |\n\
---------------------------------------------------------\n"
def test_generate_encoding_diagram_with_window_with_stride():
    """Width-1 windows with stride: variables appear only on trials the
    stride selects (stride 3 -> trials 1 and 4; stride 2 -> trials 2 and 4).

    NOTE(review): both the "yes" and "no" levels use the identical `==`
    predicate; "no" presumably should use `!=` — verify upstream. The
    encoding layout under test is unaffected by the predicate itself.
    """
    congruent_bookend = Factor("congruent bookend?", [
        DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3)),
        DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3))
    ])
    block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
    assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial | color | text | congruent bookend? |\n\
| # | red blue | red blue | yes no |\n\
------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | 17 18 |\n\
| 2 | 5 6 | 7 8 | |\n\
| 3 | 9 10 | 11 12 | |\n\
| 4 | 13 14 | 15 16 | 19 20 |\n\
------------------------------------------------------\n"
    congruent_bookend = Factor("congruent bookend?", [
        DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2)),
        DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2))
    ])
    block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
    assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial | color | text | congruent bookend? |\n\
| # | red blue | red blue | yes no |\n\
------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | |\n\
| 2 | 5 6 | 7 8 | 17 18 |\n\
| 3 | 9 10 | 11 12 | |\n\
| 4 | 13 14 | 15 16 | 19 20 |\n\
------------------------------------------------------\n"
|
[
"sweetpea.primitives.Window",
"sweetpea.primitives.Factor",
"sweetpea.primitives.WithinTrial",
"sweetpea.primitives.Transition",
"sweetpea.encoding_diagram.__generate_encoding_diagram",
"sweetpea.fully_cross_block"
] |
[((238, 270), 'sweetpea.primitives.Factor', 'Factor', (['"""color"""', "['red', 'blue']"], {}), "('color', ['red', 'blue'])\n", (244, 270), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((279, 310), 'sweetpea.primitives.Factor', 'Factor', (['"""text"""', "['red', 'blue']"], {}), "('text', ['red', 'blue'])\n", (285, 310), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((462, 506), 'sweetpea.primitives.Factor', 'Factor', (['"""congruent?"""', '[con_level, inc_level]'], {}), "('congruent?', [con_level, inc_level])\n", (468, 506), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((1016, 1055), 'sweetpea.fully_cross_block', 'fully_cross_block', (['design', 'crossing', '[]'], {}), '(design, crossing, [])\n', (1033, 1055), False, 'from sweetpea import fully_cross_block\n'), ((346, 379), 'sweetpea.primitives.WithinTrial', 'WithinTrial', (['op.eq', '[color, text]'], {}), '(op.eq, [color, text])\n', (357, 379), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((414, 447), 'sweetpea.primitives.WithinTrial', 'WithinTrial', (['op.ne', '[color, text]'], {}), '(op.ne, [color, text])\n', (425, 447), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((1664, 1737), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[color, text, color_repeats_factor]', '[color, text]', '[]'], {}), '([color, text, color_repeats_factor], [color, text], [])\n', (1681, 1737), False, 'from sweetpea import fully_cross_block\n'), ((2430, 2540), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[color, text, con_factor, color_repeats_factor, text_repeats_factor]', '[color, text]', '[]'], {}), '([color, text, con_factor, color_repeats_factor,\n text_repeats_factor], [color, text], [])\n', (2447, 2540), False, 'from sweetpea import 
fully_cross_block\n'), ((3509, 3619), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[text_repeats_factor, color, color_repeats_factor, text, con_factor]', '[color, text]', '[]'], {}), '([text_repeats_factor, color, color_repeats_factor, text,\n con_factor], [color, text], [])\n', (3526, 3619), False, 'from sweetpea import fully_cross_block\n'), ((4542, 4584), 'sweetpea.primitives.Factor', 'Factor', (['"""color3"""', "['red', 'blue', 'green']"], {}), "('color3', ['red', 'blue', 'green'])\n", (4548, 4584), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((4892, 4968), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[color3_repeats_factor, color3, text]', '[color3, text]', '[]'], {}), '([color3_repeats_factor, color3, text], [color3, text], [])\n', (4909, 4968), False, 'from sweetpea import fully_cross_block\n'), ((6037, 6107), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[color, text, congruent_bookend]', '[color, text]', '[]'], {}), '([color, text, congruent_bookend], [color, text], [])\n', (6054, 6107), False, 'from sweetpea import fully_cross_block\n'), ((6964, 7034), 'sweetpea.fully_cross_block', 'fully_cross_block', (['[color, text, congruent_bookend]', '[color, text]', '[]'], {}), '([color, text, congruent_bookend], [color, text], [])\n', (6981, 7034), False, 'from sweetpea import fully_cross_block\n'), ((1107, 1139), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['blk'], {}), '(blk)\n', (1134, 1139), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((1810, 1844), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (1837, 1844), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((2609, 2643), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (2636, 2643), 
False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((3688, 3722), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (3715, 3722), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((4981, 5015), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (5008, 5015), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((6120, 6154), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (6147, 6154), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((7047, 7081), 'sweetpea.encoding_diagram.__generate_encoding_diagram', '__generate_encoding_diagram', (['block'], {}), '(block)\n', (7074, 7081), False, 'from sweetpea.encoding_diagram import __generate_encoding_diagram\n'), ((582, 640), 'sweetpea.primitives.Transition', 'Transition', (['(lambda colors: colors[0] == colors[1])', '[color]'], {}), '(lambda colors: colors[0] == colors[1], [color])\n', (592, 640), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((667, 725), 'sweetpea.primitives.Transition', 'Transition', (['(lambda colors: colors[0] != colors[1])', '[color]'], {}), '(lambda colors: colors[0] != colors[1], [color])\n', (677, 725), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((803, 860), 'sweetpea.primitives.Transition', 'Transition', (['(lambda colors: colors[0] == colors[1])', '[text]'], {}), '(lambda colors: colors[0] == colors[1], [text])\n', (813, 860), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((887, 944), 'sweetpea.primitives.Transition', 'Transition', (['(lambda colors: colors[0] != colors[1])', '[text]'], {}), '(lambda colors: colors[0] != 
colors[1], [text])\n', (897, 944), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((4780, 4810), 'sweetpea.primitives.Window', 'Window', (['yes_fn', '[color3]', '(3)', '(1)'], {}), '(yes_fn, [color3], 3, 1)\n', (4786, 4810), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((4841, 4870), 'sweetpea.primitives.Window', 'Window', (['no_fn', '[color3]', '(3)', '(1)'], {}), '(no_fn, [color3], 3, 1)\n', (4847, 4870), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((5840, 5912), 'sweetpea.primitives.Window', 'Window', (['(lambda colors, texts: colors[0] == texts[0])', '[color, text]', '(1)', '(3)'], {}), '(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3)\n', (5846, 5912), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((5943, 6015), 'sweetpea.primitives.Window', 'Window', (['(lambda colors, texts: colors[0] == texts[0])', '[color, text]', '(1)', '(3)'], {}), '(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3)\n', (5949, 6015), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((6767, 6839), 'sweetpea.primitives.Window', 'Window', (['(lambda colors, texts: colors[0] == texts[0])', '[color, text]', '(2)', '(2)'], {}), '(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2)\n', (6773, 6839), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n'), ((6870, 6942), 'sweetpea.primitives.Window', 'Window', (['(lambda colors, texts: colors[0] == texts[0])', '[color, text]', '(2)', '(2)'], {}), '(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2)\n', (6876, 6942), False, 'from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window\n')]
|
import yaml
import os
import time
import datetime
from pycti.utils.constants import StixCyberObservableTypes
from weasyprint import HTML
from pycti import OpenCTIConnectorHelper, get_config_variable
from jinja2 import Environment, FileSystemLoader
class ExportReportPdf:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
# ExportReportPdf specific config settings
self.primary_color = get_config_variable(
"EXPORT_REPORT_PDF_PRIMARY_COLOR",
["export_report_pdf", "primary_color"],
config,
)
self.secondary_color = get_config_variable(
"EXPORT_REPORT_PDF_SECONDARY_COLOR",
["export_report_pdf", "secondary_color"],
config,
)
self.current_dir = os.path.abspath(os.path.dirname(__file__))
self.set_colors()
self.company_address_line_1 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1",
["export_report_pdf", "company_address_line_1"],
config,
)
self.company_address_line_2 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2",
["export_report_pdf", "company_address_line_2"],
config,
)
self.company_address_line_3 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3",
["export_report_pdf", "company_address_line_3"],
config,
)
self.company_phone_number = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER",
["export_report_pdf", "company_phone_number"],
config,
)
self.company_email = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_EMAIL",
["export_report_pdf", "company_email"],
config,
)
self.company_website = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_WEBSITE",
["export_report_pdf", "company_website"],
config,
)
self.indicators_only = get_config_variable(
"EXPORT_REPORT_PDF_INDICATORS_ONLY",
["export_report_pdf", "indicators_only"],
config,
)
self.defang_urls = get_config_variable(
"EXPORT_REPORT_PDF_DEFANG_URLS",
["export_report_pdf", "defang_urls"],
config,
)
    def _process_message(self, data):
        """Render the Report referenced in `data` to PDF and upload it.

        data: message dict with "entity_id", "entity_type" and "file_name".
        Raises ValueError for any entity type other than "Report".
        Returns the string "Export done" on success.
        """
        file_name = data["file_name"]
        # TODO this can be implemented to filter every entity and observable
        # max_marking = data["max_marking"]
        entity_type = data["entity_type"]
        if entity_type != "Report":
            raise ValueError(
                f'This Connector can only process entities of type "Report" and not of type "{entity_type}".'
            )
        # Get the Report
        report_dict = self.helper.api.report.read(id=data["entity_id"])
        # Extract values for inclusion in output pdf
        # Only the last (presumably most specific) marking definition is shown.
        report_marking = report_dict.get("objectMarking", None)
        if report_marking:
            report_marking = report_marking[-1]["definition"]
        report_name = report_dict["name"]
        report_description = report_dict.get("description", "No description available.")
        report_confidence = report_dict["confidence"]
        report_id = report_dict["id"]
        report_external_refs = [
            external_ref_dict["url"]
            for external_ref_dict in report_dict["externalReferences"]
        ]
        report_objs = report_dict["objects"]
        report_date = datetime.datetime.now().strftime("%b %d %Y")
        # Template context; entities/observables are grouped by entity type.
        context = {
            "report_name": report_name,
            "report_description": report_description,
            "report_marking": report_marking,
            "report_confidence": report_confidence,
            "report_external_refs": report_external_refs,
            "report_date": report_date,
            "company_address_line_1": self.company_address_line_1,
            "company_address_line_2": self.company_address_line_2,
            "company_address_line_3": self.company_address_line_3,
            "company_phone_number": self.company_phone_number,
            "company_email": self.company_email,
            "company_website": self.company_website,
            "entities": {},
            "observables": {},
        }
        # Process each STIX Object
        for report_obj in report_objs:
            obj_entity_type = report_obj["entity_type"]
            obj_id = report_obj["standard_id"]
            # Handle StixCyberObservables entities
            if obj_entity_type == "StixFile" or StixCyberObservableTypes.has_value(
                obj_entity_type
            ):
                observable_dict = self.helper.api.stix_cyber_observable.read(id=obj_id)
                # If only include indicators and
                # the observable doesn't have an indicator, skip it
                if self.indicators_only and not observable_dict["indicators"]:
                    self.helper.log_info(
                        f"Skipping {obj_entity_type} observable with value {observable_dict['observable_value']} as it was not an Indicator."
                    )
                    continue
                if obj_entity_type not in context["observables"]:
                    context["observables"][obj_entity_type] = []
                # Defang urls (only the first "http" occurrence is rewritten)
                if self.defang_urls and obj_entity_type == "Url":
                    observable_dict["observable_value"] = observable_dict[
                        "observable_value"
                    ].replace("http", "hxxp", 1)
                context["observables"][obj_entity_type].append(observable_dict)
            # Handle all other entities
            else:
                reader_func = self.get_reader(obj_entity_type)
                if reader_func is None:
                    self.helper.log_error(
                        f'Could not find a function to read entity with type "{obj_entity_type}"'
                    )
                    continue
                entity_dict = reader_func(id=obj_id)
                if obj_entity_type not in context["entities"]:
                    context["entities"][obj_entity_type] = []
                context["entities"][obj_entity_type].append(entity_dict)
        # Render html with input variables
        env = Environment(loader=FileSystemLoader(self.current_dir))
        template = env.get_template("resources/report.html")
        html_string = template.render(context)
        # Generate pdf from html string
        pdf_contents = HTML(string=html_string, base_url="resources").write_pdf()
        # Upload the output pdf
        self.helper.log_info(f"Uploading: {file_name}")
        self.helper.api.stix_domain_object.add_file(
            id=report_id,
            file_name=file_name,
            data=pdf_contents,
            mime_type="application/pdf",
        )
        return "Export done"
def set_colors(self):
with open(
os.path.join(self.current_dir, "resources/report.css.template"), "r"
) as f:
new_css = f.read()
new_css = new_css.replace("<primary_color>", self.primary_color)
new_css = new_css.replace("<secondary_color>", self.secondary_color)
with open(os.path.join(self.current_dir, "resources/report.css"), "w") as f:
f.write(new_css)
def get_reader(self, entity_type):
"""
Returns the function to use for calling the OpenCTI to
read data for a particular entity type.
entity_type: a str representing the entity type, i.e. Indicator
returns: a function or None if entity type is not supported
"""
reader = {
"Stix-Domain-Object": self.helper.api.stix_domain_object.read,
"Attack-Pattern": self.helper.api.attack_pattern.read,
"Campaign": self.helper.api.campaign.read,
"Note": self.helper.api.note.read,
"Observed-Data": self.helper.api.observed_data.read,
"Organization": self.helper.api.identity.read,
"Opinion": self.helper.api.opinion.read,
"Report": self.helper.api.report.read,
"Sector": self.helper.api.identity.read,
"System": self.helper.api.identity.read,
"Course-Of-Action": self.helper.api.course_of_action.read,
"Identity": self.helper.api.identity.read,
"Indicator": self.helper.api.indicator.read,
"Individual": self.helper.api.identity.read,
"Infrastructure": self.helper.api.infrastructure.read,
"Intrusion-Set": self.helper.api.intrusion_set.read,
"Malware": self.helper.api.malware.read,
"Threat-Actor": self.helper.api.threat_actor.read,
"Tool": self.helper.api.tool.read,
"Vulnerability": self.helper.api.vulnerability.read,
"Incident": self.helper.api.incident.read,
"City": self.helper.api.location.read,
"Country": self.helper.api.location.read,
"Region": self.helper.api.location.read,
"Position": self.helper.api.location.read,
"Location": self.helper.api.location.read,
}
return reader.get(entity_type, None)
# Start the main loop
def start(self):
self.helper.listen(self._process_message)
if __name__ == "__main__":
try:
connector_export_report_pdf = ExportReportPdf()
connector_export_report_pdf.start()
except Exception as e:
print(e)
time.sleep(10)
exit(0)
|
[
"pycti.get_config_variable",
"pycti.utils.constants.StixCyberObservableTypes.has_value",
"os.path.join",
"time.sleep",
"pycti.OpenCTIConnectorHelper",
"os.path.dirname",
"os.path.isfile",
"datetime.datetime.now",
"weasyprint.HTML",
"os.path.abspath",
"jinja2.FileSystemLoader"
] |
[((627, 657), 'pycti.OpenCTIConnectorHelper', 'OpenCTIConnectorHelper', (['config'], {}), '(config)\n', (649, 657), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((739, 845), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_PRIMARY_COLOR"""', "['export_report_pdf', 'primary_color']", 'config'], {}), "('EXPORT_REPORT_PDF_PRIMARY_COLOR', ['export_report_pdf',\n 'primary_color'], config)\n", (758, 845), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((920, 1031), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_SECONDARY_COLOR"""', "['export_report_pdf', 'secondary_color']", 'config'], {}), "('EXPORT_REPORT_PDF_SECONDARY_COLOR', [\n 'export_report_pdf', 'secondary_color'], config)\n", (939, 1031), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((1208, 1333), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1"""', "['export_report_pdf', 'company_address_line_1']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1', [\n 'export_report_pdf', 'company_address_line_1'], config)\n", (1227, 1333), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((1414, 1539), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2"""', "['export_report_pdf', 'company_address_line_2']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2', [\n 'export_report_pdf', 'company_address_line_2'], config)\n", (1433, 1539), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((1620, 1745), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3"""', "['export_report_pdf', 'company_address_line_3']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3', [\n 'export_report_pdf', 'company_address_line_3'], config)\n", (1639, 1745), False, 'from pycti import 
OpenCTIConnectorHelper, get_config_variable\n'), ((1824, 1945), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER"""', "['export_report_pdf', 'company_phone_number']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER', [\n 'export_report_pdf', 'company_phone_number'], config)\n", (1843, 1945), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((2017, 2123), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_EMAIL"""', "['export_report_pdf', 'company_email']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_EMAIL', ['export_report_pdf',\n 'company_email'], config)\n", (2036, 2123), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((2198, 2309), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_COMPANY_WEBSITE"""', "['export_report_pdf', 'company_website']", 'config'], {}), "('EXPORT_REPORT_PDF_COMPANY_WEBSITE', [\n 'export_report_pdf', 'company_website'], config)\n", (2217, 2309), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((2383, 2494), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_INDICATORS_ONLY"""', "['export_report_pdf', 'indicators_only']", 'config'], {}), "('EXPORT_REPORT_PDF_INDICATORS_ONLY', [\n 'export_report_pdf', 'indicators_only'], config)\n", (2402, 2494), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((2564, 2666), 'pycti.get_config_variable', 'get_config_variable', (['"""EXPORT_REPORT_PDF_DEFANG_URLS"""', "['export_report_pdf', 'defang_urls']", 'config'], {}), "('EXPORT_REPORT_PDF_DEFANG_URLS', ['export_report_pdf',\n 'defang_urls'], config)\n", (2583, 2666), False, 'from pycti import OpenCTIConnectorHelper, get_config_variable\n'), ((542, 574), 'os.path.isfile', 'os.path.isfile', (['config_file_path'], {}), '(config_file_path)\n', (556, 574), False, 'import os\n'), ((1117, 1142), 'os.path.dirname', 
'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1132, 1142), False, 'import os\n'), ((9926, 9940), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (9936, 9940), False, 'import time\n'), ((395, 420), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (410, 420), False, 'import os\n'), ((3887, 3910), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3908, 3910), False, 'import datetime\n'), ((4956, 5007), 'pycti.utils.constants.StixCyberObservableTypes.has_value', 'StixCyberObservableTypes.has_value', (['obj_entity_type'], {}), '(obj_entity_type)\n', (4990, 5007), False, 'from pycti.utils.constants import StixCyberObservableTypes\n'), ((6720, 6754), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['self.current_dir'], {}), '(self.current_dir)\n', (6736, 6754), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((6928, 6974), 'weasyprint.HTML', 'HTML', ([], {'string': 'html_string', 'base_url': '"""resources"""'}), "(string=html_string, base_url='resources')\n", (6932, 6974), False, 'from weasyprint import HTML\n'), ((7357, 7420), 'os.path.join', 'os.path.join', (['self.current_dir', '"""resources/report.css.template"""'], {}), "(self.current_dir, 'resources/report.css.template')\n", (7369, 7420), False, 'import os\n'), ((7650, 7704), 'os.path.join', 'os.path.join', (['self.current_dir', '"""resources/report.css"""'], {}), "(self.current_dir, 'resources/report.css')\n", (7662, 7704), False, 'import os\n')]
|
from django.urls import path
from .views import initiate_payment, callback
urlpatterns = [
path('', initiate_payment, name='pay'),
path('callback/', callback, name='callback'),
]
|
[
"django.urls.path"
] |
[((96, 134), 'django.urls.path', 'path', (['""""""', 'initiate_payment'], {'name': '"""pay"""'}), "('', initiate_payment, name='pay')\n", (100, 134), False, 'from django.urls import path\n'), ((140, 184), 'django.urls.path', 'path', (['"""callback/"""', 'callback'], {'name': '"""callback"""'}), "('callback/', callback, name='callback')\n", (144, 184), False, 'from django.urls import path\n')]
|
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
"""
config.py
- settings for the flask application object
"""
import os
import redis
from sweetrpg_library_api.application import constants
class BaseConfig(object):
DEBUG = bool(os.environ.get(constants.DEBUG) or True)
PORT = os.environ.get(constants.PORT) or 5000
# ASSETS_DEBUG = True
LOG_LEVEL = os.environ.get(constants.LOG_LEVEL) or "INFO"
DB_HOST = os.environ[constants.DB_HOST]
# DB_PORT = os.environ.get(constants.DB_PORT) or "27017"
DB_USERNAME = os.environ[constants.DB_USER]
DB_PASSWORD = os.environ[constants.DB_PW]
DB_NAME = os.environ[constants.DB_NAME]
DB_OPTS = os.environ.get(constants.DB_OPTS)
DB_URL = f"mongodb+srv://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}?{DB_OPTS}"
MONGODB_ALIAS_CONNECTION = "default"
MONGODB_URI = DB_URL
MONGODB_SETTINGS = {
"host": DB_URL,
"connect": False,
}
# used for encryption and session management
# SECRET_KEY = os.environ.get('SECRET_KEY') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
# CSRF_TOKEN = os.environ.get('CSRF_TOKEN') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
CACHE_REDIS_HOST = os.environ[constants.REDIS_HOST]
CACHE_REDIS_PORT = int(os.environ.get(constants.REDIS_PORT) or 6379)
# CACHE_REDIS_DB = int(os.environ.get(constants.REDIS_DB) or 7)
SESSION_TYPE = "redis"
SESSION_REDIS = redis.from_url(
f"redis://{os.environ[constants.REDIS_HOST]}:{int(os.environ.get(constants.REDIS_PORT) or 6379)}")
# SEGMENT_WRITE_KEY = os.environ.get(constants.SEGMENT_WRITE_KEY)
SERVER_NAME = os.environ.get(constants.SERVER_NAME)
|
[
"os.environ.get"
] |
[((681, 714), 'os.environ.get', 'os.environ.get', (['constants.DB_OPTS'], {}), '(constants.DB_OPTS)\n', (695, 714), False, 'import os\n'), ((1686, 1723), 'os.environ.get', 'os.environ.get', (['constants.SERVER_NAME'], {}), '(constants.SERVER_NAME)\n', (1700, 1723), False, 'import os\n'), ((297, 327), 'os.environ.get', 'os.environ.get', (['constants.PORT'], {}), '(constants.PORT)\n', (311, 327), False, 'import os\n'), ((378, 413), 'os.environ.get', 'os.environ.get', (['constants.LOG_LEVEL'], {}), '(constants.LOG_LEVEL)\n', (392, 413), False, 'import os\n'), ((245, 276), 'os.environ.get', 'os.environ.get', (['constants.DEBUG'], {}), '(constants.DEBUG)\n', (259, 276), False, 'import os\n'), ((1314, 1350), 'os.environ.get', 'os.environ.get', (['constants.REDIS_PORT'], {}), '(constants.REDIS_PORT)\n', (1328, 1350), False, 'import os\n'), ((1549, 1585), 'os.environ.get', 'os.environ.get', (['constants.REDIS_PORT'], {}), '(constants.REDIS_PORT)\n', (1563, 1585), False, 'import os\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
method for querying the sizes of the image
"""
def __init__(self, datasets, uniform_datasets):
_ConcatDataset.__init__(self, datasets)
self.uniform_datasets = uniform_datasets
def get_idxs(self, idx):
if self.uniform_datasets:
dataset_idx = np.random.randint(len(self.cumulative_sizes))
if dataset_idx == 0:
low = 0
else:
low = self.cumulative_sizes[dataset_idx - 1]
sample_idx = np.random.randint(0, self.cumulative_sizes[dataset_idx] - low)
else:
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_img_info(self, idx):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_img_info(sample_idx)
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx][sample_idx]
|
[
"torch.utils.data.dataset.ConcatDataset.__init__",
"numpy.random.randint",
"bisect.bisect_right"
] |
[((411, 450), 'torch.utils.data.dataset.ConcatDataset.__init__', '_ConcatDataset.__init__', (['self', 'datasets'], {}), '(self, datasets)\n', (434, 450), True, 'from torch.utils.data.dataset import ConcatDataset as _ConcatDataset\n'), ((798, 860), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.cumulative_sizes[dataset_idx] - low)'], {}), '(0, self.cumulative_sizes[dataset_idx] - low)\n', (815, 860), True, 'import numpy as np\n'), ((901, 948), 'bisect.bisect_right', 'bisect.bisect_right', (['self.cumulative_sizes', 'idx'], {}), '(self.cumulative_sizes, idx)\n', (920, 948), False, 'import bisect\n')]
|
import os
class Config:
# Statement for enabling the development environment
DEBUG = True
# Define the application directory
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Logging config.
LOG_DIR = "logs"
LOG_TYPE = ["LOG_TYPE", "watched"]
LOG_LEVEL = "DEBUG"
APP_LOG_NAME = "babylon_server.log"
# WWW_LOG_NAME is for log rotation, which is currently not set up.
# Log files sit in the `logs` directory.
WWW_LOG_NAME = "babylon_server.log"
LOG_MAX_BYTES = 100_000_000 # 100MB in bytes
LOG_COPIES = 5
# All the MySql options are under the assumption that the only database at this time is the
# `activity` database.
MYSQL_DATABASE_HOST = "localhost"
MYSQL_DATABASE_NAME = "activity"
MYSQL_DATABASE_PORT = "3308"
MYSQL_DATABASE_USER = "application"
MYSQL_DATABASE_PWD = "<PASSWORD>"
MYSQL_UNIX_SOCKET = "/var/run/mysqld/mysqld.sock"
SQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{MYSQL_DATABASE_USER}:{MYSQL_DATABASE_PWD}@{MYSQL_DATABASE_HOST}:{MYSQL_DATABASE_PORT}/{MYSQL_DATABASE_NAME}?{MYSQL_UNIX_SOCKET}' # noqa
# Pool recycle is recommended for MySQL.
# See https://docs.sqlalchemy.org/en/14/core/pooling.html#setting-pool-recycle
SQLALCHEMY_POOL_RECYCLE = 3600
SQLALCHEMY_BINDS = {
'db2': 'mysql://user:pass@localhost/activity',
'db3': 'mysql://user:pass@localhost/user'
}
|
[
"os.path.dirname"
] |
[((171, 196), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (186, 196), False, 'import os\n')]
|
from db_connection import db
class Agenda(db.Model):
__tablename__ = "agendas"
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date)
work_start = db.Column(db.Time)
work_end = db.Column(db.Time)
rest_start = db.Column(db.Time)
rest_end = db.Column(db.Time)
user_id = db.Column(db.Integer, db.ForeignKey('usuarios.id'))
user = db.relationship('User')
def __init__(self, date, work_start, work_end, rest_start, rest_end, user_id):
self.date = date
self.work_start = work_start
self.work_end = work_end
self.rest_start = rest_start
self.rest_end = rest_end
self.user_id = user_id
def update(self, date, work_start, work_end, rest_start, rest_end):
self.date = date
self.work_start = work_start
self.work_end = work_end
self.rest_start = rest_start
self.rest_end = rest_end
|
[
"db_connection.db.Column",
"db_connection.db.relationship",
"db_connection.db.ForeignKey"
] |
[((94, 133), 'db_connection.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (103, 133), False, 'from db_connection import db\n'), ((145, 163), 'db_connection.db.Column', 'db.Column', (['db.Date'], {}), '(db.Date)\n', (154, 163), False, 'from db_connection import db\n'), ((181, 199), 'db_connection.db.Column', 'db.Column', (['db.Time'], {}), '(db.Time)\n', (190, 199), False, 'from db_connection import db\n'), ((215, 233), 'db_connection.db.Column', 'db.Column', (['db.Time'], {}), '(db.Time)\n', (224, 233), False, 'from db_connection import db\n'), ((251, 269), 'db_connection.db.Column', 'db.Column', (['db.Time'], {}), '(db.Time)\n', (260, 269), False, 'from db_connection import db\n'), ((285, 303), 'db_connection.db.Column', 'db.Column', (['db.Time'], {}), '(db.Time)\n', (294, 303), False, 'from db_connection import db\n'), ((381, 404), 'db_connection.db.relationship', 'db.relationship', (['"""User"""'], {}), "('User')\n", (396, 404), False, 'from db_connection import db\n'), ((340, 368), 'db_connection.db.ForeignKey', 'db.ForeignKey', (['"""usuarios.id"""'], {}), "('usuarios.id')\n", (353, 368), False, 'from db_connection import db\n')]
|
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import copy
import numpy as np
from mmdet.datasets.builder import PIPELINES
from ..datasets import get_annotation_mmdet_format
@PIPELINES.register_module()
class LoadImageFromOTEDataset:
"""
Pipeline element that loads an image from a OTE Dataset on the fly. Can do conversion to float 32 if needed.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the image
results['dataset_id']: id of the dataset to which the item belongs
results['index']: index of the item in the dataset
:param to_float32: optional bool, True to convert images to fp32. defaults to False
"""
def __init__(self, to_float32: bool = False):
self.to_float32 = to_float32
def __call__(self, results):
dataset_item = results['dataset_item']
img = dataset_item.numpy
shape = img.shape
assert img.shape[0] == results['height'], f"{img.shape[0]} != {results['height']}"
assert img.shape[1] == results['width'], f"{img.shape[1]} != {results['width']}"
filename = f"Dataset item index {results['index']}"
results['filename'] = filename
results['ori_filename'] = filename
results['img'] = img
results['img_shape'] = shape
results['ori_shape'] = shape
# Set initial values for default meta_keys
results['pad_shape'] = shape
num_channels = 1 if len(shape) < 3 else shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
results['img_fields'] = ['img']
if self.to_float32:
results['img'] = results['img'].astype(np.float32)
return results
@PIPELINES.register_module()
class LoadAnnotationFromOTEDataset:
"""
Pipeline element that loads an annotation from a OTE Dataset on the fly.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the annotation
results['ann_info']['label_list']: list of all labels in the project
"""
def __init__(self, min_size : int, with_bbox: bool = True, with_label: bool = True, with_mask: bool = False, with_seg: bool = False,
poly2mask: bool = True, with_text: bool = False, domain=None):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.with_text = with_text
self.domain = domain
self.min_size = min_size
@staticmethod
def _load_bboxes(results, ann_info):
results['bbox_fields'].append('gt_bboxes')
results['gt_bboxes'] = copy.deepcopy(ann_info['bboxes'])
return results
@staticmethod
def _load_labels(results, ann_info):
results['gt_labels'] = copy.deepcopy(ann_info['labels'])
return results
@staticmethod
def _load_masks(results, ann_info):
results['mask_fields'].append('gt_masks')
results['gt_masks'] = copy.deepcopy(ann_info['masks'])
return results
def __call__(self, results):
dataset_item = results['dataset_item']
label_list = results['ann_info']['label_list']
ann_info = get_annotation_mmdet_format(dataset_item, label_list, self.domain, self.min_size)
if self.with_bbox:
results = self._load_bboxes(results, ann_info)
if results is None or len(results['gt_bboxes']) == 0:
return None
if self.with_label:
results = self._load_labels(results, ann_info)
if self.with_mask:
results = self._load_masks(results, ann_info)
return results
|
[
"numpy.zeros",
"numpy.ones",
"mmdet.datasets.builder.PIPELINES.register_module",
"copy.deepcopy"
] |
[((715, 742), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (740, 742), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((2438, 2465), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (2463, 2465), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((3481, 3514), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['bboxes']"], {}), "(ann_info['bboxes'])\n", (3494, 3514), False, 'import copy\n'), ((3629, 3662), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['labels']"], {}), "(ann_info['labels'])\n", (3642, 3662), False, 'import copy\n'), ((3825, 3857), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['masks']"], {}), "(ann_info['masks'])\n", (3838, 3857), False, 'import copy\n'), ((2154, 2194), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2162, 2194), True, 'import numpy as np\n'), ((2212, 2251), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2219, 2251), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basic', '0002_auto_20170727_1741'),
]
operations = [
migrations.AddField(
model_name='entrypoint',
name='entry_function',
field=models.CharField(default='', help_text='Django function, with syntax: "app_name.function_name"', max_length=100),
),
]
|
[
"django.db.models.CharField"
] |
[((409, 526), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Django function, with syntax: "app_name.function_name\\""""', 'max_length': '(100)'}), '(default=\'\', help_text=\n \'Django function, with syntax: "app_name.function_name"\', max_length=100)\n', (425, 526), False, 'from django.db import migrations, models\n')]
|
from simple_network.tcp_app_server import *
import httptools
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = 'ButenkoMS <<EMAIL>>'
# ======================================================================
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
#
SERVER_KEYWORD = b'http server inline'
SERVER_ADDRESS = ('localhost', 25000)
BSC__USE_READ_WITH_FIXED_BUFFER = True # "Optimized for speed". Good for Named Clients.
# BSC__USE_READ_WITH_FIXED_BUFFER = False # "Optimized for memory". Good for big amount of Unknown Clients (raw,
# http, etc.) if you have small server.
BSC__SOCKET_READ_FIXED_BUFFER_SIZE = 1024 ** 2
BSC__USE_NODELAY_INET = True
BSC__REUSE_GATE_ADDR = True
BSC__REUSE_GATE_PORT = True
LINE_TRACE_ALLOWED = True
#
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
# ======================================================================
class RawClientCheckerAllRaw(CheckIsRawConnection):
def __call__(self, app_server: ASockIOCore, client_info: Connection):
return True
def run_http_server():
io_iteration_timeout = 0.5
# ADD SERVER GATE CONNECTIONS
set_of_tcp_settings = set()
tcp_settings = ConnectionSettings(ConnectionDirectionRole.server, SERVER_ADDRESS, SERVER_KEYWORD)
set_of_tcp_settings.add(tcp_settings)
# CREATE SERVER
http_server = ASockIOCore(set_of_tcp_settings)
# SET SERVER SETTINGS
http_server.raw_checker_for_new_incoming_connections = RawClientCheckerAllRaw()
http_server.unknown_clients_are_allowed = True
http_server.should_get_client_addr_info_on_connection = False
http_server.use_speed_optimized_socket_read = BSC__USE_READ_WITH_FIXED_BUFFER
http_server.socket_read_fixed_buffer_size.result = BSC__SOCKET_READ_FIXED_BUFFER_SIZE
http_server.use_nodelay_inet = BSC__USE_NODELAY_INET
http_server.reuse_gate_addr = BSC__REUSE_GATE_ADDR
http_server.reuse_gate_port = BSC__REUSE_GATE_PORT
# START SERVER
with asock_io_core_connect(http_server, True, backlog=1000) as server:
http_server.need_to_auto_check_incoming_raw_connection = True
clients_per_transport_id = dict()
# RUN SERVER LOOP
while True:
io_iteration_result = server.io_iteration(io_iteration_timeout)
# CLIENT CONNECTED
for another_client_id in io_iteration_result.newly_connected_unknown_clients:
clients_per_transport_id[another_client_id] = HttpClientData(another_client_id, server)
# CLIENT HAVE DATA TO READ
for another_client_id in io_iteration_result.clients_have_data_to_read:
clients_per_transport_id[another_client_id].data_received()
# CLIENT CLOSED
for another_client_id in io_iteration_result.clients_with_disconnected_connection:
if clients_per_transport_id[another_client_id].socket_error():
del clients_per_transport_id[another_client_id]
print('Server had been Shut Down.')
# ==============================================================================================================
# !!!!! IMPORTANT !!!!!
# NEXT CODE SHOULD BE EQUIVALENT TO ASYNCIO HTTP SERVER'S CODE FROM "https://github.com/MagicStack/vmbench" PROJECT
# (BENCHMARKING TOOL FROM 'UVLOOP' DEVELOPERS) FOR FAIR COMPARISON, SO IT'S SO DIRTY.
# (IT'S ALMOST EQUIVALENT: IT DOES NOT HAVE FEW CRITICAL vmbench's BUGS)
_RESP_CACHE = {}
class HttpRequest:
__slots__ = ('_protocol', '_url', '_headers', '_version')
def __init__(self, protocol, url, headers, version):
self._protocol = protocol
self._url = url
self._headers = headers
self._version = version
class HttpResponse:
__slots__ = ('_protocol', '_request', '_headers_sent')
def __init__(self, protocol, request: HttpRequest):
self._protocol = protocol
self._request = request
self._headers_sent = False
def write(self, data):
self._protocol.output_list.append(b''.join([
'HTTP/{} 200 OK\r\n'.format(
self._request._version).encode('latin-1'),
b'Content-Type: text/plain\r\n',
'Content-Length: {}\r\n'.format(len(data)).encode('latin-1'),
b'\r\n',
data
]))
class HttpClientData:
__slots__ = ('server', 'output_list', 'transport_id',
'_current_request', '_current_parser',
'_current_url', '_current_headers', '_last_piece_of_data',
'_previous_piece_of_data')
def __init__(self, transport_id, server: ASockIOCore):
self.server = server
self.transport_id = transport_id
self.output_list = list()
self._current_parser = httptools.HttpRequestParser(self)
self._current_headers = list()
self._current_request = None
self._current_url = None
self._last_piece_of_data = None
self._previous_piece_of_data = None
def data_received(self):
try:
for message in self.server.get_messages_from_client(self.transport_id):
# print('IN {}: {}'.format(self.transport_id, bytes(message)))
self._current_parser.feed_data(message)
self.server.send_messages_to_client(self.transport_id, self.output_list)
except Exception as err:
print('EXCEPTION:', err)
self.server.mark_client_connection_as_should_be_closed_immediately(self.transport_id, False)
# raise err
del self.output_list[:]
# self.output_list.clear()
def socket_error(self):
self._current_request = self._current_parser = None
self.server.remove_client(self.transport_id)
return True
# =============================================
# ==== BEGIN of HttpRequestParser methods: ====
# def on_message_begin(self):
# pass
def on_url(self, url):
if self._current_url:
self._current_url += url
else:
self._current_url = url
# def on_status(self, data):
# pass
def on_header(self, name, value):
self._current_headers.append((name, value))
def on_headers_complete(self):
try:
self._current_request = HttpRequest(
self, self._current_url, self._current_headers,
self._current_parser.get_http_version())
self.handle(self._current_request, HttpResponse(self, self._current_request))
except:
print('ON HEADERS COMPLETE. ID: {}. Last: {}. Previous : {}.'.format(
self.transport_id, self._last_piece_of_data, self._previous_piece_of_data))
raise
# def on_body(self, data):
# pass
# def on_message_complete(self):
# pass
# def on_chunk_header(self):
# pass
# def on_chunk_complete(self):
# pass
# ==== END of HttpRequestParser methods====
# =========================================
def handle(self, request, response: HttpResponse):
parsed_url = httptools.parse_url(self._current_url)
payload_size = parsed_url.path.decode('ascii')[1:]
if not payload_size:
payload_size = 1024
else:
payload_size = int(payload_size)
resp = _RESP_CACHE.get(payload_size)
if resp is None:
resp = b'X' * payload_size
_RESP_CACHE[payload_size] = resp
response.write(resp)
self._current_request = None
self._current_url = None
self._current_headers = list()
# print('KEEP ALIVE:', self._current_parser.should_keep_alive())
if not self._current_parser.should_keep_alive():
self.server.mark_client_connection_as_ready_to_be_closed(self.transport_id, False)
if __name__ == '__main__':
run_http_server()
|
[
"httptools.parse_url",
"httptools.HttpRequestParser"
] |
[((4859, 4892), 'httptools.HttpRequestParser', 'httptools.HttpRequestParser', (['self'], {}), '(self)\n', (4886, 4892), False, 'import httptools\n'), ((7194, 7232), 'httptools.parse_url', 'httptools.parse_url', (['self._current_url'], {}), '(self._current_url)\n', (7213, 7232), False, 'import httptools\n')]
|
"""
Link extractor based on lxml.html
"""
import lxml.html
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class LxmlParserLinkExtractor(object):
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
self.links = []
def _extract_links(self, response_text, response_url):
html = lxml.html.fromstring(response_text)
html.make_links_absolute(response_url)
for e, a, l, p in html.iterlinks():
if self.scan_tag(e.tag):
if self.scan_attr(a):
link = Link(self.process_attr(l), text=e.text)
self.links.append(link)
links = unique_list(self.links, key=lambda link: link.url) \
if self.unique else self.links
return links
def extract_links(self, response):
return self._extract_links(response.body, response.url)
|
[
"scrapy.utils.python.unique"
] |
[((933, 983), 'scrapy.utils.python.unique', 'unique_list', (['self.links'], {'key': '(lambda link: link.url)'}), '(self.links, key=lambda link: link.url)\n', (944, 983), True, 'from scrapy.utils.python import unique as unique_list\n')]
|
import numpy as np
import argparse
from sklearn.svm import LinearSVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
y = np.load(args.labels, allow_pickle=True)
# http://scikit-learn.sourceforge.net/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC
regr = make_pipeline(StandardScaler(),
LinearSVR(verbose=args.verbose, tol = 1e-5, max_iter = 30))
regr.fit(X,y)
np.savetxt(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=",")
|
[
"argparse.ArgumentParser",
"sklearn.svm.LinearSVR",
"sklearn.preprocessing.StandardScaler",
"numpy.savetxt",
"numpy.load"
] |
[((217, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (240, 242), False, 'import argparse\n'), ((538, 579), 'numpy.load', 'np.load', (['args.datapath'], {'allow_pickle': '(True)'}), '(args.datapath, allow_pickle=True)\n', (545, 579), True, 'import numpy as np\n'), ((584, 623), 'numpy.load', 'np.load', (['args.labels'], {'allow_pickle': '(True)'}), '(args.labels, allow_pickle=True)\n', (591, 623), True, 'import numpy as np\n'), ((855, 934), 'numpy.savetxt', 'np.savetxt', (['args.outputpath', "regr.named_steps['linearsvr'].coef_"], {'delimiter': '""","""'}), "(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=',')\n", (865, 934), True, 'import numpy as np\n'), ((757, 773), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (771, 773), False, 'from sklearn.preprocessing import StandardScaler\n'), ((779, 834), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {'verbose': 'args.verbose', 'tol': '(1e-05)', 'max_iter': '(30)'}), '(verbose=args.verbose, tol=1e-05, max_iter=30)\n', (788, 834), False, 'from sklearn.svm import LinearSVR\n')]
|
from guardian.shortcuts import get_objects_for_user
from django.http import Http404, HttpResponseRedirect
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from vdw.samples.models import Sample, Project, Batch, Cohort
from .forms import CohortForm
def registry(request):
projects = get_objects_for_user(request.user, 'samples.view_project')
batch_count = Count('batches', distinct=True)
sample_count = Count('samples', distinct=True)
# Distinct count on batch necessary since the join inflates the numbers
projects = projects.annotate(sample_count=sample_count,
batch_count=batch_count)
staged_samples = \
Sample.objects.filter(published=False, project__in=projects) \
.select_related('batch', 'project')
return render(request, 'samples/registry.html', {
'projects': list(projects),
'staged_samples': list(staged_samples),
})
def project_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
batch_count = Count('batches', distinct=True)
sample_count = Count('samples', distinct=True)
# Distinct count on batch necessary since the join inflates the numbers
try:
project = projects.annotate(sample_count=sample_count,
batch_count=batch_count).get(pk=pk)
except Project.DoesNotExist:
raise Http404
batches = Batch.objects.filter(project=project) \
.annotate(sample_count=Count('samples'))
return render(request, 'samples/project.html', {
'project': project,
'batches': batches,
})
def batch_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
sample_count = Count('samples', distinct=True)
try:
batch = Batch.objects.annotate(sample_count=sample_count) \
.filter(project__in=projects).select_related('project').get(pk=pk)
except Batch.DoesNotExist:
raise Http404
samples = Sample.objects.filter(batch=batch)
return render(request, 'samples/batch.html', {
'batch': batch,
'project': batch.project,
'samples': samples,
})
def sample_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
try:
sample = Sample.objects.filter(project__in=projects) \
.select_related('batch', 'project').get(pk=pk)
except Sample.DoesNotExist:
raise Http404
return render(request, 'samples/sample.html', {
'sample': sample,
'batch': sample.batch,
'project': sample.project,
})
def cohort_form(request, pk=None):
if request.user.has_perm('samples.change_cohort'):
cohorts = Cohort.objects.all()
cohort = get_object_or_404(Cohort, pk=pk) if pk else None
else:
cohorts = Cohort.objects.filter(user=request.user)
cohort = \
get_object_or_404(Cohort, pk=pk, user=request.user) if pk else None
# Apply permissions..
samples = Sample.objects.all()
if request.method == 'POST':
form = CohortForm(samples, data=request.POST, instance=cohort,
initial={'user': request.user})
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('cohorts'))
else:
form = CohortForm(samples, instance=cohort)
return render(request, 'samples/cohort-form.html', {
'form': form,
'cohort': cohort,
'cohorts': cohorts,
})
def cohort_delete(request, pk):
if request.user.has_perm('samples.change_cohort'):
cohort = get_object_or_404(Cohort, pk=pk)
else:
cohort = get_object_or_404(Cohort, pk=pk, user=request.user)
cohort.delete()
return HttpResponseRedirect(reverse('cohorts'))
|
[
"django.shortcuts.render",
"django.db.models.Count",
"vdw.samples.models.Sample.objects.filter",
"vdw.samples.models.Sample.objects.all",
"django.shortcuts.get_object_or_404",
"guardian.shortcuts.get_objects_for_user",
"vdw.samples.models.Cohort.objects.all",
"django.core.urlresolvers.reverse",
"vdw.samples.models.Batch.objects.annotate",
"vdw.samples.models.Cohort.objects.filter",
"vdw.samples.models.Batch.objects.filter"
] |
[((373, 431), 'guardian.shortcuts.get_objects_for_user', 'get_objects_for_user', (['request.user', '"""samples.view_project"""'], {}), "(request.user, 'samples.view_project')\n", (393, 431), False, 'from guardian.shortcuts import get_objects_for_user\n'), ((451, 482), 'django.db.models.Count', 'Count', (['"""batches"""'], {'distinct': '(True)'}), "('batches', distinct=True)\n", (456, 482), False, 'from django.db.models import Count\n'), ((502, 533), 'django.db.models.Count', 'Count', (['"""samples"""'], {'distinct': '(True)'}), "('samples', distinct=True)\n", (507, 533), False, 'from django.db.models import Count\n'), ((1066, 1124), 'guardian.shortcuts.get_objects_for_user', 'get_objects_for_user', (['request.user', '"""samples.view_project"""'], {}), "(request.user, 'samples.view_project')\n", (1086, 1124), False, 'from guardian.shortcuts import get_objects_for_user\n'), ((1144, 1175), 'django.db.models.Count', 'Count', (['"""batches"""'], {'distinct': '(True)'}), "('batches', distinct=True)\n", (1149, 1175), False, 'from django.db.models import Count\n'), ((1195, 1226), 'django.db.models.Count', 'Count', (['"""samples"""'], {'distinct': '(True)'}), "('samples', distinct=True)\n", (1200, 1226), False, 'from django.db.models import Count\n'), ((1619, 1704), 'django.shortcuts.render', 'render', (['request', '"""samples/project.html"""', "{'project': project, 'batches': batches}"], {}), "(request, 'samples/project.html', {'project': project, 'batches':\n batches})\n", (1625, 1704), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1774, 1832), 'guardian.shortcuts.get_objects_for_user', 'get_objects_for_user', (['request.user', '"""samples.view_project"""'], {}), "(request.user, 'samples.view_project')\n", (1794, 1832), False, 'from guardian.shortcuts import get_objects_for_user\n'), ((1853, 1884), 'django.db.models.Count', 'Count', (['"""samples"""'], {'distinct': '(True)'}), "('samples', distinct=True)\n", (1858, 1884), False, 'from 
django.db.models import Count\n'), ((2110, 2144), 'vdw.samples.models.Sample.objects.filter', 'Sample.objects.filter', ([], {'batch': 'batch'}), '(batch=batch)\n', (2131, 2144), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((2157, 2263), 'django.shortcuts.render', 'render', (['request', '"""samples/batch.html"""', "{'batch': batch, 'project': batch.project, 'samples': samples}"], {}), "(request, 'samples/batch.html', {'batch': batch, 'project': batch.\n project, 'samples': samples})\n", (2163, 2263), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2341, 2399), 'guardian.shortcuts.get_objects_for_user', 'get_objects_for_user', (['request.user', '"""samples.view_project"""'], {}), "(request.user, 'samples.view_project')\n", (2361, 2399), False, 'from guardian.shortcuts import get_objects_for_user\n'), ((2598, 2711), 'django.shortcuts.render', 'render', (['request', '"""samples/sample.html"""', "{'sample': sample, 'batch': sample.batch, 'project': sample.project}"], {}), "(request, 'samples/sample.html', {'sample': sample, 'batch': sample.\n batch, 'project': sample.project})\n", (2604, 2711), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3144, 3164), 'vdw.samples.models.Sample.objects.all', 'Sample.objects.all', ([], {}), '()\n', (3162, 3164), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((3514, 3615), 'django.shortcuts.render', 'render', (['request', '"""samples/cohort-form.html"""', "{'form': form, 'cohort': cohort, 'cohorts': cohorts}"], {}), "(request, 'samples/cohort-form.html', {'form': form, 'cohort': cohort,\n 'cohorts': cohorts})\n", (3520, 3615), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2848, 2868), 'vdw.samples.models.Cohort.objects.all', 'Cohort.objects.all', ([], {}), '()\n', (2866, 2868), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((2963, 3003), 'vdw.samples.models.Cohort.objects.filter', 
'Cohort.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (2984, 3003), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((3749, 3781), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Cohort'], {'pk': 'pk'}), '(Cohort, pk=pk)\n', (3766, 3781), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3809, 3860), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Cohort'], {'pk': 'pk', 'user': 'request.user'}), '(Cohort, pk=pk, user=request.user)\n', (3826, 3860), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3915, 3933), 'django.core.urlresolvers.reverse', 'reverse', (['"""cohorts"""'], {}), "('cohorts')\n", (3922, 3933), False, 'from django.core.urlresolvers import reverse\n'), ((761, 821), 'vdw.samples.models.Sample.objects.filter', 'Sample.objects.filter', ([], {'published': '(False)', 'project__in': 'projects'}), '(published=False, project__in=projects)\n', (782, 821), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((1518, 1555), 'vdw.samples.models.Batch.objects.filter', 'Batch.objects.filter', ([], {'project': 'project'}), '(project=project)\n', (1538, 1555), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((1589, 1605), 'django.db.models.Count', 'Count', (['"""samples"""'], {}), "('samples')\n", (1594, 1605), False, 'from django.db.models import Count\n'), ((2886, 2918), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Cohort'], {'pk': 'pk'}), '(Cohort, pk=pk)\n', (2903, 2918), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3035, 3086), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Cohort'], {'pk': 'pk', 'user': 'request.user'}), '(Cohort, pk=pk, user=request.user)\n', (3052, 3086), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3420, 3438), 'django.core.urlresolvers.reverse', 'reverse', (['"""cohorts"""'], {}), 
"('cohorts')\n", (3427, 3438), False, 'from django.core.urlresolvers import reverse\n'), ((2427, 2470), 'vdw.samples.models.Sample.objects.filter', 'Sample.objects.filter', ([], {'project__in': 'projects'}), '(project__in=projects)\n', (2448, 2470), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n'), ((1911, 1960), 'vdw.samples.models.Batch.objects.annotate', 'Batch.objects.annotate', ([], {'sample_count': 'sample_count'}), '(sample_count=sample_count)\n', (1933, 1960), False, 'from vdw.samples.models import Sample, Project, Batch, Cohort\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# Adapted from: https://github.com/zpincus/celltool/blob/master/celltool/numerics/image_warp.py
from scipy import ndimage
import numpy as np
from probreg import bcpd
import tifffile
import matplotlib.pyplot as plt
import napari
from magicgui import magic_factory, widgets
from napari.types import PointsData, ImageData
from typing_extensions import Annotated
def _make_inverse_warp(from_points, to_points, output_region, approximate_grid):
x_min, y_min, z_min, x_max, y_max, z_max = output_region
if approximate_grid is None: approximate_grid = 1
x_steps = (x_max - x_min) // approximate_grid
y_steps = (y_max - y_min) // approximate_grid
z_steps = (z_max - z_min) // approximate_grid
x, y, z = np.mgrid[x_min:x_max:x_steps*1j, y_min:y_max:y_steps*1j, z_min:z_max:z_steps*1j]
transform = _make_warp(to_points, from_points, x, y, z)
if approximate_grid != 1:
# linearly interpolate the zoomed transform grid
new_x, new_y, new_z = np.mgrid[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1]
x_fracs, x_indices = np.modf((x_steps-1)*(new_x-x_min)/float(x_max-x_min))
y_fracs, y_indices = np.modf((y_steps-1)*(new_y-y_min)/float(y_max-y_min))
z_fracs, z_indices = np.modf((z_steps-1)*(new_z-z_min)/float(z_max-z_min))
x_indices = x_indices.astype(int)
y_indices = y_indices.astype(int)
z_indices = z_indices.astype(int)
x1 = 1 - x_fracs
y1 = 1 - y_fracs
z1 = 1 - z_fracs
ix1 = (x_indices+1).clip(0, x_steps-1)
iy1 = (y_indices+1).clip(0, y_steps-1)
iz1 = (z_indices+1).clip(0, z_steps-1)
transform_x = _trilinear_interpolation(0, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_y = _trilinear_interpolation(1, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_z = _trilinear_interpolation(2, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform = [transform_x, transform_y, transform_z]
return transform
def _trilinear_interpolation(d, t, x0, y0, z0, x1, y1, z1, ix0, iy0, iz0, ix1, iy1, iz1):
t000 = t[d][(ix0, iy0, iz0)]
t001 = t[d][(ix0, iy0, iz1)]
t010 = t[d][(ix0, iy1, iz0)]
t100 = t[d][(ix1, iy0, iz0)]
t011 = t[d][(ix0, iy1, iz1)]
t101 = t[d][(ix1, iy0, iz1)]
t110 = t[d][(ix1, iy1, iz0)]
t111 = t[d][(ix1, iy1, iz1)]
return t000*x0*y0*z0 + t001*x0*y0*z1 + t010*x0*y1*z0 + t100*x1*y0*z0 + t011*x0*y1*z1 + t101*x1*y0*z1 + t110*x1*y1*z0 + t111*x1*y1*z1
def _U(x):
_small = 1e-100
return (x**2) * np.where(x<_small, 0, np.log(x))
def _interpoint_distances(points):
xd = np.subtract.outer(points[:,0], points[:,0])
yd = np.subtract.outer(points[:,1], points[:,1])
zd = np.subtract.outer(points[:,2], points[:,2])
return np.sqrt(xd**2 + yd**2 + zd**2)
def _make_L_matrix(points):
n = len(points)
K = _U(_interpoint_distances(points))
P = np.ones((n, 4))
P[:,1:] = points
O = np.zeros((4, 4))
L = np.asarray(np.bmat([[K, P],[P.transpose(), O]]))
return L
def _calculate_f(coeffs, points, x, y, z):
w = coeffs[:-3]
a1, ax, ay, az = coeffs[-4:]
summation = np.zeros(x.shape)
for wi, Pi in zip(w, points):
summation += wi * _U(np.sqrt((x-Pi[0])**2 + (y-Pi[1])**2 + (z-Pi[2])**2))
return a1 + ax*x + ay*y +az*z + summation
def _make_warp(from_points, to_points, x_vals, y_vals, z_vals):
from_points, to_points = np.asarray(from_points), np.asarray(to_points)
err = np.seterr(divide='ignore')
L = _make_L_matrix(from_points)
V = np.resize(to_points, (len(to_points)+4, 3))
V[-3:, :] = 0
coeffs = np.dot(np.linalg.pinv(L), V)
print('L, V, coeffs', L.shape, V.shape, coeffs.shape)
x_warp = _calculate_f(coeffs[:,0], from_points, x_vals, y_vals, z_vals)
y_warp = _calculate_f(coeffs[:,1], from_points, x_vals, y_vals, z_vals)
z_warp = _calculate_f(coeffs[:,2], from_points, x_vals, y_vals, z_vals)
np.seterr(**err)
return [x_warp, y_warp, z_warp]
@magic_factory
def make_image_warping(
viewer: "napari.viewer.Viewer",
moving_image: ImageData,
fixed_image: ImageData,
moving_points: PointsData,
transformed_points: PointsData,
interpolation_order: Annotated[int, {"min": 0, "max": 10, "step": 1}]=1,
approximate_grid: Annotated[int, {"min": 1, "max": 10, "step": 1}]=1
):
from napari.qt import thread_worker
pbar = widgets.ProgressBar()
pbar.range = (0, 0) # unknown duration
make_image_warping.insert(0, pbar) # add progress bar to the top of widget
# this function will be called after we return
def _add_data(return_value, self=make_image_warping):
data, kwargs = return_value
viewer.add_image(data, **kwargs)
self.pop(0).hide() # remove the progress bar
@thread_worker(connect={"returned": _add_data})
def _warp_images(from_points, to_points, image, output_region, interpolation_order=5, approximate_grid=10):
print('Entered warp_images')
transform = _make_inverse_warp(from_points, to_points, output_region, approximate_grid)
warped_image = ndimage.map_coordinates(np.asarray(image), transform, order=interpolation_order)
kwargs = dict(
name='warped_image'
)
return (warped_image, kwargs)
print('Warping image volume')
assert len(moving_points) == len(transformed_points), 'Moving and transformed points must be of same length.'
output_region = (0, 0, 0, int(fixed_image.shape[0] / 1), int(fixed_image.shape[1] / 1), int(fixed_image.shape[2] / 1))
print(output_region)
_warp_images(from_points=moving_points,
to_points=transformed_points,
image=moving_image,
output_region=output_region,
interpolation_order=interpolation_order,
approximate_grid=approximate_grid)
|
[
"numpy.sqrt",
"numpy.subtract.outer",
"numpy.ones",
"napari.qt.thread_worker",
"numpy.linalg.pinv",
"numpy.log",
"numpy.asarray",
"numpy.zeros",
"magicgui.widgets.ProgressBar",
"numpy.seterr"
] |
[((2818, 2863), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 0]', 'points[:, 0]'], {}), '(points[:, 0], points[:, 0])\n', (2835, 2863), True, 'import numpy as np\n'), ((2871, 2916), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 1]', 'points[:, 1]'], {}), '(points[:, 1], points[:, 1])\n', (2888, 2916), True, 'import numpy as np\n'), ((2924, 2969), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 2]', 'points[:, 2]'], {}), '(points[:, 2], points[:, 2])\n', (2941, 2969), True, 'import numpy as np\n'), ((2979, 3015), 'numpy.sqrt', 'np.sqrt', (['(xd ** 2 + yd ** 2 + zd ** 2)'], {}), '(xd ** 2 + yd ** 2 + zd ** 2)\n', (2986, 3015), True, 'import numpy as np\n'), ((3109, 3124), 'numpy.ones', 'np.ones', (['(n, 4)'], {}), '((n, 4))\n', (3116, 3124), True, 'import numpy as np\n'), ((3154, 3170), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (3162, 3170), True, 'import numpy as np\n'), ((3354, 3371), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3362, 3371), True, 'import numpy as np\n'), ((3685, 3711), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (3694, 3711), True, 'import numpy as np\n'), ((4150, 4166), 'numpy.seterr', 'np.seterr', ([], {}), '(**err)\n', (4159, 4166), True, 'import numpy as np\n'), ((4608, 4629), 'magicgui.widgets.ProgressBar', 'widgets.ProgressBar', ([], {}), '()\n', (4627, 4629), False, 'from magicgui import magic_factory, widgets\n'), ((5001, 5047), 'napari.qt.thread_worker', 'thread_worker', ([], {'connect': "{'returned': _add_data}"}), "(connect={'returned': _add_data})\n", (5014, 5047), False, 'from napari.qt import thread_worker\n'), ((3628, 3651), 'numpy.asarray', 'np.asarray', (['from_points'], {}), '(from_points)\n', (3638, 3651), True, 'import numpy as np\n'), ((3653, 3674), 'numpy.asarray', 'np.asarray', (['to_points'], {}), '(to_points)\n', (3663, 3674), True, 'import numpy as np\n'), ((3838, 3855), 'numpy.linalg.pinv', 'np.linalg.pinv', 
(['L'], {}), '(L)\n', (3852, 3855), True, 'import numpy as np\n'), ((2762, 2771), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (2768, 2771), True, 'import numpy as np\n'), ((5340, 5357), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5350, 5357), True, 'import numpy as np\n'), ((3435, 3498), 'numpy.sqrt', 'np.sqrt', (['((x - Pi[0]) ** 2 + (y - Pi[1]) ** 2 + (z - Pi[2]) ** 2)'], {}), '((x - Pi[0]) ** 2 + (y - Pi[1]) ** 2 + (z - Pi[2]) ** 2)\n', (3442, 3498), True, 'import numpy as np\n')]
|
from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
from main.celery import read_reference, clean_raw_23andme
from main.celery_helper import vcf_header
import os
import tempfile
import requests
import requests_mock
from main.celery import process_file
class ParsingTestCase(TestCase):
"""
test that files are parsed correctly
"""
def setUp(self):
"""
Set up the app for following tests
"""
settings.DEBUG = True
call_command('init_proj_config')
self.factory = RequestFactory()
data = {"access_token": '<PASSWORD>',
"refresh_token": '<PASSWORD>',
"expires_in": 36000}
self.oh_member = OpenHumansMember.create(oh_id='12345678',
data=data)
self.oh_member.save()
self.user = self.oh_member.user
self.user.set_password('<PASSWORD>')
self.user.save()
def test_read_reference(self):
"""
Test function to read the reference file.
"""
REF_23ANDME_FILE = os.path.join(os.path.dirname(__file__),
'fixtures/test_reference.txt')
ref = read_reference(REF_23ANDME_FILE)
self.assertEqual(ref, {'1': {'82154': 'A', '752566': 'G'}})
def test_vcf_header(self):
"""
Test function to create a VCF header
"""
hd = vcf_header(
source='23andme',
reference='http://example.com',
format_info=['<ID=GT,Number=1,Type=String,Description="GT">'])
self.assertEqual(len(hd), 6)
expected_header_fields = ["##fileformat",
"##fileDate",
'##source',
'##reference',
'##FORMAT',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER' +
'\tINFO\tFORMAT\t23ANDME_DATA']
self.assertEqual([i.split("=")[0] for i in hd], expected_header_fields)
def test_23andme_cleaning(self):
"""
Test that cleanup works as expected
"""
with requests_mock.Mocker() as m:
get_url = 'http://example.com/23andme_file.txt'
closed_input_file = os.path.join(os.path.dirname(__file__),
'fixtures/23andme_invalid.txt')
fhandle = open(closed_input_file, "rb")
content = fhandle.read()
m.register_uri('GET',
get_url,
content=content,
status_code=200)
tf_in = tempfile.NamedTemporaryFile(suffix=".txt")
tf_in.write(requests.get(get_url).content)
tf_in.flush()
cleaned_input = clean_raw_23andme(tf_in)
cleaned_input.seek(0)
lines = cleaned_input.read()
self.assertEqual(lines.find('<NAME>'), -1)
self.assertNotEqual(lines.find('data file generated'), -1)
@vcr.use_cassette('main/tests/fixtures/process_file.yaml',
record_mode='none')
def test_process_file(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_bz2.yaml',
record_mode='none')
def test_process_file_bz2(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.bz2',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.bz2?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_gz.yaml',
record_mode='none')
def test_process_file_gz(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.gz',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.gz?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_zip.yaml',
record_mode='none')
def test_process_file_zip(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.zip',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.zip?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
|
[
"django.test.RequestFactory",
"open_humans.models.OpenHumansMember.create",
"main.celery.read_reference",
"vcr.use_cassette",
"django.core.management.call_command",
"main.celery_helper.vcf_header",
"requests_mock.Mocker",
"main.celery.clean_raw_23andme",
"requests.get",
"os.path.dirname",
"tempfile.NamedTemporaryFile",
"main.celery.process_file"
] |
[((3225, 3302), 'vcr.use_cassette', 'vcr.use_cassette', (['"""main/tests/fixtures/process_file.yaml"""'], {'record_mode': '"""none"""'}), "('main/tests/fixtures/process_file.yaml', record_mode='none')\n", (3241, 3302), False, 'import vcr\n'), ((3974, 4060), 'vcr.use_cassette', 'vcr.use_cassette', (['"""main/tests/fixtures/process_file_bz2.yaml"""'], {'record_mode': '"""none"""'}), "('main/tests/fixtures/process_file_bz2.yaml', record_mode=\n 'none')\n", (3990, 4060), False, 'import vcr\n'), ((4739, 4824), 'vcr.use_cassette', 'vcr.use_cassette', (['"""main/tests/fixtures/process_file_gz.yaml"""'], {'record_mode': '"""none"""'}), "('main/tests/fixtures/process_file_gz.yaml', record_mode='none'\n )\n", (4755, 4824), False, 'import vcr\n'), ((5500, 5586), 'vcr.use_cassette', 'vcr.use_cassette', (['"""main/tests/fixtures/process_file_zip.yaml"""'], {'record_mode': '"""none"""'}), "('main/tests/fixtures/process_file_zip.yaml', record_mode=\n 'none')\n", (5516, 5586), False, 'import vcr\n'), ((608, 640), 'django.core.management.call_command', 'call_command', (['"""init_proj_config"""'], {}), "('init_proj_config')\n", (620, 640), False, 'from django.core.management import call_command\n'), ((664, 680), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (678, 680), False, 'from django.test import TestCase, RequestFactory\n'), ((836, 888), 'open_humans.models.OpenHumansMember.create', 'OpenHumansMember.create', ([], {'oh_id': '"""12345678"""', 'data': 'data'}), "(oh_id='12345678', data=data)\n", (859, 888), False, 'from open_humans.models import OpenHumansMember\n'), ((1340, 1372), 'main.celery.read_reference', 'read_reference', (['REF_23ANDME_FILE'], {}), '(REF_23ANDME_FILE)\n', (1354, 1372), False, 'from main.celery import read_reference, clean_raw_23andme\n'), ((1555, 1683), 'main.celery_helper.vcf_header', 'vcf_header', ([], {'source': '"""23andme"""', 'reference': '"""http://example.com"""', 'format_info': 
'[\'<ID=GT,Number=1,Type=String,Description="GT">\']'}), '(source=\'23andme\', reference=\'http://example.com\', format_info=[\n \'<ID=GT,Number=1,Type=String,Description="GT">\'])\n', (1565, 1683), False, 'from main.celery_helper import vcf_header\n'), ((3904, 3967), 'main.celery.process_file', 'process_file', (['dfile', '"""myaccesstoken"""', 'member', "dfile['metadata']"], {}), "(dfile, 'myaccesstoken', member, dfile['metadata'])\n", (3916, 3967), False, 'from main.celery import process_file\n'), ((4669, 4732), 'main.celery.process_file', 'process_file', (['dfile', '"""myaccesstoken"""', 'member', "dfile['metadata']"], {}), "(dfile, 'myaccesstoken', member, dfile['metadata'])\n", (4681, 4732), False, 'from main.celery import process_file\n'), ((5430, 5493), 'main.celery.process_file', 'process_file', (['dfile', '"""myaccesstoken"""', 'member', "dfile['metadata']"], {}), "(dfile, 'myaccesstoken', member, dfile['metadata'])\n", (5442, 5493), False, 'from main.celery import process_file\n'), ((6187, 6250), 'main.celery.process_file', 'process_file', (['dfile', '"""myaccesstoken"""', 'member', "dfile['metadata']"], {}), "(dfile, 'myaccesstoken', member, dfile['metadata'])\n", (6199, 6250), False, 'from main.celery import process_file\n'), ((1228, 1253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1243, 1253), False, 'import os\n'), ((2335, 2357), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (2355, 2357), False, 'import requests_mock\n'), ((2840, 2882), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".txt"""'}), "(suffix='.txt')\n", (2867, 2882), False, 'import tempfile\n'), ((2993, 3017), 'main.celery.clean_raw_23andme', 'clean_raw_23andme', (['tf_in'], {}), '(tf_in)\n', (3010, 3017), False, 'from main.celery import read_reference, clean_raw_23andme\n'), ((2469, 2494), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2484, 2494), False, 'import os\n'), 
((2907, 2928), 'requests.get', 'requests.get', (['get_url'], {}), '(get_url)\n', (2919, 2928), False, 'import requests\n')]
|
# -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
Embedding Layer
"""
#************************************************************
# Imported Libraries
#************************************************************
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import KeyedVectors
import pdb
class EmbeddingLayer(nn.Module):
def __init__(self, params, vocab, pretrained_emb_path = None):
super(EmbeddingLayer, self).__init__()
# embedding layer
self.lang = vocab.lang
self.vocab = vocab
self.emb_dim = params.emb_dim
self.embeddings = nn.Embedding(vocab.vocab_size, self.emb_dim, padding_idx = vocab.PAD_ID)
self.init_emb(self.embeddings, pretrained_emb_path, vocab)
# ijcai dropout, p = 0.2
self.emb_do = nn.Dropout(p = params.emb_do)
self.use_cuda = params.cuda
def init_emb(self, embeddings, pretrained_emb_path, vocab):
if pretrained_emb_path is not None:
self.load_pretrained(pretrained_emb_path, embeddings, vocab)
else:
"""
Initialize embedding weight like word2vec.
The u_embedding is a uniform distribution in [-0.5/emb_dim, 0.5/emb_dim],
"""
initrange = 0.5 / self.emb_dim
embeddings.weight.data.uniform_(-initrange, initrange)
embeddings.weight.data[vocab.PAD_ID] = 0
def load_pretrained(self, pretrained_emb_path, embeddings, vocab):
print('loading {} embeddings for {}'.format(pretrained_emb_path, self.lang))
try:
pre_emb = KeyedVectors.load_word2vec_format(pretrained_emb_path, binary = False)
except:
print('Did not found {} embeddings for {}'.format(pretrained_emb_path, self.lang))
return
# ignore only pad
for i in range(1, len(vocab.idx2word)):
try:
embeddings.weight.data[i] = torch.from_numpy(pre_emb[vocab.idx2word[i]])
except:
continue
def forward(self, batch_input):
input_word_embs = self.embeddings(batch_input)
input_word_embs = self.emb_do(input_word_embs)
return input_word_embs
|
[
"gensim.models.KeyedVectors.load_word2vec_format",
"torch.from_numpy",
"torch.nn.Dropout",
"torch.nn.Embedding"
] |
[((622, 692), 'torch.nn.Embedding', 'nn.Embedding', (['vocab.vocab_size', 'self.emb_dim'], {'padding_idx': 'vocab.PAD_ID'}), '(vocab.vocab_size, self.emb_dim, padding_idx=vocab.PAD_ID)\n', (634, 692), True, 'import torch.nn as nn\n'), ((805, 832), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'params.emb_do'}), '(p=params.emb_do)\n', (815, 832), True, 'import torch.nn as nn\n'), ((1520, 1588), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['pretrained_emb_path'], {'binary': '(False)'}), '(pretrained_emb_path, binary=False)\n', (1553, 1588), False, 'from gensim.models import KeyedVectors\n'), ((1818, 1862), 'torch.from_numpy', 'torch.from_numpy', (['pre_emb[vocab.idx2word[i]]'], {}), '(pre_emb[vocab.idx2word[i]])\n', (1834, 1862), False, 'import torch\n')]
|
import abc
import socket
import logging
import asyncio
import warnings
import h2.config
import h2.exceptions
from .utils import DeadlineWrapper
from .const import Status
from .stream import send_message, recv_message
from .stream import StreamIterator
from .metadata import Metadata, Deadline
from .protocol import H2Protocol, AbstractHandler
from .exceptions import GRPCError, ProtocolError
from .encoding.base import GRPC_CONTENT_TYPE
from .encoding.proto import ProtoCodec
log = logging.getLogger(__name__)
class Stream(StreamIterator):
"""
Represents gRPC method call – HTTP/2 request/stream, and everything you
need to communicate with client in order to handle this request.
As you can see, every method handler accepts single positional argument -
stream:
.. code-block:: python
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, codec, recv_type, send_type,
*, metadata, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._codec = codec
self._recv_type = recv_type
self._send_type = send_type
self.metadata = metadata
self.deadline = deadline
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
return await recv_message(self._stream, self._codec, self._recv_type)
async def send_initial_metadata(self):
"""Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
await self._stream.send_headers([
(':status', '200'),
('content-type', (GRPC_CONTENT_TYPE + '+'
+ self._codec.__content_subtype__)),
])
self._send_initial_metadata_done = True
async def send_message(self, message, **kwargs):
"""Coroutine to send message to the client.
If server sends UNARY response, then you should call this coroutine only
once. If server sends STREAM response, then you can call this coroutine
as many times as you need.
:param message: message object
"""
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata()
async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None):
"""Coroutine to send trailers with trailing metadata to the client.
This coroutine allows sending trailers-only responses, in case of some
failure conditions during handling current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
"""
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if not self._send_message_count and status is Status.OK:
raise ProtocolError('{!r} requires non-empty response'
.format(status))
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message', status_message))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
async def request_handler(mapping, _stream, headers, codec, release_stream):
try:
headers_map = dict(headers)
if headers_map[':method'] != 'POST':
await _stream.send_headers([
(':status', '405'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
content_type = headers_map.get('content-type')
if content_type is None:
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Missing content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != codec.__content_subtype__
):
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Unacceptable content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
if headers_map.get('te') != 'trailers':
await _stream.send_headers([
(':status', '400'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Required "te: trailers" header is missing'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
h2_path = headers_map[':path']
method = mapping.get(h2_path)
if method is None:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNIMPLEMENTED.value)),
('grpc-message', 'Method not found'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
metadata = Metadata.from_headers(headers)
try:
deadline = Deadline.from_metadata(metadata)
except ValueError:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Invalid grpc-timeout header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
async with Stream(_stream, method.cardinality, codec,
method.request_type, method.reply_type,
metadata=metadata, deadline=deadline) as stream:
deadline_wrapper = None
try:
if deadline:
deadline_wrapper = DeadlineWrapper()
with deadline_wrapper.start(deadline):
with deadline_wrapper:
await method.func(stream)
else:
await method.func(stream)
except asyncio.TimeoutError:
if deadline_wrapper and deadline_wrapper.cancelled:
log.exception('Deadline exceeded')
raise GRPCError(Status.DEADLINE_EXCEEDED)
else:
log.exception('Timeout occurred')
raise
except asyncio.CancelledError:
log.exception('Request was cancelled')
raise
except Exception:
log.exception('Application error')
raise
except Exception:
log.exception('Server error')
finally:
release_stream()
class _GC(abc.ABC):
_gc_counter = 0
@property
@abc.abstractmethod
def __gc_interval__(self):
raise NotImplementedError
@abc.abstractmethod
def __gc_collect__(self):
pass
def __gc_step__(self):
self._gc_counter += 1
if not (self._gc_counter % self.__gc_interval__):
self.__gc_collect__()
class Handler(_GC, AbstractHandler):
__gc_interval__ = 10
closing = False
def __init__(self, mapping, codec, *, loop):
self.mapping = mapping
self.codec = codec
self.loop = loop
self._tasks = {}
self._cancelled = set()
def __gc_collect__(self):
self._tasks = {s: t for s, t in self._tasks.items()
if not t.done()}
self._cancelled = {t for t in self._cancelled
if not t.done()}
def accept(self, stream, headers, release_stream):
self.__gc_step__()
self._tasks[stream] = self.loop.create_task(
request_handler(self.mapping, stream, headers, self.codec,
release_stream)
)
def cancel(self, stream):
task = self._tasks.pop(stream)
task.cancel()
self._cancelled.add(task)
def close(self):
for task in self._tasks.values():
task.cancel()
self._cancelled.update(self._tasks.values())
self.closing = True
async def wait_closed(self):
if self._cancelled:
await asyncio.wait(self._cancelled, loop=self.loop)
def check_closed(self):
self.__gc_collect__()
return not self._tasks and not self._cancelled
class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='utf-8',
)
self._tcp_server = None
self._handlers = set()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
async def start(self, host=None, port=None, *,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created.
"""
if self._tcp_server is not None:
raise RuntimeError('Server is already started')
self._tcp_server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
)
def close(self):
"""Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
self._tcp_server.close()
for handler in self._handlers:
handler.close()
async def wait_closed(self):
"""Coroutine to wait until all existing request handlers will exit
properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
await self._tcp_server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop)
|
[
"logging.getLogger",
"warnings.warn",
"asyncio.wait"
] |
[((486, 513), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (503, 513), False, 'import logging\n'), ((3945, 4062), 'warnings.warn', 'warnings.warn', (['""""end" argument is deprecated, use "stream.send_trailing_metadata" explicitly"""'], {'stacklevel': '(2)'}), '(\n \'"end" argument is deprecated, use "stream.send_trailing_metadata" explicitly\'\n , stacklevel=2)\n', (3958, 4062), False, 'import warnings\n'), ((13305, 13350), 'asyncio.wait', 'asyncio.wait', (['self._cancelled'], {'loop': 'self.loop'}), '(self._cancelled, loop=self.loop)\n', (13317, 13350), False, 'import asyncio\n')]
|
#!/usr/bin/env python
import argparse
from eva import EvaProgram, Input, Output
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
import numpy as np
import time
from eva.std.numeric import horizontal_sum
def dot(x, y):
return np.dot(x, y)
def generate_inputs_naive(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
for n in range(size):
# each element is a list (i.e. a vector of size 1)
inputs[f"{label}_{n}"] = [i]
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot_naive(size):
"""Vector dot product with vector size of 1"""
fhe_dot = EvaProgram("fhe_dot", vec_size=1)
with fhe_dot:
a = np.array([Input(f"x_{n}") for n in range(size)]).reshape(1, size)
b = np.array([Input(f"w_{k}") for k in range(size)]).reshape(size, 1)
out = dot(a, b)
Output("y", out[0][0])
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def generate_inputs(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
# all data is stored in a single list of size `size`
inputs[label] = list(range(size))
for n in range(size):
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot(size):
"""Vector dot product with CKKS vector size equal to the size"""
fhe_dot = EvaProgram("fhe_dot", vec_size=size)
with fhe_dot:
a = np.array([Input("x")])
b = np.array([Input(f"w")])
out = dot(a, b)
Output("y", horizontal_sum(out))
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def benchmark_vector_dot(size, mode="SIMD"):
if mode == "SIMD":
# generate program with SIMD-style
inputs, inputs_np = generate_inputs(size, label="x")
weights, weights_np = generate_inputs(size, label="w")
fhe_dot = generate_vector_dot(size)
else:
# generate program with vector size = 1
inputs, inputs_np = generate_inputs_naive(size, label="x")
weights, weights_np = generate_inputs_naive(size, label="w")
fhe_dot = generate_vector_dot_naive(size)
# compiling program
data = {**weights, **inputs}
compiler = CKKSCompiler(config={"security_level": "128", "warn_vec_size": "false"})
compiled, params, signature = compiler.compile(fhe_dot)
public_ctx, secret_ctx = generate_keys(params)
enc_inputs = public_ctx.encrypt(data, signature)
# Running program
start = time.time()
enc_outputs = public_ctx.execute(compiled, enc_inputs)
end = time.time()
run_time = end - start
# decrypt the output
outputs = secret_ctx.decrypt(enc_outputs, signature)
y = np.array(outputs["y"])
# get time for plaintext dot product
start = time.time()
true_y = inputs_np.dot(weights_np)
end = time.time()
plain_run_time = end - start
# verifying correctness of output
np.testing.assert_allclose(y, true_y)
return run_time, plain_run_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a dot product program")
parser.add_argument(
"--mode",
default="SIMD",
choices=["SIMD", "naive"],
)
args = parser.parse_args()
results_cipher = dict()
results_plain = dict()
if args.mode == "SIMD":
print("Generating code in SIMD style")
else:
print("Generating code in naive style")
for size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
time_cipher, time_plain = benchmark_vector_dot(size, args.mode)
results_cipher[f"{size}"] = time_cipher
results_plain[f"{size}"] = time_plain
print(f"Done vector size {size}, CKKS time: {time_cipher}")
print("Done")
print("CKKS times:", results_cipher)
print("Plain text times:", results_plain)
|
[
"eva.EvaProgram",
"eva.Output",
"argparse.ArgumentParser",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot",
"eva.ckks.CKKSCompiler",
"numpy.zeros",
"eva.std.numeric.horizontal_sum",
"eva.Input",
"time.time",
"eva.seal.generate_keys"
] |
[((251, 263), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (257, 263), True, 'import numpy as np\n'), ((346, 360), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (354, 360), True, 'import numpy as np\n'), ((668, 701), 'eva.EvaProgram', 'EvaProgram', (['"""fhe_dot"""'], {'vec_size': '(1)'}), "('fhe_dot', vec_size=1)\n", (678, 701), False, 'from eva import EvaProgram, Input, Output\n'), ((1096, 1110), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1104, 1110), True, 'import numpy as np\n'), ((1429, 1465), 'eva.EvaProgram', 'EvaProgram', (['"""fhe_dot"""'], {'vec_size': 'size'}), "('fhe_dot', vec_size=size)\n", (1439, 1465), False, 'from eva import EvaProgram, Input, Output\n'), ((2307, 2379), 'eva.ckks.CKKSCompiler', 'CKKSCompiler', ([], {'config': "{'security_level': '128', 'warn_vec_size': 'false'}"}), "(config={'security_level': '128', 'warn_vec_size': 'false'})\n", (2319, 2379), False, 'from eva.ckks import CKKSCompiler\n'), ((2469, 2490), 'eva.seal.generate_keys', 'generate_keys', (['params'], {}), '(params)\n', (2482, 2490), False, 'from eva.seal import generate_keys\n'), ((2579, 2590), 'time.time', 'time.time', ([], {}), '()\n', (2588, 2590), False, 'import time\n'), ((2660, 2671), 'time.time', 'time.time', ([], {}), '()\n', (2669, 2671), False, 'import time\n'), ((2790, 2812), 'numpy.array', 'np.array', (["outputs['y']"], {}), "(outputs['y'])\n", (2798, 2812), True, 'import numpy as np\n'), ((2867, 2878), 'time.time', 'time.time', ([], {}), '()\n', (2876, 2878), False, 'import time\n'), ((2928, 2939), 'time.time', 'time.time', ([], {}), '()\n', (2937, 2939), False, 'import time\n'), ((3016, 3053), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'true_y'], {}), '(y, true_y)\n', (3042, 3053), True, 'import numpy as np\n'), ((3133, 3197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run a dot product program"""'}), "(description='Run a dot product program')\n", (3156, 3197), False, 
'import argparse\n'), ((910, 932), 'eva.Output', 'Output', (['"""y"""', 'out[0][0]'], {}), "('y', out[0][0])\n", (916, 932), False, 'from eva import EvaProgram, Input, Output\n'), ((1601, 1620), 'eva.std.numeric.horizontal_sum', 'horizontal_sum', (['out'], {}), '(out)\n', (1615, 1620), False, 'from eva.std.numeric import horizontal_sum\n'), ((1506, 1516), 'eva.Input', 'Input', (['"""x"""'], {}), "('x')\n", (1511, 1516), False, 'from eva import EvaProgram, Input, Output\n'), ((1541, 1552), 'eva.Input', 'Input', (['f"""w"""'], {}), "(f'w')\n", (1546, 1552), False, 'from eva import EvaProgram, Input, Output\n'), ((742, 757), 'eva.Input', 'Input', (['f"""x_{n}"""'], {}), "(f'x_{n}')\n", (747, 757), False, 'from eva import EvaProgram, Input, Output\n'), ((820, 835), 'eva.Input', 'Input', (['f"""w_{k}"""'], {}), "(f'w_{k}')\n", (825, 835), False, 'from eva import EvaProgram, Input, Output\n')]
|
from __future__ import division
from __future__ import print_function
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module manages a distributed RAM cache as a global python dictionary in
each AppEngine instance. AppEngine can spin up new instances or kill old ones
at any time. Each instance's RAM cache is independent and might not have the
same entries as found in the RAM caches of other instances.
Each instance will do the work needed to compute a given RAM cache entry
itself. The values computed in a given instance will speed up future requests
made to that instance only.
When the user edits something in the app, the updated entity is stored in
datastore. Also, the singleton SharedInvalidate entity is updated with the
timestamp of the change. Every request handler must start processing a request
by first calling SharedInvalidate.check_for_distributed_invalidation() which
checks for any needed invalidations and clears RAM cache entries in
that instance if needed.
For now, there is only a single RAM cache per instance and when anything is
invalidated, that entire RAM cache is completely cleared. In the future,
invalidations could be compartmentalized by RAM cache type, or even specific
entity IDs. Monorail uses that approach, but existing ChromeStatus code does
not need it.
Calling code must not mutate any value that is passed into set() or returned
from get(). If calling code needs to mutate such objects, it should call
copy.copy() or copy.deepcopy() to avoid unintentional cumulative mutations.
Unlike memcache, this RAM cache has no concept of expiration time. So,
whenever a cached value would become invalid, it must be invalidated.
"""
import logging
import time as time_module
from google.appengine.ext import db
global_cache = {}
expires = {}
# Whenever the cache would have more than this many items, some
# random item is dropped, or the entire cache is cleared.
# If our instances are killed by appengine for exceeding memory limits,
# we can configure larger instances and/or reduce this value.
MAX_CACHE_SIZE = 10000
def set(key, value, time=None):
"""Emulate the memcache.set() method using a RAM cache."""
if len(global_cache) + 1 > MAX_CACHE_SIZE:
popped_item = global_cache.popitem()
if popped_item[0] in expires:
del expires[popped_item[0]]
global_cache[key] = value
if time:
expires[key] = int(time_module.time()) + time
def _check_expired(keys):
now = int(time_module.time())
for key in keys:
if key in expires and expires[key] < now:
del expires[key]
del global_cache[key]
def get(key):
"""Emulate the memcache.get() method using a RAM cache."""
_check_expired([key])
verb = 'hit' if key in global_cache else 'miss'
logging.info('cache %s for %r', verb, key)
return global_cache.get(key)
def get_multi(keys):
"""Emulate the memcache.get_multi() method using a RAM cache."""
_check_expired(keys)
return {
key: global_cache[key]
for key in keys
if key in global_cache
}
def set_multi(entries):
"""Emulate the memcache.set_multi() method using a RAM cache."""
if len(global_cache) + len(entries) > MAX_CACHE_SIZE:
global_cache.clear()
expires.clear()
global_cache.update(entries)
def delete(key):
"""Emulate the memcache.delete() method using a RAM cache."""
if key in global_cache:
del global_cache[key]
flush_all() # Note: this is wasteful but infrequent in our app.
def flush_all():
"""Emulate the memcache.flush_all() method using a RAM cache.
This does not clear the RAM cache in this instance. That happens
at the start of the next request when the request handler calls
SharedInvalidate.check_for_distributed_invalidation().
"""
SharedInvalidate.invalidate()
class SharedInvalidateParent(db.Model):
pass
class SharedInvalidate(db.Model):
PARENT_ENTITY_ID = 123
PARENT_KEY = db.Key.from_path('SharedInvalidateParent', PARENT_ENTITY_ID)
SINGLETON_ENTITY_ID = 456
SINGLETON_KEY = db.Key.from_path(
'SharedInvalidateParent', PARENT_ENTITY_ID,
'SharedInvalidate', SINGLETON_ENTITY_ID)
last_processed_timestamp = None
updated = db.DateTimeProperty(auto_now=True)
@classmethod
def invalidate(cls):
"""Tell this and other appengine instances to invalidate their caches."""
singleton = cls.get(cls.SINGLETON_KEY)
if not singleton:
singleton = SharedInvalidate(key=cls.SINGLETON_KEY)
singleton.put() # automatically sets singleton.updated to now.
# The cache in each instance (including this one) will be
# cleared on the next call to check_for_distributed_invalidation()
# which should happen at the start of request processing.
@classmethod
def check_for_distributed_invalidation(cls):
"""Check if any appengine instance has invlidated the cache."""
singleton = cls.get(cls.SINGLETON_KEY, read_policy=db.STRONG_CONSISTENCY)
if not singleton:
return # No news is good news
if (cls.last_processed_timestamp is None or
singleton.updated > cls.last_processed_timestamp):
global_cache.clear()
expires.clear()
cls.last_processed_timestamp = singleton.updated
def check_for_distributed_invalidation():
"""Just a shorthand way to call the class method."""
SharedInvalidate.check_for_distributed_invalidation()
|
[
"google.appengine.ext.db.Key.from_path",
"logging.info",
"google.appengine.ext.db.DateTimeProperty",
"time.time"
] |
[((3303, 3345), 'logging.info', 'logging.info', (['"""cache %s for %r"""', 'verb', 'key'], {}), "('cache %s for %r', verb, key)\n", (3315, 3345), False, 'import logging\n'), ((4462, 4522), 'google.appengine.ext.db.Key.from_path', 'db.Key.from_path', (['"""SharedInvalidateParent"""', 'PARENT_ENTITY_ID'], {}), "('SharedInvalidateParent', PARENT_ENTITY_ID)\n", (4478, 4522), False, 'from google.appengine.ext import db\n'), ((4569, 4674), 'google.appengine.ext.db.Key.from_path', 'db.Key.from_path', (['"""SharedInvalidateParent"""', 'PARENT_ENTITY_ID', '"""SharedInvalidate"""', 'SINGLETON_ENTITY_ID'], {}), "('SharedInvalidateParent', PARENT_ENTITY_ID,\n 'SharedInvalidate', SINGLETON_ENTITY_ID)\n", (4585, 4674), False, 'from google.appengine.ext import db\n'), ((4731, 4765), 'google.appengine.ext.db.DateTimeProperty', 'db.DateTimeProperty', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4750, 4765), False, 'from google.appengine.ext import db\n'), ((3014, 3032), 'time.time', 'time_module.time', ([], {}), '()\n', (3030, 3032), True, 'import time as time_module\n'), ((2947, 2965), 'time.time', 'time_module.time', ([], {}), '()\n', (2963, 2965), True, 'import time as time_module\n')]
|
import intrepyd
from intrepyd.iec611312py.plcopen import parse_plc_open_file
from intrepyd.iec611312py.stmtprinter import StmtPrinter
import unittest
from . import from_fixture_path
class TestOpenPLC(unittest.TestCase):
def test_simple_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/simple1.xml'))
self.assertEqual(1, len(pous))
printer = StmtPrinter()
printer.processStatements(pous[0].statements)
self.assertEqual('output1 := (local1 + input1);', printer.result)
def test_datatype_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/datatype1.xml'))
self.assertEqual(1, len(pous))
def test_if_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if1.xml'))
self.assertEqual(1, len(pous))
def test_if_2(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if2.xml'))
self.assertEqual(1, len(pous))
def test_if_3(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if3.xml'))
self.assertEqual(1, len(pous))
def test_if_4(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if4.xml'))
self.assertEqual(1, len(pous))
printer = StmtPrinter()
printer.processStatements(pous[0].statements)
self.assertEqual('IF (100 < (UDINT_TO_DINT((CONST_IN.Tolerance_Max / 100)) * UnitDelay_2_DSTATE)) THEN overInfusion := 1; END_IF;',
printer.result)
# It is slow, as expected
# def test_infusion_pump(self):
# pous = parsePlcOpenFile('tests/openplc/GPCA_SW_Functional_subst.xml')
# self.assertEqual(1, len(pous))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"intrepyd.iec611312py.stmtprinter.StmtPrinter"
] |
[((1715, 1730), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1728, 1730), False, 'import unittest\n'), ((384, 397), 'intrepyd.iec611312py.stmtprinter.StmtPrinter', 'StmtPrinter', ([], {}), '()\n', (395, 397), False, 'from intrepyd.iec611312py.stmtprinter import StmtPrinter\n'), ((1246, 1259), 'intrepyd.iec611312py.stmtprinter.StmtPrinter', 'StmtPrinter', ([], {}), '()\n', (1257, 1259), False, 'from intrepyd.iec611312py.stmtprinter import StmtPrinter\n')]
|
from unittest import TestCase
import json
from helpers import *
from pytezos import ContractInterface, pytezos, MichelsonRuntimeError
from pytezos.context.mixin import ExecutionContext
token_a = "<KEY>"
token_b = "<KEY>"
token_c = "<KEY>"
token_d = "<KEY>"
pair_ab = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_ac = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_cd = {
"token_a_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
},
"token_b_type" : {
"fa2": {
"token_address": token_d,
"token_id": 3
}
}
}
class TokenToTokenRouterTest(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
dex_code = open("./integration_tests/compiled/Dex.tz", 'r').read()
cls.dex = ContractInterface.from_michelson(dex_code)
initial_storage_michelson = json.load(open("./integration_tests/compiled/storage.json", 'r'))
cls.init_storage = cls.dex.storage.decode(initial_storage_michelson)
def test_tt_token_to_token_router(self):
amount_in=10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
# same swap but one by one
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 0,
"operation": "a_to_b",
}],
amount_in=amount_in,
min_amount_out=1,
receiver=julian,
deadline=100_000
))
transfers = parse_token_transfers(res)
token_b_out = next(v for v in transfers if v["destination"] == julian)
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 1,
"operation": "a_to_b",
}],
amount_in=token_b_out["amount"],
min_amount_out=1,
receiver=julian,
deadline=100_000,
))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["amount"], token_c_out["amount"])
def test_tt_router_triangle(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_bc, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_ac, 100_000_000_000, 100_000_000_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_c_out["amount"], 9909) # ~ 9910 by compound interest formula
def test_tt_router_ab_ba(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 9939)
def test_tt_router_impossible_path(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 1111, 3333))
res = chain.execute(self.dex.addPair(pair_cd, 5555, 7777))
# can't find path
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
def test_tt_router_cant_overbuy(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 100_000))
res = chain.execute(self.dex.addPair(pair_bc, 10_000, 10_000))
res = chain.execute(self.dex.addPair(pair_ac, 1_000_000, 1_000_000))
# overbuy at the very beginning
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 99_999)
# overbuy at the end
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
# overbuy in the middle
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
def test_tt_router_mixed_fa2_fa12(self):
pair_ab = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type": {
"fa2": {
"token_address": token_a,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
amount_in=10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "b_to_a",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
|
[
"pytezos.ContractInterface.from_michelson"
] |
[((1489, 1531), 'pytezos.ContractInterface.from_michelson', 'ContractInterface.from_michelson', (['dex_code'], {}), '(dex_code)\n', (1521, 1531), False, 'from pytezos import ContractInterface, pytezos, MichelsonRuntimeError\n')]
|
import os
from collections import defaultdict
from rbc.omnisci_backend import Array
from rbc.errors import OmnisciServerError
from numba import types as nb_types
import pytest
rbc_omnisci = pytest.importorskip('rbc.omniscidb')
available_version, reason = rbc_omnisci.is_available()
pytestmark = pytest.mark.skipif(not available_version, reason=reason)
@pytest.fixture(scope='module')
def omnisci():
# TODO: use omnisci_fixture from rbc/tests/__init__.py
config = rbc_omnisci.get_client_config(debug=not True)
m = rbc_omnisci.RemoteOmnisci(**config)
table_name = os.path.splitext(os.path.basename(__file__))[0]
m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
sqltypes = ['FLOAT[]', 'DOUBLE[]',
'TINYINT[]', 'SMALLINT[]', 'INT[]', 'BIGINT[]',
'BOOLEAN[]']
# todo: TEXT ENCODING DICT, TEXT ENCODING NONE, TIMESTAMP, TIME,
# DATE, DECIMAL/NUMERIC, GEOMETRY: POINT, LINESTRING, POLYGON,
# MULTIPOLYGON, See
# https://www.omnisci.com/docs/latest/5_datatypes.html
colnames = ['f4', 'f8', 'i1', 'i2', 'i4', 'i8', 'b']
table_defn = ',\n'.join('%s %s' % (n, t)
for t, n in zip(sqltypes, colnames))
m.sql_execute(f'CREATE TABLE IF NOT EXISTS {table_name} ({table_defn});')
data = defaultdict(list)
for i in range(5):
for j, n in enumerate(colnames):
if n == 'b':
data[n].append([_i % 2 == 0 for _i in range(-3, 3)])
elif n.startswith('f'):
data[n].append([i * 10 + _i + 0.5 for _i in range(-3, 3)])
else:
data[n].append([i * 10 + _i for _i in range(-3, 3)])
m.load_table_columnar(table_name, **data)
m.table_name = table_name
yield m
try:
m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
except Exception as msg:
print('%s in deardown' % (type(msg)))
@pytest.mark.parametrize('c_name', ['int8_t i1', 'int16_t i2', 'int32_t i4', 'int64_t i8',
'float f4', 'double f8'])
@pytest.mark.parametrize('device', ['cpu', 'gpu'])
def test_ptr(omnisci, c_name, device):
omnisci.reset()
if not omnisci.has_cuda and device == 'gpu':
pytest.skip('test requires CUDA-enabled omniscidb server')
from rbc.external import external
if omnisci.compiler is None:
pytest.skip('test requires clang C/C++ compiler')
ctype, cname = c_name.split()
c_code = f'''
#include <stdint.h>
#ifdef __cplusplus
extern "C" {{
#endif
{ctype} mysum_impl({ctype}* x, int n) {{
{ctype} r = 0;
for (int i=0; i < n; i++) {{
r += x[i];
}}
return r;
}}
{ctype} myval_impl({ctype}* x) {{
return *x;
}}
#ifdef __cplusplus
}}
#endif
'''
omnisci.user_defined_llvm_ir[device] = omnisci.compiler(c_code)
mysum_impl = external(f'{ctype} mysum_impl({ctype}*, int32_t)')
myval_impl = external(f'{ctype} myval_impl({ctype}*)')
@omnisci(f'{ctype}({ctype}[])', devices=[device])
def mysum_ptr(x):
return mysum_impl(x.ptr(), len(x))
@omnisci(f'{ctype}({ctype}[], int32_t)', devices=[device])
def myval_ptr(x, i):
return myval_impl(x.ptr(i))
desrc, result = omnisci.sql_execute(
f'select {cname}, mysum_ptr({cname}) from {omnisci.table_name}')
for a, r in result:
if cname == 'i1':
assert sum(a) % 256 == r % 256
else:
assert sum(a) == r
desrc, result = omnisci.sql_execute(
f'select {cname}, myval_ptr({cname}, 0), myval_ptr({cname}, 2) from {omnisci.table_name}')
for a, r0, r2 in result:
assert a[0] == r0
assert a[2] == r2
def test_len_i32(omnisci):
omnisci.reset()
@omnisci('int64(int32[])')
def array_sz_int32(x):
return len(x)
desrc, result = omnisci.sql_execute(
f'select i4, array_sz_int32(i4) from {omnisci.table_name}')
for a, sz in result:
assert len(a) == sz
def test_len_f64(omnisci):
omnisci.reset()
@omnisci('int64(float64[])')
def array_sz_double(x):
return len(x)
desrc, result = omnisci.sql_execute(
f'select f8, array_sz_double(f8) from {omnisci.table_name}')
for a, sz in result:
assert len(a) == sz
@pytest.mark.skipif(available_version[:2] == (5, 1),
reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
available_version,))
def test_getitem_bool(omnisci):
omnisci.reset()
@omnisci('bool(bool[], int64)')
def array_getitem_bool(x, i):
return x[i]
query = f'select b, array_getitem_bool(b, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i8(omnisci):
omnisci.reset()
@omnisci('int8(int8[], int32)')
def array_getitem_int8(x, i):
return x[i]
query = f'select i1, array_getitem_int8(i1, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i32(omnisci):
omnisci.reset()
@omnisci('int32(int32[], int32)')
def array_getitem_int32(x, i):
return x[i]
query = f'select i4, array_getitem_int32(i4, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i64(omnisci):
omnisci.reset()
@omnisci('int64(int64[], int64)')
def array_getitem_int64(x, i):
return x[i]
query = f'select i8, array_getitem_int64(i8, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_float(omnisci):
omnisci.reset()
@omnisci('double(double[], int32)')
def array_getitem_double(x, i):
return x[i]
query = f'select f8, array_getitem_double(f8, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
assert type(a[2]) == type(item)
@omnisci('float(float[], int64)')
def array_getitem_float(x, i):
return x[i]
query = f'select f4, array_getitem_float(f4, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
assert type(a[2]) == type(item)
def test_sum(omnisci):
omnisci.reset()
@omnisci('int32(int32[])')
def array_sum_int32(x):
r = 0
n = len(x)
for i in range(n):
r = r + x[i]
return r
query = f'select i4, array_sum_int32(i4) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, s in result:
assert sum(a) == s
@pytest.mark.skipif(available_version[:2] == (5, 1),
reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
available_version,))
def test_even_sum(omnisci):
omnisci.reset()
@omnisci('int32(bool[], int32[])')
def array_even_sum_int32(b, x):
r = 0
n = len(x)
for i in range(n):
if b[i]:
r = r + x[i]
return r
query = f'select b, i4, array_even_sum_int32(b, i4) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for b, i4, s in result:
assert sum([i_ for b_, i_ in zip(b, i4) if b_]) == s
def test_array_setitem(omnisci):
omnisci.reset()
@omnisci('double(double[], int32)')
def array_setitem_sum(b, c):
n = len(b)
s = 0
for i in range(n):
b[i] = b[i] * c # changes the value inplace
s += b[i]
b[i] = b[i] / c
return s
query = f'select f8, array_setitem_sum(f8, 4) from {omnisci.table_name}'
_, result = omnisci.sql_execute(query)
for f8, s in result:
assert sum(f8) * 4 == s
def test_array_constructor_noreturn(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
@omnisci('float64(int32)')
def array_noreturn(size):
a = Array(size, types.float64)
b = Array(size, types.float64)
c = Array(size, types.float64)
for i in range(size):
a[i] = b[i] = c[i] = i + 3.0
s = 0.0
for i in range(size):
s += a[i] + b[i] + c[i] - a[i] * b[i]
return s
query = 'select array_noreturn(10)'
_, result = omnisci.sql_execute(query)
r = list(result)[0]
assert (r == (-420.0,))
def test_array_constructor_return(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
from rbc.externals.stdio import printf
@omnisci('float64[](int32)')
def array_return(size):
printf("entering array_return(%i)\n", size)
a = Array(size, types.float64)
b = Array(size, types.float64)
for i in range(size):
a[i] = float(i)
b[i] = float(size - i - 1)
if size % 2:
c = a
else:
c = b
printf("returning array with length %i\n", len(c))
return c
query = 'select array_return(9), array_return(10)'
_, result = omnisci.sql_execute(query)
r = list(result)[0]
assert r == (list(map(float, range(9))),
list(map(float, reversed(range(10)))))
def test_array_constructor_len(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
@omnisci('int64(int32)')
def array_len(size):
a = Array(size, types.float64)
return len(a)
query = 'select array_len(30)'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (30,)
def test_array_constructor_getitem(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
import numpy as np
@omnisci('double(int32, int32)')
def array_ptr(size, pos):
a = Array(size, np.double)
for i in range(size):
a[i] = i + 0.0
return a[pos]
query = 'select array_ptr(5, 3)'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (3.0,)
def test_array_constructor_is_null(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
@omnisci('int8(int64)')
def array_is_null(size):
a = Array(size, 'double')
return a.is_null()
query = 'select array_is_null(3);'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (0,)
inps = [('int32', 'i4', 'trunc'), ('int32', 'i4', 'sext'),
('int32', 'i4', 'zext'), ('float', 'f4', 'fptrunc'),
('double', 'f8', 'fpext')]
@pytest.mark.parametrize("typ, col, suffix", inps,
ids=[item[-1] for item in inps])
def test_issue197(omnisci, typ, col, suffix):
omnisci.reset()
import rbc.omnisci_backend as np
from numba import types
cast = dict(
trunc=types.int64,
sext=types.int8,
zext=types.uint8,
fptrunc=types.float64,
fpext=types.float32)[suffix]
def fn_issue197(x):
y = np.zeros_like(x)
for i in range(len(x)):
y[i] = cast(x[i] + 3)
return y
fn_name = f"fn_issue197_{typ}_{suffix}"
fn_issue197.__name__ = fn_name
omnisci(f'{typ}[]({typ}[])')(fn_issue197)
_, result = omnisci.sql_execute(
f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
)
column, ret = list(result)[0]
for x, y in zip(column, ret):
assert y == x + 3
def test_issue197_bool(omnisci):
omnisci.reset()
import rbc.omnisci_backend as np
@omnisci('bool[](bool[])')
def fn_issue197_bool(x):
y = np.zeros_like(x)
for i in range(len(x)):
y[i] = bool(x[i])
return y
col = 'b'
fn_name = 'fn_issue197_bool'
_, result = omnisci.sql_execute(
f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
)
column, ret = list(result)[0]
for x, y in zip(column, ret):
assert bool(x) == bool(y)
def test_issue109(omnisci):
@omnisci('double[](int32)')
def issue109(size):
a = Array(5, 'double')
for i in range(5):
a[i] = nb_types.double(i)
return a
_, result = omnisci.sql_execute('select issue109(3);')
assert list(result) == [([0.0, 1.0, 2.0, 3.0, 4.0],)]
def test_issue77(omnisci):
@omnisci('int64[]()')
def issue77():
a = Array(5, 'int64')
a.fill(1)
return a
if omnisci.version[:2] >= (5, 8):
_, result = omnisci.sql_execute('select issue77();')
assert list(result)[0][0] == [1, 1, 1, 1, 1]
else:
with pytest.raises(OmnisciServerError) as exc:
_, result = omnisci.sql_execute('select issue77();')
assert exc.match('Could not bind issue77()')
|
[
"rbc.externals.stdio.printf",
"rbc.omnisci_backend.zeros_like",
"pytest.mark.parametrize",
"numba.types.double",
"pytest.importorskip",
"collections.defaultdict",
"os.path.basename",
"pytest.raises",
"pytest.mark.skipif",
"pytest.fixture",
"pytest.skip",
"rbc.external.external",
"rbc.omnisci_backend.Array"
] |
[((191, 227), 'pytest.importorskip', 'pytest.importorskip', (['"""rbc.omniscidb"""'], {}), "('rbc.omniscidb')\n", (210, 227), False, 'import pytest\n'), ((296, 352), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not available_version)'], {'reason': 'reason'}), '(not available_version, reason=reason)\n', (314, 352), False, 'import pytest\n'), ((356, 386), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (370, 386), False, 'import pytest\n'), ((1904, 2023), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""c_name"""', "['int8_t i1', 'int16_t i2', 'int32_t i4', 'int64_t i8', 'float f4', 'double f8'\n ]"], {}), "('c_name', ['int8_t i1', 'int16_t i2', 'int32_t i4',\n 'int64_t i8', 'float f4', 'double f8'])\n", (1927, 2023), False, 'import pytest\n'), ((2057, 2106), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""device"""', "['cpu', 'gpu']"], {}), "('device', ['cpu', 'gpu'])\n", (2080, 2106), False, 'import pytest\n'), ((4257, 4390), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(available_version[:2] == (5, 1))'], {'reason': "('skip due to a bug in omniscidb 5.1 (got %s)' % (available_version,))"}), "(available_version[:2] == (5, 1), reason=\n 'skip due to a bug in omniscidb 5.1 (got %s)' % (available_version,))\n", (4275, 4390), False, 'import pytest\n'), ((6807, 6940), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(available_version[:2] == (5, 1))'], {'reason': "('skip due to a bug in omniscidb 5.1 (got %s)' % (available_version,))"}), "(available_version[:2] == (5, 1), reason=\n 'skip due to a bug in omniscidb 5.1 (got %s)' % (available_version,))\n", (6825, 6940), False, 'import pytest\n'), ((10731, 10817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""typ, col, suffix"""', 'inps'], {'ids': '[item[-1] for item in inps]'}), "('typ, col, suffix', inps, ids=[item[-1] for item in\n inps])\n", (10754, 10817), False, 'import pytest\n'), ((1294, 1311), 'collections.defaultdict', 
'defaultdict', (['list'], {}), '(list)\n', (1305, 1311), False, 'from collections import defaultdict\n'), ((2835, 2885), 'rbc.external.external', 'external', (['f"""{ctype} mysum_impl({ctype}*, int32_t)"""'], {}), "(f'{ctype} mysum_impl({ctype}*, int32_t)')\n", (2843, 2885), False, 'from rbc.external import external\n'), ((2903, 2944), 'rbc.external.external', 'external', (['f"""{ctype} myval_impl({ctype}*)"""'], {}), "(f'{ctype} myval_impl({ctype}*)')\n", (2911, 2944), False, 'from rbc.external import external\n'), ((2223, 2281), 'pytest.skip', 'pytest.skip', (['"""test requires CUDA-enabled omniscidb server"""'], {}), "('test requires CUDA-enabled omniscidb server')\n", (2234, 2281), False, 'import pytest\n'), ((2362, 2411), 'pytest.skip', 'pytest.skip', (['"""test requires clang C/C++ compiler"""'], {}), "('test requires clang C/C++ compiler')\n", (2373, 2411), False, 'import pytest\n'), ((8157, 8183), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (8162, 8183), False, 'from rbc.omnisci_backend import Array\n'), ((8196, 8222), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (8201, 8222), False, 'from rbc.omnisci_backend import Array\n'), ((8235, 8261), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (8240, 8261), False, 'from rbc.omnisci_backend import Array\n'), ((8832, 8875), 'rbc.externals.stdio.printf', 'printf', (['"""entering array_return(%i)\n"""', 'size'], {}), "('entering array_return(%i)\\n', size)\n", (8838, 8875), False, 'from rbc.externals.stdio import printf\n'), ((8888, 8914), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (8893, 8914), False, 'from rbc.omnisci_backend import Array\n'), ((8927, 8953), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (8932, 8953), False, 'from rbc.omnisci_backend 
import Array\n'), ((9624, 9650), 'rbc.omnisci_backend.Array', 'Array', (['size', 'types.float64'], {}), '(size, types.float64)\n', (9629, 9650), False, 'from rbc.omnisci_backend import Array\n'), ((10002, 10024), 'rbc.omnisci_backend.Array', 'Array', (['size', 'np.double'], {}), '(size, np.double)\n', (10007, 10024), False, 'from rbc.omnisci_backend import Array\n'), ((10403, 10424), 'rbc.omnisci_backend.Array', 'Array', (['size', '"""double"""'], {}), "(size, 'double')\n", (10408, 10424), False, 'from rbc.omnisci_backend import Array\n'), ((11172, 11188), 'rbc.omnisci_backend.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (11185, 11188), True, 'import rbc.omnisci_backend as np\n'), ((11773, 11789), 'rbc.omnisci_backend.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (11786, 11789), True, 'import rbc.omnisci_backend as np\n'), ((12232, 12250), 'rbc.omnisci_backend.Array', 'Array', (['(5)', '"""double"""'], {}), "(5, 'double')\n", (12237, 12250), False, 'from rbc.omnisci_backend import Array\n'), ((12538, 12555), 'rbc.omnisci_backend.Array', 'Array', (['(5)', '"""int64"""'], {}), "(5, 'int64')\n", (12543, 12555), False, 'from rbc.omnisci_backend import Array\n'), ((598, 624), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (614, 624), False, 'import os\n'), ((12297, 12315), 'numba.types.double', 'nb_types.double', (['i'], {}), '(i)\n', (12312, 12315), True, 'from numba import types as nb_types\n'), ((12767, 12800), 'pytest.raises', 'pytest.raises', (['OmnisciServerError'], {}), '(OmnisciServerError)\n', (12780, 12800), False, 'import pytest\n')]
|
from dataclasses import dataclass
import re
from tokenize import group
from core.constructs.resource import ResourceModel
from core.constructs.workspace import Workspace
RUUID = "cdev::simple::bucket"
def get_cloud_output_from_cdev_name(component_name: str, cdev_name: str) -> str:
try:
ws = Workspace.instance()
cloud_output = ws.get_backend().get_cloud_output_by_name(
ws.get_resource_state_uuid(), component_name, RUUID, cdev_name
)
return cloud_output
except Exception as e:
print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
print(e)
return None
def get_resource_from_cdev_name(component_name: str, cdev_name: str) -> ResourceModel:
try:
ws = Workspace.instance()
resource = ws.get_backend().get_resource_by_name(
ws.get_resource_state_uuid(), component_name, RUUID, cdev_name
)
return resource
except Exception as e:
print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
print(e)
return None
remote_name_regex = "bucket://([a-z,_]+).([a-z,_]+)/?(\S+)?"
compiled_regex = re.compile(remote_name_regex)
@dataclass
class remote_location:
component_name: str
cdev_bucket_name: str
path: str
def is_valid_remote(name: str) -> bool:
return True if compiled_regex.match(name) else False
def parse_remote_location(name: str) -> remote_location:
match = compiled_regex.match(name)
if not match:
raise Exception(
"provided name {name} does not match regex for a remote bucket object"
)
return remote_location(
component_name=match.group(1),
cdev_bucket_name=match.group(2),
path=match.group(3),
)
|
[
"core.constructs.workspace.Workspace.instance",
"re.compile"
] |
[((1177, 1206), 're.compile', 're.compile', (['remote_name_regex'], {}), '(remote_name_regex)\n', (1187, 1206), False, 'import re\n'), ((308, 328), 'core.constructs.workspace.Workspace.instance', 'Workspace.instance', ([], {}), '()\n', (326, 328), False, 'from core.constructs.workspace import Workspace\n'), ((764, 784), 'core.constructs.workspace.Workspace.instance', 'Workspace.instance', ([], {}), '()\n', (782, 784), False, 'from core.constructs.workspace import Workspace\n')]
|
# -*- coding: utf-8 -*-
# Demo: MACD strategy
# src: ./test_backtest/MACD_JCSC.py
# jupyter: ./test_backtest/QUANTAXIS回测分析全过程讲解.ipynb
# paper: ./test_backtest/QUANTAXIS回测分析全过程讲解.md
import QUANTAXIS as QA
import numpy as np
import pandas as pd
import datetime
st1=datetime.datetime.now()
# define the MACD strategy
def MACD_JCSC(dataframe, SHORT=12, LONG=26, M=9):
"""
1.DIF向上突破DEA,买入信号参考。
2.DIF向下跌破DEA,卖出信号参考。
"""
CLOSE = dataframe.close
DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
DEA = QA.EMA(DIFF, M)
MACD = 2*(DIFF-DEA)
CROSS_JC = QA.CROSS(DIFF, DEA)
CROSS_SC = QA.CROSS(DEA, DIFF)
ZERO = 0
return pd.DataFrame({'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
# create account
Account = QA.QA_Account()
Broker = QA.QA_BacktestBroker()
Account.reset_assets(1000000)
Account.account_cookie = 'macd_stock'
QA.QA_SU_save_strategy('MACD_JCSC','Indicator',Account.account_cookie)
# get data from mongodb
data = QA.QA_fetch_stock_day_adv(
['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
data = data.to_qfq()
# add indicator
ind = data.add_func(MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
data_forbacktest=data.select_time('2018-01-01','2018-05-01')
for items in data_forbacktest.panel_gen:
for item in items.security_gen:
daily_ind=ind.loc[item.index]
if daily_ind.CROSS_JC.iloc[0]>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=1000,
towards=QA.ORDER_DIRECTION.BUY,
price=0,
order_model=QA.ORDER_MODEL.CLOSE,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print(item.to_json()[0])
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
elif daily_ind.CROSS_SC.iloc[0]>0:
#print(item.code)
if Account.sell_available.get(item.code[0], 0)>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=Account.sell_available.get(item.code[0], 0),
towards=QA.ORDER_DIRECTION.SELL,
price=0,
order_model=QA.ORDER_MODEL.MARKET,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
Account.settle()
print('TIME -- {}'.format(datetime.datetime.now()-st1))
print(Account.history)
print(Account.history_table)
print(Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(Account)
Account.save()
Risk.save()
# print(Risk.message)
# print(Risk.assets)
# Risk.plot_assets_curve()
# plt=Risk.plot_dailyhold()
# plt.show()
# plt1=Risk.plot_signal()
# plt.show()
# performance=QA.QA_Performance(Account)
# plt=performance.plot_pnlmoney(performance.pnl_fifo)
# plt.show()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
#account_info = QA.QA_fetch_account({'account_cookie': 'user_admin_macd'})
#account = QA.QA_Account().from_message(account_info[0])
#print(account)
|
[
"QUANTAXIS.QA_BacktestBroker",
"QUANTAXIS.EMA",
"QUANTAXIS.CROSS",
"QUANTAXIS.QA_SU_save_strategy",
"QUANTAXIS.QA_Account",
"QUANTAXIS.QA_Risk",
"datetime.datetime.now",
"QUANTAXIS.QA_Event",
"QUANTAXIS.QA_fetch_stock_day_adv",
"pandas.DataFrame"
] |
[((264, 287), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (285, 287), False, 'import datetime\n'), ((800, 815), 'QUANTAXIS.QA_Account', 'QA.QA_Account', ([], {}), '()\n', (813, 815), True, 'import QUANTAXIS as QA\n'), ((825, 847), 'QUANTAXIS.QA_BacktestBroker', 'QA.QA_BacktestBroker', ([], {}), '()\n', (845, 847), True, 'import QUANTAXIS as QA\n'), ((917, 989), 'QUANTAXIS.QA_SU_save_strategy', 'QA.QA_SU_save_strategy', (['"""MACD_JCSC"""', '"""Indicator"""', 'Account.account_cookie'], {}), "('MACD_JCSC', 'Indicator', Account.account_cookie)\n", (939, 989), True, 'import QUANTAXIS as QA\n'), ((1019, 1118), 'QUANTAXIS.QA_fetch_stock_day_adv', 'QA.QA_fetch_stock_day_adv', (["['000001', '000002', '000004', '600000']", '"""2017-09-01"""', '"""2018-05-20"""'], {}), "(['000001', '000002', '000004', '600000'],\n '2017-09-01', '2018-05-20')\n", (1044, 1118), True, 'import QUANTAXIS as QA\n'), ((3225, 3244), 'QUANTAXIS.QA_Risk', 'QA.QA_Risk', (['Account'], {}), '(Account)\n', (3235, 3244), True, 'import QUANTAXIS as QA\n'), ((523, 538), 'QUANTAXIS.EMA', 'QA.EMA', (['DIFF', 'M'], {}), '(DIFF, M)\n', (529, 538), True, 'import QUANTAXIS as QA\n'), ((579, 598), 'QUANTAXIS.CROSS', 'QA.CROSS', (['DIFF', 'DEA'], {}), '(DIFF, DEA)\n', (587, 598), True, 'import QUANTAXIS as QA\n'), ((614, 633), 'QUANTAXIS.CROSS', 'QA.CROSS', (['DEA', 'DIFF'], {}), '(DEA, DIFF)\n', (622, 633), True, 'import QUANTAXIS as QA\n'), ((658, 774), 'pandas.DataFrame', 'pd.DataFrame', (["{'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC':\n CROSS_SC, 'ZERO': ZERO}"], {}), "({'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC,\n 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})\n", (670, 774), True, 'import pandas as pd\n'), ((470, 490), 'QUANTAXIS.EMA', 'QA.EMA', (['CLOSE', 'SHORT'], {}), '(CLOSE, SHORT)\n', (476, 490), True, 'import QUANTAXIS as QA\n'), ((493, 512), 'QUANTAXIS.EMA', 'QA.EMA', (['CLOSE', 'LONG'], {}), '(CLOSE, LONG)\n', (499, 512), True, 'import 
QUANTAXIS as QA\n'), ((3086, 3109), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3107, 3109), False, 'import datetime\n'), ((1865, 1907), 'QUANTAXIS.QA_Event', 'QA.QA_Event', ([], {'order': 'order', 'market_data': 'item'}), '(order=order, market_data=item)\n', (1876, 1907), True, 'import QUANTAXIS as QA\n'), ((2751, 2793), 'QUANTAXIS.QA_Event', 'QA.QA_Event', ([], {'order': 'order', 'market_data': 'item'}), '(order=order, market_data=item)\n', (2762, 2793), True, 'import QUANTAXIS as QA\n')]
|
"""Wrapper for pygame, which exports the PSP Python API on non-PSP systems."""
__author__ = "<NAME>, <<EMAIL>>"
import pygame
pygame.init()
_vol_music = 255
_vol_sound = 255
def setMusicVolume(vol):
global _vol_music
if vol >= 0 and vol <= 255:
_vol_music = vol
pygame.mixer.music.set_volume(_vol_music / 255.0)
def setSndFxVolume(vol):
global _vol_sound
if vol >= 0 and vol <= 255:
_vol_sound = vol
class Music:
def __init__(self, filename, maxchan=128, loop=False):
self._loop = loop
pygame.mixer.music.load(filename)
pygame.mixer.music.set_volume(_vol_music / 255.0)
def start(self):
if self._loop:
pygame.mixer.music.play(-1)
else:
pygame.mixer.music.play()
def stop(self):
pygame.mixer.music.stop()
class Sound:
def __init__(self, filename):
self._snd = pygame.mixer.Sound(filename)
def start(self):
self._snd.set_volume(_vol_sound / 255.0)
self._snd.play()
|
[
"pygame.init",
"pygame.mixer.Sound",
"pygame.mixer.music.set_volume",
"pygame.mixer.music.play",
"pygame.mixer.music.stop",
"pygame.mixer.music.load"
] |
[((131, 144), 'pygame.init', 'pygame.init', ([], {}), '()\n', (142, 144), False, 'import pygame\n'), ((294, 343), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(_vol_music / 255.0)'], {}), '(_vol_music / 255.0)\n', (323, 343), False, 'import pygame\n'), ((557, 590), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['filename'], {}), '(filename)\n', (580, 590), False, 'import pygame\n'), ((599, 648), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(_vol_music / 255.0)'], {}), '(_vol_music / 255.0)\n', (628, 648), False, 'import pygame\n'), ((823, 848), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (846, 848), False, 'import pygame\n'), ((918, 946), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['filename'], {}), '(filename)\n', (936, 946), False, 'import pygame\n'), ((710, 737), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (733, 737), False, 'import pygame\n'), ((764, 789), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (787, 789), False, 'import pygame\n')]
|
import torch
import torch.nn as nn
import torchvision.models as models
import numpy as np
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super(DecoderRNN, self).__init__()
self.lstm = nn.LSTM(embed_size,hidden_size,num_layers,batch_first=True)
self.embeddings = nn.Embedding(vocab_size, embed_size)
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
captions = self.embeddings(captions)
embed = torch.cat((features.unsqueeze(1),captions),1)
r_out = self.lstm(embed)
output = self.linear(r_out[0])[:, :-1, :]
return output
def sample(self, inputs, states=None, max_len=20):
#" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
#pass
output = []
for i in range(max_len):
hiddens, states = self.lstm(inputs, states)
mid = self.linear(hiddens.squeeze(1))
predicted = mid.max(1)[1]
output.append(predicted.tolist()[0])
inputs = self.embeddings(predicted)
inputs = inputs.unsqueeze(1)
#print(output)
#output = torch.cat(output, 1)
return output
|
[
"torch.nn.Sequential",
"torch.nn.LSTM",
"torch.nn.Linear",
"torchvision.models.resnet50",
"torch.nn.Embedding"
] |
[((216, 248), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (231, 248), True, 'import torchvision.models as models\n'), ((409, 432), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (422, 432), True, 'import torch.nn as nn\n'), ((454, 498), 'torch.nn.Linear', 'nn.Linear', (['resnet.fc.in_features', 'embed_size'], {}), '(resnet.fc.in_features, embed_size)\n', (463, 498), True, 'import torch.nn as nn\n'), ((862, 924), 'torch.nn.LSTM', 'nn.LSTM', (['embed_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(embed_size, hidden_size, num_layers, batch_first=True)\n', (869, 924), True, 'import torch.nn as nn\n'), ((948, 984), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embed_size'], {}), '(vocab_size, embed_size)\n', (960, 984), True, 'import torch.nn as nn\n'), ((1007, 1041), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (1016, 1041), True, 'import torch.nn as nn\n')]
|
"""Items model. """
# Django
from django.db import models
# Utilities
from App.utils.models import BlackMarketModel
# Models
from .category import Category
from .unit import Unit
from .owner import Owner
class Item(BlackMarketModel):
"""Items model.
Is a model to items we goin to sell """
name = models.CharField(max_length=100, unique=True, blank=False, null=False)
category = models.ForeignKey(Category, blank=True, on_delete=models.SET_NULL, null=True)
description = models.TextField(max_length=200, blank=True)
type_item = models.CharField(max_length=15, blank=True)
unit = models.ForeignKey(Unit, blank=True, on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits=5, decimal_places=2, blank=False, null=False)
owner = models.ForeignKey(Owner, blank=True, on_delete=models.SET_NULL, null=True)
is_active = models.BooleanField(default=True)
def __str__(self):
return 'name:{}'.format(self.name)
|
[
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] |
[((316, 386), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)', 'blank': '(False)', 'null': '(False)'}), '(max_length=100, unique=True, blank=False, null=False)\n', (332, 386), False, 'from django.db import models\n'), ((402, 479), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'blank': '(True)', 'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Category, blank=True, on_delete=models.SET_NULL, null=True)\n', (419, 479), False, 'from django.db import models\n'), ((498, 542), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (514, 542), False, 'from django.db import models\n'), ((559, 602), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'blank': '(True)'}), '(max_length=15, blank=True)\n', (575, 602), False, 'from django.db import models\n'), ((614, 687), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Unit'], {'blank': '(True)', 'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Unit, blank=True, on_delete=models.SET_NULL, null=True)\n', (631, 687), False, 'from django.db import models\n'), ((700, 776), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(5)', 'decimal_places': '(2)', 'blank': '(False)', 'null': '(False)'}), '(max_digits=5, decimal_places=2, blank=False, null=False)\n', (719, 776), False, 'from django.db import models\n'), ((789, 863), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Owner'], {'blank': '(True)', 'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Owner, blank=True, on_delete=models.SET_NULL, null=True)\n', (806, 863), False, 'from django.db import models\n'), ((880, 913), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (899, 913), False, 'from django.db import models\n')]
|
import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class NoiseTexturize(Augmentation):
"""Creates a random noise based texture pattern to emulate paper textures.
Consequently applies noise patterns to the original image from big to small.
:param sigma_range: Defines bounds of noise fluctuations.
:type sigma_range: tuple, optional
:param turbulence_range: Defines how quickly big patterns will be
replaced with the small ones. The lower value -
the more iterations will be performed during texture generation.
:type turbulence_range: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
sigma_range=(3, 10),
turbulence_range=(2, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.sigma_range = sigma_range
self.turbulence_range = turbulence_range
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"NoiseTexturize(sigma_range={self.sigma_range}, turbulence_range={self.turbulence_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
sigma = random.randint(self.sigma_range[0], self.sigma_range[1])
turbulence = random.randint(
self.turbulence_range[0],
self.turbulence_range[1],
)
result = image.astype(float)
rows, cols = image.shape[:2]
if len(image.shape) > 2:
channel = image.shape[2]
else:
channel = 0
ratio = cols
while not ratio == 1:
result += self.noise(cols, rows, channel, ratio, sigma=sigma)
ratio = (ratio // turbulence) or 1
cut = np.clip(result, 0, 255)
cut = cut.astype(np.uint8)
return cut
def noise(self, width, height, channel, ratio, sigma):
"""The function generates an image, filled with gaussian nose. If ratio
parameter is specified, noise will be generated for a lesser image and
then it will be upscaled to the original size. In that case noise will
generate larger square patterns. To avoid multiple lines, the upscale
uses interpolation.
:param ratio: the size of generated noise "pixels"
:param sigma: defines bounds of noise fluctuations
"""
mean = 0
# assert width % ratio == 0, "Can't scale image with of size {} and ratio {}".format(width, ratio)
# assert height % ratio == 0, "Can't scale image with of size {} and ratio {}".format(height, ratio)
h = int(height / ratio)
w = int(width / ratio)
if h == 0:
h = 1
if w == 0:
w = 1
gaussian = np.vectorize(lambda x: random.gauss(mean, sigma))
result = gaussian(np.array((w, h)))
result = cv2.resize(
result,
dsize=(width, height),
interpolation=cv2.INTER_LINEAR,
)
# for multiple channels input, convert result to multiple channels
if channel:
result = np.stack([result, result, result], axis=2)
return result
|
[
"numpy.clip",
"numpy.array",
"numpy.stack",
"cv2.resize",
"random.randint",
"random.gauss"
] |
[((3150, 3223), 'cv2.resize', 'cv2.resize', (['result'], {'dsize': '(width, height)', 'interpolation': 'cv2.INTER_LINEAR'}), '(result, dsize=(width, height), interpolation=cv2.INTER_LINEAR)\n', (3160, 3223), False, 'import cv2\n'), ((1413, 1469), 'random.randint', 'random.randint', (['self.sigma_range[0]', 'self.sigma_range[1]'], {}), '(self.sigma_range[0], self.sigma_range[1])\n', (1427, 1469), False, 'import random\n'), ((1495, 1561), 'random.randint', 'random.randint', (['self.turbulence_range[0]', 'self.turbulence_range[1]'], {}), '(self.turbulence_range[0], self.turbulence_range[1])\n', (1509, 1561), False, 'import random\n'), ((2023, 2046), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (2030, 2046), True, 'import numpy as np\n'), ((3114, 3130), 'numpy.array', 'np.array', (['(w, h)'], {}), '((w, h))\n', (3122, 3130), True, 'import numpy as np\n'), ((3388, 3430), 'numpy.stack', 'np.stack', (['[result, result, result]'], {'axis': '(2)'}), '([result, result, result], axis=2)\n', (3396, 3430), True, 'import numpy as np\n'), ((3060, 3085), 'random.gauss', 'random.gauss', (['mean', 'sigma'], {}), '(mean, sigma)\n', (3072, 3085), False, 'import random\n')]
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import openslide
from openslide import OpenSlide
from openslide.deepzoom import DeepZoomGenerator
from io import BytesIO
from PIL import Image
from .rendering_engine_interface import RenderingEngineInterface
from .. import settings
from ome_seadragon_cache import CacheDriverFactory
class OpenSlideEngine(RenderingEngineInterface):
def __init__(self, image_id, connection):
super(OpenSlideEngine, self).__init__(image_id, connection)
def _get_openslide_wrapper(self, original_file_source, file_mimetype):
img_path = self._get_image_path(original_file_source, file_mimetype)
if img_path:
return OpenSlide(img_path)
else:
return None
def _get_deepzoom_config(self, tile_size=None, limit_bounds=None):
cfg = {
'tile_size': tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE,
'overlap': settings.DEEPZOOM_OVERLAP,
'limit_bounds': limit_bounds if limit_bounds is not None else settings.DEEPZOOM_LIMIT_BOUNDS
}
self.logger.debug(settings.DEEPZOOM_LIMIT_BOUNDS)
self.logger.debug(cfg)
return cfg
def _get_deepzoom_wrapper(self, original_file_source, file_mimetype, tile_size=None, limit_bounds=None):
os_wrapper = self._get_openslide_wrapper(original_file_source, file_mimetype)
if os_wrapper:
return DeepZoomGenerator(os_wrapper, **self._get_deepzoom_config(tile_size, limit_bounds))
else:
return None
def _get_image_mpp(self, original_file_source=False, file_mimetype=None):
slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
if slide:
try:
mpp_x = slide.properties[openslide.PROPERTY_NAME_MPP_X]
mpp_y = slide.properties[openslide.PROPERTY_NAME_MPP_Y]
return (float(mpp_x) + float(mpp_y)) / 2
except (KeyError, ValueError):
return 0
else:
return 0
def get_openseadragon_config(self, original_file_source=False, file_mimetype=None):
return {
'mpp': self._get_image_mpp(original_file_source, file_mimetype)
}
def _get_slide_bounds(self, original_file_source=False, file_mimetype=None):
slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
if slide:
return (
int(slide.properties.get('openslide.bounds-x', 0)),
int(slide.properties.get('openslide.bounds-y', 0)),
int(slide.properties.get('openslide.bounds-height', 0)),
int(slide.properties.get('openslide.bounds-width', 0))
)
else:
return None
def get_slide_bounds(self, original_file_source=False, file_mimetype=None):
bounds = self._get_slide_bounds(original_file_source, file_mimetype)
if bounds:
return {
'bounds_x': bounds[0],
'bounds_y': bounds[1],
'bounds_height': bounds[2],
'bounds_width': bounds[3]
}
else:
return bounds
def _get_original_file_json_description(self, resource_path, file_mimetype=None, tile_size=None, limit_bounds=True):
slide = self._get_openslide_wrapper(original_file_source=True,
file_mimetype=file_mimetype)
if slide:
if limit_bounds:
_, _, height, width = self._get_slide_bounds(True, file_mimetype)
return self._get_json_description(resource_path, height, width, tile_size)
return self._get_json_description(resource_path, slide.dimensions[1], slide.dimensions[0], tile_size)
return None
def get_dzi_description(self, original_file_source=False, file_mimetype=None, tile_size=None, limit_bounds=None):
dzi_slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype, tile_size, limit_bounds)
if dzi_slide:
return dzi_slide.get_dzi(settings.DEEPZOOM_FORMAT)
else:
return None
def get_thumbnail(self, size, original_file_source=False, file_mimeype=None):
if settings.IMAGES_CACHE_ENABLED:
cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
get_cache(settings.CACHE_HOST, settings.CACHE_PORT, settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
# get thumbnail from cache
thumb = cache.thumbnail_from_cache(self.image_id, size, settings.DEEPZOOM_FORMAT, 'openslide')
else:
thumb = None
# if thumbnail is not in cache build it ....
if thumb is None:
self.logger.debug('No thumbnail loaded from cache, building it')
slide = self._get_openslide_wrapper(original_file_source, file_mimeype)
if slide:
thumb = slide.get_thumbnail((size, size))
# ... and store it into the cache
if settings.IMAGES_CACHE_ENABLED:
cache.thumbnail_to_cache(self.image_id, thumb, size, settings.DEEPZOOM_FORMAT, 'openslide')
else:
self.logger.debug('Thumbnail loaded from cache')
return thumb, settings.DEEPZOOM_FORMAT
def get_tile(self, level, column, row, original_file_source=False, file_mimetype=None,
tile_size=None, limit_bounds=None):
if settings.IMAGES_CACHE_ENABLED:
cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
get_cache(settings.CACHE_HOST, settings.CACHE_PORT, settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
tile_size = tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE
self.logger.debug('TILE SIZE IS: %s', tile_size)
cache_params = {
'image_id': self.image_id,
'level': level,
'column': column,
'row': row,
'tile_size': tile_size,
'image_format': settings.DEEPZOOM_FORMAT,
'rendering_engine': 'openslide'
}
if cache_params['image_format'].lower() == 'jpeg':
cache_params['image_quality'] = settings.DEEPZOOM_JPEG_QUALITY
# get tile from cache
tile = cache.tile_from_cache(**cache_params)
else:
tile = None
# if tile is not in cache build it ...
if tile is None:
slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype, tile_size, limit_bounds)
if slide:
dzi_tile = slide.get_tile(level, (column, row))
tile_buffer = BytesIO()
tile_conf = {
'format': settings.DEEPZOOM_FORMAT
}
if tile_conf['format'].lower() == 'jpeg':
tile_conf['quality'] = settings.DEEPZOOM_JPEG_QUALITY
dzi_tile.save(tile_buffer, **tile_conf)
tile = Image.open(tile_buffer)
# ... and store it into the cache
if settings.IMAGES_CACHE_ENABLED:
cache_params['image_obj'] = tile
cache.tile_to_cache(**cache_params)
return tile, settings.DEEPZOOM_FORMAT
|
[
"PIL.Image.open",
"io.BytesIO",
"ome_seadragon_cache.CacheDriverFactory",
"openslide.OpenSlide"
] |
[((1741, 1760), 'openslide.OpenSlide', 'OpenSlide', (['img_path'], {}), '(img_path)\n', (1750, 1760), False, 'from openslide import OpenSlide\n'), ((7796, 7805), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7803, 7805), False, 'from io import BytesIO\n'), ((8120, 8143), 'PIL.Image.open', 'Image.open', (['tile_buffer'], {}), '(tile_buffer)\n', (8130, 8143), False, 'from PIL import Image\n'), ((5371, 5419), 'ome_seadragon_cache.CacheDriverFactory', 'CacheDriverFactory', (['settings.IMAGES_CACHE_DRIVER'], {}), '(settings.IMAGES_CACHE_DRIVER)\n', (5389, 5419), False, 'from ome_seadragon_cache import CacheDriverFactory\n'), ((6583, 6631), 'ome_seadragon_cache.CacheDriverFactory', 'CacheDriverFactory', (['settings.IMAGES_CACHE_DRIVER'], {}), '(settings.IMAGES_CACHE_DRIVER)\n', (6601, 6631), False, 'from ome_seadragon_cache import CacheDriverFactory\n')]
|
from PyQt5 import QtWidgets, QtCore
from podcastista.ShowEpisodeWidget import ShowEpisodeWidget
from podcastista.FlowLayout import FlowLayout
class FillThread(QtCore.QThread):
""" Worker thread for loading up episodes """
def __init__(self, spotify, shows):
super().__init__()
self._spotify = spotify
self._shows = shows
def run(self):
for item in self._shows['items']:
show = item['show']
show['episodes'] = []
show_episodes = self._spotify.show_episodes(show['id'], limit=20)
for episode in show_episodes['items']:
display = True
if ('resume_point' in episode and
episode['resume_point']['fully_played']):
display = False
if display:
show['episodes'].append(episode)
@property
def shows(self):
return self._shows
class ListenNowTab(QtWidgets.QWidget):
"""
Tab on the main window with the list of shows
"""
def __init__(self, parent):
super().__init__()
self._main_window = parent
# empty widget
self._empty_widget = QtWidgets.QWidget()
empty_layout = QtWidgets.QVBoxLayout()
nothing = QtWidgets.QLabel("No items")
nothing.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Fixed)
nothing.setContentsMargins(40, 20, 40, 20)
nothing.setStyleSheet("""
font-size: 14px;
""")
nothing.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
empty_layout.addWidget(nothing)
empty_layout.addStretch(1)
self._empty_widget.setLayout(empty_layout)
# list of items
self._layout = FlowLayout()
widget = QtWidgets.QWidget()
widget.setLayout(self._layout)
widget.setSizePolicy(
QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.MinimumExpanding)
self._list = QtWidgets.QScrollArea()
self._list.setFrameShape(QtWidgets.QFrame.NoFrame)
self._list.setWidgetResizable(True)
self._list.setWidget(widget)
self._stacked_layout = QtWidgets.QStackedLayout(self)
self._stacked_layout.addWidget(self._empty_widget)
self._stacked_layout.addWidget(self._list)
def clear(self):
self._stacked_layout.setCurrentWidget(self._empty_widget)
while self._layout.count() > 0:
item = self._layout.takeAt(0)
if item.widget() is not None:
item.widget().deleteLater()
def fill(self):
if self._main_window.spotify is None:
return
shows = self._main_window.spotify.current_user_saved_shows()
self._filler = FillThread(self._main_window.spotify, shows)
self._filler.finished.connect(self.onFillFinished)
self._filler.start()
def onFillFinished(self):
for item in self._filler.shows['items']:
show = item['show']
if len(show['episodes']) > 0:
w = ShowEpisodeWidget(show, self._main_window)
self._layout.addWidget(w)
if self._layout.count() > 0:
self._stacked_layout.setCurrentWidget(self._list)
|
[
"PyQt5.QtWidgets.QWidget",
"podcastista.FlowLayout.FlowLayout",
"PyQt5.QtWidgets.QLabel",
"podcastista.ShowEpisodeWidget.ShowEpisodeWidget",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QStackedLayout",
"PyQt5.QtWidgets.QScrollArea"
] |
[((1196, 1215), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1213, 1215), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((1239, 1262), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1260, 1262), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((1282, 1310), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""No items"""'], {}), "('No items')\n", (1298, 1310), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((1808, 1820), 'podcastista.FlowLayout.FlowLayout', 'FlowLayout', ([], {}), '()\n', (1818, 1820), False, 'from podcastista.FlowLayout import FlowLayout\n'), ((1839, 1858), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1856, 1858), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((2054, 2077), 'PyQt5.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', ([], {}), '()\n', (2075, 2077), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((2250, 2280), 'PyQt5.QtWidgets.QStackedLayout', 'QtWidgets.QStackedLayout', (['self'], {}), '(self)\n', (2274, 2280), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((3134, 3176), 'podcastista.ShowEpisodeWidget.ShowEpisodeWidget', 'ShowEpisodeWidget', (['show', 'self._main_window'], {}), '(show, self._main_window)\n', (3151, 3176), False, 'from podcastista.ShowEpisodeWidget import ShowEpisodeWidget\n')]
|
import pyparsing as pp
#relationship will refer to 'track' in all of your examples
relationship = pp.Word(pp.alphas).setResultsName('relationship')
number = pp.Word(pp.nums + '.')
variable = pp.Word(pp.alphas)
# an argument to a relationship can be either a number or a variable
argument = number | variable
# arguments are a delimited list of 'argument' surrounded by parenthesis
arguments= (pp.Suppress('(') + pp.delimitedList(argument) +
pp.Suppress(')')).setResultsName('arguments')
# a fact is composed of a relationship and it's arguments
# (I'm aware it's actually more complicated than this
# it's just a simplifying assumption)
fact = (relationship + arguments).setResultsName('facts', listAllMatches=True)
# a sentence is a fact plus a period
sentence = fact + pp.Suppress('.')
# self explanatory
prolog_sentences = pp.OneOrMore(sentence)
|
[
"pyparsing.OneOrMore",
"pyparsing.Suppress",
"pyparsing.Word",
"pyparsing.delimitedList"
] |
[((158, 180), 'pyparsing.Word', 'pp.Word', (["(pp.nums + '.')"], {}), "(pp.nums + '.')\n", (165, 180), True, 'import pyparsing as pp\n'), ((192, 210), 'pyparsing.Word', 'pp.Word', (['pp.alphas'], {}), '(pp.alphas)\n', (199, 210), True, 'import pyparsing as pp\n'), ((844, 866), 'pyparsing.OneOrMore', 'pp.OneOrMore', (['sentence'], {}), '(sentence)\n', (856, 866), True, 'import pyparsing as pp\n'), ((788, 804), 'pyparsing.Suppress', 'pp.Suppress', (['"""."""'], {}), "('.')\n", (799, 804), True, 'import pyparsing as pp\n'), ((98, 116), 'pyparsing.Word', 'pp.Word', (['pp.alphas'], {}), '(pp.alphas)\n', (105, 116), True, 'import pyparsing as pp\n'), ((455, 471), 'pyparsing.Suppress', 'pp.Suppress', (['""")"""'], {}), "(')')\n", (466, 471), True, 'import pyparsing as pp\n'), ((395, 411), 'pyparsing.Suppress', 'pp.Suppress', (['"""("""'], {}), "('(')\n", (406, 411), True, 'import pyparsing as pp\n'), ((414, 440), 'pyparsing.delimitedList', 'pp.delimitedList', (['argument'], {}), '(argument)\n', (430, 440), True, 'import pyparsing as pp\n')]
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with various axes permutations,
# in order to cover a nasty set of "if" statements that check
# the intersections of the raster lines with the input bounding box.
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
transform = vtk.vtkTransform()
# rotate about the center of the image
transform.Translate(+100.8,+100.8,+69.0)
transform.RotateWXYZ(10,1,1,0)
transform.Translate(-100.8,-100.8,-69.0)
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.SetResliceAxesDirectionCosines([1,0,0,0,1,0,0,0,1])
reslice1.SetResliceTransform(transform)
reslice1.SetOutputSpacing(3.2,3.2,3.2)
reslice1.SetOutputExtent(0,74,0,74,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(reader.GetOutputPort())
reslice2.SetResliceAxesDirectionCosines([0,1,0,0,0,1,1,0,0])
reslice2.SetResliceTransform(transform)
reslice2.SetOutputSpacing(3.2,3.2,3.2)
reslice2.SetOutputExtent(0,74,0,74,0,0)
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.SetResliceAxesDirectionCosines([0,0,1,1,0,0,0,1,0])
reslice3.SetResliceTransform(transform)
reslice3.SetOutputSpacing(3.2,3.2,3.2)
reslice3.SetOutputExtent(0,74,0,74,0,0)
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.SetResliceAxesDirectionCosines([-1,0,0,0,-1,0,0,0,-1])
reslice4.SetResliceTransform(transform)
reslice4.SetOutputSpacing(3.2,3.2,3.2)
reslice4.SetOutputExtent(0,74,0,74,0,0)
reslice5 = vtk.vtkImageReslice()
reslice5.SetInputConnection(reader.GetOutputPort())
reslice5.SetResliceAxesDirectionCosines([0,-1,0,0,0,-1,-1,0,0])
reslice5.SetResliceTransform(transform)
reslice5.SetOutputSpacing(3.2,3.2,3.2)
reslice5.SetOutputExtent(0,74,0,74,0,0)
reslice6 = vtk.vtkImageReslice()
reslice6.SetInputConnection(reader.GetOutputPort())
reslice6.SetResliceAxesDirectionCosines([0,0,-1,-1,0,0,0,-1,0])
reslice6.SetResliceTransform(transform)
reslice6.SetOutputSpacing(3.2,3.2,3.2)
reslice6.SetOutputExtent(0,74,0,74,0,0)
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
mapper5 = vtk.vtkImageMapper()
mapper5.SetInputConnection(reslice5.GetOutputPort())
mapper5.SetColorWindow(2000)
mapper5.SetColorLevel(1000)
mapper5.SetZSlice(0)
mapper6 = vtk.vtkImageMapper()
mapper6.SetInputConnection(reslice6.GetOutputPort())
mapper6.SetColorWindow(2000)
mapper6.SetColorLevel(1000)
mapper6.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
actor5 = vtk.vtkActor2D()
actor5.SetMapper(mapper5)
actor6 = vtk.vtkActor2D()
actor6.SetMapper(mapper6)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.0,0.0,0.3333,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.5,0.3333,1.0)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.3333,0.0,0.6667,0.5)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.3333,0.5,0.6667,1.0)
imager5 = vtk.vtkRenderer()
imager5.AddActor2D(actor5)
imager5.SetViewport(0.6667,0.0,1.0,0.5)
imager6 = vtk.vtkRenderer()
imager6.AddActor2D(actor6)
imager6.SetViewport(0.6667,0.5,1.0,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.AddRenderer(imager5)
imgWin.AddRenderer(imager6)
imgWin.SetSize(225,150)
imgWin.Render()
# --- end of script --
|
[
"vtk.util.misc.vtkGetDataRoot",
"vtk.vtkImageReader",
"vtk.vtkRenderWindow",
"vtk.vtkTransform",
"vtk.vtkActor2D",
"vtk.vtkImageMapper",
"vtk.vtkImageReslice",
"vtk.vtkRenderer"
] |
[((90, 106), 'vtk.util.misc.vtkGetDataRoot', 'vtkGetDataRoot', ([], {}), '()\n', (104, 106), False, 'from vtk.util.misc import vtkGetDataRoot\n'), ((333, 353), 'vtk.vtkImageReader', 'vtk.vtkImageReader', ([], {}), '()\n', (351, 353), False, 'import vtk\n'), ((604, 622), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (620, 622), False, 'import vtk\n'), ((786, 807), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (805, 807), False, 'import vtk\n'), ((1051, 1072), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (1070, 1072), False, 'import vtk\n'), ((1316, 1337), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (1335, 1337), False, 'import vtk\n'), ((1581, 1602), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (1600, 1602), False, 'import vtk\n'), ((1849, 1870), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (1868, 1870), False, 'import vtk\n'), ((2117, 2138), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (2136, 2138), False, 'import vtk\n'), ((2384, 2404), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (2402, 2404), False, 'import vtk\n'), ((2546, 2566), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (2564, 2566), False, 'import vtk\n'), ((2708, 2728), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (2726, 2728), False, 'import vtk\n'), ((2870, 2890), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (2888, 2890), False, 'import vtk\n'), ((3032, 3052), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (3050, 3052), False, 'import vtk\n'), ((3194, 3214), 'vtk.vtkImageMapper', 'vtk.vtkImageMapper', ([], {}), '()\n', (3212, 3214), False, 'import vtk\n'), ((3355, 3371), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (3369, 3371), False, 'import vtk\n'), ((3407, 3423), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (3421, 3423), False, 'import vtk\n'), ((3459, 3475), 'vtk.vtkActor2D', 
'vtk.vtkActor2D', ([], {}), '()\n', (3473, 3475), False, 'import vtk\n'), ((3511, 3527), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (3525, 3527), False, 'import vtk\n'), ((3563, 3579), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (3577, 3579), False, 'import vtk\n'), ((3615, 3631), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (3629, 3631), False, 'import vtk\n'), ((3668, 3685), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (3683, 3685), False, 'import vtk\n'), ((3763, 3780), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (3778, 3780), False, 'import vtk\n'), ((3858, 3875), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (3873, 3875), False, 'import vtk\n'), ((3956, 3973), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (3971, 3973), False, 'import vtk\n'), ((4054, 4071), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (4069, 4071), False, 'import vtk\n'), ((4149, 4166), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (4164, 4166), False, 'import vtk\n'), ((4243, 4264), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (4262, 4264), False, 'import vtk\n')]
|
import collections
from itertools import repeat
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
def _ntuple(n):
def parse(x):
if isinstance(x, collections.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
'''
Args:
rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
lengths: [batch]: tensor containing the lengthes of the input sequence
hx: [num_layers * num_directions, batch, hidden_size]: tensor containing the initial hidden state for each element in the batch.
masks: [seq_len, batch]: tensor containing the mask for each element in the batch.
batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
Returns:
'''
def check_decreasing(lengths):
lens, order = torch.sort(lengths, dim=0, descending=True)
if torch.ne(lens, lengths).sum() == 0:
return None
else:
_, rev_order = torch.sort(order)
return lens, order, rev_order
check_res = check_decreasing(lengths)
if check_res is None:
lens = lengths
rev_order = None
else:
lens, order, rev_order = check_res
batch_dim = 0 if batch_first else 1
rnn_input = rnn_input.index_select(batch_dim, order)
if hx is not None:
# hack lstm
if isinstance(hx, tuple):
hx, cx = hx
hx = hx.index_select(1, order)
cx = cx.index_select(1, order)
hx = (hx, cx)
else:
hx = hx.index_select(1, order)
lens = lens.tolist()
seq = rnn_utils.pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
if masks is not None:
if batch_first:
masks = masks[:, :lens[0]]
else:
masks = masks[:lens[0]]
return seq, hx, rev_order, masks
def recover_rnn_seq(seq, rev_order, hx=None, batch_first=False):
output, _ = rnn_utils.pad_packed_sequence(seq, batch_first=batch_first)
if rev_order is not None:
batch_dim = 0 if batch_first else 1
output = output.index_select(batch_dim, rev_order)
if hx is not None:
# hack lstm
if isinstance(hx, tuple):
hx, cx = hx
hx = hx.index_select(1, rev_order)
cx = cx.index_select(1, rev_order)
hx = (hx, cx)
else:
hx = hx.index_select(1, rev_order)
return output, hx
def freeze_embedding(embedding):
assert isinstance(embedding, nn.Embedding), "input should be an Embedding module."
embedding.weight.detach_()
|
[
"torch.sort",
"torch.ne",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.utils.rnn.pad_packed_sequence",
"itertools.repeat"
] |
[((1898, 1970), 'torch.nn.utils.rnn.pack_padded_sequence', 'rnn_utils.pack_padded_sequence', (['rnn_input', 'lens'], {'batch_first': 'batch_first'}), '(rnn_input, lens, batch_first=batch_first)\n', (1928, 1970), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((2230, 2289), 'torch.nn.utils.rnn.pad_packed_sequence', 'rnn_utils.pad_packed_sequence', (['seq'], {'batch_first': 'batch_first'}), '(seq, batch_first=batch_first)\n', (2259, 2289), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((1064, 1107), 'torch.sort', 'torch.sort', (['lengths'], {'dim': '(0)', 'descending': '(True)'}), '(lengths, dim=0, descending=True)\n', (1074, 1107), False, 'import torch\n'), ((248, 260), 'itertools.repeat', 'repeat', (['x', 'n'], {}), '(x, n)\n', (254, 260), False, 'from itertools import repeat\n'), ((1220, 1237), 'torch.sort', 'torch.sort', (['order'], {}), '(order)\n', (1230, 1237), False, 'import torch\n'), ((1119, 1142), 'torch.ne', 'torch.ne', (['lens', 'lengths'], {}), '(lens, lengths)\n', (1127, 1142), False, 'import torch\n')]
|
import pandas as pandas_Pandas_Module
class Script:
@staticmethod
def main():
food_info = pandas_Pandas_Module.read_csv("../food_info.csv")
print(str(food_info.dtypes))
Script.main()
|
[
"pandas.read_csv"
] |
[((97, 146), 'pandas.read_csv', 'pandas_Pandas_Module.read_csv', (['"""../food_info.csv"""'], {}), "('../food_info.csv')\n", (126, 146), True, 'import pandas as pandas_Pandas_Module\n')]
|
# coding=utf-8
"""
Command Line Interface
======================
"""
import argparse
import logging
import os
from os import path
import sys
from landspout import core, __version__
LOGGER = logging.getLogger('landspout')
LOGGING_FORMAT = '[%(asctime)-15s] %(levelname)-8s %(name)-15s: %(message)s'
def exit_application(message=None, code=0):
"""Exit the application displaying the message to info or error based upon
the exit code
:param str message: The exit message
:param int code: The exit code (default: 0)
"""
log_method = LOGGER.error if code else LOGGER.info
log_method(message.strip())
sys.exit(code)
def parse_cli_arguments():
"""Return the base argument parser for CLI applications.
:return: :class:`~argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(
'landspout', 'Static website generation tool',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('-s', '--source', metavar='SOURCE',
help='Source content directory',
default='content')
parser.add_argument('-d', '--destination', metavar='DEST',
help='Destination directory for built content',
default='build')
parser.add_argument('-t', '--templates', metavar='TEMPLATE DIR',
help='Template directory',
default='templates')
parser.add_argument('-b', '--base-uri-path', action='store', default='/')
parser.add_argument('--whitespace', action='store',
choices=['all', 'single', 'oneline'],
default='all',
help='Compress whitespace')
parser.add_argument('-n', '--namespace', type=argparse.FileType('r'),
help='Load a JSON file of values to inject into the '
'default rendering namespace.')
parser.add_argument('-i', '--interval', type=int, default=3,
help='Interval in seconds between file '
'checks while watching or serving')
parser.add_argument('--port', type=int, default=8080,
help='The port to listen on when serving')
parser.add_argument('--debug', action='store_true',
help='Extra verbose debug logging')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__),
help='output version information, then exit')
parser.add_argument('command', nargs='?',
choices=['build', 'watch', 'serve'],
help='The command to run', default='build')
return parser.parse_args()
def validate_paths(args):
"""Ensure all of the configured paths actually exist."""
if not path.exists(args.destination):
LOGGER.warning('Destination path "%s" does not exist, creating',
args.destination)
os.makedirs(path.normpath(args.destination))
for file_path in [args.source, args.templates]:
if not path.exists(file_path):
exit_application('Path {} does not exist'.format(file_path), 1)
def main():
"""Application entry point"""
args = parse_cli_arguments()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=log_level, format=LOGGING_FORMAT)
LOGGER.info('Landspout v%s [%s]', __version__, args.command)
validate_paths(args)
landspout = core.Landspout(args)
if args.command == 'build':
landspout.build()
elif args.command == 'watch':
landspout.watch()
elif args.command == 'serve':
landspout.serve()
|
[
"logging.getLogger",
"logging.basicConfig",
"os.path.exists",
"argparse.FileType",
"argparse.ArgumentParser",
"os.path.normpath",
"landspout.core.Landspout",
"sys.exit"
] |
[((193, 223), 'logging.getLogger', 'logging.getLogger', (['"""landspout"""'], {}), "('landspout')\n", (210, 223), False, 'import logging\n'), ((634, 648), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (642, 648), False, 'import sys\n'), ((810, 972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""landspout"""', '"""Static website generation tool"""'], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'conflict_handler': '"""resolve"""'}), "('landspout', 'Static website generation tool',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n", (833, 972), False, 'import argparse\n'), ((3445, 3504), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'log_level', 'format': 'LOGGING_FORMAT'}), '(level=log_level, format=LOGGING_FORMAT)\n', (3464, 3504), False, 'import logging\n'), ((3611, 3631), 'landspout.core.Landspout', 'core.Landspout', (['args'], {}), '(args)\n', (3625, 3631), False, 'from landspout import core, __version__\n'), ((2932, 2961), 'os.path.exists', 'path.exists', (['args.destination'], {}), '(args.destination)\n', (2943, 2961), False, 'from os import path\n'), ((1829, 1851), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1846, 1851), False, 'import argparse\n'), ((3097, 3128), 'os.path.normpath', 'path.normpath', (['args.destination'], {}), '(args.destination)\n', (3110, 3128), False, 'from os import path\n'), ((3198, 3220), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (3209, 3220), False, 'from os import path\n')]
|
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.auth
import tornado.escape
import os.path
import logging
import sys
import urllib
import json
from uuid import uuid4
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
#to do -
# check character set of inputs (not vital as 'block' added to each user).
# scores?
#------------------------------------------------------------------------------Main app code-------------------------------------------
class Status (object):
currentStatus = "waitingforstart"
currentLoginStatus = "open"
currentTime = 90
currentQuestion = False
currentQuestionType = False
clientcallbacks = []
users = {} # users is a dictionary - names are keys, each item is a dictionary of score and (if neccesary), current question and correct or not
globalcallbacks = []
controlcallbacks = []
answercounter = 0
quiztype = ""
def registerclient(self, callback):
print('register client---------------------------------------------------------')
if (callback not in self.clientcallbacks):
self.clientcallbacks.append(callback)
def registerglobal(self, callback):
print('register global----------------------------------------------------------')
if (callback not in self.globalcallbacks):
self.globalcallbacks.append(callback)
def registercontrol(self, callback):
print('register control----------------------------------------------------------')
if (callback not in self.controlcallbacks):
self.controlcallbacks.append(callback)
def adduser(self, name):
if self.getStatus()=="waitingforstart":
self.users[tornado.escape.native_str(name)]={'qnum':0,'level':0,'complete':"false",'Score':0,'answerordinal':10000, 'block':"false",'finished':"false"}
else:
self.users[tornado.escape.native_str(name)]={'qnum':0,'level':0,'complete':"false",'Score':0,'answerordinal':10000, 'block':"false",'finished':"false"}
# self.users(tornado.escape.native_str(name))
self.notifyAddUser()
def removeuser(self, name):
print('removing user')
# self.users.remove(tornado.escape.native_str(name))
delname = tornado.escape.native_str(name)
if (delname in self.users):
del self.users[delname]
def setQuestion(self, question):
print('setquestion')
questtype = "open"
jsonstring ='{"type":"question","question":"'+question+'"}'
self.clearAnswers()
self.currentQuestion = question
self.currentQuestionType = questtype
self.setStatus("waitingforanswer")
self.setLoginStatus("closed")
self.notifyGlobal(jsonstring)
jsonstring ='{"type":"question","status":"waitingforanswer","loginstatus":"closed"}'
self.notifyControl(jsonstring)
jsonstring ='{"type":"questionasked","qtype":"'+questtype+'"}'
self.notifyClient(jsonstring)
# print ("what the hell")
# self.notifyAnswer()
# could be named better as is doing the marking
def setControlAnswer(self, answer):
print('set control answer')
answers = answer.split('/')
print(len(answers))
for user in self.users.keys():
if ('answer' in self.users[user]):
if (self.users[user]['answer']in answers):
self.users[user]['mark']="correct"
else:
self.users[user]['mark']="incorrect"
self.notifyGlobalAnswer()
self.notifyUserAllAnswers()
def setCorrectFromControl(self, user):
if (self.users[user]):
self.users[user]['mark']="correct"
print("does it workd")
print(self.users[user]['mark'])
self.notifyGlobalAnswer()
self.notifyUserAnswerCorrect(user)
def setIncorrectFromControl(self, user):
if (self.users[user]):
self.users[user]['mark']="incorrect"
print(self.users[user]['mark'])
self.notifyGlobalAnswer()
self.notifyUserAnswerIncorrect(user)
def setBlockFromControl(self, user):
if (self.users[user]):
self.users[user]['block']="true"
self.notifyGlobalAnswer()
def setUnblockFromControl(self, user):
if (self.users[user]):
self.users[user]['block']="false"
self.notifyGlobalAnswer()
def toggleLoginStatus(self):
if (self.getLoginStatus()=="closed"):
self.setLoginStatus("open")
else:
self.setLoginStatus("closed")
self.notifyControlLoginStatus()
def toggleStatus(self):
if (self.getStatus()=="waitingforanswer"):
self.setStatus("answersclosed")
else:
self.setStatus("waitingforanswer")
self.notifyControlStatus()
def resetGame(self):
jsonstring = '{"type":"reset"}'
print("what the hell")
self.notifyClient(jsonstring)
def setAnswer(self, answer, user):
print('getting answer')
print (answer)
print (user)
self.users[user]['answer'] = answer
self.users[user]['answerordinal']=self.answercounter
self.users[user]['mark']="notmarked"
self.answercounter=self.answercounter + 1
self.notifyAnswer()
def setClientResult(self, level, qnum, finished, user):
print ('gotten result')
print (level)
print (qnum)
print (user)
print (finished)
self.users[user]['level']=int(level)
self.users[user]['qnum']=int(qnum)
self.users[user]['finished']=finished
self.notifyAnswer()
def clearAnswers(self):
self.answercounter = 0
for user in self.users.keys():
if ('answer' in self.users[user]):
del self.users[user]['answer']
self.users[user]['answerordinal']=10000
self.users[user]['mark']="notmarked"
def setStatus(self, status):
self.currentStatus = status
def setQuizType(self, quiztype):
self.quiztype = quiztype
def setLoginStatus(self, status):
self.currentLoginStatus = status
def setTime(self, time):
print("SETTING TIMER________________")
self.currentTime = time
self.notifyGlobalTimeChange(time)
self.notifyUserTimeChange(time)
def notifyAddUser(self):
print("notify add user")
jsonstring = '{"type":"users","users":['
print (self.users)
for c in self.users.keys():
jsonstring = jsonstring+'"'+c+'",'
jsonstring = jsonstring[:-1]
jsonstring = jsonstring+']}'
self.notifyGlobal(jsonstring)
self.notifyControlAnswer()
def notifyAnswer(self):
print ("notify answer")
self.notifyGlobalAnswer()
self.notifyControlAnswer()
def notifyGlobalAnswer(self):
print ("notify gloabla answer")
jsonstring = '{"type":"answers","answers":['
answerarray = self.makeAnswerArrayString()
jsonstring = jsonstring+answerarray
jsonstring = jsonstring+']}'
self.notifyGlobal(jsonstring)
def notifyUserAnswerCorrect(self, markedusername):
jsonstring = '{"type":"mark","mark":"correct","markeduser":"'
jsonstring = jsonstring+markedusername+'"}'
self.notifyClient(jsonstring)
def notifyUserAnswerIncorrect(self, markedusername):
jsonstring = '{"type":"mark","mark":"incorrect","markeduser":"'
jsonstring = jsonstring+markedusername+'"}'
self.notifyClient(jsonstring)
def notifyUserTimeChange(self, time):
print ("notify user time")
jsonstring = '{"type":"time","time":'
jsonstring = jsonstring+time
jsonstring = jsonstring+'}'
self.notifyClient(jsonstring)
def notifyGlobalTimeChange(self, time):
print ("notify gloabl time")
jsonstring = '{"type":"time","time":'
jsonstring = jsonstring+time
jsonstring = jsonstring+'}'
self.notifyGlobal(jsonstring)
def notifyUserAllAnswers(self):
print ("notify all users")
jsonstring = '{"type":"alluseranswers","answers":['
answerarray = self.makeAnswerArrayString()
jsonstring = jsonstring+answerarray
jsonstring = jsonstring+']}'
self.notifyClient(jsonstring)
def notifyControlAnswer(self):
print ("notify contorl answer")
jsonstring = '{"type":"answers","answers":['
controlanswerarray = self.makeControlArrayString()
jsonstring = jsonstring+controlanswerarray
jsonstring = jsonstring+']'
# jsonstring = jsonstring+ ',"status":"'
# jsonstring = jsonstring+self.application.status.getstatus()+'"'
jsonstring = jsonstring+'}'
self.notifyControl(jsonstring)
def notifyControlLoginStatus(self):
print(self.getLoginStatus())
jsonstring = '{"type":"loginstatus","loginstatus":"'
jsonstring = jsonstring+self.getLoginStatus()
jsonstring = jsonstring + '"}'
self.notifyControl(jsonstring)
def notifyControlStatus(self):
print(self.getStatus())
jsonstring = '{"type":"status","status":"'
jsonstring = jsonstring+self.getStatus()
jsonstring = jsonstring + '"}'
self.notifyControl(jsonstring)
def makeAnswerArrayString (self):
if self.quiztype == "multiq":
sortedlist = self.getMultiqSortedUserList()
else:
sortedlist = self.getSortedUserList()
jsonstring = ""
#for c in self.users.keys():
#self.application.quiztype
for c in sortedlist:
if self.quiztype == "multiq":
jsonstring = jsonstring+'['
jsonstring = jsonstring+'"'+c[0]+'",'
jsonstring = jsonstring+'"no answer",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['level'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['qnum'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['finished'])+'"],'
else:
if ('answer' in c[1]):
jsonstring = jsonstring+'['
jsonstring = jsonstring+'"'+c[0]+'",'
jsonstring = jsonstring+'"'+c[1]['answer']+'",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"'+c[1]['mark']+'",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],'
jsonstring = jsonstring[:-1]
return jsonstring
def getSortedUserList (self):
print("-------------------------------------")
listfromusers = self.users.items()
print(listfromusers)
sortedlist = sorted(listfromusers, key=lambda usered: usered[1]['answerordinal'])
print(sortedlist)
return sortedlist
def getMultiqSortedUserList (self):
listfromusers = self.users.items()
sortedlist = sorted(listfromusers, key=lambda usered: (usered[1]['level'], usered[1]['qnum'],usered[1]['answerordinal']), reverse = True)
print(sortedlist)
return sortedlist
def makeControlArrayString (self):
jsonstring = ""
if self.quiztype == "multiq":
jsonstring = self.makeMultiqControlArrayString()
else:
sortedlist = self.getSortedUserList()
for c in sortedlist:
jsonstring = jsonstring+'['
jsonstring = jsonstring+'"'+c[0]+'",'
if ('answer' in c[1]):
jsonstring = jsonstring+'"'+c[1]['answer']+'",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"'+c[1]['mark']+'",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],'
else:
jsonstring = jsonstring+'"noanswer",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"nomark",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],'
jsonstring = jsonstring[:-1]
return jsonstring
def makeMultiqControlArrayString (self):
jsonstring = ""
sortedlist = self.getSortedUserList()
for c in sortedlist:
jsonstring = jsonstring+'['
jsonstring = jsonstring+'"'+c[0]+'",'
if ('answer' in c[1]):
jsonstring = jsonstring+'"'+c[1]['answer']+'",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"'+c[1]['mark']+'",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'"],'
else:
jsonstring = jsonstring+'"noanswer",'
jsonstring = jsonstring+'"'+str(c[1]['answerordinal'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['level'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['block'])+'",'
jsonstring = jsonstring+'"'+str(c[1]['qnum'])+'"],'
jsonstring = jsonstring[:-1]
print (jsonstring)
print ("make controll array string")
return jsonstring
def notifyGlobal(self, message):
for c in self.globalcallbacks:
print('globalcallbacks')
print(message)
print(c)
c(message)
self.globalcallbacks=[]
def notifyControl(self, message):
for c in self.controlcallbacks:
print('controlcallbacks')
print(message)
print(c)
c(message)
self.controlcallbacks=[]
def notifyClient(self, message):
for c in self.clientcallbacks:
print('controlcallbacks')
print(message)
print(c)
c(message)
self.clientcallbacks=[]
def getUsers(self):
return self.users.keys()
def getStatus(self):
return self.currentStatus
def getTime(self):
return self.currentTime
def getLoginStatus(self):
return self.currentLoginStatus
def getQuestion(self):
return self.currentQuestion
def getQuizType(self):
return self.quizType
def getQuestionType(self):
return self.currentQuestionType
#----------------------------------------------------------status handlers-------------------------
# these handle the asynch hooks from the pages and sending messages to the pages
# a lot of shared code here - I'm sure this could be better!
class ClientStatusHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
print("register client")
self.application.status.registerclient(self.on_message)
def on_message(self, message):
print("client message sent")
print(message)
self.write(message)
self.finish()
class GlobalStatusHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
print("reggister gloabl")
self.application.status.registerglobal(self.on_message)
def on_message(self, message):
print("global message sent")
print(message)
self.write(message)
self.finish()
class ControlStatusHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
print("registeredd control")
self.application.status.registercontrol(self.on_message)
def on_message(self, message):
print("control message sent")
print(message)
self.write(message)
self.finish()
# message handlers - recieves messages from the pages (currently only control and client)
class ControlMessageHandler(tornado.web.RequestHandler):
def get(self):
messagetype = self.get_argument("type")
if messagetype=="question":
question = urllib.parse.unquote(self.get_argument("question"))
self.application.status.setQuestion(question)
if messagetype=="time":
time = urllib.parse.unquote(self.get_argument("time"))
self.application.status.setTime(time)
if messagetype=="controlanswer":
answer = urllib.parse.unquote(self.get_argument("answer"))
self.application.status.setControlAnswer(answer)
if messagetype=="markcorrect":
name = urllib.parse.unquote(self.get_argument("id"))
self.application.status.setCorrectFromControl(name)
if messagetype=="markincorrect":
name = urllib.parse.unquote(self.get_argument("id"))
self.application.status.setIncorrectFromControl(name)
if messagetype=="block":
name = urllib.parse.unquote(self.get_argument("id"))
self.application.status.setBlockFromControl(name)
if messagetype=="unblock":
name = urllib.parse.unquote(self.get_argument("id"))
self.application.status.setUnblockFromControl(name)
if messagetype=="toggleloginstatus":
self.application.status.toggleLoginStatus()
if messagetype=="togglestatus":
self.application.status.toggleStatus()
if messagetype=="resetgame":
self.application.status.resetGame();
self.finish()
class ClientMessageHandler(tornado.web.RequestHandler):
def get(self):
messagetype = self.get_argument("type")
if messagetype=="answer":
currentstatus = self.application.status.getStatus()
if (currentstatus=="waitingforanswer"):
answer = urllib.parse.unquote(self.get_argument("answer"))
user = tornado.escape.native_str(self.get_secure_cookie("username"))
self.application.status.setAnswer(answer,user)
if messagetype=="clientmarked":
currentstatus = self.application.status.getStatus()
if (currentstatus=="waitingforanswer"):
user = tornado.escape.native_str(self.get_secure_cookie("username"))
level = self.get_argument("level");
qnum = self.get_argument("qnum");
finished = self.get_argument("finished");
self.application.status.setClientResult(level, qnum, finished, user);
self.finish()
class GlobalMessageHandler(tornado.web.RequestHandler):
def get(self):
messagetype = self.get_argument("type")
if messagetype=="requestanswers":
self.application.status.notifyAnswer()
self.finish()
# - template handlers ------------- pages that are actually called by the browser.
class ClientPageHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("username")
def get(self):
session = uuid4()
class LoginHandler(ClientPageHandler):
def get(self):
#print (self.application.gamefile)
#print (self.application.gamefile["quiztype"])
if self.application.status.getLoginStatus()=="open":
self.render('login.html')
elif self.get_secure_cookie("username"):
print(self.application.status.getStatus())
self.redirect("/")
else:
print(self.application.status.getStatus())
self.render('gamestarted.html')
def post(self):
# if client already has a username set, remove it from the list before creating a new username
if self.get_secure_cookie("username"):
self.application.status.removeuser(self.current_user)
# create new user
self.set_secure_cookie("username",self.get_argument("username"),expires_days=1)
self.redirect("/")
class ClientWelcome(ClientPageHandler):
@tornado.web.authenticated
def get(self):
session = uuid4()
self.application.status.adduser(self.current_user)
currentstatus = self.application.status.getStatus()
currenttime = self.application.status.getTime()
questionarray = self.application.questionarray
currentquestiontype = self.application.status.getQuestionType()
clientpage = self.application.quiztypes[self.application.quiztype]['client_page']
self.render(clientpage,session=session,user=self.current_user, status=currentstatus, questiontype=currentquestiontype,time=currenttime, levels = questionarray)
class ControlPageHandler(tornado.web.RequestHandler):
def get(self):
# users = self.application.status.getUsers()
# userstring = "','".join(str(thisuser) for thisuser in users)
controlstring = self.application.status.makeControlArrayString()
currentstatus = self.application.status.getStatus()
currentloginstatus = self.application.status.getLoginStatus()
currenttime = self.application.status.getTime()
quiztype = "'" + self.application.quiztype + "'"
questionarray = self.application.questionarray
answerarray = self.application.answerarray
page = self.application.quiztypes[self.application.quiztype]["control_page"]
self.render(page,teams="["+str(controlstring)+"]", status=currentstatus, loginstatus=currentloginstatus, time=currenttime, quiztype = quiztype, questionarray = questionarray, answerarray = answerarray)
class GlobalPageHandler(tornado.web.RequestHandler):
def get(self):
users = self.application.status.getUsers()
userstring = '","'.join(str(thisuser) for thisuser in users)
currentstatus = self.application.status.getStatus()
currentquestion = self.application.status.getQuestion()
currentanswers = self.application.status.makeAnswerArrayString()
currenttime = self.application.status.getTime()
globalpage = self.application.quiztypes[self.application.quiztype]["global_page"]
# should add extra [ ] for current answers string (as in teams) - currently done in javascript
self.render(globalpage,teams='["'+str(userstring)+'"]', status=currentstatus, question=currentquestion, answers=currentanswers,time=currenttime)
class Application(tornado.web.Application):
def __init__(self):
self.status = Status()
# self.gametype = "default"
print('init')
handlers = [
(r'/',ClientWelcome),
(r'/control',ControlPageHandler),
(r'/global',GlobalPageHandler),
(r'/login',LoginHandler),
(r'/clientstatus',ClientStatusHandler),
(r'/globalstatus',GlobalStatusHandler),
(r'/controlstatus',ControlStatusHandler),
(r'/controlmessage',ControlMessageHandler),
(r'/clientmessage',ClientMessageHandler),
(r'/globalmessage',GlobalMessageHandler),
]
settings = {
'template_path':'./templates',
'static_path':'./static',
'cookie_secret':'123456',
'login_url':'/login',
'xsft_cookies':True,
'debug':True,
}
## states which pages should be served for each type of quiz.
self.quiztypes = {
'default':{"client_page":"default_client.html",
"global_page":"default_global.html",
"control_page":"default_control.html"},
'fixed_answers':{"client_page":"default_client.html",
"global_page":"default_global.html",
"control_page":"default_control.html"},
'open_answers':{"client_page":"default_client.html",
"global_page":"default_global.html",
"control_page":"default_control.html"},
'fixed_timed':{"client_page":"timed_client.html",
"global_page":"timed_global.html",
"control_page":"timed_control.html"},
'open_timed':{"client_page":"timed_client.html",
"global_page":"timed_global.html",
"control_page":"timed_control.html"},
'multiq':{"client_page":"multiq_client.html",
"global_page":"multiq_global.html",
"control_page":"multiq_control.html"}
}
tornado.web.Application.__init__(self, handlers,**settings)
if __name__ == '__main__':
# tornado.options.parse_command_line()
def set_defaults():
app.quiztype = "default"
app.notes = "Open ended questions can be entered in control pages. Answers can be marked individualy or by entering an answer in the control page."
app.questionarray = "{}"
app.answerarray = "{}"
app = Application()
if len(sys.argv) > 1:
try:
with open(sys.argv[1]) as json_data:
app.gamefile = json.load(json_data)
json_data.close()
app.quiztype = app.gamefile["quiztype"]
if "notes" in app.gamefile:
app.notes = app.gamefile["notes"]
if "questionarray" in app.gamefile:
app.questionarray = app.gamefile["questionarray"]
else:
app.questionarray = "{}"
if "answerarray" in app.gamefile:
app.answerarray = app.gamefile["answerarray"]
else:
app.answerarray = "{}"
except:
print("not a valid json file, using defaults")
set_defaults()
else:
print("no file given - using defaults")
set_defaults()
app.status.setQuizType(app.quiztype)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
[
"json.load",
"tornado.options.define",
"uuid.uuid4"
] |
[((269, 337), 'tornado.options.define', 'define', (['"""port"""'], {'default': '(8000)', 'help': '"""run on the given port"""', 'type': 'int'}), "('port', default=8000, help='run on the given port', type=int)\n", (275, 337), False, 'from tornado.options import define, options\n'), ((19434, 19441), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (19439, 19441), False, 'from uuid import uuid4\n'), ((20446, 20453), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20451, 20453), False, 'from uuid import uuid4\n'), ((25480, 25500), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (25489, 25500), False, 'import json\n')]
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib.ticker import FormatStrFormatter
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
# z = [0,0.1,0.3,0.9,1,2,5]
z = [7.8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1230]
# thick = [20,40,20,60,37,32,21]ax1.set_xscale('log')
# thick=[15.4, 18.2, 18.7, 19.2, 19.4, 19.5, 19.9, 20.1, 20.4, 20.5, 20.6, 20.7, 20.8, 20.7, 20.7, 20.6, 20.6, 20.6, 20.5, 20.5, 19.8]
mrcnn=[17.7, 19.8, 20.0, 19.9, 20.2, 19.5, 19.1, 19.1]
x_ticks = [0.001, 0.002, 0.004, 0.008, 0.01, 0.02, 0.04, 0.08]
# plt.plot([1.0],[44.8], 'D', color = 'black')
# plt.plot([0],[35.9], 'D', color = 'red')
# plt.plot([1.0],[56.8], 'D', color = 'black')
fig = plt.figure(figsize=(8,5))
ax1 = fig.add_subplot(111)
matplotlib.rcParams.update({'font.size': 20})
ax1.plot(x_ticks, mrcnn, linestyle='dashed', marker='o', linewidth=2, c='k', label='mrcnn-r50-ag')
# ax1.plot(z, htc, marker='o', linewidth=2, c='g', label='htc')
# ax1.plot([1e-4],[15.4], 'D', color = 'green')
# ax1.plot([1230],[19.8], 'D', color = 'red')
plt.xlabel('calibration lr', size=16)
plt.ylabel('bAP', size=16)
# plt.gca().set_xscale('custom')
ax1.set_xscale('log')
ax1.set_xticks(x_ticks)
# from matplotlib.ticker import ScalarFormatter
# ax1.xaxis.set_major_formatter(ScalarFormatter())
# plt.legend(['calibration lr'], loc='best')
plt.minorticks_off()
plt.grid()
plt.savefig('calibration_lr.eps', format='eps', dpi=1000)
plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# from scipy.interpolate import interp1d
# y1=[35.9, 43.4, 46.1, 49.3, 50.3, 51.3, 51.4, 49.9, 49.5, 48.5, 44.8]
# y2=[40.5, 48.2, 53.9 , 56.9, 57.8, 59.2, 58.3, 57.9, 57.5, 57.2, 56.8]
# y3=[61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5, 61.5]
# x = np.linspace(0, 1, num=11, endpoint=True)
#
# f1 = interp1d(x, y1, kind='cubic')
# f2 = interp1d(x, y2, kind='cubic')
# f3 = interp1d(x, y3, kind='cubic')
# xnew = np.linspace(0, 1, num=101, endpoint=True)
# plt.plot(xnew, f3(xnew), '--', color='fuchsia')
# plt.plot(xnew, f1(xnew), '--', color='blue')
# plt.plot(xnew, f2(xnew), '--', color='green')
#
# plt.plot([0],[40.5], 'D', color = 'red')
# plt.plot([1.0],[44.8], 'D', color = 'black')
# plt.plot([0],[35.9], 'D', color = 'red')
# plt.plot([1.0],[56.8], 'D', color = 'black')
# plt.plot(x, y3, 'o', color = 'fuchsia')
# plt.plot(x, y1, 'o', color = 'blue')
# plt.plot(x, y2, 'o', color = 'green')
# plt.plot([0],[40.5], 'D', color = 'red')
# plt.plot([1.0],[44.8], 'D', color = 'black')
# plt.plot([0],[35.9], 'D', color = 'red')
# plt.plot([1.0],[56.8], 'D', color = 'black')
# plt.legend(['teacher','0.25x', '0.5x', 'full-feature-imitation', 'only GT supervison'], loc='best')
# plt.xlabel('Thresholding factor')
# plt.ylabel('mAP')
# plt.title('Resulting mAPs of varying thresholding factors')
# #plt.legend(['0.5x'])
# # plt.savefig('varying_thresh.eps', format='eps', dpi=1000)
# plt.show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.rcParams.update",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.minorticks_off",
"matplotlib.pyplot.show"
] |
[((805, 831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (815, 831), True, 'import matplotlib.pyplot as plt\n'), ((858, 903), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (884, 903), False, 'import matplotlib\n'), ((1164, 1201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""calibration lr"""'], {'size': '(16)'}), "('calibration lr', size=16)\n", (1174, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""bAP"""'], {'size': '(16)'}), "('bAP', size=16)\n", (1212, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1477), 'matplotlib.pyplot.minorticks_off', 'plt.minorticks_off', ([], {}), '()\n', (1475, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1488), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1486, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1546), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""calibration_lr.eps"""'], {'format': '"""eps"""', 'dpi': '(1000)'}), "('calibration_lr.eps', format='eps', dpi=1000)\n", (1500, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1557), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1555, 1557), True, 'import matplotlib.pyplot as plt\n')]
|
import discord
from discord.ext.commands import Bot
TOKEN = "<discordtoken>"
client = discord.Client()
bot = Bot(command_prefix="!")
@bot.event
async def on_ready():
print("Bot Hazır " + str(bot.user))
@bot.event
async def on_message(message):
if message.author == client.user:
return
if message.content == "selam":
await message.channel.send("selam naber")
bot.run(TOKEN)
|
[
"discord.Client",
"discord.ext.commands.Bot"
] |
[((93, 109), 'discord.Client', 'discord.Client', ([], {}), '()\n', (107, 109), False, 'import discord\n'), ((117, 140), 'discord.ext.commands.Bot', 'Bot', ([], {'command_prefix': '"""!"""'}), "(command_prefix='!')\n", (120, 140), False, 'from discord.ext.commands import Bot\n')]
|
# -*- coding: utf-8 -*-
""" test """
from __future__ import unicode_literals
from django.template.loader import get_template
from django.contrib import messages
# Create your views here.
from django.http import HttpResponse
def index(request):
""" index """
template = get_template('cornwall/index.html')
messages.set_level(request, messages.DEBUG)
list(messages.get_messages(request))# clear out the previous messages
messages.add_message(request, messages.INFO, 'Hello world.')
context = {'nbar': 'cornwall'}
html = template.render(context, request)
return HttpResponse(html)
|
[
"django.contrib.messages.set_level",
"django.http.HttpResponse",
"django.contrib.messages.add_message",
"django.contrib.messages.get_messages",
"django.template.loader.get_template"
] |
[((279, 314), 'django.template.loader.get_template', 'get_template', (['"""cornwall/index.html"""'], {}), "('cornwall/index.html')\n", (291, 314), False, 'from django.template.loader import get_template\n'), ((319, 362), 'django.contrib.messages.set_level', 'messages.set_level', (['request', 'messages.DEBUG'], {}), '(request, messages.DEBUG)\n', (337, 362), False, 'from django.contrib import messages\n'), ((441, 501), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.INFO', '"""Hello world."""'], {}), "(request, messages.INFO, 'Hello world.')\n", (461, 501), False, 'from django.contrib import messages\n'), ((593, 611), 'django.http.HttpResponse', 'HttpResponse', (['html'], {}), '(html)\n', (605, 611), False, 'from django.http import HttpResponse\n'), ((372, 402), 'django.contrib.messages.get_messages', 'messages.get_messages', (['request'], {}), '(request)\n', (393, 402), False, 'from django.contrib import messages\n')]
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.add_ons.image_capture import ImageCapture
from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH
"""
Get the _flow pass.
"""
c = Controller()
object_id_0 = c.get_unique_id()
object_id_1 = c.get_unique_id()
object_id_2 = c.get_unique_id()
object_id_3 = c.get_unique_id()
object_names = {object_id_0: "small_table_green_marble",
object_id_1: "rh10",
object_id_2: "jug01",
object_id_3: "jug05"}
output_directory = EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath("flow")
# Enable image capture for the _flow pass.
print(f"Images will be saved to: {output_directory}")
capture = ImageCapture(path=output_directory, pass_masks=["_flow"], avatar_ids=["a"])
c.add_ons.append(capture)
commands = [TDWUtils.create_empty_room(12, 12),
c.get_add_object(object_names[object_id_0],
object_id=object_id_0),
c.get_add_object(object_names[object_id_1],
position={"x": 0.7, "y": 0, "z": 0.4},
rotation={"x": 0, "y": 30, "z": 0},
object_id=object_id_1),
c.get_add_object(model_name=object_names[object_id_2],
position={"x": -0.3, "y": 0.9, "z": 0.2},
object_id=object_id_2),
c.get_add_object(object_names[object_id_3],
position={"x": 0.3, "y": 0.9, "z": -0.2},
object_id=object_id_3),
{"$type": "apply_force_to_object",
"id": object_id_1,
"force": {"x": 0, "y": 5, "z": -200}}]
commands.extend(TDWUtils.create_avatar(position={"x": 2.478, "y": 1.602, "z": 1.412},
look_at={"x": 0, "y": 0.2, "z": 0},
avatar_id="a"))
c.communicate(commands)
for i in range(3):
c.communicate([])
c.communicate({"$type": "terminate"})
|
[
"tdw.tdw_utils.TDWUtils.create_avatar",
"tdw.add_ons.image_capture.ImageCapture",
"tdw.backend.paths.EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath",
"tdw.tdw_utils.TDWUtils.create_empty_room",
"tdw.controller.Controller"
] |
[((219, 231), 'tdw.controller.Controller', 'Controller', ([], {}), '()\n', (229, 231), False, 'from tdw.controller import Controller\n'), ((550, 597), 'tdw.backend.paths.EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath', 'EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath', (['"""flow"""'], {}), "('flow')\n", (589, 597), False, 'from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH\n'), ((706, 781), 'tdw.add_ons.image_capture.ImageCapture', 'ImageCapture', ([], {'path': 'output_directory', 'pass_masks': "['_flow']", 'avatar_ids': "['a']"}), "(path=output_directory, pass_masks=['_flow'], avatar_ids=['a'])\n", (718, 781), False, 'from tdw.add_ons.image_capture import ImageCapture\n'), ((821, 855), 'tdw.tdw_utils.TDWUtils.create_empty_room', 'TDWUtils.create_empty_room', (['(12)', '(12)'], {}), '(12, 12)\n', (847, 855), False, 'from tdw.tdw_utils import TDWUtils\n'), ((1727, 1851), 'tdw.tdw_utils.TDWUtils.create_avatar', 'TDWUtils.create_avatar', ([], {'position': "{'x': 2.478, 'y': 1.602, 'z': 1.412}", 'look_at': "{'x': 0, 'y': 0.2, 'z': 0}", 'avatar_id': '"""a"""'}), "(position={'x': 2.478, 'y': 1.602, 'z': 1.412},\n look_at={'x': 0, 'y': 0.2, 'z': 0}, avatar_id='a')\n", (1749, 1851), False, 'from tdw.tdw_utils import TDWUtils\n')]
|
"""
Expression Dataset for analysis of matrix (RNASeq/microarray) data with annotations
"""
import pandas as PD
import numpy as N
from matplotlib import pylab as P
from collections import OrderedDict
from ast import literal_eval
# from ..plot.matrix import matshow_clustered
class ExpressionSet(object):
def __init__(self, eData, gData=None, sData=None):
"""
eData: expression data (gene x samples) header: MultiIndex (samplename, group)
fData: gene annotation (gene x gene annotations)
pData: sample annotation (sample x sample annotations)
"""
self.eData = eData
self.gData = gData
self.sData = sData
def read(self, eFile, gFile=None, sFile=None):
pass
def write(self, eFile, gFile=None, sFile=None):
self.eData.to_csv(eFile, tupleize_cols=False, sep="\t")
if gFile is not None:
self.gData.to_csv(gFile, tupleize_cols=False, sep="\t")
if sFile is not None:
self.sData.to_csv(sFile, tupleize_cols=False, sep="\t")
def find(self, field, pat):
pass
def read_bioinfo3_data(fname):
""" read bioinfo3.table.dataset type of data """
fobj = open(fname)
groups = OrderedDict()
cnt = 0
for line in fobj:
cnt += 1
if line[:2]=='#%':
if line.startswith('#%groups:'):
gname, members = line[len('#%groups:'):].split('=')
gname = gname.strip()
members = members.strip().split(',')
groups[gname] = members
datafields = line.strip().split('=')[1].strip().split(',')
elif line.startswith('#%fields'):
fields = line.strip().split('=')[1].strip().split(',')
elif not line.strip():
continue # empty line
else:
break
df = PD.read_table(fname, skiprows=cnt-1)
f2g = {}
for g,m in groups.items():
for f in m:
f2g[f] = g
df.columns = PD.MultiIndex.from_tuples([(x, f2g.get(x,'')) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def read_multiindex_data(fname, tupleize=True, index_names = ['samplename','group']):
""" read dataset table with MultiIndex in the header """
if not tupleize:
df = PD.read_table(fname, header=range(len(index_names)), index_col=[0], tupleize_cols=False)
e = ExpressionSet(df)
return e
df = PD.read_table(fname, index_col=0)
df.columns = PD.MultiIndex.from_tuples(df.columns.map(literal_eval).tolist(), names=index_names)
e = ExpressionSet(df)
return e
def read_grouped_table(fname, groupfn=lambda x: '_'.join(x.split('_')[:-1])):
""" Read dataset whose group is encoded in the colname. Column 0 is index. """
df = PD.read_table(fname)
f2g = {x:groupfn(x) for x in df.columns}
df.columns = PD.MultiIndex.from_tuples([(x, f2g[x]) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def concatenate(dic):
""" dic: dict of DataFrames
merge all using index and outer join
"""
keys = list(dic)
d = dic[keys[0]].merge(dic[keys[1]], left_index=True, right_index=True, how='outer', suffixes=('.'+keys[0],'.'+keys[1]))
for k in keys[2:]:
d = d.merge(dic[k], left_index=True, right_index=True, how='outer', suffixes=('','.'+k))
return d
def calc_mergesortkey(dic, pos_neg_flds):
conc = concatenate(dic)
selected = ~N.isnan(conc[pos_neg_flds])
pos = conc[pos_neg_flds]>0
neg = conc[pos_neg_flds]<=0
num_pos = pos.sum(axis=1)
num_neg = neg.sum(axis=1)
pos_neg_mix = -1*(num_neg==0) + 1*(num_pos==0) # pos(-1), mix(0), neg(1)
#num_hit = num_pos - num_neg
num_hit = num_pos + num_neg
n = len(pos_neg_flds)
#position = (N.arange(1,n+1)*pos + N.arange(-1,-n-1,-1)*neg).sum(axis=1)
position = (N.arange(1,n+1)*pos + N.arange(-n,0)*neg).sum(axis=1)
strength = (conc[pos_neg_flds]*pos).sum(axis=1) + (conc[pos_neg_flds]*neg).sum(axis=1)
#msk = PD.Series(list(zip(pos_neg_mix, num_hit, position, strength)), index=conc.index)
#msk.sort()
conc['mergesortkey'] = list(zip(pos_neg_mix, num_hit, position, strength))
conc.sort('mergesortkey', inplace=True)
return conc
|
[
"collections.OrderedDict",
"numpy.isnan",
"pandas.read_table",
"pandas.MultiIndex.from_tuples",
"numpy.arange"
] |
[((1227, 1240), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1238, 1240), False, 'from collections import OrderedDict\n'), ((1862, 1900), 'pandas.read_table', 'PD.read_table', (['fname'], {'skiprows': '(cnt - 1)'}), '(fname, skiprows=cnt - 1)\n', (1875, 1900), True, 'import pandas as PD\n'), ((2481, 2514), 'pandas.read_table', 'PD.read_table', (['fname'], {'index_col': '(0)'}), '(fname, index_col=0)\n', (2494, 2514), True, 'import pandas as PD\n'), ((2826, 2846), 'pandas.read_table', 'PD.read_table', (['fname'], {}), '(fname)\n', (2839, 2846), True, 'import pandas as PD\n'), ((2909, 3005), 'pandas.MultiIndex.from_tuples', 'PD.MultiIndex.from_tuples', (['[(x, f2g[x]) for x in df.columns]'], {'names': "['samplename', 'group']"}), "([(x, f2g[x]) for x in df.columns], names=[\n 'samplename', 'group'])\n", (2934, 3005), True, 'import pandas as PD\n'), ((3515, 3542), 'numpy.isnan', 'N.isnan', (['conc[pos_neg_flds]'], {}), '(conc[pos_neg_flds])\n', (3522, 3542), True, 'import numpy as N\n'), ((3929, 3947), 'numpy.arange', 'N.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (3937, 3947), True, 'import numpy as N\n'), ((3951, 3966), 'numpy.arange', 'N.arange', (['(-n)', '(0)'], {}), '(-n, 0)\n', (3959, 3966), True, 'import numpy as N\n')]
|
import sys
import rospy
import types
#from std_msgs.msg import String
from sensor_msgs.msg import Image
from cibr_img_processing.msg import Ints
from cv_bridge import CvBridge, CvBridgeError
#make int msgs
#TODO: get the img size from camera_indo topics
class CVUtilNode: # abstarct this, it can easily work with other cv_utils and be an image bbm_node
def __init__(self, util, name="cv_util_node", pub_topic=False):
#self.obj_pub = rospy.Publisher("image_topic_2", ***)
self.bridge = CvBridge()
self.util=util
self.name=name
rospy.init_node(self.name, anonymous=True)
self.rate=rospy.Rate(30)
self.image_sub = rospy.Subscriber("image_topic", Image, self.callback)
self.result_pub = rospy.Publisher("results", Ints, queue_size=10) #always publish data
self.result_msgs = [-1,-1,-1] #make int msgs
self.pubs=lambda:0
self.subs=[]
if pub_topic:
self.image_pub = rospy.Publisher(pub_topic,Image, queue_size=10)
pass #do stuff with img.pub
def callback(self,data):
try:
self.util.hook(self.bridge.imgmsg_to_cv2(data, "bgr8"))
except CvBridgeError as e:
print(e)
def data_pub(self):
self.result_pub.publish(self.util.results) #try catch
def img_pub(cv_image): # to handleconverting from OpenCV to ROS
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
except CvBridgeError as e:
print(e)
def run(self):
self.util.init_windows()
while not rospy.is_shutdown():
try:
if self.util.loop(): break
if not -1 in self.util.results and self.util._publish:
self.data_pub()
self.util._publish = 0
# if self.util._publish:
# for pub in self.pubs:
# pub.publish
#self.rate.sleep()
except KeyboardInterrupt:
self.util.shutdown()
self.util.shutdown()
#adds a publisher to alirlaes,
def attach_pub(self, topic, type):
self.pubs.pub.append(rospy.Publisher(topic, type, queue_size=1))
# TODO:attach structs of publisher and message template instead
# so it is iterable together
#pubs.pub=... pubs.msg=type()
def attach_sub(self, topic, cb_handle):
self.subs.append = rospy.Subscriber(topic, type, cb_handle)
def attach_controls(self, fun_handle):
# bind the method to instance
self.util.external_ops=types.MethodType(fun_handle,self.util)
|
[
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"cv_bridge.CvBridge",
"rospy.Rate",
"types.MethodType",
"rospy.Publisher"
] |
[((506, 516), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (514, 516), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((572, 614), 'rospy.init_node', 'rospy.init_node', (['self.name'], {'anonymous': '(True)'}), '(self.name, anonymous=True)\n', (587, 614), False, 'import rospy\n'), ((633, 647), 'rospy.Rate', 'rospy.Rate', (['(30)'], {}), '(30)\n', (643, 647), False, 'import rospy\n'), ((673, 726), 'rospy.Subscriber', 'rospy.Subscriber', (['"""image_topic"""', 'Image', 'self.callback'], {}), "('image_topic', Image, self.callback)\n", (689, 726), False, 'import rospy\n'), ((753, 800), 'rospy.Publisher', 'rospy.Publisher', (['"""results"""', 'Ints'], {'queue_size': '(10)'}), "('results', Ints, queue_size=10)\n", (768, 800), False, 'import rospy\n'), ((2478, 2518), 'rospy.Subscriber', 'rospy.Subscriber', (['topic', 'type', 'cb_handle'], {}), '(topic, type, cb_handle)\n', (2494, 2518), False, 'import rospy\n'), ((2632, 2671), 'types.MethodType', 'types.MethodType', (['fun_handle', 'self.util'], {}), '(fun_handle, self.util)\n', (2648, 2671), False, 'import types\n'), ((977, 1025), 'rospy.Publisher', 'rospy.Publisher', (['pub_topic', 'Image'], {'queue_size': '(10)'}), '(pub_topic, Image, queue_size=10)\n', (992, 1025), False, 'import rospy\n'), ((1609, 1628), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1626, 1628), False, 'import rospy\n'), ((2216, 2258), 'rospy.Publisher', 'rospy.Publisher', (['topic', 'type'], {'queue_size': '(1)'}), '(topic, type, queue_size=1)\n', (2231, 2258), False, 'import rospy\n')]
|
from abc import abstractproperty
from ..backend_config.bucket_config import S3BucketConfig
from ..storage.helper import StorageHelper
class SetupUploadMixin(object):
log = abstractproperty()
storage_uri = abstractproperty()
def setup_upload(
self, bucket_name, host=None, access_key=None, secret_key=None, region=None, multipart=True, https=True, verify=True):
"""
Setup upload options (currently only S3 is supported)
:param bucket_name: AWS bucket name
:type bucket_name: str
:param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used)
:type host: str
:param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the
configuration file (bucket-specific, than global)
:type access_key: str
:param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the
configuration file (bucket-specific, than global)
:type secret_key: str
:param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
multipart.
:type multipart: bool
:param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
:type https: bool
:param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
:type region: str
:param verify: Whether or not to verify SSL certificates. Only required when using a Non-AWS S3 solution that only supports HTTPS with self-signed certificate.
:type verify: bool
"""
self._bucket_config = S3BucketConfig(
bucket=bucket_name,
host=host,
key=access_key,
secret=secret_key,
multipart=multipart,
secure=https,
region=region,
verify=verify
)
self.storage_uri = ('s3://%(host)s/%(bucket_name)s' if host else 's3://%(bucket_name)s') % locals()
StorageHelper.add_configuration(self._bucket_config, log=self.log)
|
[
"abc.abstractproperty"
] |
[((179, 197), 'abc.abstractproperty', 'abstractproperty', ([], {}), '()\n', (195, 197), False, 'from abc import abstractproperty\n'), ((216, 234), 'abc.abstractproperty', 'abstractproperty', ([], {}), '()\n', (232, 234), False, 'from abc import abstractproperty\n')]
|
import os
import unittest
import pandas as pd
from application.ParcelsParser import ParcelsParser
class TestPracelsParser(unittest.TestCase):
def setUp(self):
self.parser = ParcelsParser("./test_cadastral_parcels.tsv", "cadastral_parcel_identifier")
def test_if_file_exist(self):
file_path = self.parser.get_file()
self.assertTrue(file_path, os.path.isfile(file_path))
def test_if_file_doesnt_exist(self):
self.parser.set_file("./test_cadastral_parcels_wrong.tsv")
file_path = file_path = self.parser.get_file()
self.assertTrue(file_path, os.path.isfile(file_path))
def test_if_column_exist(self):
dirpath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(dirpath, self.parser.get_file())
df = pd.read_csv(filepath, sep='\t')
self.assertTrue(True, self.parser.get_column_name() in df.columns)
def test_get_identifiers_data(self):
dirpath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(dirpath, self.parser.get_file())
self.parser.set_file(filepath)
self.parser.get_identifiers_data()
data = self.parser.get_data()
self.assertTrue(7, len(data))
def test_province_county_commune(self):
segment = "301304"
province_code, county_code, commune_code = self.parser.get_province_county_commune(segment)
self.assertEqual(province_code, "30")
self.assertEqual(county_code, "13")
self.assertEqual(commune_code, "4")
def test_extract_data(self):
dirpath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(dirpath, self.parser.get_file())
df = pd.read_csv(filepath, sep='\t')
self.parser.set_file(filepath)
self.parser.get_identifiers_data()
self.parser.extract_data()
result = self.parser.get_result()
province_code_list = df['province_code'].astype(str).tolist()
county_code_list = df['county_code'].astype(str).tolist()
commune_code_list = df['commune_code'].astype(str).tolist()
commune_type_list = df['commune_type'].astype(str).tolist()
district_number_list = df['district_number'].astype(str).tolist()
parcel_number_list = df['parcel_number'].astype(str).tolist()
for i, item in enumerate(result):
self.assertEqual(item['province_code'], province_code_list[i])
self.assertEqual(item['county_code'], county_code_list[i])
self.assertEqual(item['commune_code'], commune_code_list[i])
self.assertEqual(item['commune_type'], commune_type_list[i])
self.assertEqual(item['district_number'], district_number_list[i])
self.assertEqual(item['parcel_number'], parcel_number_list[i])
if __name__ == '__main__':
unittest.main()
|
[
"application.ParcelsParser.ParcelsParser",
"pandas.read_csv",
"os.path.isfile",
"unittest.main",
"os.path.abspath"
] |
[((2856, 2871), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2869, 2871), False, 'import unittest\n'), ((188, 264), 'application.ParcelsParser.ParcelsParser', 'ParcelsParser', (['"""./test_cadastral_parcels.tsv"""', '"""cadastral_parcel_identifier"""'], {}), "('./test_cadastral_parcels.tsv', 'cadastral_parcel_identifier')\n", (201, 264), False, 'from application.ParcelsParser import ParcelsParser\n'), ((809, 840), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '"""\t"""'}), "(filepath, sep='\\t')\n", (820, 840), True, 'import pandas as pd\n'), ((1725, 1756), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '"""\t"""'}), "(filepath, sep='\\t')\n", (1736, 1756), True, 'import pandas as pd\n'), ((379, 404), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (393, 404), False, 'import os\n'), ((606, 631), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (620, 631), False, 'import os\n'), ((704, 729), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (719, 729), False, 'import os\n'), ((993, 1018), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1008, 1018), False, 'import os\n'), ((1620, 1645), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1635, 1645), False, 'import os\n')]
|
# -*- encoding: utf-8
"""
Copyright (c) 2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Stanford University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY <NAME> ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .gop import *
import numpy as np
from .util import *
LATEX_OUTPUT=True
for bnd in ['st','sf','mssf','ds']:
# Load the dataset
over_segs,segmentations,boxes = loadVOCAndOverSeg( "test", detector=bnd, year="2012" )
has_box = [len(b)>0 for b in boxes]
boxes = [np.vstack(b).astype(np.int32) if len(b)>0 else np.zeros((0,4),dtype=np.int32) for b in boxes]
# Generate the proposals
s = []
s.append( (50,5,0.7) ) # ~250 props
s.append( (100,5,0.75) ) # ~450 props
s.append( (180,5,0.8) ) # ~650 props
s.append( (200,7,0.85) ) # ~1100 props
s.append( (250,10,0.9) ) # ~2200 props
s.append( (290,20,0.9) ) # ~4400 props
for N_S,N_T,iou in s:
prop_settings = setupBaseline( N_S, N_T, iou )
bo,b_bo,pool_s,box_pool_s = dataset.proposeAndEvaluate( over_segs, segmentations, boxes, proposals.Proposal( prop_settings ) )
if LATEX_OUTPUT:
print(( "Baseline %s ($%d$,$%d$) & %d & %0.3f & %0.3f & %0.3f & %0.3f & \\\\"%(bnd, N_S,N_T,np.mean(pool_s),np.mean(bo[:,0]),np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]), np.mean(bo[:,0]>=0.5), np.mean(bo[:,0]>=0.7) ) ))
else:
print(( "ABO ", np.mean(bo[:,0]) ))
print(( "cover ", np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]) ))
print(( "recall ", np.mean(bo[:,0]>=0.5), "\t", np.mean(bo[:,0]>=0.6), "\t", np.mean(bo[:,0]>=0.7), "\t", np.mean(bo[:,0]>=0.8), "\t", np.mean(bo[:,0]>=0.9), "\t", np.mean(bo[:,0]>=1) ))
print(( "# props ", np.mean(pool_s) ))
print(( "box ABO ", np.mean(b_bo) ))
print(( "box recall ", np.mean(b_bo>=0.5), "\t", np.mean(b_bo>=0.6), "\t", np.mean(b_bo>=0.7), "\t", np.mean(b_bo>=0.8), "\t", np.mean(b_bo>=0.9), "\t", np.mean(b_bo>=1) ))
print(( "# box ", np.mean(box_pool_s[~np.isnan(box_pool_s)]) ))
|
[
"numpy.mean",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"numpy.vstack"
] |
[((1876, 1908), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.int32'}), '((0, 4), dtype=np.int32)\n', (1884, 1908), True, 'import numpy as np\n'), ((1829, 1841), 'numpy.vstack', 'np.vstack', (['b'], {}), '(b)\n', (1838, 1841), True, 'import numpy as np\n'), ((2666, 2683), 'numpy.mean', 'np.mean', (['bo[:, 0]'], {}), '(bo[:, 0])\n', (2673, 2683), True, 'import numpy as np\n'), ((2781, 2805), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.5)'], {}), '(bo[:, 0] >= 0.5)\n', (2788, 2805), True, 'import numpy as np\n'), ((2810, 2834), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.6)'], {}), '(bo[:, 0] >= 0.6)\n', (2817, 2834), True, 'import numpy as np\n'), ((2839, 2863), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.7)'], {}), '(bo[:, 0] >= 0.7)\n', (2846, 2863), True, 'import numpy as np\n'), ((2868, 2892), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.8)'], {}), '(bo[:, 0] >= 0.8)\n', (2875, 2892), True, 'import numpy as np\n'), ((2897, 2921), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.9)'], {}), '(bo[:, 0] >= 0.9)\n', (2904, 2921), True, 'import numpy as np\n'), ((2926, 2948), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 1)'], {}), '(bo[:, 0] >= 1)\n', (2933, 2948), True, 'import numpy as np\n'), ((2975, 2990), 'numpy.mean', 'np.mean', (['pool_s'], {}), '(pool_s)\n', (2982, 2990), True, 'import numpy as np\n'), ((3021, 3034), 'numpy.mean', 'np.mean', (['b_bo'], {}), '(b_bo)\n', (3028, 3034), True, 'import numpy as np\n'), ((3064, 3084), 'numpy.mean', 'np.mean', (['(b_bo >= 0.5)'], {}), '(b_bo >= 0.5)\n', (3071, 3084), True, 'import numpy as np\n'), ((3090, 3110), 'numpy.mean', 'np.mean', (['(b_bo >= 0.6)'], {}), '(b_bo >= 0.6)\n', (3097, 3110), True, 'import numpy as np\n'), ((3116, 3136), 'numpy.mean', 'np.mean', (['(b_bo >= 0.7)'], {}), '(b_bo >= 0.7)\n', (3123, 3136), True, 'import numpy as np\n'), ((3142, 3162), 'numpy.mean', 'np.mean', (['(b_bo >= 0.8)'], {}), '(b_bo >= 0.8)\n', (3149, 3162), True, 'import numpy as np\n'), ((3168, 3188), 'numpy.mean', 'np.mean', (['(b_bo 
>= 0.9)'], {}), '(b_bo >= 0.9)\n', (3175, 3188), True, 'import numpy as np\n'), ((3194, 3212), 'numpy.mean', 'np.mean', (['(b_bo >= 1)'], {}), '(b_bo >= 1)\n', (3201, 3212), True, 'import numpy as np\n'), ((2508, 2523), 'numpy.mean', 'np.mean', (['pool_s'], {}), '(pool_s)\n', (2515, 2523), True, 'import numpy as np\n'), ((2524, 2541), 'numpy.mean', 'np.mean', (['bo[:, 0]'], {}), '(bo[:, 0])\n', (2531, 2541), True, 'import numpy as np\n'), ((2582, 2606), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.5)'], {}), '(bo[:, 0] >= 0.5)\n', (2589, 2606), True, 'import numpy as np\n'), ((2605, 2629), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.7)'], {}), '(bo[:, 0] >= 0.7)\n', (2612, 2629), True, 'import numpy as np\n'), ((2712, 2739), 'numpy.sum', 'np.sum', (['(bo[:, 0] * bo[:, 1])'], {}), '(bo[:, 0] * bo[:, 1])\n', (2718, 2739), True, 'import numpy as np\n'), ((2736, 2752), 'numpy.sum', 'np.sum', (['bo[:, 1]'], {}), '(bo[:, 1])\n', (2742, 2752), True, 'import numpy as np\n'), ((2541, 2568), 'numpy.sum', 'np.sum', (['(bo[:, 0] * bo[:, 1])'], {}), '(bo[:, 0] * bo[:, 1])\n', (2547, 2568), True, 'import numpy as np\n'), ((2565, 2581), 'numpy.sum', 'np.sum', (['bo[:, 1]'], {}), '(bo[:, 1])\n', (2571, 2581), True, 'import numpy as np\n'), ((3260, 3280), 'numpy.isnan', 'np.isnan', (['box_pool_s'], {}), '(box_pool_s)\n', (3268, 3280), True, 'import numpy as np\n')]
|
import pickle
import pandas as pd
# cat aa ab ac > dataset.pkl from https://github.com/zhougr1993/DeepInterestNetwork
with open('dataset.pkl', 'rb') as f:
train_set = pickle.load(f, encoding='bytes')
test_set = pickle.load(f, encoding='bytes')
cate_list = pickle.load(f, encoding='bytes')
user_count, item_count, cate_count = pickle.load(f, encoding='bytes')
train_data = []
for sample in train_set:
user_id = sample[0]
item_id = sample[2]
item_history = "^".join([str(i) for i in sample[1]])
label = sample[3]
cate_id = cate_list[item_id]
cate_history = "^".join([str(i) for i in cate_list[sample[1]]])
train_data.append([label, user_id, item_id, cate_id, item_history, cate_history])
train_df = pd.DataFrame(train_data, columns=['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history'])
train_df.to_csv("train.csv", index=False)
test_data = []
for sample in test_set:
user_id = sample[0]
item_pair = sample[2]
item_history = "^".join([str(i) for i in sample[1]])
cate_history = "^".join([str(i) for i in cate_list[sample[1]]])
test_data.append([1, user_id, item_pair[0], cate_list[item_pair[0]], item_history, cate_history])
test_data.append([0, user_id, item_pair[1], cate_list[item_pair[1]], item_history, cate_history])
test_df = pd.DataFrame(test_data, columns=['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history'])
test_df.to_csv("test.csv", index=False)
|
[
"pandas.DataFrame",
"pickle.load"
] |
[((744, 856), 'pandas.DataFrame', 'pd.DataFrame', (['train_data'], {'columns': "['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history']"}), "(train_data, columns=['label', 'user_id', 'item_id', 'cate_id',\n 'item_history', 'cate_history'])\n", (756, 856), True, 'import pandas as pd\n'), ((1324, 1435), 'pandas.DataFrame', 'pd.DataFrame', (['test_data'], {'columns': "['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history']"}), "(test_data, columns=['label', 'user_id', 'item_id', 'cate_id',\n 'item_history', 'cate_history'])\n", (1336, 1435), True, 'import pandas as pd\n'), ((173, 205), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (184, 205), False, 'import pickle\n'), ((221, 253), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (232, 253), False, 'import pickle\n'), ((270, 302), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (281, 302), False, 'import pickle\n'), ((344, 376), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (355, 376), False, 'import pickle\n')]
|
from email_extras.settings import USE_GNUPG
if USE_GNUPG:
from django.contrib import admin
from email_extras.models import Key, Address
from email_extras.forms import KeyForm
class KeyAdmin(admin.ModelAdmin):
form = KeyForm
list_display = ('__str__', 'email_addresses')
readonly_fields = ('fingerprint', )
class AddressAdmin(admin.ModelAdmin):
list_display = ('__str__', 'key')
readonly_fields = ('key', )
def has_add_permission(self, request):
return False
admin.site.register(Key, KeyAdmin)
admin.site.register(Address, AddressAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((572, 606), 'django.contrib.admin.site.register', 'admin.site.register', (['Key', 'KeyAdmin'], {}), '(Key, KeyAdmin)\n', (591, 606), False, 'from django.contrib import admin\n'), ((612, 654), 'django.contrib.admin.site.register', 'admin.site.register', (['Address', 'AddressAdmin'], {}), '(Address, AddressAdmin)\n', (631, 654), False, 'from django.contrib import admin\n')]
|
import boto3
import argparse
import os,sys
def main(argv=None):
argv = (argv or sys.argv)[1:]
parser = argparse.ArgumentParser(description='dump all aws log streams into files')
parser.add_argument("--profile",
dest="aws_profile",
type=str,
default=os.environ.get('AWS_PROFILE', None),
help="aws profile")
parser.add_argument("-o", "--output",
type=str,
dest='output',
default=".",
help="output folder")
parser.add_argument('group_name',help='aws loggroup name')
options,args = parser.parse_known_args(argv)
options.aws_profile
options.output
options.group_name
"""
main logic
"""
client = boto3.client('logs')
aws_profile = options.aws_profile
group_name = options.group_name
output_folder = options.output
stream_list=[]
stream_response = client.describe_log_streams(
logGroupName=group_name,
orderBy='LastEventTime',
limit=50,
)
while True:
stream_name_arr = stream_response['logStreams']
for stream_elm in stream_name_arr:
stream_name = stream_elm['logStreamName']
stream_list.append(stream_name)
if "nextToken" in stream_response:
next_token = stream_response['nextToken']
stream_response = client.describe_log_streams(
logGroupName=group_name,
orderBy='LastEventTime',
nextToken=next_token,
limit=50,
)
else:
break
print("loggroup {} has total {} streams".format(group_name,len(stream_list)))
for s_name in stream_list:
file_name=s_name.replace("[$LATEST]", "").replace("/","-")
stream_content= client.get_log_events(
logGroupName=group_name,
logStreamName=s_name,
)
print("{} ==> {}".format(s_name,file_name))
completeName = os.path.join(output_folder, file_name)
with open(completeName, "w") as text_file:
text_file.write("{}".format(stream_content))
print("Done.")
|
[
"os.path.join",
"boto3.client",
"os.environ.get",
"argparse.ArgumentParser"
] |
[((114, 188), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dump all aws log streams into files"""'}), "(description='dump all aws log streams into files')\n", (137, 188), False, 'import argparse\n'), ((824, 844), 'boto3.client', 'boto3.client', (['"""logs"""'], {}), "('logs')\n", (836, 844), False, 'import boto3\n'), ((2067, 2105), 'os.path.join', 'os.path.join', (['output_folder', 'file_name'], {}), '(output_folder, file_name)\n', (2079, 2105), False, 'import os, sys\n'), ((336, 371), 'os.environ.get', 'os.environ.get', (['"""AWS_PROFILE"""', 'None'], {}), "('AWS_PROFILE', None)\n", (350, 371), False, 'import os, sys\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from matplotlib.patches import Ellipse
# For reproducibility
np.random.seed(1000)
nb_samples = 300
nb_centers = 2
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=nb_centers,
cluster_std=[1.0, 0.6], random_state=1000)
# Show the dataset
sns.set()
fig, ax = plt.subplots(figsize=(15, 9))
ax.scatter(X[:, 0], X[:, 1], s=120)
ax.set_xlabel(r'$x_0$', fontsize=14)
ax.set_ylabel(r'$x_1$', fontsize=14)
plt.show()
# Train the model
gm = GaussianMixture(n_components=2, random_state=1000)
gm.fit(X)
Y_pred = gm.fit_predict(X)
print('Means: \n{}'.format(gm.means_))
print('Covariance matrices: \n{}'.format(gm.covariances_))
print('Weights: \n{}'.format(gm.weights_))
m1 = gm.means_[0]
m2 = gm.means_[1]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
# Eigendecompose the covariances
w1, v1 = np.linalg.eigh(c1)
w2, v2 = np.linalg.eigh(c2)
nv1 = v1 / np.linalg.norm(v1)
nv2 = v2 / np.linalg.norm(v2)
print('Eigenvalues 1: \n{}'.format(w1))
print('Eigenvectors 1: \n{}'.format(nv1))
print('Eigenvalues 2: \n{}'.format(w2))
print('Eigenvectors 2: \n{}'.format(nv2))
a1 = np.arccos(np.dot(nv1[:, 1], [1.0, 0.0]) / np.linalg.norm(nv1[:, 1])) * 180.0 / np.pi
a2 = np.arccos(np.dot(nv2[:, 1], [1.0, 0.0]) / np.linalg.norm(nv2[:, 1])) * 180.0 / np.pi
# Perform K-Means clustering
km = KMeans(n_clusters=2, random_state=1000)
km.fit(X)
Y_pred_km = km.predict(X)
# Show the comparison of the results
fig, ax = plt.subplots(1, 2, figsize=(22, 9), sharey=True)
ax[0].scatter(X[Y_pred == 0, 0], X[Y_pred == 0, 1], s=80, marker='o', label='Gaussian 1')
ax[0].scatter(X[Y_pred == 1, 0], X[Y_pred == 1, 1], s=80, marker='d', label='Gaussian 2')
g1 = Ellipse(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=1)
g1_1 = Ellipse(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=2)
g1_2 = Ellipse(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False, linestyle='dashed', angle=a1,
color='black', linewidth=3)
g2 = Ellipse(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=1)
g2_1 = Ellipse(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=2)
g2_2 = Ellipse(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False, linestyle='dashed', angle=a2,
color='black', linewidth=3)
ax[0].set_xlabel(r'$x_0$', fontsize=16)
ax[0].set_ylabel(r'$x_1$', fontsize=16)
ax[0].add_artist(g1)
ax[0].add_artist(g1_1)
ax[0].add_artist(g1_2)
ax[0].add_artist(g2)
ax[0].add_artist(g2_1)
ax[0].add_artist(g2_2)
ax[0].set_title('Gaussian Mixture', fontsize=16)
ax[0].legend(fontsize=16)
ax[1].scatter(X[Y_pred_km == 0, 0], X[Y_pred_km == 0, 1], s=80, marker='o', label='Cluster 1')
ax[1].scatter(X[Y_pred_km == 1, 0], X[Y_pred_km == 1, 1], s=80, marker='d', label='Cluster 2')
ax[1].set_xlabel(r'$x_0$', fontsize=16)
ax[1].set_title('K-Means', fontsize=16)
ax[1].legend(fontsize=16)
# Predict the probability of some sample points
print('P([0, -2]=G1) = {:.3f} and P([0, -2]=G2) = {:.3f}'.format(*list(gm.predict_proba([[0.0, -2.0]]).squeeze())))
print('P([1, -1]=G1) = {:.3f} and P([1, -1]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, -1.0]]).squeeze())))
print('P([1, 0]=G1) = {:.3f} and P([1, 0]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, 0.0]]).squeeze())))
plt.show()
# Compute AICs, BICs, and log-likelihood
n_max_components = 20
aics = []
bics = []
log_likelihoods = []
for n in range(1, n_max_components + 1):
gm = GaussianMixture(n_components=n, random_state=1000)
gm.fit(X)
aics.append(gm.aic(X))
bics.append(gm.bic(X))
log_likelihoods.append(gm.score(X) * nb_samples)
# Show the results
fig, ax = plt.subplots(1, 3, figsize=(20, 6))
ax[0].plot(range(1, n_max_components + 1), aics)
ax[0].set_xticks(range(1, n_max_components + 1))
ax[0].set_xlabel('Number of Gaussians', fontsize=14)
ax[0].set_title('AIC', fontsize=14)
ax[1].plot(range(1, n_max_components + 1), bics)
ax[1].set_xticks(range(1, n_max_components + 1))
ax[1].set_xlabel('Number of Gaussians', fontsize=14)
ax[1].set_title('BIC', fontsize=14)
ax[2].plot(range(1, n_max_components + 1), log_likelihoods)
ax[2].set_xticks(range(1, n_max_components + 1))
ax[2].set_xlabel('Number of Gaussians', fontsize=14)
ax[2].set_title('Log-likelihood', fontsize=14)
plt.show()
|
[
"sklearn.cluster.KMeans",
"seaborn.set",
"sklearn.mixture.GaussianMixture",
"sklearn.datasets.make_blobs",
"numpy.dot",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.linalg.eigh",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((256, 276), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (270, 276), True, 'import numpy as np\n'), ((376, 510), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'nb_samples', 'n_features': '(2)', 'center_box': '[-1, 1]', 'centers': 'nb_centers', 'cluster_std': '[1.0, 0.6]', 'random_state': '(1000)'}), '(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=\n nb_centers, cluster_std=[1.0, 0.6], random_state=1000)\n', (386, 510), False, 'from sklearn.datasets import make_blobs\n'), ((556, 565), 'seaborn.set', 'sns.set', ([], {}), '()\n', (563, 565), True, 'import seaborn as sns\n'), ((581, 610), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 9)'}), '(figsize=(15, 9))\n', (593, 610), True, 'import matplotlib.pyplot as plt\n'), ((740, 750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (748, 750), True, 'import matplotlib.pyplot as plt\n'), ((783, 833), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(2)', 'random_state': '(1000)'}), '(n_components=2, random_state=1000)\n', (798, 833), False, 'from sklearn.mixture import GaussianMixture\n'), ((1245, 1263), 'numpy.linalg.eigh', 'np.linalg.eigh', (['c1'], {}), '(c1)\n', (1259, 1263), True, 'import numpy as np\n'), ((1277, 1295), 'numpy.linalg.eigh', 'np.linalg.eigh', (['c2'], {}), '(c2)\n', (1291, 1295), True, 'import numpy as np\n'), ((1779, 1818), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)', 'random_state': '(1000)'}), '(n_clusters=2, random_state=1000)\n', (1785, 1818), False, 'from sklearn.cluster import KMeans\n'), ((1919, 1967), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(22, 9)', 'sharey': '(True)'}), '(1, 2, figsize=(22, 9), sharey=True)\n', (1931, 1967), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2291), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 3)', 'height': '(w1[0] * 3)', 'fill': '(False)', 'linestyle': 
'"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(1)'}), "(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False, linestyle=\n 'dashed', angle=a1, color='black', linewidth=1)\n", (2174, 2291), False, 'from matplotlib.patches import Ellipse\n'), ((2315, 2439), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 2)', 'height': '(w1[0] * 2)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(2)'}), "(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False, linestyle=\n 'dashed', angle=a1, color='black', linewidth=2)\n", (2322, 2439), False, 'from matplotlib.patches import Ellipse\n'), ((2465, 2593), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 1.4)', 'height': '(w1[0] * 1.4)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(3)'}), "(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False, linestyle\n ='dashed', angle=a1, color='black', linewidth=3)\n", (2472, 2593), False, 'from matplotlib.patches import Ellipse\n'), ((2618, 2742), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 3)', 'height': '(w2[0] * 3)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(1)'}), "(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False, linestyle=\n 'dashed', angle=a2, color='black', linewidth=1)\n", (2625, 2742), False, 'from matplotlib.patches import Ellipse\n'), ((2766, 2890), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 2)', 'height': '(w2[0] * 2)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(2)'}), "(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False, linestyle=\n 'dashed', angle=a2, color='black', linewidth=2)\n", (2773, 2890), False, 'from matplotlib.patches import Ellipse\n'), ((2916, 3044), 'matplotlib.patches.Ellipse', 
'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 1.4)', 'height': '(w2[0] * 1.4)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(3)'}), "(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False, linestyle\n ='dashed', angle=a2, color='black', linewidth=3)\n", (2923, 3044), False, 'from matplotlib.patches import Ellipse\n'), ((4127, 4137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4135, 4137), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4584), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 6)'}), '(1, 3, figsize=(20, 6))\n', (4561, 4584), True, 'import matplotlib.pyplot as plt\n'), ((5227, 5237), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5235, 5237), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1330), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (1326, 1330), True, 'import numpy as np\n'), ((1346, 1364), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (1360, 1364), True, 'import numpy as np\n'), ((4323, 4373), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n', 'random_state': '(1000)'}), '(n_components=n, random_state=1000)\n', (4338, 4373), False, 'from sklearn.mixture import GaussianMixture\n'), ((1567, 1596), 'numpy.dot', 'np.dot', (['nv1[:, 1]', '[1.0, 0.0]'], {}), '(nv1[:, 1], [1.0, 0.0])\n', (1573, 1596), True, 'import numpy as np\n'), ((1599, 1624), 'numpy.linalg.norm', 'np.linalg.norm', (['nv1[:, 1]'], {}), '(nv1[:, 1])\n', (1613, 1624), True, 'import numpy as np\n'), ((1661, 1690), 'numpy.dot', 'np.dot', (['nv2[:, 1]', '[1.0, 0.0]'], {}), '(nv2[:, 1], [1.0, 0.0])\n', (1667, 1690), True, 'import numpy as np\n'), ((1693, 1718), 'numpy.linalg.norm', 'np.linalg.norm', (['nv2[:, 1]'], {}), '(nv2[:, 1])\n', (1707, 1718), True, 'import numpy as np\n')]
|
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Queue
from foreverbull.worker.worker import WorkerHandler
from foreverbull_core.models.finance import EndOfDay
from foreverbull_core.models.socket import Request
from foreverbull_core.models.worker import Instance
from foreverbull_core.socket.client import ContextClient, SocketClient
from foreverbull_core.socket.exceptions import SocketClosed, SocketTimeout
from foreverbull_core.socket.router import MessageRouter
class Foreverbull(threading.Thread):
_worker_routes = {}
def __init__(self, socket: SocketClient = None, executors: int = 1):
self.socket = socket
self.running = False
self.logger = logging.getLogger(__name__)
self._worker_requests = Queue()
self._worker_responses = Queue()
self._workers: list[WorkerHandler] = []
self.executors = executors
self._routes = MessageRouter()
self._routes.add_route(self.stop, "backtest_completed")
self._routes.add_route(self._configure, "configure", Instance)
self._routes.add_route(self._stock_data, "stock_data", EndOfDay)
self._request_thread: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=5)
threading.Thread.__init__(self)
@staticmethod
def on(msg_type):
def decorator(t):
Foreverbull._worker_routes[msg_type] = t
return t
return decorator
def run(self):
self.running = True
self.logger.info("Starting instance")
while self.running:
try:
context_socket = self.socket.new_context()
request = context_socket.recv()
self._request_thread.submit(self._process_request, context_socket, request)
except (SocketClosed, SocketTimeout):
self.logger.info("main socket closed, exiting")
return
self.socket.close()
self.logger.info("exiting")
def _process_request(self, socket: ContextClient, request: Request):
try:
self.logger.debug(f"recieved task: {request.task}")
response = self._routes(request)
socket.send(response)
self.logger.debug(f"reply sent for task: {response.task}")
socket.close()
except (SocketTimeout, SocketClosed) as exc:
self.logger.warning(f"Unable to process context socket: {exc}")
pass
except Exception as exc:
self.logger.error("unknown excetion when processing context socket")
self.logger.exception(exc)
def stop(self):
self.logger.info("Stopping instance")
self.running = False
for worker in self._workers:
worker.stop()
self._workers = []
def _configure(self, instance_configuration: Instance):
for _ in range(self.executors):
w = WorkerHandler(instance_configuration, **self._worker_routes)
self._workers.append(w)
return
def _stock_data(self, message: EndOfDay):
for worker in self._workers:
if worker.locked():
continue
if worker.acquire():
break
else:
raise Exception("workers are not initialized")
try:
worker.process(message)
except Exception as exc:
self.logger.error("Error processing to worker")
self.logger.exception(exc)
worker.release()
|
[
"logging.getLogger",
"threading.Thread.__init__",
"concurrent.futures.ThreadPoolExecutor",
"foreverbull_core.socket.router.MessageRouter",
"foreverbull.worker.worker.WorkerHandler",
"multiprocessing.Queue"
] |
[((745, 772), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (762, 772), False, 'import logging\n'), ((805, 812), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (810, 812), False, 'from multiprocessing import Queue\n'), ((846, 853), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (851, 853), False, 'from multiprocessing import Queue\n'), ((960, 975), 'foreverbull_core.socket.router.MessageRouter', 'MessageRouter', ([], {}), '()\n', (973, 975), False, 'from foreverbull_core.socket.router import MessageRouter\n'), ((1235, 1268), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(5)'}), '(max_workers=5)\n', (1253, 1268), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1277, 1308), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1302, 1308), False, 'import threading\n'), ((2945, 3005), 'foreverbull.worker.worker.WorkerHandler', 'WorkerHandler', (['instance_configuration'], {}), '(instance_configuration, **self._worker_routes)\n', (2958, 3005), False, 'from foreverbull.worker.worker import WorkerHandler\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `AffiliationSearch` module."""
from collections import namedtuple
from nose.tools import assert_equal, assert_true
import scopus
s = scopus.AffiliationSearch('af-id(60021784)', refresh=True)
def test_affiliations():
received = s.affiliations
assert_true(isinstance(received, list))
order = 'eid name variant documents city country parent'
Affiliation = namedtuple('Affiliation', order)
expected = [Affiliation(eid='10-s2.0-60021784', name='<NAME>',
variant='', documents='101148', city='New York',
country='United States', parent='0')]
assert_equal(received, expected)
|
[
"nose.tools.assert_equal",
"scopus.AffiliationSearch",
"collections.namedtuple"
] |
[((196, 253), 'scopus.AffiliationSearch', 'scopus.AffiliationSearch', (['"""af-id(60021784)"""'], {'refresh': '(True)'}), "('af-id(60021784)', refresh=True)\n", (220, 253), False, 'import scopus\n'), ((435, 467), 'collections.namedtuple', 'namedtuple', (['"""Affiliation"""', 'order'], {}), "('Affiliation', order)\n", (445, 467), False, 'from collections import namedtuple\n'), ((658, 690), 'nose.tools.assert_equal', 'assert_equal', (['received', 'expected'], {}), '(received, expected)\n', (670, 690), False, 'from nose.tools import assert_equal, assert_true\n')]
|
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from distutils.sysconfig import parse_makefile
from osbuild import config
from osbuild import command
_dist_builders = {}
def dist_one(module_name):
for module in config.load_modules():
if module.name == module_name:
return _dist_module(module)
return False
def dist():
shutil.rmtree(config.get_dist_dir(), ignore_errors=True)
modules = config.load_modules()
for module in modules:
if not _dist_module(module):
return False
return True
def _dist_module(module):
if not module.dist:
return True
print("* Creating %s distribution" % module.name)
return _dist_builders[module.build_system](module)
def _autotools_dist_builder(module):
source_dir = module.get_source_dir()
os.chdir(source_dir)
command.run(["make", "distcheck"])
makefile = parse_makefile(os.path.join(source_dir, "Makefile"))
tarball = "%s-%s.tar.xz" % (module.name, makefile["VERSION"])
shutil.move(os.path.join(source_dir, tarball),
os.path.join(config.get_dist_dir(), tarball))
return True
_dist_builders['autotools'] = _autotools_dist_builder
|
[
"osbuild.config.get_dist_dir",
"osbuild.config.load_modules",
"os.path.join",
"os.chdir",
"osbuild.command.run"
] |
[((767, 788), 'osbuild.config.load_modules', 'config.load_modules', ([], {}), '()\n', (786, 788), False, 'from osbuild import config\n'), ((977, 998), 'osbuild.config.load_modules', 'config.load_modules', ([], {}), '()\n', (996, 998), False, 'from osbuild import config\n'), ((1372, 1392), 'os.chdir', 'os.chdir', (['source_dir'], {}), '(source_dir)\n', (1380, 1392), False, 'import os\n'), ((1397, 1431), 'osbuild.command.run', 'command.run', (["['make', 'distcheck']"], {}), "(['make', 'distcheck'])\n", (1408, 1431), False, 'from osbuild import command\n'), ((919, 940), 'osbuild.config.get_dist_dir', 'config.get_dist_dir', ([], {}), '()\n', (938, 940), False, 'from osbuild import config\n'), ((1463, 1499), 'os.path.join', 'os.path.join', (['source_dir', '"""Makefile"""'], {}), "(source_dir, 'Makefile')\n", (1475, 1499), False, 'import os\n'), ((1584, 1617), 'os.path.join', 'os.path.join', (['source_dir', 'tarball'], {}), '(source_dir, tarball)\n', (1596, 1617), False, 'import os\n'), ((1648, 1669), 'osbuild.config.get_dist_dir', 'config.get_dist_dir', ([], {}), '()\n', (1667, 1669), False, 'from osbuild import config\n')]
|
from django import template
from week.models import SidebarContentPage,SidebarImagePage
register = template.Library()
@register.inclusion_tag('week/announcement.html')
def sidebar():
sidebar_data = SidebarContentPage.objects.get()
return {'sidebar_data':sidebar_data}
@register.inclusion_tag('week/advertisement.html')
def sidebarimage():
sidebar_image = SidebarImagePage.objects.get()
return {'sidebar_image':sidebar_image}
|
[
"week.models.SidebarContentPage.objects.get",
"week.models.SidebarImagePage.objects.get",
"django.template.Library"
] |
[((100, 118), 'django.template.Library', 'template.Library', ([], {}), '()\n', (116, 118), False, 'from django import template\n'), ((205, 237), 'week.models.SidebarContentPage.objects.get', 'SidebarContentPage.objects.get', ([], {}), '()\n', (235, 237), False, 'from week.models import SidebarContentPage, SidebarImagePage\n'), ((372, 402), 'week.models.SidebarImagePage.objects.get', 'SidebarImagePage.objects.get', ([], {}), '()\n', (400, 402), False, 'from week.models import SidebarContentPage, SidebarImagePage\n')]
|
import os
import requests
import time
import uuid
import configparser
import datetime
import fbchat
import re
from fbchat import Client, ImageAttachment
from fbchat import FBchatException
from pathlib import Path
politeness_index = 0.5 # ;)
epoch = datetime.datetime(1970, 1, 1)
# Hack to get the login to work, see: https://github.com/fbchat-dev/fbchat/issues/615#issuecomment-716089816
fbchat._state.FB_DTSG_REGEX = re.compile(r'"name":"fb_dtsg","value":"(.*?)"')
def download_file_from_url(url, target_path):
"""
Download image from a given URL to a specified target path.
:param url: URL of file to download
:param target_path: Local target path to save the file
:type url: str
:type target_path: str
"""
if url is not None:
r = requests.get(url)
with open(target_path, 'wb') as f:
print('\tDownloading image to {path}'.format(path=target_path))
f.write(r.content)
def convert_date_to_epoch(date, as_int=True):
"""
Convert a given date string to epoch (int in milliseconds)
:param date: Date string (preferred format %Y-%m-%d)
:param as_int: Return unix timestamp as an integer value, instead of a float
:type date: str
:type as_int: int
:return: int
"""
try:
dt = datetime.datetime.strptime(date, '%Y-%m-%d')
res = ((dt - epoch).total_seconds() * 1000.0) # convert to milliseconds
return int(res) if as_int else res
except ValueError:
return None
def convert_epoch_to_datetime(timestamp, dt_format='%Y-%m-%d_%H.%M.%S'):
"""
Convert epoch (unix time in ms) to a datetime string
:param timestamp: Unix time in ms
:param dt_format: Format of datetime string
:type timestamp: str
:type dt_format: str
:return:
"""
s = int(timestamp) / 1000.0
dt_str = datetime.datetime.fromtimestamp(s).strftime(dt_format)
return dt_str
if __name__ == '__main__':
config_path = Path('.') / 'config.ini'
if os.path.exists(config_path) is False:
raise Exception("Please create config.ini under this script's current directory")
# Load config file
config = configparser.ConfigParser()
config.read(config_path)
download_path = config.get('Download', 'path')
if os.path.exists(download_path) is False:
raise Exception("The path specified in download_path does not exist ({path}). Please specify a valid path in "
"config.ini".format(path=download_path))
# Initialize FB Client
fb_email = config.get('Credentials', 'email')
fb_pw = config.get('Credentials', 'password')
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
fb_client = Client(fb_email, fb_pw, user_agent=user_agent)
# Search for latest threads
thread_search_limit = int(config.get('Threads', 'search_limit'))
thread_search_before = convert_date_to_epoch(config.get('Threads', 'before_date'))
if thread_search_before is not None:
threads = fb_client.fetchThreadList(limit=thread_search_limit, before=thread_search_before)
else:
threads = fb_client.fetchThreadList(limit=thread_search_limit)
# Find correct thread for given user URL
my_thread = None
friend_url = config.get('Friend', 'url')
for thread in threads:
if hasattr(thread, 'url') and (thread.url == friend_url):
my_thread = thread
break
# Get Messages for my_thread
if my_thread is not None:
thread_message_count = my_thread.message_count
thread_message_name = my_thread.name
print('Found {count} messages in thread with {friend_name}'.format(count=thread_message_count,
friend_name=thread_message_name))
message_before_date = config.get('Messages', 'before_date')
message_search_limit = int(config.get('Messages', 'search_limit'))
message_search_before = convert_date_to_epoch(message_before_date)
if message_search_limit > thread_message_count:
message_search_limit = thread_message_count
print('\tWarning: Message search limit was greater than the total number of messages in thread.\n')
if message_search_before is not None:
messages = fb_client.fetchThreadMessages(my_thread.uid, limit=message_search_limit,
before=message_search_before)
print('Searching for images in the {message_limit} messages sent before {before_date}...'.format(
message_limit=message_search_limit, before_date=message_before_date))
else:
messages = fb_client.fetchThreadMessages(my_thread.uid, limit=message_search_limit)
print('Searching for images in the last {message_limit} messages...'.format(
message_limit=message_search_limit))
sender_id = None
if config.getboolean('Media', 'sender_only'):
sender_id = my_thread.uid
print('\tNote: Only images sent by {friend_name} will be downloaded (as specified by sender_only in your '
'config.ini)'.format(friend_name=thread_message_name))
# Extract Image attachments' full-sized image signed URLs (along with their original file extension)
total_count = 0
skip_count = 0
full_images = []
last_message_date = None
print('\n')
extension_blacklist = str.split(config.get('Media', 'ext_blacklist'), ',')
for message in messages:
message_datetime = convert_epoch_to_datetime(message.timestamp)
if len(message.attachments) > 0:
if (sender_id is None) or (sender_id == message.author):
for attachment in message.attachments:
if isinstance(attachment, ImageAttachment):
try:
attachment_ext = str.lower(attachment.original_extension)
if attachment_ext not in extension_blacklist:
full_images.append({
'extension': attachment_ext,
'timestamp': message_datetime,
'full_url': fb_client.fetchImageUrl(attachment.uid)
})
print('+', sep=' ', end='', flush=True)
else:
skip_count += 1
print('-', sep=' ', end='', flush=True)
total_count += 1
except FBchatException:
pass # ignore errors
last_message_date = message_datetime
# Download Full Images
if len(full_images) > 0:
images_count = len(full_images)
print('\n\nFound a total of {total_count} images. Skipped {skip_count} images that had a blacklisted '
'extension'.format(total_count=total_count, skip_count=skip_count))
print('Attempting to download {count} images...................\n'.format(count=images_count))
for full_image in full_images:
friend_name = str.lower(my_thread.name).replace(' ', '_')
file_uid = str(uuid.uuid4())
file_ext = full_image['extension']
file_timestamp = full_image['timestamp']
img_url = full_image['full_url']
image_path = ''.join([download_path, '\\', 'fb-image-', file_uid, '-', friend_name, '-',
file_timestamp, '.', file_ext])
download_file_from_url(img_url, image_path)
# Sleep half a second between file downloads to avoid getting flagged as a bot
time.sleep(politeness_index)
else:
print('No images to download in the last {count} messages'.format(count=message_search_limit))
# Reminder of last message found
print('\nLast message scanned for image attachments was dated: {last_message_date}'.format(
last_message_date=last_message_date))
else:
print('Thread not found for URL provided')
|
[
"datetime.datetime",
"os.path.exists",
"datetime.datetime.fromtimestamp",
"fbchat.Client",
"configparser.ConfigParser",
"re.compile",
"datetime.datetime.strptime",
"pathlib.Path",
"requests.get",
"time.sleep",
"uuid.uuid4"
] |
[((252, 281), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (269, 281), False, 'import datetime\n'), ((422, 468), 're.compile', 're.compile', (['""""name":"fb_dtsg","value":"(.*?)\\""""'], {}), '(\'"name":"fb_dtsg","value":"(.*?)"\')\n', (432, 468), False, 'import re\n'), ((2168, 2195), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2193, 2195), False, 'import configparser\n'), ((2792, 2838), 'fbchat.Client', 'Client', (['fb_email', 'fb_pw'], {'user_agent': 'user_agent'}), '(fb_email, fb_pw, user_agent=user_agent)\n', (2798, 2838), False, 'from fbchat import Client, ImageAttachment\n'), ((780, 797), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (792, 797), False, 'import requests\n'), ((1295, 1339), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (1321, 1339), False, 'import datetime\n'), ((1971, 1980), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (1975, 1980), False, 'from pathlib import Path\n'), ((2003, 2030), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (2017, 2030), False, 'import os\n'), ((2284, 2313), 'os.path.exists', 'os.path.exists', (['download_path'], {}), '(download_path)\n', (2298, 2313), False, 'import os\n'), ((1851, 1885), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['s'], {}), '(s)\n', (1882, 1885), False, 'import datetime\n'), ((8049, 8077), 'time.sleep', 'time.sleep', (['politeness_index'], {}), '(politeness_index)\n', (8059, 8077), False, 'import time\n'), ((7529, 7541), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7539, 7541), False, 'import uuid\n')]
|
from django.contrib.auth.models import AbstractUser
from django.db.models import (BooleanField, CASCADE, CharField, FloatField,
IntegerField, ManyToManyField, Model,
OneToOneField, PositiveSmallIntegerField)
from django.contrib.postgres.fields import ArrayField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_("Name of User"), blank=True, max_length=255)
# is_customer = BooleanField(default=True) #
# user = OneToOneField(User, on_delete=CASCADE, primary_key=True)
skills = ArrayField(CharField(max_length=10, blank=True),
size=8, null=True,
)
# ArrayField(_("A list of skills that user can help with"), null=True,
# base_field=CharField(max_length=255))
classes_taken = ArrayField(null=True,
base_field=CharField(max_length=255),
size=20)
is_teachingassistant = BooleanField(default=False)
rating = IntegerField(null=True, blank=True)
avg_reponse = FloatField(null=True, blank=True)
is_online = BooleanField(default=False)
messages_received = IntegerField(null=True, blank=True)
bio = CharField(blank=True, max_length=500)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
|
[
"django.utils.translation.ugettext_lazy",
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.urls.reverse",
"django.db.models.CharField"
] |
[((1176, 1203), 'django.db.models.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1188, 1203), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1217, 1252), 'django.db.models.IntegerField', 'IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1229, 1252), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1271, 1304), 'django.db.models.FloatField', 'FloatField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1281, 1304), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1321, 1348), 'django.db.models.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1333, 1348), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1373, 1408), 'django.db.models.IntegerField', 'IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1385, 1408), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1419, 1456), 'django.db.models.CharField', 'CharField', ([], {'blank': '(True)', 'max_length': '(500)'}), '(blank=True, max_length=500)\n', (1428, 1456), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((586, 603), 'django.utils.translation.ugettext_lazy', '_', (['"""Name of User"""'], {}), "('Name of User')\n", (587, 603), True, 
'from django.utils.translation import ugettext_lazy as _\n'), ((776, 812), 'django.db.models.CharField', 'CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (785, 812), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n'), ((1505, 1564), 'django.urls.reverse', 'reverse', (['"""users:detail"""'], {'kwargs': "{'username': self.username}"}), "('users:detail', kwargs={'username': self.username})\n", (1512, 1564), False, 'from django.urls import reverse\n'), ((1082, 1107), 'django.db.models.CharField', 'CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1091, 1107), False, 'from django.db.models import BooleanField, CASCADE, CharField, FloatField, IntegerField, ManyToManyField, Model, OneToOneField, PositiveSmallIntegerField\n')]
|
#!/usr/bin/env python3
from sys import argv
from pathlib import Path
from re import compile as re_compile
PACKAGE_RE = re_compile("symbiflow-arch-defs-([a-zA-Z0-9_-]+)-([a-z0-9])")
with (Path(__file__).parent.parent.parent / 'packages.list').open('r') as rptr:
for artifact in rptr.read().splitlines():
m = PACKAGE_RE.match(artifact)
assert m, f"Package name not recognized! {artifact}"
package_name = m.group(1)
if package_name == "install":
package_name == "toolchain"
with (Path("install") /
f"symbiflow-{package_name}-latest").open("w") as wptr:
wptr.write(
'https://storage.googleapis.com/symbiflow-arch-defs/artifacts/prod/'
f'foss-fpga-tools/symbiflow-arch-defs/continuous/install/{argv[1]}/{artifact}'
)
|
[
"pathlib.Path",
"re.compile"
] |
[((121, 182), 're.compile', 're_compile', (['"""symbiflow-arch-defs-([a-zA-Z0-9_-]+)-([a-z0-9])"""'], {}), "('symbiflow-arch-defs-([a-zA-Z0-9_-]+)-([a-z0-9])')\n", (131, 182), True, 'from re import compile as re_compile\n'), ((538, 553), 'pathlib.Path', 'Path', (['"""install"""'], {}), "('install')\n", (542, 553), False, 'from pathlib import Path\n'), ((190, 204), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (194, 204), False, 'from pathlib import Path\n')]
|
from django.conf.urls import url, include
urlpatterns = [
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/viet_ocr/', include('viet_ocr.api.urls', namespace="viet_ocr-api")),
url(r'^api/post_process/', include('post_process.api.urls', namespace="post_process-api")),
url(r'^api/pre_process/', include('pre_process.api.urls', namespace="pre_process-api")),
url(r'^api/doc_ocr/', include('doc_ocr.api.urls', namespace="doc_ocr-api")),
]
|
[
"django.conf.urls.include"
] |
[((83, 141), 'django.conf.urls.include', 'include', (['"""rest_framework.urls"""'], {'namespace': '"""rest_framework"""'}), "('rest_framework.urls', namespace='rest_framework')\n", (90, 141), False, 'from django.conf.urls import url, include\n'), ((171, 225), 'django.conf.urls.include', 'include', (['"""viet_ocr.api.urls"""'], {'namespace': '"""viet_ocr-api"""'}), "('viet_ocr.api.urls', namespace='viet_ocr-api')\n", (178, 225), False, 'from django.conf.urls import url, include\n'), ((259, 321), 'django.conf.urls.include', 'include', (['"""post_process.api.urls"""'], {'namespace': '"""post_process-api"""'}), "('post_process.api.urls', namespace='post_process-api')\n", (266, 321), False, 'from django.conf.urls import url, include\n'), ((354, 414), 'django.conf.urls.include', 'include', (['"""pre_process.api.urls"""'], {'namespace': '"""pre_process-api"""'}), "('pre_process.api.urls', namespace='pre_process-api')\n", (361, 414), False, 'from django.conf.urls import url, include\n'), ((443, 495), 'django.conf.urls.include', 'include', (['"""doc_ocr.api.urls"""'], {'namespace': '"""doc_ocr-api"""'}), "('doc_ocr.api.urls', namespace='doc_ocr-api')\n", (450, 495), False, 'from django.conf.urls import url, include\n')]
|
from django.contrib import admin
from .models import Asset
# Register your models here.
admin.site.register(Asset)
|
[
"django.contrib.admin.site.register"
] |
[((90, 116), 'django.contrib.admin.site.register', 'admin.site.register', (['Asset'], {}), '(Asset)\n', (109, 116), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3*
import unicodedata
class Word:
"""
Object representation for a word
Parameters
----------
text : str
word text
formatedText : str
word text without accent, punctuation, etc (UTF-8)
color : List of integers
pixel color values in rgb for the word - eg: [0, 255, 56]
"""
def __init__(self, text):
"""
Initialize a Word object with the given string
Parameters
----------
text : str
word text
"""
self.text = text
self.formatedText = self.__formatText()
@property
def color(self):
"""
Return a list of 3 values (RGB) corresponding to the color representation of the word
"""
alpha = "abcdefghijklmnopqrstuvwxyz" # alpha[1] = "b"
alphaPos = dict([ (x[1],x[0]) for x in enumerate(alpha) ]) # alphaPos["b"] = 1
colorValue = 0
for letter in self.formatedText:
if letter.isdigit():
colorValue += int(letter)
else:
colorValue += alphaPos[letter.lower()]
return [(colorValue * len(self.formatedText)) % 256, (colorValue * 2) % 256, (colorValue * 3 % 256)]
def __formatText(self):
"""
Return the formated word
"""
uniText = ''.join(e for e in self.text if e.isalnum()) # remove punctuation
uniText = ''.join(c for c in unicodedata.normalize('NFD', uniText)
if unicodedata.category(c) != 'Mn') # Remove accents and other special letter chars
uniText = uniText.replace("œ", "oe")
uniText = uniText.replace("ª", "a")
return uniText
|
[
"unicodedata.category",
"unicodedata.normalize"
] |
[((1472, 1509), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'uniText'], {}), "('NFD', uniText)\n", (1493, 1509), False, 'import unicodedata\n'), ((1531, 1554), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (1551, 1554), False, 'import unicodedata\n')]
|
"""
fit1d package is designed to provide an organized toolbox for different types of
1D fits that can be performed.
It is easy to add new fits and other functionalities
"""
from abc import ABC, abstractmethod
import numpy as np
from typing import List,Tuple
from fit1d.common.model import Model, ModelMock
from fit1d.common.outlier import OutLier
from fit1d.common.fit_data import FitData
class Fit1D(ABC):
"""
This is the main class of the fit1d package. It is used to allow the user to execute
fit and eval methods, in addition to calc_RMS and calc_error static services.
The properties of this class are the _model and _outlier objects and a _use_remove_outliers
boolean
"""
_outlier: OutLier
_use_remove_outliers: bool
_fit_data: FitData
# interface methods
def fit(self, x: np.ndarray, y: np.ndarray) -> FitData:
self._fit_data.x = x
self._fit_data.y = y
if self._use_remove_outliers:
self._remove_outlier()
else:
self._calc_fit_and_update_fit_data()
return self._fit_data
def eval(self, x: np.ndarray = None, model: Model = None) -> np.ndarray:
if x is not None:
self._fit_data.x = x
if model is not None:
self._fit_data.model = model
self._calc_eval()
return self._fit_data.y_fit
def calc_error(self):
"""
calc error vector , update _fit_data
:return:
"""
if self._fit_data.y is not None and self._fit_data.y_fit is not None:
self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit
def calc_rms(self):
if self._fit_data.error_vector is not None:
self._fit_data.rms = (sum(self._fit_data.error_vector ** 2) / len(self._fit_data.error_vector)) ** 0.5
def get_fit_data(self) -> FitData:
return self._fit_data
# abstract methods
@abstractmethod
def _calc_fit(self):
"""
abstractmethod:
run fit calculation of the data update model in _fit_data.model
:return: Null
"""
pass
@abstractmethod
def _calc_eval(self):
"""
abstractmethod:
subclass calculate model eval for inner x and model
update _fit_data.y_fit
:return: Void
"""
pass
# internal methods
def _update_fit_data(self):
self._calc_eval()
self.calc_error()
self.calc_rms()
def _remove_outlier(self):
while True:
self._calc_fit_and_update_fit_data()
indexes_to_remove = self._outlier.find_outliers(self._fit_data.error_vector)
if len(indexes_to_remove) == 0:
break
else:
self._remove_indexes(indexes_to_remove)
def _remove_indexes(self, ind):
self._fit_data.x = np.delete(self._fit_data.x, ind)
self._fit_data.y = np.delete(self._fit_data.y, ind)
def _calc_fit_and_update_fit_data(self):
self._calc_fit()
self._update_fit_data()
class Fit1DMock(Fit1D):
""" Mock class. Used only for tests """
def __init__(self, outlier: OutLier, remove_outliers: bool):
self._fit_data = FitData()
self._outlier = outlier
self._use_remove_outliers = remove_outliers
def _calc_fit(self):
self._fit_data.model = ModelMock({"param1": 5.5})
def _calc_eval(self) -> np.ndarray:
if self._fit_data.y is None or len(self._fit_data.y) == 4:
self._fit_data.y_fit = np.array([11, 22, 33, 44])
else:
self._fit_data.y_fit = np.array([11, 33, 44])
|
[
"numpy.delete",
"numpy.array",
"fit1d.common.fit_data.FitData",
"fit1d.common.model.ModelMock"
] |
[((2867, 2899), 'numpy.delete', 'np.delete', (['self._fit_data.x', 'ind'], {}), '(self._fit_data.x, ind)\n', (2876, 2899), True, 'import numpy as np\n'), ((2927, 2959), 'numpy.delete', 'np.delete', (['self._fit_data.y', 'ind'], {}), '(self._fit_data.y, ind)\n', (2936, 2959), True, 'import numpy as np\n'), ((3223, 3232), 'fit1d.common.fit_data.FitData', 'FitData', ([], {}), '()\n', (3230, 3232), False, 'from fit1d.common.fit_data import FitData\n'), ((3374, 3400), 'fit1d.common.model.ModelMock', 'ModelMock', (["{'param1': 5.5}"], {}), "({'param1': 5.5})\n", (3383, 3400), False, 'from fit1d.common.model import Model, ModelMock\n'), ((3544, 3570), 'numpy.array', 'np.array', (['[11, 22, 33, 44]'], {}), '([11, 22, 33, 44])\n', (3552, 3570), True, 'import numpy as np\n'), ((3620, 3642), 'numpy.array', 'np.array', (['[11, 33, 44]'], {}), '([11, 33, 44])\n', (3628, 3642), True, 'import numpy as np\n')]
|
from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
import validators
class CURPField(CharField):
default_validators = [validators.CURPValidator()]
description = _("CURP")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 18
super(CURPField, self).__init__(*args, **kwargs)
|
[
"django.utils.translation.ugettext_lazy",
"validators.CURPValidator"
] |
[((209, 218), 'django.utils.translation.ugettext_lazy', '_', (['"""CURP"""'], {}), "('CURP')\n", (210, 218), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((166, 192), 'validators.CURPValidator', 'validators.CURPValidator', ([], {}), '()\n', (190, 192), False, 'import validators\n')]
|
#!/usr/bin/env python3
from html.parser import HTMLParser
import inquirer
import requests
# Parser for HTML input
class InputParser(HTMLParser):
def __init__(self, input_name):
super().__init__()
self._input_name = input_name
self._value = None
@property
def value(self):
return self._value
def handle_starttag(self, tag, attrs):
if tag == "input":
for attr in attrs:
if attr[0] == "name" and attr[1] == self._input_name:
for attr2 in attrs:
if attr2[0] == "value":
self._value = attr2[1]
break
break
# Parser for HTML option list
class OptionParser(HTMLParser):
def __init__(self, select_name):
super().__init__()
self._select_name = select_name
self._within_select = False
self._within_option = False
self._option_name = ""
self._option_value = "-1"
self._choices = []
@property
def choices(self):
return self._choices
def handle_starttag(self, tag, attrs):
if tag == "select":
for attr in attrs:
if attr[0] == "name" and attr[1] == self._select_name:
self._within_select = True
break
elif tag == "option" and self._within_select:
self._within_option = True
for attr in attrs:
if attr[0] == "value":
self._option_value = attr[1]
def handle_endtag(self, tag):
if tag == "select":
self._within_select = False
elif tag == "option":
if (
self._within_select
and self._within_option
and len(self._option_name) > 0
and self._option_value != ""
):
self._choices.append((self._option_name, self._option_value))
self._within_option = False
self._option_name = ""
self._option_value = "-1"
def handle_data(self, data):
if self._within_option:
self._option_name += data
def main():
# search for street
questions = [
inquirer.Text("strasse", message="Enter search string for street"),
# inquirer.Text("hausnummer", message="Enter search string for house number"),
]
answers = inquirer.prompt(questions)
answers["hausnummer"] = ""
answers["bestaetigung"] = "true"
answers["mode"] = "search"
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# search for street
input_parser = InputParser(input_name="asId")
input_parser.feed(r.text)
if input_parser.value is not None:
answers["asId"] = input_parser.value
else:
# query returned a list of streets
parser = OptionParser(select_name="asId")
parser.feed(r.text)
questions = [
inquirer.List("asId", choices=parser.choices, message="Select street")
]
answers.update(inquirer.prompt(questions))
# search for building number
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# parser HTML option list
parser = OptionParser(select_name="hnId")
parser.feed(r.text)
if len(parser.choices) == 0:
answers["hnId"] = ""
else:
questions = [
inquirer.List("hnId", choices=parser.choices, message="Select house number")
]
answers.update(inquirer.prompt(questions))
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: stadtreinigung_hamburg")
print(" args:")
print(f" asId: {answers['asId']}")
print(f" hnId: {answers['hnId']}")
if __name__ == "__main__":
main()
|
[
"inquirer.List",
"requests.post",
"inquirer.prompt",
"inquirer.Text"
] |
[((2443, 2469), 'inquirer.prompt', 'inquirer.prompt', (['questions'], {}), '(questions)\n', (2458, 2469), False, 'import inquirer\n'), ((2579, 2693), 'requests.post', 'requests.post', (['"""https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html"""'], {'data': 'answers'}), "(\n 'https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html'\n , data=answers)\n", (2592, 2693), False, 'import requests\n'), ((3237, 3351), 'requests.post', 'requests.post', (['"""https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html"""'], {'data': 'answers'}), "(\n 'https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html'\n , data=answers)\n", (3250, 3351), False, 'import requests\n'), ((2261, 2327), 'inquirer.Text', 'inquirer.Text', (['"""strasse"""'], {'message': '"""Enter search string for street"""'}), "('strasse', message='Enter search string for street')\n", (2274, 2327), False, 'import inquirer\n'), ((3063, 3133), 'inquirer.List', 'inquirer.List', (['"""asId"""'], {'choices': 'parser.choices', 'message': '"""Select street"""'}), "('asId', choices=parser.choices, message='Select street')\n", (3076, 3133), False, 'import inquirer\n'), ((3167, 3193), 'inquirer.prompt', 'inquirer.prompt', (['questions'], {}), '(questions)\n', (3182, 3193), False, 'import inquirer\n'), ((3573, 3649), 'inquirer.List', 'inquirer.List', (['"""hnId"""'], {'choices': 'parser.choices', 'message': '"""Select house number"""'}), "('hnId', choices=parser.choices, message='Select house number')\n", (3586, 3649), False, 'import inquirer\n'), ((3683, 3709), 'inquirer.prompt', 'inquirer.prompt', (['questions'], {}), '(questions)\n', (3698, 3709), False, 'import inquirer\n')]
|
import chardet
import sys
import codecs
import os
def findEncoding(s):
file = open(s, mode='rb')
buf = file.read()
result = chardet.detect(buf)
file.close()
return result['encoding']
def convertEncoding(s):
if os.access(s,os.W_OK):
encoding = findEncoding(s)
if encoding != 'utf-8' and encoding != 'ascii':
print("convert %s%s to utf-8" % (s, encoding))
contents = ''
with codecs.open(s, "r", encoding) as sourceFile:
contents = sourceFile.read()
with codecs.open(s, "w", "utf-8") as targetFile:
targetFile.write(contents)
else:
print("%s encoding is %s ,there is no need to convert" % (s, encoding))
else:
print("%s read only" %s)
def getAllFile(path, suffix='.'):
"recursive is enable"
f = os.walk(path)
fpath = []
for root, dir, fname in f:
for name in fname:
if name.endswith(suffix):
fpath.append(os.path.join(root, name))
return fpath
def convertAll(path):
flist = getAllFile(path, ".java")
for fname in flist:
convertEncoding(fname)
if __name__ == "__main__":
path = 'E:\\logs'
if len(sys.argv) == 1:
path = os.getcwd()
elif len(sys.argv) == 2:
path = sys.argv[1]
else:
print("error parameter")
exit()
convertAll(path)
|
[
"os.access",
"os.path.join",
"os.getcwd",
"chardet.detect",
"codecs.open",
"os.walk"
] |
[((147, 166), 'chardet.detect', 'chardet.detect', (['buf'], {}), '(buf)\n', (161, 166), False, 'import chardet\n'), ((254, 275), 'os.access', 'os.access', (['s', 'os.W_OK'], {}), '(s, os.W_OK)\n', (263, 275), False, 'import os\n'), ((895, 908), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (902, 908), False, 'import os\n'), ((1324, 1335), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1333, 1335), False, 'import os\n'), ((474, 503), 'codecs.open', 'codecs.open', (['s', '"""r"""', 'encoding'], {}), "(s, 'r', encoding)\n", (485, 503), False, 'import codecs\n'), ((585, 613), 'codecs.open', 'codecs.open', (['s', '"""w"""', '"""utf-8"""'], {}), "(s, 'w', 'utf-8')\n", (596, 613), False, 'import codecs\n'), ((1056, 1080), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1068, 1080), False, 'import os\n')]
|
import unittest
from asyncio import sleep
from async_unittest import TestCase
from aio_counter import AioCounter
from aio_counter.exceptions import AioCounterException
class TestAioCounter(TestCase):
TIK = float(0.3)
TAK = float(0.6)
TTL = int(1)
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.counter = AioCounter(loop=cls.loop)
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
cls.counter.close()
def setUp(self) -> None:
self.counter._count = 0
self.counter._incs.clear()
self.counter._decs.clear()
# close all handlers
self.counter.close()
self.counter._handlers.clear()
def tearDown(self) -> None:
self.counter.close()
async def test_dec(self):
assert self.counter.empty()
self.counter._loop.call_later(self.TIK, self.counter.inc_nowait)
assert self.counter.count == 0
# wait until delayed inc_nowait increment counter
count = await self.counter.dec()
assert count == 0
async def test_inc(self):
assert self.counter.empty()
# fill counter
self.counter._count = self.counter.max_count
assert self.counter.count == self.counter.max_count
self.counter._loop.call_later(self.TIK, self.counter.dec_nowait)
assert self.counter.count == self.counter.max_count
# wait until delayed dec_nowait decrement counter
count = await self.counter.inc()
assert count == self.counter.max_count
def test_dec_nowait(self):
assert self.counter.empty()
try:
self.counter.dec_nowait()
except AioCounterException as e:
assert e
else:
assert False
count = self.counter.inc_nowait()
assert count == 1
assert self.counter.count == 1
count = self.counter.dec_nowait()
assert count == 0
assert self.counter.count == 0
def test_inc_nowait(self):
assert self.counter.empty()
count = self.counter.inc_nowait()
assert count == 1
assert self.counter.count == 1
# fill counter
self.counter._count = self.counter.max_count
try:
self.counter.inc_nowait()
except AioCounterException as e:
assert e
else:
assert False
async def test_ttl_inc(self):
assert self.counter.empty()
# inc with ttl = TTL
await self.counter.inc(self.TTL)
assert self.counter.count == 1
# sleep and inc() should run in one loop
await sleep(self.TTL, loop=self.loop)
# check if count was dec
assert self.counter.count == 0
async def test_bulk_inc(self):
"""
inc() with value > 1 should success only if counter changed to <value > 1> in one moment
:return:
"""
assert self.counter.empty()
# fill counter
self.counter._count = self.counter.max_count - 1
assert self.counter.count == self.counter.max_count - 1
def delayed_check(counter):
assert counter.count == counter.max_count - 1
self.counter._loop.call_later(self.TIK, delayed_check, self.counter)
self.counter._loop.call_later(self.TTL, self.counter.dec_nowait)
assert self.counter.count == self.counter.max_count - 1
await self.counter.inc(value=2)
assert self.counter.count == self.counter.max_count
async def test_bulk_dec(self):
"""
dec() with value > 1 should success only if counter changed to <value > 1> in one moment
:return:
"""
assert self.counter.empty()
await self.counter.inc()
assert self.counter.count == 1
def delayed_check(counter):
assert counter.count == 1
self.counter._loop.call_later(self.TIK, delayed_check, self.counter)
self.counter._loop.call_later(self.TTL, self.counter.inc_nowait)
assert self.counter.count == 1
await self.counter.dec(value=2)
assert self.counter.empty()
async def test_ttl_after_dec(self):
assert self.counter.empty()
await self.counter.inc(self.TTL)
assert self.counter.count == 1
count = self.counter.dec_nowait()
assert count == 0
assert self.counter.count == 0
await sleep(self.TTL, loop=self.loop)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"aio_counter.AioCounter",
"asyncio.sleep"
] |
[((4531, 4546), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4544, 4546), False, 'import unittest\n'), ((365, 390), 'aio_counter.AioCounter', 'AioCounter', ([], {'loop': 'cls.loop'}), '(loop=cls.loop)\n', (375, 390), False, 'from aio_counter import AioCounter\n'), ((2684, 2715), 'asyncio.sleep', 'sleep', (['self.TTL'], {'loop': 'self.loop'}), '(self.TTL, loop=self.loop)\n', (2689, 2715), False, 'from asyncio import sleep\n'), ((4466, 4497), 'asyncio.sleep', 'sleep', (['self.TTL'], {'loop': 'self.loop'}), '(self.TTL, loop=self.loop)\n', (4471, 4497), False, 'from asyncio import sleep\n')]
|
import math
# Modify the parameters here
UNROLL_FACTOR = 32
DATA_T = 'unsigned short'
# Generate the code
data_type = DATA_T
level = int(math.log2(UNROLL_FACTOR))
for layer in range(level - 1, -1, -1):
pair = int(math.pow(2, layer))
for i in range(pair):
# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1]
if layer == level - 1:
print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i*2}] * local_B[0][{i*2}];')
print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i*2+1}] * local_B[0][{i*2+1}];')
else:
print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i*2} + add_{layer+1}_{i*2+1};')
print('local_C[c7][c6] += add_0_0;')
|
[
"math.pow",
"math.log2"
] |
[((139, 163), 'math.log2', 'math.log2', (['UNROLL_FACTOR'], {}), '(UNROLL_FACTOR)\n', (148, 163), False, 'import math\n'), ((219, 237), 'math.pow', 'math.pow', (['(2)', 'layer'], {}), '(2, layer)\n', (227, 237), False, 'import math\n')]
|
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import viewsets, status
from .models import Post,Comment,Category,Tag
from .serializers import PostSerializer,CommentSerializer,CategorySerializer,TagSerializer
class PostViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Post.objects.filter(status='published')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def post(self, request, format=None):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
post = serializer.save()
for tag in request.data.get('tags'):
t = Tag.objects.get(id=tag)
post.tags.add(t)
return Response(serializer.data)
return Response(serializer.errors )
def get_tags(self, *args, **kwargs):
tags = Tags.objects.all()
serializer = TagSerializer(tags, many=True)
return Response(serializers.data)
def get_object(self, pk):
try:
return Post.objects.get(pk=pk)
except Post.DoesNotExist:
raise Http404
def put(self, request, pk, format=None):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
post = serializer.save()
for tag in request.data.get('tags'):
t = Tag.objects.get(id=tag)
post.tags.add(t)
return Response(serializer.data)
return Response(serializer.errors )
def retrieve(self, request, pk=None):
queryset = Post.objects.all()
post = get_object_or_404(queryset, pk=pk)
serializer_context = {'request': request,}
serializer = PostSerializer(post, context=serializer_context)
return Response(serializer.data)
def delete(self, request, pk, format=None):
snippet = self.get_object(pk)
snippet.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CommentViewSet(viewsets.ViewSet):
def list(self, request):
queryset = Comment.objects.all()
serializer = CommentSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request, format=None):
post= get_object_or_404(Post, pk=pk)
serializer = CommentSerializer(data=request.data)
if serializer.is_valid():
comment = serializer.save()
return Response(serializer.data)
return Response(serializer.errors )
class CategoryViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Category.objects.all()
serializer_context = {
'request': request,
}
serializer = CategorySerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def post(self, request, format=None):
post= get_object_or_404(Post, pk=pk)
serializer = CategorySerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TagViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Tag.objects.all()
serializer_context = {
'request': request,
}
serializer = TagSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Tag.objects.all()
tag = get_object_or_404(queryset, pk=pk)
serializer_context = {
'request': request,
}
serializer = TagSerializer(tag, context=serializer_context)
return Response(serializer.data)
class HideViewSet(viewsets.ViewSet):
def hidden(self ,request):
queryset = Post.objects.filter(status='hidden')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
class DraftViewSet(viewsets.ViewSet):
def draft(self ,request):
queryset = Post.objects.filter(status='draft')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
|
[
"rest_framework.response.Response",
"django.shortcuts.get_object_or_404"
] |
[((675, 700), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (683, 700), False, 'from rest_framework.response import Response\n'), ((1063, 1090), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {}), '(serializer.errors)\n', (1071, 1090), False, 'from rest_framework.response import Response\n'), ((1236, 1262), 'rest_framework.response.Response', 'Response', (['serializers.data'], {}), '(serializers.data)\n', (1244, 1262), False, 'from rest_framework.response import Response\n'), ((1768, 1795), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {}), '(serializer.errors)\n', (1776, 1795), False, 'from rest_framework.response import Response\n'), ((1902, 1936), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'pk'}), '(queryset, pk=pk)\n', (1919, 1936), False, 'from django.shortcuts import get_object_or_404\n'), ((2073, 2098), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2081, 2098), False, 'from rest_framework.response import Response\n'), ((2227, 2270), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (2235, 2270), False, 'from rest_framework.response import Response\n'), ((2461, 2486), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2469, 2486), False, 'from rest_framework.response import Response\n'), ((2552, 2582), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'pk': 'pk'}), '(Post, pk=pk)\n', (2569, 2582), False, 'from django.shortcuts import get_object_or_404\n'), ((2776, 2803), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {}), '(serializer.errors)\n', (2784, 2803), False, 'from rest_framework.response import Response\n'), ((3095, 3120), 'rest_framework.response.Response', 'Response', 
(['serializer.data'], {}), '(serializer.data)\n', (3103, 3120), False, 'from rest_framework.response import Response\n'), ((3180, 3210), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'pk': 'pk'}), '(Post, pk=pk)\n', (3197, 3210), False, 'from django.shortcuts import get_object_or_404\n'), ((3428, 3491), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (3436, 3491), False, 'from rest_framework.response import Response\n'), ((3768, 3793), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3776, 3793), False, 'from rest_framework.response import Response\n'), ((3888, 3922), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'pk'}), '(queryset, pk=pk)\n', (3905, 3922), False, 'from django.shortcuts import get_object_or_404\n'), ((4079, 4104), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4087, 4104), False, 'from rest_framework.response import Response\n'), ((4382, 4407), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4390, 4407), False, 'from rest_framework.response import Response\n'), ((4695, 4720), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4703, 4720), False, 'from rest_framework.response import Response\n'), ((1022, 1047), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1030, 1047), False, 'from rest_framework.response import Response\n'), ((1727, 1752), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1735, 1752), False, 'from rest_framework.response import Response\n'), ((2735, 2760), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), 
'(serializer.data)\n', (2743, 2760), False, 'from rest_framework.response import Response\n'), ((3355, 3412), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (3363, 3412), False, 'from rest_framework.response import Response\n')]
|
from collections import namedtuple
import datetime
import pprint
import sys
import copy
import json
def expandStatusValue(v):
"""
v : string | (string, datetime.date | None)
が string だった場合 (string, None) に展開する.
"""
if isinstance(v, str):
v = (v, None)
return v
def formatDate(d):
if not d:
return "????-??-??"
return "{0:%Y-%m-%d}".format(d)
"""
title:
プロジェクト名
url:
プロジェクトページ
owner:
主担当
status:
"" : 未着手
"o" : 作業中
"v" : 完了
startDate:
着手開始日
"" | "yyyy-mm-dd"
endDate
完了日
blocking:
着手できない理由
doc:
メモ
milestones:
(finishDate : datetime.date | None, title : string)[]
"""
class Project:
def __init__(self, codeName="", title="", url="", owner="", priority=100, status={}, days=0,
startDate=None, endDate=None, blocking="", doc="", milestones=[], epic=""):
self.index = 0
self.codeName = codeName
self.title = title
self.url = url
self.owner = owner
self.orig_owner = owner
self.priority = priority
self.status = dict([(k, expandStatusValue(v)) for k, v in status.items()])
# pprint.pprint(self.status)
self.days = days
self.startDate = startDate
self.endDate = endDate
self.doc = doc
self.blocking = blocking
self.put = False
self.milestones = milestones
self.epic = epic
def isDone(self):
return self.status["End"][0]=="v"
def doing(self):
sd = self.startDate
if sd is None:
sd = datetime.date(3000, 1, 1)
ed = self.endDate
if ed is None:
ed = datetime.date(3000, 1, 1)
now = datetime.date.today()
return sd <= now and now <= ed
def fixed(self):
return self.owner != "" and self.startDate is not None and self.endDate is not None
def getMilestones(self, status_master):
"""
return (datetime.date, label)[]
"""
sm = dict(status_master)
rv = [ (v[1], self.title+" "+sm[k]+" (主担当: "+self.owner+")") for k, v in self.status.items() ] + self.milestones
return list(filter(lambda v: v[0], rv))
colorDone = "#DDFADE"
colorDoing = "#E0F0FF"
def hsv2rgb(hsv):
"""
hsv: [h, s, v]
h in [0, 360]
s in [0, 1]
v in [0, 1]
return [r, g, b]
r, g, b in [0, 1]
"""
h = hsv[0]
s = hsv[1]
v = hsv[2]
hd = h/60; # in [0, 6]
r = v
g = v
b = v
if s > 0:
hdi = max(0, min(5, int(hd)));
f = hd - hdi
if hdi==0:
g *= 1 - s * (1-f)
b *= 1 - s
elif hdi==1:
r *= 1 - s * f
b *= 1 - s
elif hdi==2:
r *= 1 - s
b *= 1 - s * (1-f)
elif hdi==3:
r *= 1 - s
g *= 1 - s * f
elif hdi==4:
r *= 1 - s * (1-f)
g *= 1 - s
elif hdi==5:
g *= 1 - s
b *= 1 - s * f
return [r, g, b]
def rgb2hex(rgb):
return "#%02x%02x%02x" % (int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255))
def statusCell(st, name, label):
s, endDate = st[name]
col = ""
if s=="v":
col = colorDone
if s=="o":
col = colorDoing
style=""
if col:
style = "background-color: {col};".format(**vars())
text = " "
if endDate:
tentative = "<br>(仮)" if datetime.date.today() <= endDate else ""
text = "<span style='font-size: 0.7em;'>{endDate.year:04}-{endDate.month:02}-{endDate.day:02}{tentative}</span>".format(**vars())
return """<td style="{style}">{text}</td>""".format(**vars())
def genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels):
"""
getLabels: index:int, project -> label[]
"""
### Generate milestone list
# milestones: (datetime.date, label)[]
milestones = sum([ p.getMilestones(status_master) for p in projects], []) + additional_milestones
milestones = sorted(milestones)
s = []
for d, l in milestones:
color = "black" if datetime.date.today() <= d else "#c0c0c0"
tentative = " (仮)" if datetime.date.today() <= d else ""
s.append("<li style='color:"+color+"'>"+formatDate(d)+tentative+" "+l+"</li><br>")
s = "\n".join(s)
html = """
<ul>
<li>今後のマイルストーン一覧</li>
<ul>
{s}
</ul>
</ul>
<div id="filters">フィルタ (AND): </div>
""".format(**vars())
### Generate project list
def sortFun(v):
return v.priority + (1000 if v.isDone() else 0) + (500 if v.blocking else 0)
projects = sorted(projects, key=sortFun)
statusTitles = "".join([ """<td style="width: 5%;">{label}</td>""".format(**vars()) for name, label in status_master])
html += """
<html><body><table class="projects">
<tr class="title">
<td style="width: 5%;">番号</td>
<td style="width: 5%;">優先度</td>
<td>プロジェクト名</td>
{statusTitles}
<td style="width: 5%;">主担当</td>
<td style="width: 10%;">メモ</td>
<td style="width: 10%;">作業期間(予定)</td>
</tr>
""".format(**vars())
labels = {}
for i, p in enumerate(projects):
if p.startDate:
startS = "{0:%Y-%m-%d}".format(p.startDate)
endS = "{0:%Y-%m-%d}".format(p.endDate)
schedule = "{startS}<br>〜{endS}".format(**vars())
if p.isDone():
schedule = ""
title = p.title
if p.url:
title = """<a href="{p.url}">{title}</a>""".format(**vars())
# status = StatusDetail(p.status)
statusTitles = "".join([ statusCell(p.status, name, label) for name, label in status_master])
trCol = "white" if i%2==0 else "#f0f0f0"
schedule_bg = "background-color: "+colorDoing+";" if p.doing() else ""
index = i+1
owner_note = ""
doc_note = ""
if p.orig_owner=="":
owner_note = "(仮)"
doc_note = "(TODO 主担当決め)"
tasks = ""
if p.epic:
link = ticketLinkFun(p.epic)
style = """background-color: darkgreen; color: white; text-decoration: none; font-size: 0.8em; padding: 4px; border-radius: 10px;"""
tasks = """<a href="{link}" target="_blank" style="{style}">Tasks</a>""".format(**vars())
odd = "odd" if i%2==0 else ""
id = "project%04d" % i
labels[id] = getLabels(i, p)
html += """
<tr style="background-color: {trCol}" id="{id}">
<td>{index}</td>
<td>{p.priority}</td>
<td>
<a name="{p.codeName}"></a>
<span style="font-size: 0.8em; font-weight: bold; color: #5050c0;">
<a style="text-decoration: none;" href="#{p.codeName}">{p.codeName}</a>
</span>
{tasks}<br>
{title}
</td>
{statusTitles}
<td>{p.owner}{owner_note}</td>
<td>{p.doc}{doc_note}<span style="color: red;">{p.blocking}</span></td>
<td style="font-size: 0.5em;{schedule_bg}">{schedule}</td>
</tr>
""".format(**vars())
html += """
</table></body></html>
"""
return html, labels
def Xsect(p0, p1):
# return Xsect(p0.startDate, p0.endDate, p1.startDate, p1.endDate)
if any([ v is None for v in [p0.startDate, p0.endDate, p1.startDate, p1.endDate]]):
return False
return not (p1.endDate < p0.startDate or p0.endDate < p1.startDate)
#def Xsect(s0, e0, s1, e1):
# return not (e1 < s0 or e0 < s1)
def dupCheck(p, projects):
"""
重複してなければ True を返す.
"""
if p.isDone():
return True
if not p.fixed():
return True
for pp in projects:
if pp.fixed() and not pp.isDone() and p.owner==pp.owner and p.title != pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return False
return True
def isClone(name):
"""
クローンかどうか.
クローンには明示的なプロジェクト割り当てしかできない.
"""
return any([str(i) in name for i in range(10)])
def assign(projects, people):
"""
return
Dict
person -> project[]
"""
# 担当者に割り当てた上で各PJがいつ終わるかというスケジュール表(担当者 x PJの表)
# TODO startDate がきまってるやつを最初に置く
# 担当者 -> 着手可能日付
freeDates = dict([(p, datetime.date.min) for p, _ in people])
# owner -> {startDate, project}[]
schedule = {}
"""
startDateFixed
開始日がきまってるやつを置く
canStart
開始日がきまってないやつを置く
blocking
開始できないやつを置く
"""
for phase in ["startDateFixed", "canStart", "blocking"]:
print("\nPhase", phase, "\n")
if phase=="canStart":
for k in freeDates:
freeDates[k] = max(freeDates[k], datetime.date.today())
for i, p in enumerate(sorted(projects, key=lambda v: (v.priority, v.title))):
if phase!="blocking" and p.blocking:
continue
if phase=="startDateFixed" and p.startDate is None:
continue
if p.isDone():
continue
if p.put:
continue
print("Try to put", p.title)
def filterFun(name):
pp = copy.deepcopy(p)
pp.owner = name
return dupCheck(pp, projects)
def getFreePerson(freeDates):
cands = sorted([ kv for kv in freeDates.items() if not isClone(kv[0]) and filterFun(kv[0]) ], key=lambda v: (v[1], v[0]))
print(cands)
return cands[0][0]
person = p.owner
if person=="":
person = getFreePerson(freeDates)
# print(person)
origStartDate = p.startDate
origEndDate = p.endDate
if p.blocking:
# Later
p.startDate = datetime.date.today() + datetime.timedelta(365*3+i*30)
p.endDate = p.startDate + datetime.timedelta(30)
if p.startDate is None:
p.startDate = freeDates[person]
if p.endDate is None:
p.endDate = p.startDate + datetime.timedelta(90)
if not dupCheck(p, projects):
p.startDate = origStartDate
p.endDate = origEndDate
# continue
sys.exit(0)
schedule.setdefault(person, [])
p.owner = person
print("Put", p.title, p.startDate, p.endDate, person)
schedule[person].append(p)
p.put = True
freeDates[person] = max(freeDates[person], p.endDate + datetime.timedelta(1))
#pprint.pprint(freeDates)
# pprint.pprint(schedule)
# for p in projects:
# print("[]", p.title, p.startDate, p.endDate)
for p in projects:
if not p.isDone():
for pp in projects:
if not pp.isDone() and p.title != pp.title and p.owner==pp.owner and p.title < pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return schedule
def genScheduleHtml(projects, schedule, people, ticketLinkFun):
"""
schedule
Dict
person -> project[]
"""
# date x 担当者
allDates = [ d for ps in schedule.values() for p in ps for d in [p.startDate, p.endDate]]
minDate = min(allDates)
maxDate = max(allDates)
colors = [ rgb2hex(hsv2rgb([i/len(projects)*360, 0.1, 1])) for i in range(len(projects)) ]
startDateIndex = minDate.toordinal()
endDateIndex = maxDate.toordinal()
N = endDateIndex - startDateIndex + 1
# print(N)
def createRow():
return [ ["", ""] for _ in range(len(people)+1) ]
table = {0: createRow()}
# 定期
for i in range(10000):
d = minDate + datetime.timedelta(i)
if maxDate < d:
break
if d.day in [1, 15, 30]:
table.setdefault(d.toordinal(), createRow())
wp = 95/len(people)
# プロジェクト設置
for i, (person, ps) in enumerate(sorted(schedule.items())):
if person not in [p for p, _ in people]:
continue
for p in ps:
# print(p.startDate, p.endDate)
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in [si, ei]:
table.setdefault(d, createRow())
if d==si:
title = p.title
if p.url:
title = """
<a href="{p.url}">{title}</a>
""".format(**vars())
title += "<br>"
doc = p.doc.replace("\n", "<br>")
title += """
<span style="font-size: 0.8em;">{doc}</span>""".format(**vars())
title += """<br><span style="color: red;">{p.blocking}</span>""".format(**vars())
table[d][i+1][0] = title
table[d][i+1][1] = "font-size: 1em;"
# 色塗り
for i, (person, ps) in enumerate(sorted(schedule.items())):
for p in ps:
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in sorted(table.keys()):
if si <= d and d <= ei:
col = colors[p.index]
table[d][i+1][1] += "width: {wp}%; background-color: {col};".format(**vars())
# 日付
today = datetime.date.today()
for d in table:
if d==0:
continue
da = datetime.date.fromordinal(d)
s = "{0:%Y-%m-%d}".format(da)
col = "white" if da.month % 2==0 else "#e0e0e0"
if da.year==today.year and da.month==today.month:
col = "#c0ffff"
style = "vertical-align: top; width: 5%; font-size: 3px; background-color: "+col+";"
table[d][0] = [s, style]
table = [ table[k] for k in sorted(table.keys()) ]
# pprint.pprint(table)
def createHeader():
"""
メンバー見出しを生成
"""
row = [["", ""]]
for i, (person, ps) in enumerate(sorted(schedule.items())):
row.append([person, "width: %f; background-color: #e0e0e0".format(**vars())])
return row
for i in range(0, len(table), 10):
table.insert(i, createHeader())
def tableToHtml(table):
html = "<table class='schedule'>"
for row in table:
html += "<tr>"
for text, style in row:
html += "<td style='{style}'>{text}</td>".format(**vars())
html += "</tr>"
html += "</table>"
return html
return tableToHtml(table)
######################
def createTasksHtml(titleAndEpics, members, ticketLinkFun):
def entry(label, url):
return """<a href="{url}" target="main_frame">{label}</a>""".format(**vars())
epics = [ epic for _, epic in titleAndEpics ]
epicHtml = " ".join([ entry(title, ticketLinkFun(epic)) for title, epic in titleAndEpics ])
memberHtml = " ".join([ entry(name, ticketLinkFun("", name)) for name in members ])
memberNotInEpicsHtml = " ".join([ entry(name, ticketLinkFun("", name, "", epics)) for name in members ])
notInEpicsHtml = entry("管理Epicに関連付けられてないチケット", ticketLinkFun("", "", "", epics))
html = """
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1, user-scalable=no">
</head>
<frameset rows="100px,*" frameborder=1 border=1>
<frame name="menu_frame" src="menu.html">
<frame name="main_frame" src="">
</frameset>
</html>
""".format(**vars())
filename = "tasks.html"
with open(filename, "w") as f:
print(html, file=f)
print("[ProjectManager.createTasksHtml] OK. Wrote", filename)
html = """
<html>
<head>
<meta charset="UTF-8">
</head>
<body style="margin: 0; font-size: 0.7em;">
Projects : {epicHtml}<br>
Members : {memberHtml}<br>
Members (管理Epicに関連付けられてないチケット): {memberNotInEpicsHtml}<br>
{notInEpicsHtml}<br>
</body>
</html>
""".format(**vars())
filename = "menu.html"
with open(filename, "w") as f:
print(html, file=f)
print("[ProjectManager.createTasksHtml] OK. Wrote", filename)
######################
def run(projects, people, status_master, ticketLinkFun, css="", project_list_header="", schedule_header="",
statusFilename="status.html",
tasksFilename="tasks.html",
additional_milestones=[],
getLabels=lambda i, p: []):
"""
people:
(Name, NameInTicketSystem)[]
ticketLinkFun:
epic : string, assignee : string, label : string -> url : string
milestones:
(datetime.date, label)[]
"""
codeNames = {}
for p in projects:
codeNames.setdefault(p.codeName, 0)
codeNames[p.codeName] += 1
bad = False
for k, v in codeNames.items():
if 1 < v:
print("[ERROR] Duplicate code name:", k, "(", v, "projects)")
bad = True
if bad:
print()
return
for i, p in enumerate(projects):
p.index = i
names = [ name for name, _ in people ]
if p.owner and p.owner not in names:
people.append((p.owner, ""))
people = list(set(people))
schedule = assign(projects, people)
projectsHtml, labels = genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels)
scheduleHtml = genScheduleHtml(projects, schedule, people, ticketLinkFun)
css = """
body {
margin: 0;
}
h1 {
font-size: 1.2em;
background-color: darkgreen;
color: white;
padding: 10px;
}
table {
border-spacing: 1;
margin-left: 20px;
}
table.projects tr.title td {
color: white;
padding: 5px;
}
table.projects tr.title {
background-color: darkgreen;
}
table.example tr td {
margin: 20px;
font-size: 0.9em;
}
table.schedule {
border-spacing: 0;
}
table.schedule tr td {
padding: 0;
}
#filters {
padding: 20px;
}
span.filter {
cursor: pointer;
padding: 20px;
border-radius: 40px;
margin: 10px;
}
""" + css
example = """
<table class="example"><tr>
<td style="background-color: white;">未着手</td>
<td style="background-color: {colorDoing};">作業中</td>
<td style="background-color: {colorDone};">完了</td>
</tr></table>
""".format(**globals())
projectLabels = json.dumps(labels)
labelsMaster = getLabels(0, None)
filters = json.dumps([ name for name, label in labelsMaster ])
filterLabels = json.dumps([ label for name, label in labelsMaster ])
vs = """
// Master data
var filters = {filters};
var filterLabels = {filterLabels};
var projectLabels = {projectLabels};
""".format(**vars())
ready = vs + """
// フィルタ状態: name -> bool
var filterEnabled = {};
// フィルタ状態を反映
function applyFilters() {
Object.keys(projectLabels).forEach(function(eid) {
var labels = projectLabels[eid];
// console.log(eid, labels);
var show = true;
// Check all enabled filters are in labels
for(var fi=0;fi<filters.length;fi++) {
if(filterEnabled[filters[fi]]) {
var lok = 0;
for(var li=0;li<labels.length;li++) {
if(labels[li] == filters[fi]) lok=1;
}
if(!lok) show=false;
}
}
// console.log(show);
$("#"+eid).toggle(show);
});
for(var i=0;i<filters.length;i++) {
$(".filter#"+filters[i]).css({"background-color": filterEnabled[filters[i]] ? "#aaffaa" : "#eeeeee"});
}
// console.log(filterEnabled);
}
$(document).ready(function(){
// フィルタボタンを作る
var html = "";
for(var i=0;i<filters.length;i++) {
var name = filters[i];
html += '<span class="filter" id="'+name+'">'+filterLabels[i]+'</span>';
}
$("#filters").html($("#filters").html() + html);
// フィルタの適用切り替え
$(".filter").on("click", function(event) {
var name = $(event.target).attr("id");
filterEnabled[name] = !filterEnabled[name];
applyFilters();
});
applyFilters();
});
"""
html = """
<html>
<head>
<meta charset="utf-8" />
<script type="text/javascript" src="jquery-3.2.1.min.js"></script>
<style>
{css}
</style>
<script>
{ready}
</script>
</head>
<body>
{project_list_header}
<br><br>
{example}
<br><br>
{projectsHtml}
<br><br>
{schedule_header}
{scheduleHtml}
<hr>
<a href="https://github.com/kojingharang/ManagerKit/blob/master/ProjectManager.py">Source</a>
</body>
</html>
""".format(**vars())
with open(statusFilename, "w") as f:
print(html, file=f)
print("[ProjectManager.run] OK. Wrote", statusFilename)
titleAndEpics = [(p.title, p.epic) for p in sorted(projects, key=lambda p: p.priority) if p.epic and not p.isDone()]
members = [ name for _, name in people if name]
createTasksHtml(titleAndEpics, members, ticketLinkFun)
|
[
"sys.exit",
"json.dumps",
"datetime.timedelta",
"datetime.date.fromordinal",
"datetime.date",
"copy.deepcopy",
"datetime.date.today"
] |
[((11197, 11218), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (11216, 11218), False, 'import datetime\n'), ((15604, 15622), 'json.dumps', 'json.dumps', (['labels'], {}), '(labels)\n', (15614, 15622), False, 'import json\n'), ((15669, 15719), 'json.dumps', 'json.dumps', (['[name for name, label in labelsMaster]'], {}), '([name for name, label in labelsMaster])\n', (15679, 15719), False, 'import json\n'), ((15738, 15789), 'json.dumps', 'json.dumps', (['[label for name, label in labelsMaster]'], {}), '([label for name, label in labelsMaster])\n', (15748, 15789), False, 'import json\n'), ((1457, 1478), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1476, 1478), False, 'import datetime\n'), ((11266, 11294), 'datetime.date.fromordinal', 'datetime.date.fromordinal', (['d'], {}), '(d)\n', (11291, 11294), False, 'import datetime\n'), ((1352, 1377), 'datetime.date', 'datetime.date', (['(3000)', '(1)', '(1)'], {}), '(3000, 1, 1)\n', (1365, 1377), False, 'import datetime\n'), ((1423, 1448), 'datetime.date', 'datetime.date', (['(3000)', '(1)', '(1)'], {}), '(3000, 1, 1)\n', (1436, 1448), False, 'import datetime\n'), ((9992, 10013), 'datetime.timedelta', 'datetime.timedelta', (['i'], {}), '(i)\n', (10010, 10013), False, 'import datetime\n'), ((2866, 2887), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2885, 2887), False, 'import datetime\n'), ((3507, 3528), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3526, 3528), False, 'import datetime\n'), ((3575, 3596), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3594, 3596), False, 'import datetime\n'), ((7833, 7849), 'copy.deepcopy', 'copy.deepcopy', (['p'], {}), '(p)\n', (7846, 7849), False, 'import copy\n'), ((8666, 8677), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8674, 8677), False, 'import sys\n'), ((7485, 7506), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (7504, 7506), False, 'import datetime\n'), ((8305, 8326), 
'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (8324, 8326), False, 'import datetime\n'), ((8329, 8365), 'datetime.timedelta', 'datetime.timedelta', (['(365 * 3 + i * 30)'], {}), '(365 * 3 + i * 30)\n', (8347, 8365), False, 'import datetime\n'), ((8390, 8412), 'datetime.timedelta', 'datetime.timedelta', (['(30)'], {}), '(30)\n', (8408, 8412), False, 'import datetime\n'), ((8531, 8553), 'datetime.timedelta', 'datetime.timedelta', (['(90)'], {}), '(90)\n', (8549, 8553), False, 'import datetime\n'), ((8895, 8916), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (8913, 8916), False, 'import datetime\n')]
|
# min (1/2) x'Q'x - q'x
from __future__ import print_function
import numpy as np
import aa
dim = 1000
mems = [5, 10, 20, 50, 100]
N = int(1e4)
np.random.seed(1234)
Q = np.random.randn(dim,dim)
Q = Q.T.dot(Q)
q = np.random.randn(dim)
x_0 = np.random.randn(dim)
x_star = np.linalg.solve(Q, q)
step = 0.0005
def f(x):
return 0.5 * x.T @ Q @ x - q.T @ x
f_star = f(x_star)
print('f^* = ', f_star)
print('No acceleration')
x = x_0.copy()
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
for mem in mems:
print('Type-I acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, True, eta=1e-8)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
print('Type-II acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, False, eta=1e-10)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
|
[
"numpy.copy",
"numpy.linalg.solve",
"aa.AndersonAccelerator",
"numpy.random.seed",
"numpy.random.randn"
] |
[((146, 166), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (160, 166), True, 'import numpy as np\n'), ((172, 197), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (187, 197), True, 'import numpy as np\n'), ((216, 236), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (231, 236), True, 'import numpy as np\n'), ((243, 263), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (258, 263), True, 'import numpy as np\n'), ((273, 294), 'numpy.linalg.solve', 'np.linalg.solve', (['Q', 'q'], {}), '(Q, q)\n', (288, 294), True, 'import numpy as np\n'), ((475, 485), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (482, 485), True, 'import numpy as np\n'), ((679, 728), 'aa.AndersonAccelerator', 'aa.AndersonAccelerator', (['dim', 'mem', '(True)'], {'eta': '(1e-08)'}), '(dim, mem, True, eta=1e-08)\n', (701, 728), False, 'import aa\n'), ((988, 1038), 'aa.AndersonAccelerator', 'aa.AndersonAccelerator', (['dim', 'mem', '(False)'], {'eta': '(1e-10)'}), '(dim, mem, False, eta=1e-10)\n', (1010, 1038), False, 'import aa\n'), ((764, 774), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (771, 774), True, 'import numpy as np\n'), ((1075, 1085), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (1082, 1085), True, 'import numpy as np\n')]
|
from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader
from paper_1.utils import read_parameter_file, create_experiment_directory
from paper_1.evaluation.eval_utils import init_metrics_object
from paper_1.baseline.main import train as baseline_train
from paper_1.model.model_utils import initialize_model
from torch.utils.tensorboard import SummaryWriter
from train import select_splitted_pseudo_labels
from os.path import dirname, abspath
from torch.optim import Adam
import pandas as pd
import numpy as np
import random
import torch
import os
def main(main_params: dict, data_params: dict, metric_params: dict, model_params: dict,
parent_dir, source_domain: str, target_domain: str):
# clear the cuda memory
torch.cuda.empty_cache()
# get the current validation fold
val_fold = data_params['data']['val_fold']
# read the train params
num_train_iter = main_params['num_train_iter']
experiment_id = main_params['experiment_id']
num_epochs = main_params['num_epochs']
quantiles = main_params['quantiles']
model_dir = main_params['model_dir']
base_dir = main_params['base_dir']
# get the data loader parameters
balance_keys = data_params['data_loader']['balance_keys']
batch_size = data_params['data_loader']['batch_size']
# load the data
data_train_src, data_train_tar = load_train_data(data_params, source_domain, target_domain)
data_list_val = load_val_data(data_params)
num_val_iter_list = [df.shape[0] for df in data_list_val]
validation_domains = data_params['data']['validation']['validation_domains']
val_loader_list = [sequential_data_loader(data_frame) for data_frame in data_list_val]
# load a pre trained model
model_path = model_dir + source_domain + '/' + 'None' + '/' + str(val_fold) + '/f1_best.pt'
# load a previously stored model, which is the init point for curriculum labeling
pretrained_model = torch.load(model_path)
mapping = metric_params['inverse_class_mapping']
# initialize the metrics object
metric_object = init_metrics_object(metric_params)
# create a directory for the current experiments
file_names_params = os.listdir(parent_dir + '/parameters/')
file_names_params = [parent_dir + '/parameters/' + x for x in file_names_params]
file_names_baseline = os.listdir(parent_dir + '/baseline/')
file_names_baseline = [parent_dir + '/baseline/' + x for x in file_names_baseline]
file_names = []
file_names.extend(file_names_params)
file_names.extend(file_names_baseline)
file_names = [x for x in file_names if not os.path.isdir(x)]
val_fold = data_params['data']['val_fold']
exp_base_dir = create_experiment_directory(base_dir, source_domain, target_domain, val_fold, file_names, experiment_id)
for quantile in quantiles:
exp_dir = exp_base_dir + str(quantile) + '/'
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# create a tensorboard writer
writer = SummaryWriter(exp_dir)
# create data loader with current pseudo labels
data_frame_pseudo = select_splitted_pseudo_labels(pretrained_model, data_train_tar, quantile, mapping)
# delete the previously trained model, as it is no longer in use
del pretrained_model
# create the train data loader
data_train = pd.concat([data_train_src, data_frame_pseudo])
train_loader = random_data_loader(data_train, balance_keys, batch_size)
# initialize a new model to train it from scratch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = initialize_model(model_params, parent_dir, device)
model.cuda()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# create an optimizer for the model
optimizer = Adam(model.parameters(), lr=4e-5, betas=(0.9, 0.999))
# train the newly created model from scratch
baseline_train(model, optimizer, metric_object, num_train_iter, metric_params, train_loader, val_loader_list,
source_domain, writer, num_val_iter_list, validation_domains, num_epochs, exp_dir)
# update the pretrained model
pretrained_model = model
del model
del optimizer
if __name__ == '__main__':
# set the seed for reproducability
seed_value = 0
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
# get the current and parent directory
current_file = abspath(__file__)
current_dir = dirname(current_file)
parent_dir = dirname(current_dir)
metric_param_file = parent_dir + '/parameters/metric_params.yaml'
model_param_file = parent_dir + '/parameters/model_params.yaml'
data_param_file = parent_dir + '/parameters/data_params.yaml'
main_param_file = current_dir + '/main_params.yaml'
# load the parameters
metric_params = read_parameter_file(metric_param_file)
model_params = read_parameter_file(model_param_file)
main_params = read_parameter_file(main_param_file)
data_params = read_parameter_file(data_param_file)
# define the domains, on which the models should be trained
source_domains = ['Race', 'Religion', 'Sexual Orientation']
target_domains = ['Race', 'Religion', 'Sexual Orientation']
for source_domain in source_domains:
for target_domain in target_domains:
if source_domain != target_domain:
main(main_params, data_params, metric_params, model_params, parent_dir, source_domain, target_domain)
|
[
"paper_1.data.data_loader.sequential_data_loader",
"torch.cuda.is_available",
"paper_1.data.data_loader.load_val_data",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"os.listdir",
"paper_1.baseline.main.train",
"paper_1.utils.read_parameter_file",
"os.path.isdir",
"pandas.concat",
"numpy.random.seed",
"paper_1.utils.create_experiment_directory",
"paper_1.model.model_utils.initialize_model",
"os.path.dirname",
"paper_1.data.data_loader.random_data_loader",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"paper_1.evaluation.eval_utils.init_metrics_object",
"random.seed",
"os.path.abspath",
"paper_1.data.data_loader.load_train_data",
"train.select_splitted_pseudo_labels"
] |
[((787, 811), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (809, 811), False, 'import torch\n'), ((1407, 1465), 'paper_1.data.data_loader.load_train_data', 'load_train_data', (['data_params', 'source_domain', 'target_domain'], {}), '(data_params, source_domain, target_domain)\n', (1422, 1465), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((1486, 1512), 'paper_1.data.data_loader.load_val_data', 'load_val_data', (['data_params'], {}), '(data_params)\n', (1499, 1512), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((1985, 2007), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1995, 2007), False, 'import torch\n'), ((2118, 2152), 'paper_1.evaluation.eval_utils.init_metrics_object', 'init_metrics_object', (['metric_params'], {}), '(metric_params)\n', (2137, 2152), False, 'from paper_1.evaluation.eval_utils import init_metrics_object\n'), ((2231, 2270), 'os.listdir', 'os.listdir', (["(parent_dir + '/parameters/')"], {}), "(parent_dir + '/parameters/')\n", (2241, 2270), False, 'import os\n'), ((2382, 2419), 'os.listdir', 'os.listdir', (["(parent_dir + '/baseline/')"], {}), "(parent_dir + '/baseline/')\n", (2392, 2419), False, 'import os\n'), ((2744, 2852), 'paper_1.utils.create_experiment_directory', 'create_experiment_directory', (['base_dir', 'source_domain', 'target_domain', 'val_fold', 'file_names', 'experiment_id'], {}), '(base_dir, source_domain, target_domain,\n val_fold, file_names, experiment_id)\n', (2771, 2852), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((4440, 4463), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (4451, 4463), False, 'import random\n'), ((4468, 4494), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (4482, 4494), True, 'import numpy as np\n'), 
((4499, 4528), 'torch.manual_seed', 'torch.manual_seed', (['seed_value'], {}), '(seed_value)\n', (4516, 4528), False, 'import torch\n'), ((4533, 4571), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_value'], {}), '(seed_value)\n', (4559, 4571), False, 'import torch\n'), ((4635, 4652), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (4642, 4652), False, 'from os.path import dirname, abspath\n'), ((4671, 4692), 'os.path.dirname', 'dirname', (['current_file'], {}), '(current_file)\n', (4678, 4692), False, 'from os.path import dirname, abspath\n'), ((4710, 4730), 'os.path.dirname', 'dirname', (['current_dir'], {}), '(current_dir)\n', (4717, 4730), False, 'from os.path import dirname, abspath\n'), ((5038, 5076), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['metric_param_file'], {}), '(metric_param_file)\n', (5057, 5076), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5096, 5133), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['model_param_file'], {}), '(model_param_file)\n', (5115, 5133), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5152, 5188), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['main_param_file'], {}), '(main_param_file)\n', (5171, 5188), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5207, 5243), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['data_param_file'], {}), '(data_param_file)\n', (5226, 5243), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((1679, 1713), 'paper_1.data.data_loader.sequential_data_loader', 'sequential_data_loader', (['data_frame'], {}), '(data_frame)\n', (1701, 1713), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((3064, 3086), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', 
(['exp_dir'], {}), '(exp_dir)\n', (3077, 3086), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3172, 3258), 'train.select_splitted_pseudo_labels', 'select_splitted_pseudo_labels', (['pretrained_model', 'data_train_tar', 'quantile', 'mapping'], {}), '(pretrained_model, data_train_tar, quantile,\n mapping)\n', (3201, 3258), False, 'from train import select_splitted_pseudo_labels\n'), ((3419, 3465), 'pandas.concat', 'pd.concat', (['[data_train_src, data_frame_pseudo]'], {}), '([data_train_src, data_frame_pseudo])\n', (3428, 3465), True, 'import pandas as pd\n'), ((3489, 3545), 'paper_1.data.data_loader.random_data_loader', 'random_data_loader', (['data_train', 'balance_keys', 'batch_size'], {}), '(data_train, balance_keys, batch_size)\n', (3507, 3545), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((3685, 3735), 'paper_1.model.model_utils.initialize_model', 'initialize_model', (['model_params', 'parent_dir', 'device'], {}), '(model_params, parent_dir, device)\n', (3701, 3735), False, 'from paper_1.model.model_utils import initialize_model\n'), ((4027, 4227), 'paper_1.baseline.main.train', 'baseline_train', (['model', 'optimizer', 'metric_object', 'num_train_iter', 'metric_params', 'train_loader', 'val_loader_list', 'source_domain', 'writer', 'num_val_iter_list', 'validation_domains', 'num_epochs', 'exp_dir'], {}), '(model, optimizer, metric_object, num_train_iter,\n metric_params, train_loader, val_loader_list, source_domain, writer,\n num_val_iter_list, validation_domains, num_epochs, exp_dir)\n', (4041, 4227), True, 'from paper_1.baseline.main import train as baseline_train\n'), ((2950, 2973), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (2964, 2973), False, 'import os\n'), ((2987, 3007), 'os.makedirs', 'os.makedirs', (['exp_dir'], {}), '(exp_dir)\n', (2998, 3007), False, 'import os\n'), ((3632, 3657), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (3655, 3657), False, 'import torch\n'), ((3784, 3809), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3807, 3809), False, 'import torch\n'), ((2659, 2675), 'os.path.isdir', 'os.path.isdir', (['x'], {}), '(x)\n', (2672, 2675), False, 'import os\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Ericsson AB and others. All rights reserved
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import sys
import threading
import logging
import urllib3
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
import sfc.lib.config as sfc_config
from sfc.tests.functest import sfc_parent_function
""" logging configuration """
# Module-wide logger for this test scenario.
logger = logging.getLogger(__name__)
# Shared SFC test configuration (directories, credentials, etc.).
COMMON_CONFIG = sfc_config.CommonConfig()
# Role labels for the two instances used by the scenario.
CLIENT = "client"
SERVER = "server"
# Helper object wrapping the OpenStack SFC API calls.
openstack_sfc = os_sfc_utils.OpenStackSFC()
class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
    """One client and one server are created using nova.
    The server will be running a web server on port 80.
    Then one Service Function (SF) is created using Tacker.
    This service function will be running a firewall that
    blocks the traffic in a specific port.
    A symmetric service chain routing the traffic through
    this SF will be created as well.
    The purpose is to check different HTTP traffic
    combinations using firewall.
    """
    def run(self):
        """Execute the symmetric-chain scenario and return its results."""
        logger.info("The test scenario %s is starting", __name__)
        # Register the VNF descriptor and boot the firewall VNF via Tacker.
        self.create_custom_vnfd(self.testcase_config.test_vnfd, 'test-vnfd1')
        self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
        if self.vnf_id is None:
            logger.error('ERROR while booting VNF')
            sys.exit(1)
        # Create the symmetric forwarding graph descriptor from its TOSCA file.
        tosca_file = os.path.join(
            COMMON_CONFIG.sfc_test_dir,
            COMMON_CONFIG.vnffgd_dir,
            self.testcase_config.test_vnffgd)
        os_sfc_utils.create_vnffgd(
            self.tacker_client,
            tosca_file=tosca_file,
            vnffgd_name='test-vnffgd')
        client_port = openstack_sfc.get_client_port(
            self.client_instance,
            self.client_creator)
        server_port = openstack_sfc.get_client_port(
            self.server_instance,
            self.server_creator)
        # /32 prefix so the VNFFG classifier matches only the server address.
        server_ip_prefix = self.server_ip + '/32'
        default_param_file = os.path.join(
            COMMON_CONFIG.sfc_test_dir,
            COMMON_CONFIG.vnfd_dir,
            COMMON_CONFIG.vnfd_default_params_file)
        os_sfc_utils.create_vnffg_with_param_file(
            self.tacker_client,
            'test-vnffgd',
            'test-vnffg',
            default_param_file,
            client_port.id,
            server_port.id,
            server_ip_prefix)
        # Start measuring the time it takes to implement the classification
        # rules
        t1 = threading.Thread(target=wait_for_classification_rules,
                              args=(self.ovs_logger, self.compute_nodes,
                                    self.server_instance.compute_host,
                                    server_port,
                                    self.client_instance.compute_host,
                                    client_port, self.odl_ip,
                                    self.odl_port,))
        try:
            t1.start()
        except Exception as e:
            logger.error("Unable to start the thread that counts time %s" % e)
        logger.info("Assigning floating IPs to instances")
        self.assign_floating_ip_client_server()
        vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
                                        vnf_id=self.vnf_id)
        self.assign_floating_ip_sfs(vnf_ip)
        self.check_floating_ips()
        self.start_services_in_vm()
        # Bridge both interfaces of the SF with nothing blocked: HTTP is
        # expected to pass in both directions.
        self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
        self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
        logger.info("Wait for ODL to update the classification rules in OVS")
        t1.join()
        results = self.present_results_allowed_port_http(self.testcase_config)
        # Block port 80 in the uplink direction only.
        self.vxlan_blocking_stop(self.fips_sfs[0])
        self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', "80")
        self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
        results = self.present_results_blocked_port_http(self.testcase_config,
                                                        'HTTP uplink')
        # Block the client's source port in the downlink direction only.
        self.vxlan_blocking_stop(self.fips_sfs[0])
        self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
        self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0',
                                   self.testcase_config.source_port)
        results = self.present_results_blocked_port_http(self.testcase_config,
                                                        'HTTP downlink')
        # Remove every blocking rule again: traffic must flow as at the start.
        self.vxlan_blocking_stop(self.fips_sfs[0])
        self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
        self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
        results = self.present_results_allowed_http()
        # NOTE(review): both branches return the same value; __name__ only
        # differs depending on how the module was invoked (script vs import).
        if __name__ == '__main__':
            return results.compile_summary(), self.creators
        if __name__ == 'sfc.tests.functest.sfc_symmetric_chain':
            return results.compile_summary(), self.creators
    def get_creators(self):
        """Return the creators info, specially in case the info is not
        returned due to an exception.
        :return: creators
        """
        return self.creators
def wait_for_classification_rules(ovs_logger, compute_nodes,
                                  server_compute, server_port,
                                  client_compute, client_port,
                                  odl_ip, odl_port):
    """Block until ODL has pushed the classification rules into OVS.

    When client and server share a compute node a single combined check
    covering both ports is enough; otherwise each compute node is
    verified separately (server first, then client).
    """
    if client_compute == server_compute:
        odl_utils.wait_for_classification_rules(
            ovs_logger,
            compute_nodes,
            odl_ip,
            odl_port,
            client_compute,
            [server_port, client_port])
        return
    # Client and server live on different hypervisors: check each one.
    for compute_host, ports in ((server_compute, server_port),
                                (client_compute, client_port)):
        odl_utils.wait_for_classification_rules(
            ovs_logger,
            compute_nodes,
            odl_ip,
            odl_port,
            compute_host,
            ports)
if __name__ == '__main__':
    # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
    urllib3.disable_warnings()
    # Load the scenario-specific configuration and declare the environment
    # this test case supports.
    TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
    supported_installers = ['fuel', 'apex', 'osa', 'compass']
    vnf_names = ['testVNF1']
    # Instantiate and execute the test case.
    test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
                                  vnf_names)
    test_run.run()
|
[
"logging.getLogger",
"sfc.lib.openstack_utils.create_vnffg_with_param_file",
"sfc.lib.openstack_utils.get_vnf_ip",
"sfc.lib.odl_utils.wait_for_classification_rules",
"os.path.join",
"sfc.lib.config.CommonConfig",
"urllib3.disable_warnings",
"sfc.lib.openstack_utils.OpenStackSFC",
"sys.exit",
"threading.Thread",
"sfc.lib.openstack_utils.create_vnffgd",
"sfc.lib.config.TestcaseConfig"
] |
[((598, 625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (615, 625), False, 'import logging\n'), ((642, 667), 'sfc.lib.config.CommonConfig', 'sfc_config.CommonConfig', ([], {}), '()\n', (665, 667), True, 'import sfc.lib.config as sfc_config\n'), ((720, 747), 'sfc.lib.openstack_utils.OpenStackSFC', 'os_sfc_utils.OpenStackSFC', ([], {}), '()\n', (745, 747), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((6467, 6493), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (6491, 6493), False, 'import urllib3\n'), ((6517, 6565), 'sfc.lib.config.TestcaseConfig', 'sfc_config.TestcaseConfig', (['"""sfc_symmetric_chain"""'], {}), "('sfc_symmetric_chain')\n", (6542, 6565), True, 'import sfc.lib.config as sfc_config\n'), ((1642, 1747), 'os.path.join', 'os.path.join', (['COMMON_CONFIG.sfc_test_dir', 'COMMON_CONFIG.vnffgd_dir', 'self.testcase_config.test_vnffgd'], {}), '(COMMON_CONFIG.sfc_test_dir, COMMON_CONFIG.vnffgd_dir, self.\n testcase_config.test_vnffgd)\n', (1654, 1747), False, 'import os\n'), ((1788, 1888), 'sfc.lib.openstack_utils.create_vnffgd', 'os_sfc_utils.create_vnffgd', (['self.tacker_client'], {'tosca_file': 'tosca_file', 'vnffgd_name': '"""test-vnffgd"""'}), "(self.tacker_client, tosca_file=tosca_file,\n vnffgd_name='test-vnffgd')\n", (1814, 1888), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((2244, 2352), 'os.path.join', 'os.path.join', (['COMMON_CONFIG.sfc_test_dir', 'COMMON_CONFIG.vnfd_dir', 'COMMON_CONFIG.vnfd_default_params_file'], {}), '(COMMON_CONFIG.sfc_test_dir, COMMON_CONFIG.vnfd_dir,\n COMMON_CONFIG.vnfd_default_params_file)\n', (2256, 2352), False, 'import os\n'), ((2395, 2563), 'sfc.lib.openstack_utils.create_vnffg_with_param_file', 'os_sfc_utils.create_vnffg_with_param_file', (['self.tacker_client', '"""test-vnffgd"""', '"""test-vnffg"""', 'default_param_file', 'client_port.id', 'server_port.id', 'server_ip_prefix'], {}), "(self.tacker_client, 'test-vnffgd',\n 
'test-vnffg', default_param_file, client_port.id, server_port.id,\n server_ip_prefix)\n", (2436, 2563), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((2747, 2983), 'threading.Thread', 'threading.Thread', ([], {'target': 'wait_for_classification_rules', 'args': '(self.ovs_logger, self.compute_nodes, self.server_instance.compute_host,\n server_port, self.client_instance.compute_host, client_port, self.\n odl_ip, self.odl_port)'}), '(target=wait_for_classification_rules, args=(self.\n ovs_logger, self.compute_nodes, self.server_instance.compute_host,\n server_port, self.client_instance.compute_host, client_port, self.\n odl_ip, self.odl_port))\n', (2763, 2983), False, 'import threading\n'), ((3454, 3517), 'sfc.lib.openstack_utils.get_vnf_ip', 'os_sfc_utils.get_vnf_ip', (['self.tacker_client'], {'vnf_id': 'self.vnf_id'}), '(self.tacker_client, vnf_id=self.vnf_id)\n', (3477, 3517), True, 'import sfc.lib.openstack_utils as os_sfc_utils\n'), ((5751, 5883), 'sfc.lib.odl_utils.wait_for_classification_rules', 'odl_utils.wait_for_classification_rules', (['ovs_logger', 'compute_nodes', 'odl_ip', 'odl_port', 'client_compute', '[server_port, client_port]'], {}), '(ovs_logger, compute_nodes, odl_ip,\n odl_port, client_compute, [server_port, client_port])\n', (5790, 5883), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((5971, 6088), 'sfc.lib.odl_utils.wait_for_classification_rules', 'odl_utils.wait_for_classification_rules', (['ovs_logger', 'compute_nodes', 'odl_ip', 'odl_port', 'server_compute', 'server_port'], {}), '(ovs_logger, compute_nodes, odl_ip,\n odl_port, server_compute, server_port)\n', (6010, 6088), True, 'import sfc.lib.odl_utils as odl_utils\n'), ((6166, 6283), 'sfc.lib.odl_utils.wait_for_classification_rules', 'odl_utils.wait_for_classification_rules', (['ovs_logger', 'compute_nodes', 'odl_ip', 'odl_port', 'client_compute', 'client_port'], {}), '(ovs_logger, compute_nodes, odl_ip,\n odl_port, client_compute, client_port)\n', (6205, 6283), True, 
'import sfc.lib.odl_utils as odl_utils\n'), ((1608, 1619), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1616, 1619), False, 'import sys\n')]
|
"""A collection of utility function, shared across modules."""
import collections
import datetime
import gzip as gz
import logging
import os
import re
import shutil
import subprocess
from argparse import ArgumentTypeError
from copy import deepcopy
from logging import Logger
from typing import (Any, Callable, Dict, Generator, Iterable, List, Mapping,
Optional, Sequence, Tuple, TypeVar, Union, ValuesView,
cast)
from ..config import Config, KitData
from .barcoded_filename import BarcodedFilename
from .exceptions import AnnotationError, DataError
def get_current() -> str:
    """Get the current date in standard HaTSPiL format (YYYY_MM_DD)."""
    now = datetime.date.today()
    return "{:04d}_{:02d}_{:02d}".format(now.year, now.month, now.day)
def get_overridable_current_date(parameters: Dict[str, Any]) -> str:
    """Return the date to use for this run.

    When ``parameters["use_date"]`` is set it wins; otherwise today's
    date (``get_current()``) is used.
    """
    overridden = parameters["use_date"]
    if overridden is None:
        return get_current()
    assert isinstance(overridden, str)
    return overridden
def run_and_log(command: str, logger: Logger) -> int:
    """Run *command* through a shell, piping its output into *logger*.

    Stdout lines are logged at INFO level, stderr lines at WARNING.

    Args:
        command: the shell command to run.
        logger: the logger receiving the command's output.

    Returns:
        int: the exit status of the process.
    """
    logger.info("Running command: %s", command)
    with subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True,
        bufsize=1,
    ) as process:
        out, err = process.communicate()
        for stream, log in ((out, logger.info), (err, logger.warning)):
            for line in stream.split("\n"):
                if line:
                    log(line)
        return process.wait()
def get_sample_filenames(
    obj: Union[Sequence[str], Mapping[str, List[str]], str],
    split_by_organism: bool = False,
) -> Union[List[str], Mapping[str, List[str]]]:
    """Reshape a set of filenames, optionally grouping them by organism.

    Args:
        obj: a single filename, a list of filenames, or a dict mapping
            organism codes (i.e. "hg19") to lists of filenames.
        split_by_organism: whether the output should be keyed by
            organism (when more than one organism is involved).

    Returns:
        Either a flat list of filenames or an organism -> filenames
        dict, mirroring the original semantics:

        * list input, splitting requested, more than one element: group
          by organism (via ``get_organism_from_filename``); return the
          dict when several organisms appear, else a plain list.
        * list input otherwise: a shallow copy of the list.
        * dict input, splitting requested, more than one entry: a deep
          copy of the dict.
        * dict input otherwise: all contained filenames as one list.
        * string input: ``{organism: [obj]}`` when splitting is
          requested and the organism can be derived, else ``[obj]``.
    """
    if isinstance(obj, list):
        if not split_by_organism or len(obj) <= 1:
            return list(obj)
        by_organism: Dict[str, List[str]] = {}
        for filename in obj:
            organism = get_organism_from_filename(filename) or ""
            by_organism.setdefault(organism, []).append(filename)
        if len(by_organism) > 1:
            return by_organism
        return list(next(iter(by_organism.values())))
    if isinstance(obj, dict):
        if split_by_organism and len(obj) > 1:
            return deepcopy(obj)
        values = obj.values()
        if not values:
            return []
        first = next(iter(values))
        if isinstance(first, list):
            return [filename for group in values for filename in group]
        if isinstance(first, str):
            return list(cast(ValuesView[str], values))
        raise DataError("unexpected filenames type")
    assert isinstance(obj, str)
    if split_by_organism:
        organism = get_organism_from_filename(obj)
        if organism:
            return {organism: [obj]}
    return [obj]
def get_organism_from_filename(filename: str) -> Optional[str]:
    """Extract the organism field from a barcoded filename.

    Returns None when the basename cannot be parsed as a barcode.
    """
    try:
        return BarcodedFilename(os.path.basename(filename)).organism
    except Exception:
        return None
def get_samples_by_organism(
    obj: Union[List[str], Dict[str, List[str]], str], default_organism: str
) -> Dict[str, List[str]]:
    """Normalize *obj* into an organism -> samples dict.

    Args:
        obj: the samples being collected.
        default_organism: the key used when *obj* carries no organism
            information (list or single-string input).

    Returns:
        A copy of *obj* when it already is a dict; otherwise a dict
        mapping *default_organism* to the sample list (a single string
        becomes a one-element list).
    """
    if isinstance(obj, dict):
        return dict(obj)
    if isinstance(obj, list):
        return {default_organism: obj}
    return {default_organism: [obj]}
def get_genome_ref_index_by_organism(config: Config, organism: str) -> Tuple[str, str]:
    """Return the (reference, index) config entries for *organism*.

    Raises:
        DataError: when *organism* is not one of hg19/hg38/mm9/mm10.
    """
    attribute_names = {
        "hg19": ("hg19_ref", "hg19_index"),
        "hg38": ("hg38_ref", "hg38_index"),
        "mm9": ("mm9_ref", "mm9_index"),
        "mm10": ("mm10_ref", "mm10_index"),
    }
    if organism not in attribute_names:
        raise DataError("Invalid organism")
    ref_attr, index_attr = attribute_names[organism]
    return (getattr(config, ref_attr), getattr(config, index_attr))
def get_dbsnp_by_organism(config: Config, organism: str) -> str:
    """Return the dbSNP database filename configured for *organism*.

    Raises:
        DataError: when *organism* is not hg19 or hg38.
    """
    databases = {"hg19": "dbsnp_hg19", "hg38": "dbsnp_hg38"}
    if organism not in databases:
        raise DataError("Invalid organism")
    return getattr(config, databases[organism])
def get_cosmic_by_organism(config: Config, organism: str) -> str:
    """Return the COSMIC database filename configured for *organism*.

    Raises:
        DataError: when *organism* is not hg19 or hg38.
    """
    databases = {"hg19": "cosmic_hg19", "hg38": "cosmic_hg38"}
    if organism not in databases:
        raise DataError("Invalid organism")
    return getattr(config, databases[organism])
def get_picard_max_records_string(max_records: str) -> str:
    """Build Picard's ``MAX_RECORDS_IN_RAM`` argument string.

    Returns an empty string when *max_records* is None or empty.
    """
    if not max_records:
        return ""
    return " MAX_RECORDS_IN_RAM=%d" % int(max_records)
def find_fastqs_by_organism(
    sample: str, fastq_dir: str, default_organism: str
) -> Dict[str, List[Tuple[str, int]]]:
    """Search for FASTQ files and group them by organism.

    Find all the .fastq files inside `fastq_dir` that start with
    `sample` and have a valid suffix. Group all the files by organism.

    Args:
        sample: the barcoded sample as string.
        fastq_dir: the directory where the fastq files must be searched.
        default_organism: the organism to use in case the organism field
                          in a filename is absent.

    Returns:
        A dict that maps an organism to a list of
        (filename, read_index) pairs.
    """
    # re.escape prevents regex metacharacters in the sample name (such as
    # ".") from matching arbitrary characters in other samples' files.
    re_fastq_filename = re.compile(
        r"^%s(?:\.((?:hg|mm)\d+))?\.R([12])\.fastq(?:\.gz)?$" % re.escape(sample),
        re.I,
    )
    fastqs: Dict[str, List[Tuple[str, int]]] = {}
    for filename in os.listdir(fastq_dir):
        match = re_fastq_filename.match(filename)
        if not match:
            continue
        organism = match.group(1)
        read_index = int(match.group(2))
        if not organism:
            organism = default_organism
        fastqs.setdefault(organism, []).append((filename, read_index))
    return fastqs
def gzip(filename: str) -> None:
    """Compress *filename* to ``<filename>.gz`` and remove the original."""
    target = filename + ".gz"
    with open(filename, "rb") as source_fd:
        with gz.open(target, "wb", compresslevel=6) as target_fd:
            shutil.copyfileobj(source_fd, target_fd)
    os.unlink(filename)
def gunzip(filename: str) -> None:
    """Decompress the GZ file *filename* and remove the compressed file.

    The output name is *filename* without its final three characters
    (the ".gz" suffix).
    """
    target = filename[:-3]
    with gz.open(filename, "rb") as source_fd:
        with open(target, "wb") as target_fd:
            shutil.copyfileobj(source_fd, target_fd)
    os.unlink(filename)
def check_gz(filename: str) -> bool:
    """Return True when *filename* decompresses cleanly as GZ data.

    A missing file still raises (the open happens outside the guard),
    matching the original behavior.
    """
    step = 2 ** 20
    with gz.open(filename, "rb") as fd:
        try:
            # Read one byte then skip ahead in big steps; gzip seeks
            # decompress the skipped region, so corruption raises here.
            while fd.read(1):
                fd.seek(step, os.SEEK_CUR)
        except Exception:
            return False
    return True
def parsed_date(raw_date: str) -> str:
    """Parse a 'Y_M_D' date and return it zero-padded as YYYY_MM_DD.

    Raises:
        ArgumentTypeError: when *raw_date* does not match the format.
    """
    try:
        moment = datetime.datetime.strptime(raw_date, "%Y_%m_%d")
    except ValueError:
        raise ArgumentTypeError("expected string in format YYYY_MM_DD")
    return "{:04d}_{:02d}_{:02d}".format(moment.year, moment.month, moment.day)
def get_human_annotation(config: Config) -> str:
    """Return the newest human genome annotation enabled in *config*.

    Raises:
        AnnotationError: when neither hg38 nor hg19 is enabled.
    """
    for flag, annotation in (("use_hg38", "hg38"), ("use_hg19", "hg19")):
        if getattr(config, flag):
            return annotation
    raise AnnotationError("no available human annotation in config")
def get_mouse_annotation(config: Config) -> str:
    """Return the newest murine genome annotation enabled in *config*.

    Raises:
        AnnotationError: when neither mm10 nor mm9 is enabled.
    """
    for flag, annotation in (("use_mm10", "mm10"), ("use_mm9", "mm9")):
        if getattr(config, flag):
            return annotation
    raise AnnotationError("no available mouse annotation in config")
reFloat = re.compile(r"^(\d+\.\d*|\.\d+)$")
reInt = re.compile(r"^(\d+)$")


def parse_as_number(s: str) -> Union[int, float, str]:
    """Parse *s* as a number when it looks like one.

    Returns a float for float-looking strings, an int for digit-only
    strings, and *s* unchanged otherwise (negative numbers are not
    recognized and come back as strings).
    """
    if reFloat.match(s):
        return float(s)
    if reInt.match(s):
        return int(s)
    return s
T = TypeVar("T")
U = TypeVar("U")


def flatten(iterable: Iterable[Union[Iterable[T], Any]]) -> Generator[Any, None, None]:
    """Yield the elements of *iterable*, recursively flattening nested iterables.

    Strings are treated as scalars rather than as iterables of characters.
    """
    for element in iterable:
        # `collections.Iterable` was removed in Python 3.10; the
        # `typing.Iterable` alias (already imported here) supports
        # isinstance checks on every supported version.
        if isinstance(element, Iterable) and not isinstance(element, str):
            yield from flatten(element)
        else:
            yield element


def rfind_if(iterable: Sequence[T], fun: Callable[[T], bool]) -> Optional[int]:
    """Reverse find an object in an iterable that satisfies `fun`.

    Args:
        iterable: an iterable object.
        fun: a function that returns `True` when the item is found.

    Returns:
        ``len(iterable) - k`` where ``k`` is the position of the first
        match in the reversed iterable, or None when nothing matches.
        NOTE(review): for a match at forward index ``i`` this yields
        ``i + 1`` — one past the match — confirm callers expect that.
    """
    for index, element in enumerate(reversed(iterable)):
        if fun(element):
            return len(iterable) - index
    return None


def argmin(
    iterable: Iterable[T], key: Optional[Callable[[T], U]] = None
) -> Optional[int]:
    """Like `min`, but return the index of the smallest element.

    Returns None for an empty iterable (previously `min` raised
    ValueError there, which made the None-check below unreachable).
    """
    best = min(
        enumerate(iterable),
        key=lambda pair: key(pair[1]) if key else pair[1],
        default=None,
    )
    if best is not None:
        return best[0]
    else:
        return None
def create_logger(
    logger_name: str, handler: Optional[logging.FileHandler] = None
) -> Logger:
    """Return an INFO-level named logger, optionally attaching *handler*."""
    new_logger = logging.getLogger(logger_name)
    new_logger.setLevel(logging.INFO)
    if handler is not None:
        new_logger.addHandler(handler)
    return new_logger
def get_kit_from_barcoded(
    config: Config, barcoded: BarcodedFilename
) -> Optional[KitData]:
    """Look up the kit matching *barcoded*'s (kit, analyte) pair in *config*."""
    kit = barcoded.kit
    analyte = barcoded.analyte
    assert kit is not None
    assert analyte is not None
    return config.kits.get((kit, analyte))
|
[
"logging.getLogger",
"os.listdir",
"shutil.copyfileobj",
"re.compile",
"gzip.open",
"subprocess.Popen",
"datetime.datetime.strptime",
"argparse.ArgumentTypeError",
"os.unlink",
"os.path.basename",
"copy.deepcopy",
"datetime.date.today",
"typing.cast",
"typing.TypeVar"
] |
[((12006, 12043), 're.compile', 're.compile', (['"""^(\\\\d+\\\\.\\\\d*|\\\\.\\\\d+)$"""'], {}), "('^(\\\\d+\\\\.\\\\d*|\\\\.\\\\d+)$')\n", (12016, 12043), False, 'import re\n'), ((12048, 12070), 're.compile', 're.compile', (['"""^(\\\\d+)$"""'], {}), "('^(\\\\d+)$')\n", (12058, 12070), False, 'import re\n'), ((12449, 12461), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (12456, 12461), False, 'from typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ValuesView, cast\n'), ((12466, 12478), 'typing.TypeVar', 'TypeVar', (['"""U"""'], {}), "('U')\n", (12473, 12478), False, 'from typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ValuesView, cast\n'), ((689, 710), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (708, 710), False, 'import datetime\n'), ((9435, 9523), 're.compile', 're.compile', (["('^%s(?:\\\\.((?:hg|mm)\\\\d+))?\\\\.R([12])\\\\.fastq(?:\\\\.gz)?$' % sample)", 're.I'], {}), "('^%s(?:\\\\.((?:hg|mm)\\\\d+))?\\\\.R([12])\\\\.fastq(?:\\\\.gz)?$' %\n sample, re.I)\n", (9445, 9523), False, 'import re\n'), ((10462, 10481), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (10471, 10481), False, 'import os\n'), ((10727, 10746), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (10736, 10746), False, 'import os\n'), ((13955, 13985), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (13972, 13985), False, 'import logging\n'), ((1632, 1757), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)', 'universal_newlines': '(True)', 'bufsize': '(1)'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True, universal_newlines=True, bufsize=1)\n', (1648, 1757), False, 'import subprocess\n'), ((10339, 10390), 'gzip.open', 'gz.open', (['compressed_filename', '"""wb"""'], 
{'compresslevel': '(6)'}), "(compressed_filename, 'wb', compresslevel=6)\n", (10346, 10390), True, 'import gzip as gz\n'), ((10424, 10457), 'shutil.copyfileobj', 'shutil.copyfileobj', (['in_fd', 'out_fd'], {}), '(in_fd, out_fd)\n', (10442, 10457), False, 'import shutil\n'), ((10647, 10670), 'gzip.open', 'gz.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (10654, 10670), True, 'import gzip as gz\n'), ((10689, 10722), 'shutil.copyfileobj', 'shutil.copyfileobj', (['in_fd', 'out_fd'], {}), '(in_fd, out_fd)\n', (10707, 10722), False, 'import shutil\n'), ((10859, 10882), 'gzip.open', 'gz.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (10866, 10882), True, 'import gzip as gz\n'), ((11195, 11243), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['raw_date', '"""%Y_%m_%d"""'], {}), "(raw_date, '%Y_%m_%d')\n", (11221, 11243), False, 'import datetime\n'), ((6038, 6064), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6054, 6064), False, 'import os\n'), ((9591, 9612), 'os.listdir', 'os.listdir', (['fastq_dir'], {}), '(fastq_dir)\n', (9601, 9612), False, 'import os\n'), ((11281, 11338), 'argparse.ArgumentTypeError', 'ArgumentTypeError', (['"""expected string in format YYYY_MM_DD"""'], {}), "('expected string in format YYYY_MM_DD')\n", (11298, 11338), False, 'from argparse import ArgumentTypeError\n'), ((5035, 5048), 'copy.deepcopy', 'deepcopy', (['obj'], {}), '(obj)\n', (5043, 5048), False, 'from copy import deepcopy\n'), ((5371, 5400), 'typing.cast', 'cast', (['ValuesView[str]', 'values'], {}), '(ValuesView[str], values)\n', (5375, 5400), False, 'from typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ValuesView, cast\n')]
|
import csv
from typing import List
from CombinedPopulation import CombinedPopulation
from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents
class DistrictVotingRecord:
    """Voting history for a single congressional district.

    The ``lean`` attribute is the average Republican lean over the two
    recorded election cycles, in percentage points (positive favors
    Republicans, negative favors Democrats).
    """

    def __init__(self,
                 district: str,
                 incumbent: str,
                 expected_lean: float,
                 d1: float, r1: float,
                 d2: float, r2: float):
        self.district = district
        self.incumbent = incumbent
        self.expected_lean = expected_lean
        self.d1 = d1
        self.r1 = r1
        self.d2 = d2
        self.r2 = r2
        lean_one = .5 - d1 / (d1 + r1)
        lean_two = .5 - d2 / (d2 + r2)
        self.lean = 100 * (lean_one + lean_two) / 2

    def print(self) -> None:
        """Print a one-line summary: district, incumbent, lean."""
        print("%6s %25s % 5.2f" % (self.district, self.incumbent, self.lean))

    def population(self, partisanship: float, skew_factor: float, stddev: float) -> CombinedPopulation:
        """Build a CombinedPopulation modeling this district's electorate."""
        r_pct = (self.r1 + self.r2) / 2 / 100
        d_pct = (self.d1 + self.d2) / 2 / 100
        # Independents get a fixed 20% weight; partisan weights are floored
        # at 5% so no group ever disappears entirely.
        i_weight = .20
        r_weight = max(0.05, (1 - i_weight) * r_pct)
        d_weight = max(0.05, (1 - i_weight) * d_pct)
        skew = (r_weight - d_weight) / 2.0 * skew_factor * 100
        rep = PopulationGroup(Republicans, partisanship + skew, stddev, r_weight, 12)
        dem = PopulationGroup(Democrats, -partisanship + skew, stddev, d_weight, -12)
        ind = PopulationGroup(Independents, 0 + skew, stddev, i_weight, 0)
        return CombinedPopulation([rep, dem, ind])
return CombinedPopulation([rep, dem, ind])
class DistrictData:
    """Loads district voting records from a CSV file, keyed by district."""

    def __init__(self, path: str):
        self.path = path
        self.dvr = {}
        with open(path) as csv_file:
            for row in csv.reader(csv_file, delimiter=","):
                if row[0] == 'district':
                    continue  # skip the header row
                record = self.parse_row(row)
                self.dvr[record.district] = record

    def parse_row(self, row: List[str]) -> DistrictVotingRecord:
        """Convert one CSV row into a DistrictVotingRecord.

        The lean column uses 'EVEN', 'R+x' or 'D+x'; a 'null' in the
        second-cycle column means the first cycle's numbers are reused.
        """
        raw_lean = row[2]
        if raw_lean == 'EVEN':
            lean = 0
        elif raw_lean[0] == 'R':
            lean = float(raw_lean[2:])
        else:
            lean = -float(raw_lean[2:])
        d1 = float(row[3])
        r1 = float(row[4])
        if row[5] == 'null':
            d2, r2 = d1, r1
        else:
            d2 = float(row[5])
            r2 = float(row[6])
        return DistrictVotingRecord(row[0], row[1], lean, d1, r1, d2, r2)
def main(path: str = "data-5vPn3.csv") -> None:
    """Load district data from *path* and print every record.

    Args:
        path: CSV file of district voting records; defaults to the
            previously hard-coded dataset for backward compatibility.
    """
    district_data = DistrictData(path)
    print("got dd")
    # Iterate values directly: the keys are not needed here.
    for record in district_data.dvr.values():
        record.print()


if __name__ == "__main__":
    main()
|
[
"CombinedPopulation.CombinedPopulation",
"PopulationGroup.PopulationGroup",
"csv.reader"
] |
[((1225, 1296), 'PopulationGroup.PopulationGroup', 'PopulationGroup', (['Republicans', '(partisanship + skew)', 'stddev', 'r_weight', '(12)'], {}), '(Republicans, partisanship + skew, stddev, r_weight, 12)\n', (1240, 1296), False, 'from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents\n'), ((1311, 1382), 'PopulationGroup.PopulationGroup', 'PopulationGroup', (['Democrats', '(-partisanship + skew)', 'stddev', 'd_weight', '(-12)'], {}), '(Democrats, -partisanship + skew, stddev, d_weight, -12)\n', (1326, 1382), False, 'from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents\n'), ((1397, 1457), 'PopulationGroup.PopulationGroup', 'PopulationGroup', (['Independents', '(0 + skew)', 'stddev', 'i_weight', '(0)'], {}), '(Independents, 0 + skew, stddev, i_weight, 0)\n', (1412, 1457), False, 'from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents\n'), ((1473, 1508), 'CombinedPopulation.CombinedPopulation', 'CombinedPopulation', (['[rep, dem, ind]'], {}), '([rep, dem, ind])\n', (1491, 1508), False, 'from CombinedPopulation import CombinedPopulation\n'), ((1675, 1710), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1685, 1710), False, 'import csv\n')]
|
# coding=utf-8
from actrie.tests.test_matcher import test
if __name__ == "__main__":
    # Run the actrie matcher test suite when invoked directly.
    test()
|
[
"actrie.tests.test_matcher.test"
] |
[((91, 97), 'actrie.tests.test_matcher.test', 'test', ([], {}), '()\n', (95, 97), False, 'from actrie.tests.test_matcher import test\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email.headerregistry import Address
from email.message import EmailMessage as RawEmailMessage
from email.utils import parseaddr
from typing import Optional
import premailer
from jinja2.exceptions import TemplateNotFound
from pyramid.renderers import render
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from zope.interface import implementer
from warehouse.email.interfaces import IEmailSender
from warehouse.email.ses.models import EmailMessage as SESEmailMessage
def _format_sender(sitename, sender):
if sender is not None:
return str(Address(sitename, addr_spec=sender))
class EmailMessage:
    """An email's subject plus its text body and optional HTML body."""

    def __init__(self, subject: str, body_text: str, body_html: Optional[str] = None):
        self.subject = subject
        self.body_text = body_text
        self.body_html = body_html

    @classmethod
    def from_template(cls, email_name, context, *, request):
        """Render the named template set into an EmailMessage.

        A missing body.html template simply yields a text-only message.
        """
        subject = render(f"email/{email_name}/subject.txt", context, request=request)
        body_text = render(f"email/{email_name}/body.txt", context, request=request)
        body_html = None
        try:
            rendered_html = render(
                f"email/{email_name}/body.html", context, request=request
            )
            # Inline the CSS so the markup survives email clients.
            body_html = premailer.Premailer(rendered_html, remove_classes=True).transform()
        # Catching TemplateNotFound here is a bit of a leaky abstraction, but there's
        # not much we can do about it.
        except TemplateNotFound:
            pass
        return cls(subject=subject, body_text=body_text, body_html=body_html)
@implementer(IEmailSender)
class SMTPEmailSender:
    """IEmailSender backed by pyramid_mailer's SMTP transport."""

    def __init__(self, mailer, sender=None):
        self.mailer = mailer
        self.sender = sender

    @classmethod
    def create_service(cls, context, request):
        """Build the sender from registry settings (site name + mail.sender)."""
        sitename = request.registry.settings["site.name"]
        from_address = _format_sender(
            sitename, request.registry.settings.get("mail.sender")
        )
        return cls(get_mailer(request), sender=from_address)

    def send(self, recipient, message):
        """Deliver *message* to *recipient* immediately over SMTP."""
        smtp_message = Message(
            subject=message.subject,
            body=message.body_text,
            html=message.body_html,
            recipients=[recipient],
            sender=self.sender,
        )
        self.mailer.send_immediately(smtp_message)

    def last_sent(self, to, subject):
        # Previously sent emails are not stored, so there is nothing to
        # compare against.
        return None
@implementer(IEmailSender)
class SESEmailSender:
    """IEmailSender that delivers through AWS SES and records each send."""
    def __init__(self, client, *, sender=None, db):
        # SES client, default From address, and database session.
        self._client = client
        self._sender = sender
        self._db = db
    @classmethod
    def create_service(cls, context, request):
        """Build the sender from registry settings and the AWS session service."""
        sitename = request.registry.settings["site.name"]
        sender = _format_sender(sitename, request.registry.settings.get("mail.sender"))
        aws_session = request.find_service(name="aws.session")
        return cls(
            aws_session.client(
                "ses", region_name=request.registry.settings.get("mail.region")
            ),
            sender=sender,
            db=request.db,
        )
    def send(self, recipient, message):
        """Send *message* to *recipient* via SES and record it in the database."""
        raw = RawEmailMessage()
        raw["Subject"] = message.subject
        raw["From"] = self._sender
        raw["To"] = recipient
        raw.set_content(message.body_text)
        if message.body_html:
            # multipart/alternative: clients fall back to the text part.
            raw.add_alternative(message.body_html, subtype="html")
        resp = self._client.send_raw_email(
            Source=self._sender,
            Destinations=[recipient],
            RawMessage={"Data": bytes(raw)},
        )
        # Persist the SES message id so later delivery events can be
        # correlated with this send.
        self._db.add(
            SESEmailMessage(
                message_id=resp["MessageId"],
                from_=parseaddr(self._sender)[1],
                to=parseaddr(recipient)[1],
                subject=message.subject,
            )
        )
    def last_sent(self, to, subject):
        """Return when an email with this recipient and subject was last
        sent, or None when no such email was recorded."""
        last_email = (
            self._db.query(SESEmailMessage)
            .filter(
                SESEmailMessage.to == to,
                SESEmailMessage.subject == subject,
            )
            .order_by(SESEmailMessage.created.desc())
            .first()
        )
        if last_email:
            return last_email.created
class ConsoleAndSMTPEmailSender(SMTPEmailSender):
    """SMTP sender that additionally echoes every sent email to stdout.

    The printed hint points at http://localhost:1080 -- presumably a local
    mail-catching web UI used during development; confirm against the dev
    environment setup.
    """

    def send(self, recipient, message):
        # Deliver normally over SMTP first...
        super().send(recipient=recipient, message=message)
        # ...then print a summary for the developer console.
        print(
            f"""Email sent
Subject: {message.subject}
From: {self.sender}
To: {recipient}
HTML: Visualize at http://localhost:1080
Text: {message.body_text}"""
        )
|
[
"pyramid.renderers.render",
"pyramid_mailer.message.Message",
"email.utils.parseaddr",
"premailer.Premailer",
"zope.interface.implementer",
"email.headerregistry.Address",
"warehouse.email.ses.models.EmailMessage.created.desc",
"pyramid_mailer.get_mailer",
"email.message.EmailMessage"
] |
[((2123, 2148), 'zope.interface.implementer', 'implementer', (['IEmailSender'], {}), '(IEmailSender)\n', (2134, 2148), False, 'from zope.interface import implementer\n'), ((3003, 3028), 'zope.interface.implementer', 'implementer', (['IEmailSender'], {}), '(IEmailSender)\n', (3014, 3028), False, 'from zope.interface import implementer\n'), ((1479, 1546), 'pyramid.renderers.render', 'render', (['f"""email/{email_name}/subject.txt"""', 'context'], {'request': 'request'}), "(f'email/{email_name}/subject.txt', context, request=request)\n", (1485, 1546), False, 'from pyramid.renderers import render\n'), ((1567, 1631), 'pyramid.renderers.render', 'render', (['f"""email/{email_name}/body.txt"""', 'context'], {'request': 'request'}), "(f'email/{email_name}/body.txt', context, request=request)\n", (1573, 1631), False, 'from pyramid.renderers import render\n'), ((3727, 3744), 'email.message.EmailMessage', 'RawEmailMessage', ([], {}), '()\n', (3742, 3744), True, 'from email.message import EmailMessage as RawEmailMessage\n'), ((1135, 1170), 'email.headerregistry.Address', 'Address', (['sitename'], {'addr_spec': 'sender'}), '(sitename, addr_spec=sender)\n', (1142, 1170), False, 'from email.headerregistry import Address\n'), ((1670, 1735), 'pyramid.renderers.render', 'render', (['f"""email/{email_name}/body.html"""', 'context'], {'request': 'request'}), "(f'email/{email_name}/body.html', context, request=request)\n", (1676, 1735), False, 'from pyramid.renderers import render\n'), ((2505, 2524), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (2515, 2524), False, 'from pyramid_mailer import get_mailer\n'), ((2632, 2761), 'pyramid_mailer.message.Message', 'Message', ([], {'subject': 'message.subject', 'body': 'message.body_text', 'html': 'message.body_html', 'recipients': '[recipient]', 'sender': 'self.sender'}), '(subject=message.subject, body=message.body_text, html=message.\n body_html, recipients=[recipient], sender=self.sender)\n', (2639, 2761), 
False, 'from pyramid_mailer.message import Message\n'), ((1790, 1841), 'premailer.Premailer', 'premailer.Premailer', (['body_html'], {'remove_classes': '(True)'}), '(body_html, remove_classes=True)\n', (1809, 1841), False, 'import premailer\n'), ((4677, 4707), 'warehouse.email.ses.models.EmailMessage.created.desc', 'SESEmailMessage.created.desc', ([], {}), '()\n', (4705, 4707), True, 'from warehouse.email.ses.models import EmailMessage as SESEmailMessage\n'), ((4283, 4306), 'email.utils.parseaddr', 'parseaddr', (['self._sender'], {}), '(self._sender)\n', (4292, 4306), False, 'from email.utils import parseaddr\n'), ((4330, 4350), 'email.utils.parseaddr', 'parseaddr', (['recipient'], {}), '(recipient)\n', (4339, 4350), False, 'from email.utils import parseaddr\n')]
|
from typing import Optional, List
from pathlib import Path
from dataclasses import astuple
import re
from pydantic import BaseModel, Field, Extra, validator
from pydantic.dataclasses import dataclass
from woke.core.enums import EvmVersionEnum
from woke.c_regex_parsing.solidity_version import SolidityVersion
class WokeConfigModel(BaseModel):
    """Common base for Woke configuration models: immutable, unknown keys rejected."""

    class Config:
        # Config objects are read-only once constructed.
        allow_mutation = False
        json_encoders = {
            # Serialize SolidityVersion values as their string form.
            SolidityVersion: str,
        }
        # Reject keys that are not declared on the model.
        extra = Extra.forbid
@dataclass
class SolcRemapping:
    """A single solc import remapping of the form ``[context:]prefix=[target]``."""

    context: Optional[str]
    prefix: str
    target: Optional[str]

    def __iter__(self):
        # Yield (context, prefix, target) so a remapping unpacks like a tuple.
        return iter(astuple(self))

    def __str__(self):
        context = self.context or ""
        target = self.target or ""
        return f"{context}:{self.prefix}={target}"
class SolcWokeConfig(WokeConfigModel):
    """Settings controlling how Woke invokes the solc compiler."""

    allow_paths: List[Path] = []
    """Woke should set solc `--allow-paths` automatically. This option allows to specify additional allowed paths."""
    evm_version: Optional[EvmVersionEnum] = None
    """Version of the EVM to compile for. Leave unset to let the solc decide."""
    include_paths: List[Path] = []
    remappings: List[SolcRemapping] = []
    target_version: Optional[SolidityVersion] = None

    @validator("allow_paths", pre=True, each_item=True)
    def set_allow_path(cls, v):
        # Normalize each allow-path to an absolute, resolved Path.
        return Path(v).resolve()

    @validator("include_paths", pre=True, each_item=True)
    def set_include_path(cls, v):
        # Normalize each include-path to an absolute, resolved Path.
        return Path(v).resolve()

    @validator("remappings", pre=True, each_item=True)
    def set_remapping(cls, v):
        """Parse a solc remapping string (``[context:]prefix=[target]``).

        Raises ValueError for malformed input; pydantic converts it into a
        ValidationError exactly as it would an AssertionError, but unlike the
        previous ``assert`` this check survives ``python -O``.
        """
        if isinstance(v, SolcRemapping):
            return v
        remapping_re = re.compile(
            r"(?:(?P<context>[^:\s]+)?:)?(?P<prefix>[^\s=]+)=(?P<target>[^\s]+)?"
        )
        match = remapping_re.match(v)
        if match is None:
            raise ValueError(f"`{v}` is not a valid solc remapping.")
        groupdict = match.groupdict()
        context = groupdict["context"]
        prefix = groupdict["prefix"]
        target = groupdict["target"]
        return SolcRemapping(context, prefix, target)
class CompilerWokeConfig(WokeConfigModel):
    """The `compiler` configuration section; currently holds only solc settings."""

    solc: SolcWokeConfig = Field(default_factory=SolcWokeConfig)
class TopLevelWokeConfig(WokeConfigModel):
    """Root of the Woke configuration."""

    # Paths of additional configuration files (resolved to absolute paths below).
    subconfigs: List[Path] = []
    compiler: CompilerWokeConfig = Field(default_factory=CompilerWokeConfig)

    @validator("subconfigs", pre=True, each_item=True)
    def set_subconfig(cls, v):
        # Normalize each subconfig path to an absolute, resolved Path.
        return Path(v).resolve()
|
[
"re.compile",
"pathlib.Path",
"pydantic.Field",
"pydantic.validator",
"dataclasses.astuple"
] |
[((1212, 1262), 'pydantic.validator', 'validator', (['"""allow_paths"""'], {'pre': '(True)', 'each_item': '(True)'}), "('allow_paths', pre=True, each_item=True)\n", (1221, 1262), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((1334, 1386), 'pydantic.validator', 'validator', (['"""include_paths"""'], {'pre': '(True)', 'each_item': '(True)'}), "('include_paths', pre=True, each_item=True)\n", (1343, 1386), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((1460, 1509), 'pydantic.validator', 'validator', (['"""remappings"""'], {'pre': '(True)', 'each_item': '(True)'}), "('remappings', pre=True, each_item=True)\n", (1469, 1509), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((2108, 2145), 'pydantic.Field', 'Field', ([], {'default_factory': 'SolcWokeConfig'}), '(default_factory=SolcWokeConfig)\n', (2113, 2145), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((2258, 2299), 'pydantic.Field', 'Field', ([], {'default_factory': 'CompilerWokeConfig'}), '(default_factory=CompilerWokeConfig)\n', (2263, 2299), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((2306, 2355), 'pydantic.validator', 'validator', (['"""subconfigs"""'], {'pre': '(True)', 'each_item': '(True)'}), "('subconfigs', pre=True, each_item=True)\n", (2315, 2355), False, 'from pydantic import BaseModel, Field, Extra, validator\n'), ((1626, 1714), 're.compile', 're.compile', (['"""(?:(?P<context>[^:\\\\s]+)?:)?(?P<prefix>[^\\\\s=]+)=(?P<target>[^\\\\s]+)?"""'], {}), "(\n '(?:(?P<context>[^:\\\\s]+)?:)?(?P<prefix>[^\\\\s=]+)=(?P<target>[^\\\\s]+)?')\n", (1636, 1714), False, 'import re\n'), ((643, 656), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (650, 656), False, 'from dataclasses import astuple\n'), ((1310, 1317), 'pathlib.Path', 'Path', (['v'], {}), '(v)\n', (1314, 1317), False, 'from pathlib import Path\n'), ((1436, 1443), 'pathlib.Path', 'Path', (['v'], {}), '(v)\n', (1440, 1443), 
False, 'from pathlib import Path\n'), ((2402, 2409), 'pathlib.Path', 'Path', (['v'], {}), '(v)\n', (2406, 2409), False, 'from pathlib import Path\n')]
|
import tensorflow as tf
import tensorflow.contrib.layers as tfl
"""Copied from the almighty <NAME>;
CECAM/CSM/IRTG School 2018: Machine Learning in Scientific Computing
https://github.com/CECAML/school_nierstein_2018/blob/master/Convnet%20TF.ipynb
"""
def prelu(net):
    """Parametric ReLU: ``max(alpha * x, x)`` with a learnable scalar alpha (init 0)."""
    slope = tf.Variable(0.0, dtype=net.dtype)
    scaled = slope * net
    return tf.maximum(scaled, net)
def residual_conv_block(net, num_filters, kernel_size, stride, is_training=True):
    """Strided conv block with a pooled skip branch, channel-concatenated and PReLU-activated."""
    # Average-pool the input with the same stride so the skip branch matches
    # the (possibly downsampled) conv output spatially.
    skip = tfl.avg_pool2d(net, kernel_size, stride, padding="SAME")
    # Strided convolution (potential downsampling), then batch normalization.
    out = tfl.conv2d(net, num_filters, kernel_size, stride, activation_fn=tf.identity, padding="SAME")
    out = tfl.batch_norm(out, is_training=is_training, activation_fn=tf.identity)
    # Second convolution keeps the spatial resolution (stride 1).
    out = tfl.conv2d(out, num_filters, kernel_size, stride=1, activation_fn=tf.identity, padding="SAME")
    # Fuse the conv output with the skip branch along channels, then activate.
    return prelu(tf.concat((out, skip), axis=-1))
def network(X, Y):
    """Small residual CNN: four downsampling stages, global average pool, 10-way linear head.

    Note: ``Y`` is accepted but unused, matching the original signature.
    """
    net = tf.identity(X)
    # Four residual stages, doubling the filter count while halving resolution.
    for filters in (16, 32, 64, 128):
        net = residual_conv_block(net, filters, 3, 2)
    # Global average pooling over the spatial dimensions.
    net = tf.reduce_mean(net, axis=(1, 2))
    return tfl.fully_connected(net, 10, activation_fn=tf.identity)
|
[
"tensorflow.contrib.layers.batch_norm",
"tensorflow.contrib.layers.conv2d",
"tensorflow.Variable",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.identity",
"tensorflow.contrib.layers.avg_pool2d"
] |
[((284, 317), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'dtype': 'net.dtype'}), '(0.0, dtype=net.dtype)\n', (295, 317), True, 'import tensorflow as tf\n'), ((329, 357), 'tensorflow.maximum', 'tf.maximum', (['(alpha * net)', 'net'], {}), '(alpha * net, net)\n', (339, 357), True, 'import tensorflow as tf\n'), ((506, 562), 'tensorflow.contrib.layers.avg_pool2d', 'tfl.avg_pool2d', (['net', 'kernel_size', 'stride'], {'padding': '"""SAME"""'}), "(net, kernel_size, stride, padding='SAME')\n", (520, 562), True, 'import tensorflow.contrib.layers as tfl\n'), ((630, 726), 'tensorflow.contrib.layers.conv2d', 'tfl.conv2d', (['net', 'num_filters', 'kernel_size', 'stride'], {'activation_fn': 'tf.identity', 'padding': '"""SAME"""'}), "(net, num_filters, kernel_size, stride, activation_fn=tf.identity,\n padding='SAME')\n", (640, 726), True, 'import tensorflow.contrib.layers as tfl\n'), ((761, 832), 'tensorflow.contrib.layers.batch_norm', 'tfl.batch_norm', (['net'], {'is_training': 'is_training', 'activation_fn': 'tf.identity'}), '(net, is_training=is_training, activation_fn=tf.identity)\n', (775, 832), True, 'import tensorflow.contrib.layers as tfl\n'), ((891, 990), 'tensorflow.contrib.layers.conv2d', 'tfl.conv2d', (['net', 'num_filters', 'kernel_size'], {'stride': '(1)', 'activation_fn': 'tf.identity', 'padding': '"""SAME"""'}), "(net, num_filters, kernel_size, stride=1, activation_fn=tf.\n identity, padding='SAME')\n", (901, 990), True, 'import tensorflow.contrib.layers as tfl\n'), ((1067, 1081), 'tensorflow.identity', 'tf.identity', (['X'], {}), '(X)\n', (1078, 1081), True, 'import tensorflow as tf\n'), ((1275, 1307), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['net'], {'axis': '(1, 2)'}), '(net, axis=(1, 2))\n', (1289, 1307), True, 'import tensorflow as tf\n'), ((1318, 1373), 'tensorflow.contrib.layers.fully_connected', 'tfl.fully_connected', (['net', '(10)'], {'activation_fn': 'tf.identity'}), '(net, 10, activation_fn=tf.identity)\n', (1337, 1373), True, 'import 
tensorflow.contrib.layers as tfl\n'), ((1004, 1034), 'tensorflow.concat', 'tf.concat', (['(net, inp)'], {'axis': '(-1)'}), '((net, inp), axis=-1)\n', (1013, 1034), True, 'import tensorflow as tf\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.